code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 22:05:19 2019
@author: <NAME>
"""
import pandas as pd
from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout
from keras import applications
from keras.optimizers import SGD
from keras.models import Model
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard
from keras.layers.merge import add, concatenate
from config import height, width
import numpy as np
import cv2
#%%
# Transfer learning: VGG16 pretrained on ImageNet, convolutional base only
# (include_top=False) so a small ship-classification head can be attached.
model = applications.VGG16(weights="imagenet", include_top=False,
                           input_shape=(width, height, 3))
# Freeze the first 14 layers; only the last VGG block is fine-tuned.
for layer in model.layers[:14]:
    layer.trainable = False
model.summary()
# Classification head: Flatten -> Dense(32, relu)/Dropout stack -> 5-way softmax.
x = model.output
x = Flatten()(x)
x = Dense(32, activation="relu")(x)
x = Dropout(0.4)(x)
x = Dense(32, activation="relu")(x)
x = Dropout(0.4)(x)
x = Dense(32, activation="relu")(x)
dense = Dense(5, name="dense")(x)
y_pred = Activation('softmax', name='label')(dense)
# Use `inputs`/`outputs`: the singular `input`/`output` keyword aliases are
# deprecated legacy names in Keras and were removed in later versions.
model_final = Model(inputs=model.input, outputs=y_pred)
model_final.summary()
#%%
# Restore previously trained weights for the head/fine-tuned layers.
filepath = "gdrive/My Drive/Colab/Ship/Checkpoints_tl/weights.0.997a-0.019l.hdf5"
model_final.load_weights(filepath)
#%%
def read_one_image(filename):
    """Read an image from disk and resize it to the configured (width, height).

    Args:
        filename: Path to the image file.

    Returns:
        The resized image array as produced by OpenCV (channels in BGR order).

    Raises:
        FileNotFoundError: If the file cannot be read as an image.
    """
    image = cv2.imread(filename)
    if image is None:
        # cv2.imread returns None instead of raising on a missing/unreadable
        # file; fail early with a clear error rather than a cryptic one later.
        raise FileNotFoundError("Could not read image: {}".format(filename))
    # INTER_AREA is the recommended interpolation when shrinking images.
    # (The original also computed np.shape(image) twice into unused locals;
    # that dead code is removed.)
    return cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
#%%
# Load the test-set listing (one image filename per row).
test = pd.read_csv("gdrive/My Drive/Colab/Ship/Data/test_ApKoW4T.csv")
# Pre-allocate one (height, width, 3) slot per test image.
image_out = np.zeros((len(test), height, width, 3))
for i in range(0, len(test)):
    path = "gdrive/My Drive/Colab/Ship/Data/Image/" + test["image"][i]
    print(path)
    image_out[i] = read_one_image(path)
#%%
arr = model_final.predict(image_out)
# argmax over the 5 softmax outputs; the +1 shifts 0-based class indices to
# 1-based labels - presumably the competition's category numbering (verify).
predx = (np.argmax(arr, 1) + 1)
#%%
test["category"] = predx
test.to_csv("gdrive/My Drive/Colab/Ship/Data/Submission1.csv", index = False)
| [
"cv2.imread",
"keras.layers.Flatten",
"pandas.read_csv",
"cv2.resize",
"numpy.argmax",
"keras.models.Model",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.shape",
"keras.layers.Dropout",
"keras.applications.VGG16"
] | [((579, 673), 'keras.applications.VGG16', 'applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(width, height, 3)'}), "(weights='imagenet', include_top=False, input_shape=(\n width, height, 3))\n", (597, 673), False, 'from keras import applications\n'), ((1054, 1093), 'keras.models.Model', 'Model', ([], {'input': 'model.input', 'output': 'y_pred'}), '(input=model.input, output=y_pred)\n', (1059, 1093), False, 'from keras.models import Model\n'), ((1508, 1571), 'pandas.read_csv', 'pd.read_csv', (['"""gdrive/My Drive/Colab/Ship/Data/test_ApKoW4T.csv"""'], {}), "('gdrive/My Drive/Colab/Ship/Data/test_ApKoW4T.csv')\n", (1519, 1571), True, 'import pandas as pd\n'), ((781, 790), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (788, 790), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((799, 827), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (804, 827), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((836, 848), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (843, 848), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((857, 885), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (862, 885), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((894, 906), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (901, 906), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, 
Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((915, 943), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (920, 943), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((956, 978), 'keras.layers.Dense', 'Dense', (['(5)'], {'name': '"""dense"""'}), "(5, name='dense')\n", (961, 978), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((994, 1029), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {'name': '"""label"""'}), "('softmax', name='label')\n", (1004, 1029), False, 'from keras.layers import Input, TimeDistributed, Bidirectional, Conv2D, BatchNormalization, MaxPooling2D, Flatten, LSTM, Dense, Lambda, GRU, Activation, Dropout\n'), ((1300, 1320), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1310, 1320), False, 'import cv2\n'), ((1336, 1351), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (1344, 1351), True, 'import numpy as np\n'), ((1371, 1435), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (width, height), interpolation=cv2.INTER_AREA)\n', (1381, 1435), False, 'import cv2\n'), ((1453, 1468), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (1461, 1468), True, 'import numpy as np\n'), ((1847, 1864), 'numpy.argmax', 'np.argmax', (['arr', '(1)'], {}), '(arr, 1)\n', (1856, 1864), True, 'import numpy as np\n')] |
import unittest
from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack
from cantera import Solution, one_atm, gas_constant
import numpy as np
from spitfire import ChemicalMechanismSpec
from os.path import join, abspath
from subprocess import getoutput
# Directory holding the XML test mechanisms.
test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls'))
# Collect every *.xml file name (suffix stripped) to parametrize the tests.
# NOTE(review): shells out to `ls | grep`, which is not portable to Windows;
# glob would be the portable alternative.
mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')]
def validate_on_mechanism(mech, temperature, pressure, test_rhs=True, test_jac=True):
    """Validate Griffon's isochoric reactor RHS/Jacobian for one mechanism.

    The RHS is compared against one built from Cantera quantities; the Jacobian
    is compared against a central finite-difference approximation.

    Args:
        mech:        Mechanism name (xml file stem in test_mech_directory).
        temperature: Temperature [K] of the initial state.
        pressure:    Pressure [Pa] of the initial state.
        test_rhs:    If True, compare the Griffon RHS against Cantera.
        test_jac:    If True, compare the Griffon Jacobian against finite differences.

    Returns:
        bool: Pass/fail of the RHS check if test_rhs, otherwise of the Jacobian
              check. NOTE(review): when both flags are True only the RHS result
              is returned - the Jacobian result is computed but discarded.
    """
    xml = join(test_mech_directory, mech + '.xml')
    r = ChemicalMechanismSpec(xml, 'gas').griffon
    gas = Solution(xml)
    ns = gas.n_species
    T = temperature
    p = pressure
    # Equal mole fractions for all species (Cantera normalizes X internally).
    gas.TPX = T, p, ones(ns)
    y = gas.Y
    rho = gas.density_mass
    # Isochoric state vector: density, temperature, first ns-1 mass fractions.
    state = hstack((rho, T, y[:-1]))
    rhsGR = np.empty(ns + 1)
    rhsGRTemporary = np.empty(ns + 1)
    jacGR = np.empty((ns + 1) * (ns + 1))
    # The np.ndarray(1) arguments appear to be unused placeholder buffers
    # required by the Griffon call signature - confirm against the API.
    r.reactor_rhs_isochoric(state, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, rhsGR)
    r.reactor_jac_isochoric(state, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, 0, rhsGRTemporary, jacGR)
    # The Jacobian is returned flattened in column-major (Fortran) order.
    jacGR = jacGR.reshape((ns + 1, ns + 1), order='F')
    def cantera_rhs(rho_arg, T_arg, Y_arg):
        # Reference right-hand side computed directly from Cantera quantities.
        gas.TDY = T_arg, rho_arg, Y_arg
        w = gas.net_production_rates * gas.molecular_weights
        e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
        cv = gas.cv_mass
        rhs = zeros(ns + 1)
        rhs[0] = 0.  # density is constant in an isochoric reactor
        rhs[1] = - sum(w * e) / (rho_arg * cv)
        # NOTE(review): divides by the outer `rho`, not `rho_arg`; identical for
        # the single call below, but worth confirming if this helper is reused.
        rhs[2:] = w[:-1] / rho
        return rhs
    rhsCN = cantera_rhs(rho, T, y)
    if test_rhs:
        # Mixed relative/absolute error against a loose multiple of sqrt(eps).
        pass_rhs = max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps)
    if test_jac:
        # Central finite differences in density, temperature and mass fractions.
        jacFD = zeros((ns + 1, ns + 1))
        wm1 = zeros(ns + 1)
        wp1 = zeros(ns + 1)
        drho = 1.e-4
        dT = 1.e-2
        dY = 1.e-6
        # Column 0: sensitivity to density.
        state_m = hstack((rho - drho, T, y[:-1]))
        state_p = hstack((rho + drho, T, y[:-1]))
        r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
        r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
        jacFD[:, 0] = (- wm1 + wp1) / (2. * drho)
        # Column 1: sensitivity to temperature.
        state_m = hstack((rho, T - dT, y[:-1]))
        state_p = hstack((rho, T + dT, y[:-1]))
        r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
        r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
        jacFD[:, 1] = (- wm1 + wp1) / (2. * dT)
        for i in range(ns - 1):
            # Perturb species i while compensating with the last species so the
            # mass fractions still sum to one.
            y_m1, y_p1 = copy(y), copy(y)
            y_m1[i] += - dY
            y_m1[-1] -= - dY
            y_p1[i] += dY
            y_p1[-1] -= dY
            state_m = hstack((rho, T, y_m1[:-1]))
            state_p = hstack((rho, T, y_p1[:-1]))
            r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
            r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
            jacFD[:, 2 + i] = (- wm1 + wp1) / (2. * dY)
        gas.TDY = T, rho, y
        cv = gas.cv_mass
        # NOTE(review): cvi is built from standard_cp_R (a cp, not cv, quantity)
        # - confirm this is intentional.
        cvi = gas.standard_cp_R * gas_constant / gas.molecular_weights
        w = gas.net_production_rates * gas.molecular_weights
        e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
        # Finite-difference temperature sensitivities of production rates and cv.
        gas.TDY = T + dT, rho, y
        wp = gas.net_production_rates * gas.molecular_weights
        cvp = gas.cv_mass
        gas.TDY = T - dT, rho, y
        wm = gas.net_production_rates * gas.molecular_weights
        cvm = gas.cv_mass
        wsensT = (wp - wm) / (2. * dT)
        cvsensT = (cvp - cvm) / (2. * dT)
        # NOTE(review): jacFD11 and jacSemiFD11 are computed but never used below.
        jacFD11 = np.copy(jacFD[1, 1])
        jacSemiFD11 = - 1. / cv * (1. / rho * (sum(wsensT * e) + sum(cvi * w)) + cvsensT * rhsGR[1])
        pass_jac = max(abs(jacGR - jacFD) / (abs(jacGR) + 1.)) < 1.e-3
        if not pass_jac:
            # Dump both Jacobians and the significant relative differences.
            print('fd:')
            for i in range(ns + 1):
                for j in range(ns + 1):
                    print(f'{jacFD[i, j]:12.2e}', end=', ')
                print('')
            print('gr:')
            for i in range(ns + 1):
                for j in range(ns + 1):
                    print(f'{jacGR[i, j]:12.2e}', end=', ')
                print('')
            print('gr-fd:')
            for i in range(ns + 1):
                for j in range(ns + 1):
                    df = (jacGR[i, j] - jacFD[i, j]) / (abs(jacFD[i, j]) + 1.0)
                    if df > 1.e-3:
                        print(f'{df:12.2e}', end=', ')
                    else:
                        print(f'{"":16}', end=', ')
                print('')
            print('')
    if test_rhs:
        return pass_rhs
    if test_jac:
        return pass_jac
def create_test(m, T, p, test_rhs, test_jac):
    """Return a TestCase method asserting that mechanism `m` validates at (T, p)."""
    def test(self):
        outcome = validate_on_mechanism(m, T, p, test_rhs, test_jac)
        self.assertTrue(outcome)
    return test
class Accuracy(unittest.TestCase):
    """Empty TestCase; test methods are attached dynamically via setattr below."""
# Parametrize the tests over two temperatures and two pressures per mechanism.
temperature_dict = {'600K': 600., '1200K': 1200.}
pressure_dict = {'1atm': one_atm, '2atm': 2. * one_atm}
for mech in mechs:
    for temperature in temperature_dict:
        for pressure in pressure_dict:
            rhsname = 'test_rhs_' + mech + '_' + temperature + '_' + pressure
            jacname = 'test_jac_' + mech + '_' + temperature + '_' + pressure
            # NOTE(review): jsdname is built but never registered below.
            jsdname = 'test_jac_sparse_vs_dense_' + mech + '_' + temperature + '_' + pressure
            # Attach dynamically generated test methods to the Accuracy TestCase.
            setattr(Accuracy, rhsname, create_test(mech, temperature_dict[temperature], pressure_dict[pressure],
                                                    test_rhs=True, test_jac=False))
            if 'methane' not in mech:  # skip methane in the finite difference Jacobian test
                setattr(Accuracy, jacname, create_test(mech, temperature_dict[temperature], pressure_dict[pressure],
                                                       test_rhs=False, test_jac=True))
if __name__ == '__main__':
    unittest.main()
| [
"numpy.copy",
"subprocess.getoutput",
"numpy.abs",
"numpy.ones",
"numpy.hstack",
"spitfire.ChemicalMechanismSpec",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.ndarray",
"numpy.finfo",
"unittest.main",
"cantera.Solution"
] | [((293, 337), 'os.path.join', 'join', (['"""tests"""', '"""test_mechanisms"""', '"""old_xmls"""'], {}), "('tests', 'test_mechanisms', 'old_xmls')\n", (297, 337), False, 'from os.path import join, abspath\n'), ((546, 586), 'os.path.join', 'join', (['test_mech_directory', "(mech + '.xml')"], {}), "(test_mech_directory, mech + '.xml')\n", (550, 586), False, 'from os.path import join, abspath\n'), ((649, 662), 'cantera.Solution', 'Solution', (['xml'], {}), '(xml)\n', (657, 662), False, 'from cantera import Solution, one_atm, gas_constant\n'), ((808, 832), 'numpy.hstack', 'hstack', (['(rho, T, y[:-1])'], {}), '((rho, T, y[:-1]))\n', (814, 832), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((846, 862), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (854, 862), True, 'import numpy as np\n'), ((884, 900), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (892, 900), True, 'import numpy as np\n'), ((913, 942), 'numpy.empty', 'np.empty', (['((ns + 1) * (ns + 1))'], {}), '((ns + 1) * (ns + 1))\n', (921, 942), True, 'import numpy as np\n'), ((6027, 6042), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6040, 6042), False, 'import unittest\n'), ((596, 629), 'spitfire.ChemicalMechanismSpec', 'ChemicalMechanismSpec', (['xml', '"""gas"""'], {}), "(xml, 'gas')\n", (617, 629), False, 'from spitfire import ChemicalMechanismSpec\n'), ((745, 753), 'numpy.ones', 'ones', (['ns'], {}), '(ns)\n', (749, 753), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((984, 997), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (994, 997), True, 'import numpy as np\n'), ((1075, 1088), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (1085, 1088), True, 'import numpy as np\n'), ((1472, 1485), 'numpy.zeros', 'zeros', (['(ns + 1)'], {}), '(ns + 1)\n', (1477, 1485), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1789, 1812), 'numpy.zeros', 'zeros', (['(ns + 1, ns + 
1)'], {}), '((ns + 1, ns + 1))\n', (1794, 1812), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1827, 1840), 'numpy.zeros', 'zeros', (['(ns + 1)'], {}), '(ns + 1)\n', (1832, 1840), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1855, 1868), 'numpy.zeros', 'zeros', (['(ns + 1)'], {}), '(ns + 1)\n', (1860, 1868), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1947, 1978), 'numpy.hstack', 'hstack', (['(rho - drho, T, y[:-1])'], {}), '((rho - drho, T, y[:-1]))\n', (1953, 1978), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1997, 2028), 'numpy.hstack', 'hstack', (['(rho + drho, T, y[:-1])'], {}), '((rho + drho, T, y[:-1]))\n', (2003, 2028), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((2288, 2317), 'numpy.hstack', 'hstack', (['(rho, T - dT, y[:-1])'], {}), '((rho, T - dT, y[:-1]))\n', (2294, 2317), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((2336, 2365), 'numpy.hstack', 'hstack', (['(rho, T + dT, y[:-1])'], {}), '((rho, T + dT, y[:-1]))\n', (2342, 2365), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((3762, 3782), 'numpy.copy', 'np.copy', (['jacFD[1, 1]'], {}), '(jacFD[1, 1])\n', (3769, 3782), True, 'import numpy as np\n'), ((2076, 2089), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (2086, 2089), True, 'import numpy as np\n'), ((2171, 2184), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (2181, 2184), True, 'import numpy as np\n'), ((2413, 2426), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (2423, 2426), True, 'import numpy as np\n'), ((2508, 2521), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (2518, 2521), True, 'import numpy as np\n'), ((2811, 2838), 'numpy.hstack', 'hstack', (['(rho, T, y_m1[:-1])'], {}), '((rho, T, y_m1[:-1]))\n', (2817, 2838), False, 'from numpy import max, abs, ones, zeros, copy, 
sum, sqrt, hstack\n'), ((2861, 2888), 'numpy.hstack', 'hstack', (['(rho, T, y_p1[:-1])'], {}), '((rho, T, y_p1[:-1]))\n', (2867, 2888), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((379, 434), 'subprocess.getoutput', 'getoutput', (["('ls ' + test_mech_directory + ' | grep .xml')"], {}), "('ls ' + test_mech_directory + ' | grep .xml')\n", (388, 434), False, 'from subprocess import getoutput\n'), ((1525, 1535), 'numpy.sum', 'sum', (['(w * e)'], {}), '(w * e)\n', (1528, 1535), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((2662, 2669), 'numpy.copy', 'copy', (['y'], {}), '(y)\n', (2666, 2669), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((2671, 2678), 'numpy.copy', 'copy', (['y'], {}), '(y)\n', (2675, 2678), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((2940, 2953), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (2950, 2953), True, 'import numpy as np\n'), ((3039, 3052), 'numpy.ndarray', 'np.ndarray', (['(1)'], {}), '(1)\n', (3049, 3052), True, 'import numpy as np\n'), ((1680, 1698), 'numpy.abs', 'abs', (['(rhsGR - rhsCN)'], {}), '(rhsGR - rhsCN)\n', (1683, 1698), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((3907, 3925), 'numpy.abs', 'abs', (['(jacGR - jacFD)'], {}), '(jacGR - jacFD)\n', (3910, 3925), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1702, 1712), 'numpy.abs', 'abs', (['rhsCN'], {}), '(rhsCN)\n', (1705, 1712), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((1734, 1749), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1742, 1749), True, 'import numpy as np\n'), ((3830, 3845), 'numpy.sum', 'sum', (['(wsensT * e)'], {}), '(wsensT * e)\n', (3833, 3845), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((3848, 3860), 'numpy.sum', 'sum', (['(cvi * w)'], {}), '(cvi * w)\n', (3851, 
3860), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((3929, 3939), 'numpy.abs', 'abs', (['jacGR'], {}), '(jacGR)\n', (3932, 3939), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n'), ((4515, 4531), 'numpy.abs', 'abs', (['jacFD[i, j]'], {}), '(jacFD[i, j])\n', (4518, 4531), False, 'from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack\n')] |
'''
Plots accuracy orig vs. accuracy projected
'''
import argparse
import matplotlib
matplotlib.use('Agg')  # non-interactive backend; must be set before importing pyplot
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

sns.set()  # apply seaborn's default plot styling globally
def parse_args():
    """Parse command-line options.

    Returns:
        argparse.Namespace with a single `results` attribute: the path to the
        results file (default: 'results.txt').
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--results', default='results.txt')
    return cli.parse_args()
def main():
    """Scatter-plot original vs. projected accuracy and save to vqa_results.pdf.

    Each line of the results file is expected to end in a '/'-separated token;
    its first, second and last fields are parsed as the original, projected and
    constant (mean) accuracies respectively.
    """
    args = parse_args()
    with open(args.results) as f:
        results = [x.strip().split()[-1].split('/') for x in f.readlines()]
        results = [(float(x[0]), float(x[1]), float(x[-1])) for x in results]
    orig_acc = np.array([r[0] for r in results])
    proj_acc = np.array([r[1] for r in results])
    mean_acc = np.array([r[2] for r in results])
    print('Orig acc: {:.2f}, proj acc: {:.2f}, const acc: {:.2f}'.format(
        np.mean(orig_acc), np.mean(proj_acc), np.mean(mean_acc)))
    # Number of lines where the original model beats the projected one.
    orig_better = np.sum(orig_acc > proj_acc)
    plt.scatter(orig_acc, proj_acc)
    # Diagonal reference line: points above it mean projected beats original.
    plt.plot([0, 100], [0, 100], linestyle='--', linewidth=3, color='r')
    start = min(np.min(orig_acc) - 1, np.min(proj_acc) - 1)
    # Bug fix: the upper limit previously compared np.max(orig_acc) with
    # itself; it must span both series, mirroring `start` above.
    end = max(np.max(orig_acc) + 1, np.max(proj_acc) + 1)
    plt.xlim(start, end)
    plt.ylim(start, end)
    plt.xlabel('Accuracy Original (win rate={:.0f}%)'.format(orig_better / len(orig_acc) * 100))
    plt.ylabel('Accuracy Projected')
    plt.tight_layout()
    plt.savefig('vqa_results.pdf')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.mean",
"seaborn.set",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout",
"matplotlib.pypl... | [((85, 106), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (99, 106), False, 'import matplotlib\n'), ((180, 189), 'seaborn.set', 'sns.set', ([], {}), '()\n', (187, 189), True, 'import seaborn as sns\n'), ((223, 248), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (246, 248), False, 'import argparse\n'), ((594, 627), 'numpy.array', 'np.array', (['[r[0] for r in results]'], {}), '([r[0] for r in results])\n', (602, 627), True, 'import numpy as np\n'), ((643, 676), 'numpy.array', 'np.array', (['[r[1] for r in results]'], {}), '([r[1] for r in results])\n', (651, 676), True, 'import numpy as np\n'), ((692, 725), 'numpy.array', 'np.array', (['[r[2] for r in results]'], {}), '([r[2] for r in results])\n', (700, 725), True, 'import numpy as np\n'), ((884, 911), 'numpy.sum', 'np.sum', (['(orig_acc > proj_acc)'], {}), '(orig_acc > proj_acc)\n', (890, 911), True, 'import numpy as np\n'), ((916, 947), 'matplotlib.pyplot.scatter', 'plt.scatter', (['orig_acc', 'proj_acc'], {}), '(orig_acc, proj_acc)\n', (927, 947), True, 'import matplotlib.pyplot as plt\n'), ((952, 1020), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 100]', '[0, 100]'], {'linestyle': '"""--"""', 'linewidth': '(3)', 'color': '"""r"""'}), "([0, 100], [0, 100], linestyle='--', linewidth=3, color='r')\n", (960, 1020), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1163), 'matplotlib.pyplot.xlim', 'plt.xlim', (['start', 'end'], {}), '(start, end)\n', (1151, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1188), 'matplotlib.pyplot.ylim', 'plt.ylim', (['start', 'end'], {}), '(start, end)\n', (1176, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1322), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy Projected"""'], {}), "('Accuracy Projected')\n", (1300, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1327, 1345), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1343, 1345), True, 
'import matplotlib.pyplot as plt\n'), ((1350, 1380), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vqa_results.pdf"""'], {}), "('vqa_results.pdf')\n", (1361, 1380), True, 'import matplotlib.pyplot as plt\n'), ((808, 825), 'numpy.mean', 'np.mean', (['orig_acc'], {}), '(orig_acc)\n', (815, 825), True, 'import numpy as np\n'), ((827, 844), 'numpy.mean', 'np.mean', (['proj_acc'], {}), '(proj_acc)\n', (834, 844), True, 'import numpy as np\n'), ((846, 863), 'numpy.mean', 'np.mean', (['mean_acc'], {}), '(mean_acc)\n', (853, 863), True, 'import numpy as np\n'), ((1037, 1053), 'numpy.min', 'np.min', (['orig_acc'], {}), '(orig_acc)\n', (1043, 1053), True, 'import numpy as np\n'), ((1059, 1075), 'numpy.min', 'np.min', (['proj_acc'], {}), '(proj_acc)\n', (1065, 1075), True, 'import numpy as np\n'), ((1095, 1111), 'numpy.max', 'np.max', (['orig_acc'], {}), '(orig_acc)\n', (1101, 1111), True, 'import numpy as np\n'), ((1117, 1133), 'numpy.max', 'np.max', (['orig_acc'], {}), '(orig_acc)\n', (1123, 1133), True, 'import numpy as np\n')] |
"""A collection of Terrestrial Reference Frame sites
Description:
------------
"""
# Standard library imports
import collections
import abc
# External library imports
import numpy as np
# Where imports
from where.lib import config
from where.lib import exceptions
from where.lib import log
from where.apriori import trf
class Trf(collections.UserDict):
    """A collection of Terrestrial Reference Frame sites

    Sites are looked up lazily: accessing trf[key] for an unknown key triggers
    __missing__, which queries the prioritized factories and caches the result.
    """

    def __init__(self, time, reference_frames=None):
        """Set up a new Terrestrial Reference Frame object, does not parse any data

        Here we only set up the Trf-object. The individual sites are added to self.data lazily using the
        __missing__-method which is called for each key not found in self.data.

        Args:
            time (Time):               Time epochs for which to calculate positions.
            reference_frames (String): Prioritized list of reference frames
        """
        super().__init__()
        self.time = time
        # The configured value takes precedence; `reference_frames` is only the fallback default.
        self.reference_frames = config.tech.get("reference_frames", reference_frames).list

        # Add factory for each reference frame
        self._factories = list()
        for reference_frame in self.reference_frames:
            self._factories.append(trf.get_trf_factory(time, reference_frame))

    def __missing__(self, key):
        """A TRF site identified by key

        Searches the factories in priority order; the first factory that knows
        the site wins. Exits via log.fatal if no factory knows the site.
        """
        # Find site in the prioritized list of factories
        for factory in self._factories:
            try:
                site = factory.site(key)
                break  # Use site from this factory if it is found
            except exceptions.UnknownSiteError:
                continue  # If site is not in factory, continue to next factory
        else:  # Exit if site is not found in any factories
            log.fatal(f"Site {key} not found in the reference frames {', '.join(self.reference_frames)}")

        # Add site to cache
        self.data[key] = site

        return site

    @property
    def sites(self):
        """List all sites available in the reference frames
        """
        sites = set()
        for factory in self._factories:
            sites.update(factory.sites)

        return sorted(sites)

    def closest(self, pos, max_distance=None):
        """Find site closest to the given position

        Args:
            pos (Array):          3-vector with x, y and z-coordinates.
            max_distance (float): Maximum distance around `pos` to look for a site [meter].

        Returns:
            TrfSite: Site closest to the given position. Raises `ValueError` if no site is found within `max_distance`.
        """
        # Only real (non-dummy) sites are considered.
        distances = {k: self[k].distance_to(pos) for k in self.sites if self[k].real}
        closest = min(distances, key=distances.get)

        # Check that distance is within threshold
        if not max_distance:
            return self[closest]

        if distances[closest] < max_distance:
            return self[closest]
        else:
            raise ValueError(
                "No site found within {} meters of ({:.2f}, {:.2f}, {:.2f}) in '{!r}'"
                "".format(max_distance, *np.mean(pos.itrs, axis=0), self)
            )

    def named_site(self, name):
        """Find site with given name

        Linear scan over all sites; instantiates each candidate via __missing__.
        """
        for k in self.sites:
            if name == self[k].name:
                return self[k]

        raise ValueError(f"No site found with name {name} in '{self!r}'")

    #
    # Dunder-methods
    #
    def __str__(self):
        return "Reference frame based on {}".format(", ".join(str(f) for f in self._factories))

    def __repr__(self):
        reference_frames = ", ".join(str(f) for f in self._factories)
        return f"{type(self).__name__}({self.time!r}, {reference_frames!r})"

    def __contains__(self, item):
        # Check the cache first, then the (possibly slower) factory site lists.
        if item in self.data:
            return True
        else:
            return item in self.sites

    def __iter__(self):
        # Iterate over site objects (not keys), instantiating lazily as needed.
        for site in self.sites:
            yield self[site]
class TrfFactory(abc.ABC):
    """Abstract base class for reference frame factories.

    Subclasses provide the raw reference frame data (_read_data) and the
    computation of ITRS positions for individual sites (_calculate_pos_trs).
    """

    def __init__(self, time, version=None):
        # The factory name is derived from the defining module's file name.
        self.name = self.__class__.__module__.split(".")[-1]
        self.time = time
        self.version = version
        self._data = None

    @property
    def data(self):
        """Data needed by this Reference Frame, lazily read by self._read_data when needed
        """
        if self._data is None:
            self._data = self._read_data()

        return self._data

    @property
    def sites(self):
        """List of all sites known by this reference frame
        """
        return self.data.keys()

    def site(self, key):
        """Positions and information about one site in the reference frame

        Args:
            key (String):  Key specifying which site to calculate position for.

        Returns:
            TrfSite:  Object with positions and information about site.

        Raises:
            exceptions.UnknownSiteError: If `key` is not in this reference frame.
        """
        if key not in self.data:
            raise exceptions.UnknownSiteError(f"Unknown site {key} in reference frame '{self}'")

        pos_trs = self._calculate_pos_trs(key)
        site_info = self.data[key]
        # Forward all per-site info from the data dictionary as keyword arguments.
        trf_site = TrfSite(key, time=self.time, pos=pos_trs, source=str(self), **site_info)
        log.debug(f"Found site {trf_site}")

        return trf_site

    #
    # Abstract methods, must be implemented by subclasses
    #
    @abc.abstractmethod
    def _read_data(self):
        """Read data needed by this Reference Frame for calculating positions of sites

        Returns:
            Dict: Dictionary containing data about each site defined in this reference frame.
        """

    @abc.abstractmethod
    def _calculate_pos_trs(self, site):
        """Calculate ITRS position of one site

        Args:
            site (String): Key specifying which site to calculate position for, must be key in self.data.

        Returns:
            Array: Positions, one 3-vector for each time epoch.
        """

    #
    # Dunder methods
    #
    def __str__(self):
        """String representation of factory recreates 'name:version'-string
        """
        if self.version:
            return f"{self.name}:{self.version}"
        else:
            return self.name

    def __repr__(self):
        return f"{type(self).__name__}({self.time!r}, {self.version!r})"
class TrfSite:
    """Position and metadata for a single reference frame site."""

    def __init__(self, key, time, pos, source, real=True, name=None, **meta_args):
        """Constructor

        Args:
            key:        Unique identifier for the site
            time:       Epochs the site has observations for
            pos:        The position (in a TRF) of the site at each epoch
            source:     The source of the position information
            real:       Flag indicating whether the site is a real site or a dummy site
            name:       Human-readable name; defaults to `key` when not given
            meta_args:  Additional meta information
        """
        self.key = key
        self.time = time
        self.pos = pos
        self.source = source
        self.real = real
        self.meta = meta_args
        # Bug fix: the original did `meta_args.get(name, self.key)` with the
        # *variable* `name` (None here) as the dictionary key. Since **kwargs
        # keys are always strings, that lookup could never hit and always fell
        # back to `self.key`; write the equivalent intent directly.
        self.name = name if name is not None else self.key

    def distance_to(self, other_pos):
        """Calculate distance to some other position

        Args:
            other_pos (Array): Other position, either as 1x3 array or nx3 with length equal to self.time.

        Returns:
            Distance for a single epoch, or the mean distance over all epochs.
            NOTE(review): the multi-epoch branch returns the *mean* (a scalar),
            not one distance per epoch - confirm callers expect that.
        """
        if self.time.size == 1:
            return np.linalg.norm(self.pos.trs - other_pos)
        return np.linalg.norm(self.pos.trs - other_pos, axis=1).mean()

    #
    # Dunder methods
    #
    def __repr__(self):
        # Summarize multi-epoch positions by their mean for readability.
        if self.pos.trs.ndim == 1:
            pos = self.pos.trs
        else:
            pos = self.pos.trs.mean(axis=0)
        pos_str = "({:.2f}, {:.2f}, {:.2f})".format(*pos)
        return f"{type(self).__name__}({self.name!r}, {pos_str}, {self.source!r})"
| [
"numpy.mean",
"where.apriori.trf.get_trf_factory",
"where.lib.log.debug",
"where.lib.exceptions.UnknownSiteError",
"numpy.linalg.norm",
"where.lib.config.tech.get"
] | [((5207, 5242), 'where.lib.log.debug', 'log.debug', (['f"""Found site {trf_site}"""'], {}), "(f'Found site {trf_site}')\n", (5216, 5242), False, 'from where.lib import log\n'), ((1023, 1076), 'where.lib.config.tech.get', 'config.tech.get', (['"""reference_frames"""', 'reference_frames'], {}), "('reference_frames', reference_frames)\n", (1038, 1076), False, 'from where.lib import config\n'), ((4945, 5023), 'where.lib.exceptions.UnknownSiteError', 'exceptions.UnknownSiteError', (['f"""Unknown site {key} in reference frame \'{self}\'"""'], {}), '(f"Unknown site {key} in reference frame \'{self}\'")\n', (4972, 5023), False, 'from where.lib import exceptions\n'), ((7428, 7468), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.pos.trs - other_pos)'], {}), '(self.pos.trs - other_pos)\n', (7442, 7468), True, 'import numpy as np\n'), ((1252, 1294), 'where.apriori.trf.get_trf_factory', 'trf.get_trf_factory', (['time', 'reference_frame'], {}), '(time, reference_frame)\n', (1271, 1294), False, 'from where.apriori import trf\n'), ((7484, 7532), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.pos.trs - other_pos)'], {'axis': '(1)'}), '(self.pos.trs - other_pos, axis=1)\n', (7498, 7532), True, 'import numpy as np\n'), ((3150, 3175), 'numpy.mean', 'np.mean', (['pos.itrs'], {'axis': '(0)'}), '(pos.itrs, axis=0)\n', (3157, 3175), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
# -----------------------------------
# Regression
# -----------------------------------
# rmse
from sklearn.metrics import mean_squared_error

# y_true are the true values, y_pred are the predictions
y_true = [1.0, 1.5, 2.0, 1.2, 1.8]
y_pred = [0.8, 1.5, 1.8, 1.3, 3.0]

# RMSE is the square root of the mean squared error
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
print(rmse)
# 0.5532
# -----------------------------------
# Binary classification
# -----------------------------------
# Confusion matrix
from sklearn.metrics import confusion_matrix

# True values and predicted values are binary, i.e. either 0 or 1
y_true = [1, 0, 1, 1, 0, 1, 1, 0]
y_pred = [0, 0, 1, 1, 0, 0, 1, 1]

# Count the four outcomes by element-wise comparison of the label vectors
tp = np.sum((np.array(y_true) == 1) & (np.array(y_pred) == 1))
tn = np.sum((np.array(y_true) == 0) & (np.array(y_pred) == 0))
fp = np.sum((np.array(y_true) == 0) & (np.array(y_pred) == 1))
fn = np.sum((np.array(y_true) == 1) & (np.array(y_pred) == 0))

# Layout here is [[tp, fp], [fn, tn]]
confusion_matrix1 = np.array([[tp, fp],
                              [fn, tn]])
print(confusion_matrix1)
# array([[3, 1],
#        [2, 2]])

# Can also be created using the confusion_matrix() function from scikit-learn's metrics, but
# be aware that the arrangement of the confusion matrix elements may be different
# (scikit-learn uses [[tn, fp], [fn, tp]])
confusion_matrix2 = confusion_matrix(y_true, y_pred)
print(confusion_matrix2)
# array([[2, 1],
#        [2, 3]])
# -----------------------------------
# accuracy
from sklearn.metrics import accuracy_score

# True values and predicted values are binary, i.e. either 0 or 1
y_true = [1, 0, 1, 1, 0, 1, 1, 0]
y_pred = [0, 0, 1, 1, 0, 0, 1, 1]

# Fraction of predictions that match the true labels (5 of 8 here)
accuracy = accuracy_score(y_true, y_pred)
print(accuracy)
# 0.625
# -----------------------------------
# logloss
from sklearn.metrics import log_loss

# True values are binary (0 or 1), predicted values are probabilities
y_true = [1, 0, 1, 1, 0, 1]
y_prob = [0.1, 0.2, 0.8, 0.8, 0.1, 0.3]

logloss = log_loss(y_true, y_prob)
print(logloss)
# 0.7136
# -----------------------------------
# Multi-class classification
# -----------------------------------
# multi-class logloss
from sklearn.metrics import log_loss

# True values are 3-class labels, predicted values are probabilities for each class
# (each row sums to 1)
y_true = np.array([0, 2, 1, 2, 2])
y_pred = np.array([[0.68, 0.32, 0.00],
                   [0.00, 0.00, 1.00],
                   [0.60, 0.40, 0.00],
                   [0.00, 0.00, 1.00],
                   [0.28, 0.12, 0.60]])
logloss = log_loss(y_true, y_pred)
print(logloss)
# 0.3626
# -----------------------------------
# Multi-label classification
# -----------------------------------
# mean_f1, macro_f1, micro_f1
from sklearn.metrics import f1_score
# For calculating performance metric of multi-label classification, it is easier to handle the true / predicted values as binary matrices of record x class
# True values - [[1,2], [1], [1,2,3], [2,3], [3]]
y_true = np.array([[1, 1, 0],
                   [1, 0, 0],
                   [1, 1, 1],
                   [0, 1, 1],
                   [0, 0, 1]])
# Predicted values - [[1,3], [2], [1,3], [3], [3]]
y_pred = np.array([[1, 0, 1],
                   [0, 1, 0],
                   [1, 0, 1],
                   [0, 0, 1],
                   [0, 0, 1]])
# mean_f1: average of the per-record (row-wise) F1-scores
mean_f1 = np.mean([f1_score(t_row, p_row) for t_row, p_row in zip(y_true, y_pred)])
# macro_f1: average of the per-class (column-wise) F1-scores
n_class = 3
macro_f1 = np.mean([f1_score(t_col, p_col) for t_col, p_col in zip(y_true.T, y_pred.T)])
# micro_f1: a single F1-score over every record-class pair
micro_f1 = f1_score(y_true.ravel(), y_pred.ravel())
print(mean_f1, macro_f1, micro_f1)
# 0.5933, 0.5524, 0.6250
# Can also be calculated using a scikit-learn function
mean_f1 = f1_score(y_true, y_pred, average='samples')
macro_f1 = f1_score(y_true, y_pred, average='macro')
micro_f1 = f1_score(y_true, y_pred, average='micro')
# -----------------------------------
# Multi-class classification with ordered classes
# -----------------------------------
# quadratic weighted kappa
from sklearn.metrics import confusion_matrix, cohen_kappa_score
# Function for calculating quadratic weighted kappa
def quadratic_weighted_kappa(c_matrix):
    """Compute quadratic weighted kappa from a confusion matrix.

    Params:
        c_matrix (np.ndarray): square confusion matrix, rows = true class,
            columns = predicted class.
    Returns:
        float: 1 - (weighted observed disagreement / weighted expected
        disagreement). 1.0 means perfect agreement.
    """
    numer = 0.0
    denom = 0.0
    # Hoist the marginal sums out of the double loop; the original recomputed
    # row sum, column sum and total for every (i, j) pair.
    row_sums = c_matrix.sum(axis=1)
    col_sums = c_matrix.sum(axis=0)
    total = c_matrix.sum()
    for i in range(c_matrix.shape[0]):
        for j in range(c_matrix.shape[1]):
            wij = (i - j) ** 2.0                      # quadratic distance weight
            oij = c_matrix[i, j]                      # observed count
            eij = row_sums[i] * col_sums[j] / total   # expected count under independence
            numer += wij * oij
            denom += wij * eij
    return 1.0 - numer / denom
# y_true is the true class list, y_pred is the predicted class list
y_true = [1, 2, 3, 4, 3]
y_pred = [2, 2, 4, 4, 5]
# Calculate the confusion matrix
# labels fixes the row/column order so the matrix covers all 5 classes.
c_matrix = confusion_matrix(y_true, y_pred, labels=[1, 2, 3, 4, 5])
# Calculate quadratic weighted kappa
kappa = quadratic_weighted_kappa(c_matrix)
print(kappa)
# 0.6153
# Can also be calculated using a scikit-learn function
# weights='quadratic' makes cohen_kappa_score match the manual version above.
kappa = cohen_kappa_score(y_true, y_pred, weights='quadratic')
# -----------------------------------
# Recommendation
# -----------------------------------
# MAP@K
# K=3, with 5 records and 4 class types
# K is the cutoff used by apk()/mapk() below.
K = 3
# True values for each record
y_true = [[1, 2], [1, 2], [4], [1, 2, 3, 4], [3, 4]]
# Predicted values for each record - as K=3, usually predict order of 3 records for each class
y_pred = [[1, 2, 4], [4, 1, 2], [1, 4, 3], [1, 2, 3], [1, 2, 4]]
# Function to calculate the average precision for each record
def apk(y_i_true, y_i_pred, k=None):
    """Average precision at k for a single record.

    Params:
        y_i_true: collection of relevant (true) items.
        y_i_pred: ordered predictions; at most k items, all unique.
        k (int): cutoff. Defaults to the module-level K, preserving the
            original call signature apk(y_i_true, y_i_pred).
    Returns:
        float: average precision in [0, 1].
    """
    if k is None:
        k = K  # fall back to the module-level cutoff, as the original did
    # Length of y_pred must be less than or equal to k, and all elements must be unique
    # (asserts kept from the original; note they are stripped under -O).
    assert (len(y_i_pred) <= k)
    assert (len(np.unique(y_i_pred)) == len(y_i_pred))
    sum_precision = 0.0
    num_hits = 0.0
    for i, p in enumerate(y_i_pred):
        if p in y_i_true:
            num_hits += 1
            precision = num_hits / (i + 1)  # precision at this cut position
            sum_precision += precision
    return sum_precision / min(len(y_i_true), k)
# Function for calculating MAP@K
def mapk(y_true, y_pred):
    """Mean of the per-record average precisions (MAP@K)."""
    per_record = [apk(t, p) for t, p in zip(y_true, y_pred)]
    return np.mean(per_record)
# Calculate MAP@K
print(mapk(y_true, y_pred))
# 0.65
# Even if the number of true values is the same, if the order is different then the score will be different
# Records 0 and 1 share the same true set {1, 2} but differ in prediction order.
print(apk(y_true[0], y_pred[0]))
print(apk(y_true[1], y_pred[1]))
# 1.0, 0.5833
| [
"sklearn.metrics.f1_score",
"numpy.unique",
"sklearn.metrics.cohen_kappa_score",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"sklearn.metrics.log_loss",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((959, 989), 'numpy.array', 'np.array', (['[[tp, fp], [fn, tn]]'], {}), '([[tp, fp], [fn, tn]])\n', (967, 989), True, 'import numpy as np\n'), ((1276, 1308), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1292, 1308), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score\n'), ((1609, 1639), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1623, 1639), False, 'from sklearn.metrics import accuracy_score\n'), ((1901, 1925), 'sklearn.metrics.log_loss', 'log_loss', (['y_true', 'y_prob'], {}), '(y_true, y_prob)\n', (1909, 1925), False, 'from sklearn.metrics import log_loss\n'), ((2215, 2240), 'numpy.array', 'np.array', (['[0, 2, 1, 2, 2]'], {}), '([0, 2, 1, 2, 2])\n', (2223, 2240), True, 'import numpy as np\n'), ((2250, 2354), 'numpy.array', 'np.array', (['[[0.68, 0.32, 0.0], [0.0, 0.0, 1.0], [0.6, 0.4, 0.0], [0.0, 0.0, 1.0], [\n 0.28, 0.12, 0.6]]'], {}), '([[0.68, 0.32, 0.0], [0.0, 0.0, 1.0], [0.6, 0.4, 0.0], [0.0, 0.0, \n 1.0], [0.28, 0.12, 0.6]])\n', (2258, 2354), True, 'import numpy as np\n'), ((2447, 2471), 'sklearn.metrics.log_loss', 'log_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2455, 2471), False, 'from sklearn.metrics import log_loss\n'), ((2886, 2951), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 0, 0], [1, 1, 1], [0, 1, 1], [0, 0, 1]]'], {}), '([[1, 1, 0], [1, 0, 0], [1, 1, 1], [0, 1, 1], [0, 0, 1]])\n', (2894, 2951), True, 'import numpy as np\n'), ((3089, 3154), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 1, 0], [1, 0, 1], [0, 0, 1], [0, 0, 1]]'], {}), '([[1, 0, 1], [0, 1, 0], [1, 0, 1], [0, 0, 1], [0, 0, 1]])\n', (3097, 3154), True, 'import numpy as np\n'), ((3809, 3852), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""samples"""'}), "(y_true, y_pred, average='samples')\n", (3817, 3852), False, 'from sklearn.metrics import f1_score\n'), ((3864, 3905), 
'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (3872, 3905), False, 'from sklearn.metrics import f1_score\n'), ((3917, 3958), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (3925, 3958), False, 'from sklearn.metrics import f1_score\n'), ((4827, 4883), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {'labels': '[1, 2, 3, 4, 5]'}), '(y_true, y_pred, labels=[1, 2, 3, 4, 5])\n', (4843, 4883), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score\n'), ((5051, 5105), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['y_true', 'y_pred'], {'weights': '"""quadratic"""'}), "(y_true, y_pred, weights='quadratic')\n", (5068, 5105), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score\n'), ((327, 361), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (345, 361), False, 'from sklearn.metrics import mean_squared_error\n'), ((3306, 3342), 'sklearn.metrics.f1_score', 'f1_score', (['y_true[i, :]', 'y_pred[i, :]'], {}), '(y_true[i, :], y_pred[i, :])\n', (3314, 3342), False, 'from sklearn.metrics import f1_score\n'), ((3461, 3497), 'sklearn.metrics.f1_score', 'f1_score', (['y_true[:, c]', 'y_pred[:, c]'], {}), '(y_true[:, c], y_pred[:, c])\n', (3469, 3497), False, 'from sklearn.metrics import f1_score\n'), ((699, 715), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (707, 715), True, 'import numpy as np\n'), ((725, 741), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (733, 741), True, 'import numpy as np\n'), ((762, 778), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (770, 778), True, 'import numpy as np\n'), ((788, 804), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (796, 804), True, 'import numpy as np\n'), ((825, 841), 'numpy.array', 
'np.array', (['y_true'], {}), '(y_true)\n', (833, 841), True, 'import numpy as np\n'), ((851, 867), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (859, 867), True, 'import numpy as np\n'), ((888, 904), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (896, 904), True, 'import numpy as np\n'), ((914, 930), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (922, 930), True, 'import numpy as np\n'), ((5729, 5748), 'numpy.unique', 'np.unique', (['y_i_pred'], {}), '(y_i_pred)\n', (5738, 5748), True, 'import numpy as np\n')] |
# Calibration methods including Histogram Binning and Temperature Scaling
import time
import keras
import keras.backend as K
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from keras.callbacks import EarlyStopping
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Lambda
from keras.layers import Layer
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.preprocessing import OneHotEncoder
from scipy.optimize import minimize
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
import tensorflow
from calibration.evaluation import ECE, MCE, Brier, evaluate
def softmax(x):
    """
    Compute softmax values for each sets of scores in x.
    Parameters:
        x (numpy.ndarray): array containing m samples with n-dimensions (m,n)
    Returns:
        x_softmax (numpy.ndarray) softmaxed values for initial (m,n) array
    """
    # Shift by the global maximum so the largest exponent is 0 (numerical stability).
    shifted = np.exp(x - np.max(x))
    # 1-D input: normalize over axis 0; otherwise row-wise over axis 1.
    norm_axis = 0 if shifted.ndim == 1 else 1
    return shifted / shifted.sum(axis=norm_axis, keepdims=1)
class HistogramBinning():
    """
    Histogram Binning as a calibration method. The bins are divided into equal lengths.
    The class contains two methods:
        - fit(probs, true), that should be used with validation data to train the calibration model.
        - predict(probs), this method is used to calibrate the confidences.
    """
    def __init__(self, M=15):
        """
        M (int): the number of equal-length bins used
        """
        self.bin_size = 1. / M  # Width of each bin
        self.conf = []  # Per-bin calibrated confidence, filled in by fit()
        self.upper_bounds = np.arange(self.bin_size, 1 + self.bin_size, self.bin_size)  # Upper edge of each bin
    def _get_conf(self, conf_thresh_lower, conf_thresh_upper, probs, true):
        """
        Inner method to calculate optimal confidence for certain probability range
        Params:
            - conf_thresh_lower (float): start of the interval (not included)
            - conf_thresh_upper (float): end of the interval (included)
            - probs : list of probabilities.
            - true : list with true labels, where 1 is positive class and 0 is negative).
        Returns:
            Average positive rate of the bin, or 0 for an empty bin.
        """
        # Labels whose predicted probability falls inside (lower, upper]
        filtered = [x[0] for x in zip(true, probs) if x[1] > conf_thresh_lower and x[1] <= conf_thresh_upper]
        nr_elems = len(filtered)  # Number of elements in the bin
        if nr_elems < 1:
            return 0
        else:
            # In essence the confidence equals the average accuracy of a bin
            return sum(filtered) / nr_elems  # Sums positive classes
    def fit(self, probs, true):
        """
        Fit the calibration model, finding optimal confidences for all the bins.
        Params:
            probs: probabilities of data
            true: true labels of data
        """
        conf = []
        # Go through intervals and add confidence to list
        for conf_thresh in self.upper_bounds:
            temp_conf = self._get_conf((conf_thresh - self.bin_size), conf_thresh, probs=probs, true=true)
            conf.append(temp_conf)
        # Store as an ndarray so predict() can index it with scalar OR array
        # bin indices; the original plain list raised TypeError when predict()
        # was given 2-D [samples, classes] input (ndarray index into a list).
        self.conf = np.array(conf)
    def predict(self, probs):
        """
        Calibrate the confidences (in place).
        Param:
            probs: probabilities of the data, 1-D or shape [samples, classes]
        Returns:
            Calibrated probabilities (same shape as input; the input is mutated)
        """
        # Go through all the probs and check what confidence is suitable for it.
        for i, prob in enumerate(probs):
            idx = np.searchsorted(self.upper_bounds, prob)
            # Guard against values marginally above the last bound (e.g. 1 + eps)
            idx = np.minimum(idx, len(self.conf) - 1)
            probs[i] = self.conf[idx]
        return probs
class TemperatureScaling():
    """Temperature scaling: divide all logits by a single learned scalar T
    before softmax; T is found by minimizing NLL on validation data."""
    def __init__(self, temp=1, maxiter=50, solver="BFGS"):
        """
        Initialize class
        Params:
            temp (float): starting temperature, default 1
            maxiter (int): maximum iterations done by optimizer, however 8 iterations have been maximum.
            solver (str): scipy.optimize.minimize method name.
        """
        self.temp = temp
        self.maxiter = maxiter
        self.solver = solver
    def _loss_fun(self, x, probs, true):
        # Calculates the loss using log-loss (cross-entropy loss).
        # The original also printed the scaled probabilities here, which fired
        # on every optimizer iteration; that debug output has been removed.
        scaled_probs = self.predict(probs, x)
        loss = log_loss(y_true=true, y_pred=scaled_probs)
        return loss
    # Find the temperature
    def fit(self, logits, true, verbose=False):
        """
        Trains the model and finds optimal temperature
        Params:
            logits: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
        Returns:
            the results of optimizer after minimizing is finished.
        """
        true = true.flatten()  # Flatten y_val
        opt = minimize(self._loss_fun, x0=1, args=(logits, true), options={'maxiter': self.maxiter}, method=self.solver)
        self.temp = opt.x[0]
        if verbose:
            print("Temperature:", 1 / self.temp)
        return opt
    def predict(self, logits, temp=None):
        """
        Scales logits based on the temperature and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
            temp: if not set use temperatures find by model or previously set.
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        # NOTE(review): `not temp` also treats temp == 0 as "unset"; kept as-is
        # to preserve the original behavior during optimization.
        if not temp:
            return softmax(logits / self.temp)
        else:
            return softmax(logits / temp)
class VectorScaling():
    """Vector scaling fitted with scipy.optimize: a diagonal weight matrix W
    plus (optionally) a bias vector, found by minimizing NLL on validation data."""
    def __init__(self, classes=1, W=[], bias=[], maxiter=100, solver="BFGS", use_bias=True):
        """
        Initialize class
        Params:
            maxiter (int): maximum iterations done by optimizer.
            classes (int): how many classes in given data set. (based on logits )
            W (np.ndarray): matrix with temperatures for all the classes
            bias ( np.array): vector with biases
        """
        self.W = W
        self.bias = bias
        self.maxiter = maxiter
        self.solver = solver
        self.classes = classes
        self.use_bias = use_bias
    def _loss_fun(self, x, logits, true):
        # Calculates the loss using log-loss (cross-entropy loss)
        # First `classes` entries of x form the diagonal of W, the rest the bias.
        W = np.diag(x[:self.classes])
        if self.use_bias:
            bias = x[self.classes:]
        else:
            bias = np.zeros(self.classes)
        scaled_probs = self.predict(logits, W, bias)
        loss = log_loss(y_true=true, y_pred=scaled_probs)
        return loss
    # Find the temperature
    def fit(self, logits, true):
        """
        Trains the model and finds optimal temperature
        Params:
            logits: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
        Returns:
            the results of optimizer after minimizing is finished.
        """
        true = true.flatten()  # Flatten y_val
        self.classes = logits.shape[1]
        # Start from identity scaling: ones on the diagonal, zeros for the bias.
        x0 = np.concatenate([np.repeat(1, self.classes), np.repeat(0, self.classes)])
        opt = minimize(self._loss_fun, x0=x0, args=(logits, true), options={'maxiter': self.maxiter},
                       method=self.solver)
        self.W = np.diag(opt.x[:logits.shape[1]])
        self.bias = opt.x[logits.shape[1]:]
        return opt
    def predict(self, logits, W=[], bias=[]):
        """
        Scales logits based on the temperature and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
            temp: if not set use temperatures find by model or previously set.
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        if len(W) == 0 or len(bias) == 0:  # Use class variables
            scaled_logits = np.dot(logits, self.W) + self.bias
        else:  # Take variables W and bias from arguments
            scaled_logits = np.dot(logits, W) + bias
        return softmax(scaled_logits)
class MatrixScaling():
    """Matrix scaling: a full (W, b) affine map on logits, trained as a
    one-layer softmax network with Keras."""
    def __init__(self, classes=-1, max_epochs=1000, patience=5):
        """
        Initialize class
        Params:
            max_epochs (int): maximum iterations done by optimizer.
            classes (int): how many classes in given data set. (based on logits )
            patience (int): how many worse epochs before early stopping
        """
        self.model = self.create_model(classes) if classes >= 1 else None
        self.max_epochs = max_epochs
        self.patience = patience
        self.classes = classes
    def create_model(self, classes, verbose=True):
        """Build a single dense softmax layer mapping logits to probabilities."""
        net = Sequential()
        net.add(Dense(classes, use_bias=True, input_dim=classes, activation="softmax"))
        if verbose:
            net.summary()
        net.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
        return net
    # Find the temperature
    def fit(self, logits, true):
        """
        Trains the model and finds optimal parameters
        Params:
            logits: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
        Returns:
            the model after minimizing is finished.
        """
        self.model = self.create_model(logits.shape[1])
        stopper = EarlyStopping(monitor='loss', min_delta=0, patience=self.patience, verbose=0, mode='auto')
        return self.model.fit(logits, true, epochs=self.max_epochs, callbacks=[stopper])
    def predict(self, logits):
        """
        Scales logits based on the model and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        return self.model.predict(logits)
    @property
    def coef_(self):
        # Transposed dense-layer weights (matches sklearn's coef_ layout)
        if self.model:
            return self.model.get_weights()[0].T
    @property
    def intercept_(self):
        if self.model:
            return self.model.get_weights()[1]
class Dirichlet_NN():
    """Dirichlet calibration as a small Keras network: (optionally) take the
    log of the input probabilities, apply a dense layer initialised to the
    identity, then a softmax — with optional off-diagonal (ODIR) regularization."""
    def __init__(self, l2=0., mu=None, classes=-1, max_epochs=500, comp=True,
                 patience=15, lr=0.001, weights=[], random_state=15, loss="sparse_categorical_crossentropy",
                 double_fit=True, use_logits=False):
        """
        Initialize class
        Params:
            l2 (float): regularization for off-diag regularization.
            mu (float): regularization for bias. (if None, then it is set equal to lambda of L2)
            classes (int): how many classes in given data set. (based on logits)
            max_epochs (int): maximum iterations done by optimizer.
            comp (bool): whether use complementary (off_diag) regularization or not.
            patience (int): how many worse epochs before early stopping
            lr (float): learning rate of Adam optimizer
            weights (array): initial weights of model ([k,k], [k]) - weights + bias
            random_state (int): random seed for numpy and tensorflow
            loss (string/class): loss function to optimize
            double_fit (bool): fit twice the model, in the beginning with lr (default=0.001), and the second time 10x lower lr (lr/10)
            use_logits (bool): Using logits as input of model, leave out conversion to logarithmic scale.
        """
        # NOTE(review): `weights=[]` is a mutable default argument; it is only
        # read (never mutated) here, so the usual aliasing pitfall does not bite.
        if classes >= 1:
            self.model = self.create_model(classes, weights)
        else:
            self.model = None
        self.max_epochs = max_epochs
        self.patience = patience
        self.classes = classes
        self.l2 = l2
        self.lr = lr
        self.weights = weights
        self.random_state = random_state
        self.loss = loss
        self.double_fit = double_fit
        self.use_logits = use_logits
        # Bias regularization defaults to the same strength as the off-diagonal one.
        if mu:
            self.mu = mu
        else:
            self.mu = l2
        # Either the custom off-diagonal regularizer or plain L2 on the kernel.
        if comp:
            self.regularizer = self.L2_offdiag(l2=self.l2)
        else:
            self.regularizer = keras.regularizers.l2(l=self.l2)
        tensorflow.random.set_seed(random_state)
        np.random.seed(random_state)
    def create_model(self, classes, weights=[], verbose=False):
        """
        Create model and add loss to it
        Params:
            classes (int): number of classes, used for input layer shape and output shape
            weights (array): starting weights in shape of ([k,k], [k]), (weights, bias)
            verbose (bool): whether to print out anything or not
        Returns:
            model (object): Keras model
        """
        model = Sequential()
        if not self.use_logits:  # Leave out converting to logarithmic scale if logits are used as input.
            model.add(Lambda(self._logFunc, input_shape=[classes]))
            # Identity-initialised dense layer: starts as a no-op calibration map.
            model.add(Dense(classes, activation="softmax"
                            , kernel_initializer=keras.initializers.Identity(gain=1)
                            , bias_initializer="zeros",
                            kernel_regularizer=self.regularizer, bias_regularizer=keras.regularizers.l2(l=self.mu)))
        else:
            model.add(Dense(classes, input_shape=[classes], activation="softmax"
                            , kernel_initializer=keras.initializers.Identity(gain=1)
                            , bias_initializer="zeros",
                            kernel_regularizer=self.regularizer, bias_regularizer=keras.regularizers.l2(l=self.mu)))
        if len(weights) != 0:  # Weights that are set from fitting
            model.set_weights(weights)
        elif len(self.weights) != 0:  # Weights that are given from initialisation
            model.set_weights(self.weights)
        adam = keras.optimizers.Adam(lr=self.lr)
        model.compile(loss=self.loss, optimizer=adam)
        if verbose:
            model.summary()
        return model
    def fit(self, probs, true, weights=[], verbose=False, double_fit=None, batch_size=128):
        """
        Trains the model and finds optimal parameters
        Params:
            probs: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
            weights (array): starting weights in shape of ([k,k], [k]), (weights, bias)
            verbose (bool): whether to print out anything or not
            double_fit (bool): fit twice the model, in the beginning with lr (default=0.001), and the second time 10x lower lr (lr/10)
        Returns:
            hist: Keras history of learning process
        """
        if len(weights) != 0:
            self.weights = weights
        if "sparse" not in self.loss:  # Check if need to make Y categorical; TODO Make it more see-through
            true = to_categorical(true)
        if double_fit == None:
            double_fit = self.double_fit
        self.model = self.create_model(probs.shape[1], self.weights, verbose)
        early_stop = EarlyStopping(monitor='loss', min_delta=0, patience=self.patience, verbose=verbose, mode='auto')
        cbs = [early_stop]
        hist = self.model.fit(probs, true, epochs=self.max_epochs, callbacks=cbs, batch_size=batch_size,
                              verbose=verbose)
        if double_fit:  # In case of my experiments it gave better results to start with default learning rate (0.001) and then fit again (0.0001) learning rate.
            if verbose:
                print("Fit with 10x smaller learning rate")
            self.lr = self.lr / 10
            # Recursive second pass, warm-started from the current weights.
            self.fit(probs, true, weights=self.model.get_weights(), verbose=verbose, double_fit=False,
                     batch_size=batch_size)  # Fit 2 times
        return hist
    def predict(self, probs):  # TODO change it to return only the best prediction
        """
        Scales logits based on the model and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        return self.model.predict(probs)
    def predict_proba(self, probs):
        """
        Scales logits based on the model and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        return self.model.predict(probs)
    @property
    def coef_(self):
        """
        Actually weights of neurons, but to keep similar notation to original Dirichlet we name it coef_
        """
        if self.model:
            return self.model.get_weights()[0].T  # Transposed to match with full dirichlet weights.
    @property
    def intercept_(self):
        """
        Actually bias values, but to keep similar notation to original Dirichlet we name it intercept_
        """
        if self.model:
            return self.model.get_weights()[1]
    def _logFunc(self, x):
        """
        Find logarith of x (tensor)
        """
        eps = np.finfo(float).eps  # 1e-16
        return K.log(K.clip(x, eps, 1 - eps))  # How this clip works? K.clip(x, K.epsilon(), None) + 1.)
    # Inner classes for off diagonal regularization
    class Regularizer(object):
        """
        Regularizer base class.
        """
        def __call__(self, x):
            return 0.0
        @classmethod
        def from_config(cls, config):
            return cls(**config)
    class L2_offdiag(Regularizer):
        """
        Regularizer for L2 regularization off diagonal.
        """
        def __init__(self, l2=0.0):
            """
            Params:
                l: (float) lambda, L2 regularization factor.
            """
            self.l2 = K.cast_to_floatx(l2)
        def __call__(self, x):
            """
            Off-diagonal regularization (complementary regularization)
            """
            # Penalize every element of column i except the diagonal entry (i, i).
            reg = 0
            for i in range(0, x.shape[0]):
                reg += K.sum(self.l2 * K.square(x[0:i, i]))
                reg += K.sum(self.l2 * K.square(x[i + 1:, i]))
            return reg
        def get_config(self):
            return {'l2': float(self.l2)}
class Dirichlet_diag_NN():
    """Diagonal Dirichlet calibration: log(probs) -> per-class scale and bias
    (DiagonalLayer) -> softmax, trained with Keras."""
    def __init__(self, classes=-1, max_epochs=500, patience=15, random_state=15):
        """
        Initialize class
        Params:
            max_epochs (int): maximum iterations done by optimizer.
            classes (int): how many classes in given data set. (based on logits )
            patience (int): how many worse epochs before early stopping
        """
        self.model = self.create_model(classes) if classes >= 1 else None
        self.max_epochs = max_epochs
        self.patience = patience
        self.classes = classes
        tensorflow.random.set_seed(random_state)
        np.random.seed(random_state)
    def create_model(self, classes, verbose=True):
        """Build the log -> DiagonalLayer -> softmax Keras model."""
        net = Sequential()
        net.add(Lambda(self._logFunc, input_shape=[classes]))
        net.add(DiagonalLayer(output_dim=classes))
        net.add(Activation('softmax'))
        net.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
        if verbose:
            net.summary()
        return net
    # Find the temperature
    def fit(self, logits, true, verbose=True, batch_size=128):
        """
        Trains the model and finds optimal parameters
        Params:
            logits: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
        Returns:
            the model after minimizing is finished.
        """
        self.model = self.create_model(logits.shape[1], verbose)
        stopper = EarlyStopping(monitor='loss', min_delta=0, patience=self.patience, verbose=0, mode='auto')
        return self.model.fit(logits, true, epochs=self.max_epochs, callbacks=[stopper],
                              verbose=verbose, batch_size=batch_size)
    def predict(self, logits):
        """
        Scales logits based on the model and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        return self.model.predict(logits)
    @property
    def coef_(self):
        # Transposed per-class scale weights
        if self.model:
            return self.model.get_weights()[0].T
    @property
    def intercept_(self):
        if self.model:
            return self.model.get_weights()[1]
    def _logFunc(self, x):
        """Elementwise log of x (tensor), clipped away from 0 and 1."""
        eps = np.finfo(float).eps  # 1e-16
        return K.log(K.clip(x, eps, 1 - eps))
class VectorScaling_NN():
    """Vector scaling on logits, trained with Keras: per-class scale and bias
    (DiagonalLayer) followed by a softmax."""
    def __init__(self, classes=-1, max_epochs=500, patience=15, random_state=15):
        """
        Initialize class
        Params:
            max_epochs (int): maximum iterations done by optimizer.
            classes (int): how many classes in given data set. (based on logits )
            patience (int): how many worse epochs before early stopping
        """
        self.model = self.create_model(classes) if classes >= 1 else None
        self.max_epochs = max_epochs
        self.patience = patience
        self.classes = classes
        tensorflow.random.set_seed(random_state)
        np.random.seed(random_state)
    def create_model(self, classes, verbose=True):
        """Build the DiagonalLayer -> softmax Keras model over raw logits."""
        net = Sequential()
        net.add(DiagonalLayer(input_shape=[classes], output_dim=classes))
        net.add(Activation('softmax'))
        net.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
        if verbose:
            net.summary()
        return net
    # Find the temperature
    def fit(self, logits, true, verbose=True, batch_size=128):
        """
        Trains the model and finds optimal parameters
        Params:
            logits: the output from neural network for each class (shape [samples, classes])
            true: one-hot-encoding of true labels.
        Returns:
            the model after minimizing is finished.
        """
        self.model = self.create_model(logits.shape[1], verbose)
        stopper = EarlyStopping(monitor='loss', min_delta=0, patience=self.patience, verbose=0, mode='auto')
        return self.model.fit(logits, true, epochs=self.max_epochs, callbacks=[stopper],
                              verbose=verbose, batch_size=batch_size)
    def predict(self, logits):
        """
        Scales logits based on the model and returns calibrated probabilities
        Params:
            logits: logits values of data (output from neural network) for each class (shape [samples, classes])
        Returns:
            calibrated probabilities (nd.array with shape [samples, classes])
        """
        return self.model.predict(logits)
    @property
    def coef_(self):
        # Transposed per-class scale weights
        if self.model:
            return self.model.get_weights()[0].T
    @property
    def intercept_(self):
        if self.model:
            return self.model.get_weights()[1]
class DiagonalLayer(Layer):
    """Keras layer computing an elementwise affine map: x * kernel + bias
    (equivalent to a dense layer restricted to a diagonal weight matrix)."""
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(DiagonalLayer, self).__init__(**kwargs)
    def build(self, input_shape, activation="softmax"):
        # Trainable per-class scale (initialised to 1) and bias (initialised to 0).
        self.kernel = self.add_weight(name='kernel',
                                      shape=[self.output_dim],
                                      initializer="ones",
                                      trainable=True)
        self.bias = self.add_weight(name='bias',
                                    shape=[self.output_dim],
                                    initializer='zeros',
                                    trainable=True)
        super(DiagonalLayer, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        # Use tensor operators directly. The original used K.np.multiply, which
        # relied on keras.backend's incidental module-level numpy import and can
        # break across keras versions.
        return x * self.kernel + self.bias
    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
def log_encode(x):
    """Elementwise natural log of x, clipped to [eps, 1] so zeros stay finite."""
    tiny = np.finfo(x.dtype).eps
    clipped = np.clip(x, tiny, 1)
    return np.log(clipped)
class LogisticCalibration(LogisticRegression):
    """Multinomial logistic regression on (log-transformed) scores, with an
    optional grid search over the regularization strength C."""
    def __init__(self, C=1.0, solver='lbfgs', multi_class='multinomial',
                 log_transform=True):
        self.C_grid = C  # scalar C or list of candidate C values
        self.C = C if isinstance(C, float) else C[0]
        self.solver = solver
        self.log_transform = log_transform
        self.encode = log_encode  # transform applied to scores before fit/predict
        # NOTE(review): stored as `multiclass`, but fit() below reads
        # `self.multi_class` (set by the sklearn base-class __init__) — confirm
        # this duplication is intentional.
        self.multiclass = multi_class
        super(LogisticCalibration, self).__init__(C=C, solver=solver,
                                                  multi_class=multi_class)
    def fit(self, scores, y, X_val=None, y_val=None, *args, **kwargs):
        # If a grid of C values was given, pick the one with the lowest
        # validation log-loss before fitting the final model on `scores`.
        if isinstance(self.C_grid, list):
            calibrators = []
            losses = np.zeros(len(self.C_grid))
            for i, C in enumerate(self.C_grid):
                cal = LogisticCalibration(C=C, solver=self.solver,
                                          multi_class=self.multi_class,
                                          log_transform=self.log_transform)
                cal.fit(scores, y)
                losses[i] = log_loss(y_val, cal.predict_proba(X_val))
                calibrators.append(cal)
            best_idx = losses.argmin()
            self.C = calibrators[best_idx].C
        return super(LogisticCalibration, self).fit(self.encode(scores), y,
                                                    *args, **kwargs)
    def predict_proba(self, scores, *args, **kwargs):
        # Apply the same encoding used at fit time before predicting.
        return super(LogisticCalibration,
                     self).predict_proba(self.encode(scores), *args, **kwargs)
    def predict(self, scores, *args, **kwargs):
        return super(LogisticCalibration, self).predict(self.encode(scores),
                                                        *args, **kwargs)
# TODO mode this ending part to other file
def get_preds_all(y_probs, y_true, axis=1, normalize=False, flatten=True):
    """One-hot encode predictions and labels for per-pair ("full") metrics.

    Params:
        y_probs (np.ndarray): class probabilities, shape [samples, classes].
        y_true (np.ndarray): true labels, 2-D [samples, 1] (as required by
            OneHotEncoder.transform).
        axis (int): axis holding the classes (default 1).
        normalize (bool): divide each sample's probabilities by their sum.
        flatten (bool): return flat 1-D arrays instead of [samples, classes].
    Returns:
        (y_preds, y_probs, y_true): one-hot predictions, (optionally
        normalized/flattened) probabilities, and one-hot labels.
    """
    y_preds = np.argmax(y_probs, axis=axis)  # Take maximum confidence as prediction
    y_preds = y_preds.reshape(-1, 1)
    if normalize:
        # keepdims=True keeps the sums as shape [samples, 1] so the division
        # broadcasts row-wise; the original summed without keepdims, which
        # broadcasts incorrectly (or raises) for 2-D probabilities.
        y_probs /= np.sum(y_probs, axis=axis, keepdims=True)
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(y_preds)
    y_preds = enc.transform(y_preds)
    y_true = enc.transform(y_true)
    if flatten:
        y_preds = y_preds.flatten()
        y_true = y_true.flatten()
        y_probs = y_probs.flatten()
    return y_preds, y_probs, y_true
def evaluate_legacy(probs, y_true, verbose=False, normalize=True, bins=15):
    """
    Evaluate model using various scoring measures: Error Rate, ECE, MCE, NLL, Brier Score
    Params:
        probs: a list containing probabilities for all the classes with a shape of (samples, classes)
        y_true: a list containing the actual class labels
        verbose: (bool) are the scores printed out. (default = False)
        normalize: (bool) in case of 1-vs-K calibration, the probabilities need to be normalized.
        bins: (int) - into how many bins are probabilities divided (default = 15)
    Returns:
        (error, ece, ece2, mce, mce2, loss, brier), returns various scoring measures
    """
    preds = np.argmax(probs, axis=1)  # Take maximum confidence as prediction
    if normalize:
        # Renormalize the winning probability by the row sum (1-vs-K case).
        confs = np.max(probs, axis=1) / np.sum(probs, axis=1)
        # Check if everything below or equal to 1?
    else:
        confs = np.max(probs, axis=1)  # Take only maximum confidence
    accuracy = metrics.accuracy_score(y_true, preds) * 100
    error = 100 - accuracy
    # get predictions, confidences and true labels for all classes
    preds2, confs2, y_true2 = get_preds_all(probs, y_true, normalize=False, flatten=True)
    # Calculate ECE and ECE2 (the "2" variants use every record-class pair)
    ece = ECE(confs, preds, y_true, bin_size=1 / bins)
    ece2 = ECE(confs2, preds2, y_true2, bin_size=1 / bins, ece_full=True)
    # Calculate MCE
    mce = MCE(confs, preds, y_true, bin_size=1 / bins)
    mce2 = MCE(confs2, preds2, y_true2, bin_size=1 / bins, ece_full=True)
    loss = log_loss(y_true=y_true, y_pred=probs)
    # y_prob_true = np.array([probs[i, idx] for i, idx in enumerate(y_true)])  # Probability of positive class
    # brier = brier_score_loss(y_true=y_true, y_prob=y_prob_true)  # Brier Score (MSE), NB! not correct
    brier = Brier(probs, y_true)
    if verbose:
        print("Accuracy:", accuracy)
        print("Error:", error)
        print("ECE:", ece)
        print("ECE2:", ece2)
        print("MCE:", mce)
        print("MCE2:", mce2)
        print("Loss:", loss)
        print("brier:", brier)
    return error, ece, ece2, mce, mce2, loss, brier
def cal_results(name, method, files, m_kwargs=None, approach="all", input="logits"):
    """
    Calibrate model scores with temperature scaling and report metrics.

    Reads validation/test logits from two TSV files, fits TemperatureScaling
    on the validation split ("all": one multiclass model; otherwise one
    1-vs-K model per class column), evaluates calibrated and uncalibrated
    probabilities, dumps them to text files and collects all scores.

    Params:
        name (string): identifier used in result rows and output file names
        method (string): identifier used in output file names
        files (list of strings): [validation TSV, test TSV]; column 1 holds
            the labels, columns 2:6 hold the logits
        m_kwargs (dict or None): keyword arguments for the TemperatureScaling
            constructor (default None -> empty dict)
        approach (string): "all" for multiclass calibration, anything else
            for the 1-vs-K approach
        input (string): kept for interface compatibility; the logits are
            always used as calibration input here
    Returns:
        df (pandas.DataFrame): calibrated and uncalibrated results for the
            validation and test splits.
    """
    # Avoid the shared-mutable-default pitfall of `m_kwargs={}`.
    if m_kwargs is None:
        m_kwargs = {}
    df = pd.DataFrame(
        columns=["Name", "Error", "ECE", "ECE2", "ECE_CW", "ECE_CW2", "ECE_FULL", "ECE_FULL2", "MCE", "MCE2", "Loss",
                 "Brier"])
    t1 = time.time()
    # Load logits (cols 2:6) and labels (col 1) for both splits.
    val_df = pd.read_csv(files[0], sep='\t')
    test_df = pd.read_csv(files[1], sep='\t')
    logits_val = val_df.iloc[:, 2:6].to_numpy()
    y_val = val_df.iloc[:, 1:2].to_numpy()
    logits_test = test_df.iloc[:, 2:6].to_numpy()
    y_test = test_df.iloc[:, 1:2].to_numpy()
    input_val = logits_val
    input_test = logits_test
    # Fit the calibration model(s) based on the approach "all" or "1-vs-K".
    if approach == "all":
        model = TemperatureScaling(**m_kwargs)
        model.fit(input_val, y_val.flatten())
        probs_val = model.predict(input_val)
        probs_test = model.predict(input_test)
    else:  # 1-vs-K: calibrate each class column independently
        probs_val = np.zeros_like(input_val)
        probs_test = np.zeros_like(input_test)
        for k in range(input_test.shape[1]):
            # Binary labels: 1 for the fixed true class k, 0 otherwise.
            y_cal = np.array(y_val == k, dtype="int")[:, 0]
            model = TemperatureScaling(**m_kwargs)
            model.fit(input_val[:, k], y_cal)  # one column of probs for class "k"
            probs_val[:, k] = model.predict(input_val[:, k])
            probs_test[:, k] = model.predict(input_test[:, k])

    def _report(label, scores):
        # Print one result line in the historical console format.
        print((label + " Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; "
               "ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f")
              % tuple(scores))

    # Score uncalibrated (softmaxed logits) and calibrated probabilities.
    # This block used to be duplicated verbatim in both branches above.
    res_uncal_val = evaluate(softmax(logits_val), y_val, verbose=False)
    res_uncal_test = evaluate(softmax(logits_test), y_test, verbose=False)
    res_cal_test = evaluate(probs_test, y_test, verbose=False)
    res_cal_val = evaluate(probs_val, y_val, verbose=False)
    _report("Uncal Valid", res_uncal_val)
    _report("Uncal Test", res_uncal_test)
    _report("Test", res_cal_test)
    _report("Validation", res_cal_val)
    # Dump raw (softmax) and calibrated probabilities for later inspection.
    with open(f'result/{name}_{method}_val_TS.txt', "wb") as f:
        np.savetxt(f, softmax(logits_val))
        np.savetxt(f, probs_val)
    # NOTE(review): no underscore before "test" in this file name; kept as-is
    # so downstream consumers keep finding the same file.
    with open(f'result/{name}_{method}test_TS.txt', "wb") as f2:
        np.savetxt(f2, softmax(logits_test))
        np.savetxt(f2, probs_test)
    # evaluate() returns scores in the same order as the DataFrame columns.
    df.loc[0] = [name + "_val_uncalib"] + list(res_uncal_val)
    df.loc[1] = [name + "_test_uncalib"] + list(res_uncal_test)
    df.loc[2] = [name + "_test_calib"] + list(res_cal_test)
    df.loc[3] = [name + "_val_calib"] + list(res_cal_val)
    t2 = time.time()
    print("Time taken:", (t2 - t1), "\n")
    return df
| [
"numpy.clip",
"pandas.read_csv",
"keras.initializers.Identity",
"numpy.log",
"keras.backend.cast_to_floatx",
"keras.utils.to_categorical",
"numpy.array",
"keras.backend.np.multiply",
"sklearn.metrics.log_loss",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.arange",
"calibration.eva... | [((25557, 25575), 'numpy.clip', 'np.clip', (['x', 'eps', '(1)'], {}), '(x, eps, 1)\n', (25564, 25575), True, 'import numpy as np\n'), ((25587, 25596), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (25593, 25596), True, 'import numpy as np\n'), ((27464, 27493), 'numpy.argmax', 'np.argmax', (['y_probs'], {'axis': 'axis'}), '(y_probs, axis=axis)\n', (27473, 27493), True, 'import numpy as np\n'), ((27648, 27700), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'sparse': '(False)'}), "(handle_unknown='ignore', sparse=False)\n", (27661, 27700), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((28674, 28698), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (28683, 28698), True, 'import numpy as np\n'), ((29239, 29283), 'calibration.evaluation.ECE', 'ECE', (['confs', 'preds', 'y_true'], {'bin_size': '(1 / bins)'}), '(confs, preds, y_true, bin_size=1 / bins)\n', (29242, 29283), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((29295, 29357), 'calibration.evaluation.ECE', 'ECE', (['confs2', 'preds2', 'y_true2'], {'bin_size': '(1 / bins)', 'ece_full': '(True)'}), '(confs2, preds2, y_true2, bin_size=1 / bins, ece_full=True)\n', (29298, 29357), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((29389, 29433), 'calibration.evaluation.MCE', 'MCE', (['confs', 'preds', 'y_true'], {'bin_size': '(1 / bins)'}), '(confs, preds, y_true, bin_size=1 / bins)\n', (29392, 29433), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((29445, 29507), 'calibration.evaluation.MCE', 'MCE', (['confs2', 'preds2', 'y_true2'], {'bin_size': '(1 / bins)', 'ece_full': '(True)'}), '(confs2, preds2, y_true2, bin_size=1 / bins, ece_full=True)\n', (29448, 29507), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((29520, 29557), 'sklearn.metrics.log_loss', 'log_loss', ([], {'y_true': 'y_true', 
'y_pred': 'probs'}), '(y_true=y_true, y_pred=probs)\n', (29528, 29557), False, 'from sklearn.metrics import log_loss\n'), ((29787, 29807), 'calibration.evaluation.Brier', 'Brier', (['probs', 'y_true'], {}), '(probs, y_true)\n', (29792, 29807), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((31239, 31375), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Name', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL',\n 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier']"}), "(columns=['Name', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2',\n 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier'])\n", (31251, 31375), True, 'import pandas as pd\n'), ((31408, 31419), 'time.time', 'time.time', ([], {}), '()\n', (31417, 31419), False, 'import time\n'), ((31434, 31465), 'pandas.read_csv', 'pd.read_csv', (['files[0]'], {'sep': '"""\t"""'}), "(files[0], sep='\\t')\n", (31445, 31465), True, 'import pandas as pd\n'), ((31480, 31511), 'pandas.read_csv', 'pd.read_csv', (['files[1]'], {'sep': '"""\t"""'}), "(files[1], sep='\\t')\n", (31491, 31511), True, 'import pandas as pd\n'), ((37440, 37451), 'time.time', 'time.time', ([], {}), '()\n', (37449, 37451), False, 'import time\n'), ((1800, 1858), 'numpy.arange', 'np.arange', (['self.bin_size', '(1 + self.bin_size)', 'self.bin_size'], {}), '(self.bin_size, 1 + self.bin_size, self.bin_size)\n', (1809, 1858), True, 'import numpy as np\n'), ((4573, 4615), 'sklearn.metrics.log_loss', 'log_loss', ([], {'y_true': 'true', 'y_pred': 'scaled_probs'}), '(y_true=true, y_pred=scaled_probs)\n', (4581, 4615), False, 'from sklearn.metrics import log_loss\n'), ((5119, 5229), 'scipy.optimize.minimize', 'minimize', (['self._loss_fun'], {'x0': '(1)', 'args': '(logits, true)', 'options': "{'maxiter': self.maxiter}", 'method': 'self.solver'}), "(self._loss_fun, x0=1, args=(logits, true), options={'maxiter':\n self.maxiter}, method=self.solver)\n", (5127, 5229), False, 'from scipy.optimize import minimize\n'), ((6698, 6723), 
'numpy.diag', 'np.diag', (['x[:self.classes]'], {}), '(x[:self.classes])\n', (6705, 6723), True, 'import numpy as np\n'), ((6911, 6953), 'sklearn.metrics.log_loss', 'log_loss', ([], {'y_true': 'true', 'y_pred': 'scaled_probs'}), '(y_true=true, y_pred=scaled_probs)\n', (6919, 6953), False, 'from sklearn.metrics import log_loss\n'), ((7567, 7678), 'scipy.optimize.minimize', 'minimize', (['self._loss_fun'], {'x0': 'x0', 'args': '(logits, true)', 'options': "{'maxiter': self.maxiter}", 'method': 'self.solver'}), "(self._loss_fun, x0=x0, args=(logits, true), options={'maxiter':\n self.maxiter}, method=self.solver)\n", (7575, 7678), False, 'from scipy.optimize import minimize\n'), ((7715, 7747), 'numpy.diag', 'np.diag', (['opt.x[:logits.shape[1]]'], {}), '(opt.x[:logits.shape[1]])\n', (7722, 7747), True, 'import numpy as np\n'), ((9250, 9262), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9260, 9262), False, 'from keras.models import Sequential\n'), ((9974, 10069), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': 'self.patience', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='loss', min_delta=0, patience=self.patience, verbose=\n 0, mode='auto')\n", (9987, 10069), False, 'from keras.callbacks import EarlyStopping\n'), ((12847, 12887), 'tensorflow.random.set_seed', 'tensorflow.random.set_seed', (['random_state'], {}), '(random_state)\n', (12873, 12887), False, 'import tensorflow\n'), ((12896, 12924), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (12910, 12924), True, 'import numpy as np\n'), ((13406, 13418), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13416, 13418), False, 'from keras.models import Sequential\n'), ((14514, 14547), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': 'self.lr'}), '(lr=self.lr)\n', (14535, 14547), False, 'import keras\n'), ((15771, 15872), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], 
{'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': 'self.patience', 'verbose': 'verbose', 'mode': '"""auto"""'}), "(monitor='loss', min_delta=0, patience=self.patience, verbose=\n verbose, mode='auto')\n", (15784, 15872), False, 'from keras.callbacks import EarlyStopping\n'), ((19835, 19875), 'tensorflow.random.set_seed', 'tensorflow.random.set_seed', (['random_state'], {}), '(random_state)\n', (19861, 19875), False, 'import tensorflow\n'), ((19884, 19912), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (19898, 19912), True, 'import numpy as np\n'), ((19982, 19994), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (19992, 19994), False, 'from keras.models import Sequential\n'), ((20812, 20907), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': 'self.patience', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='loss', min_delta=0, patience=self.patience, verbose=\n 0, mode='auto')\n", (20825, 20907), False, 'from keras.callbacks import EarlyStopping\n'), ((22623, 22663), 'tensorflow.random.set_seed', 'tensorflow.random.set_seed', (['random_state'], {}), '(random_state)\n', (22649, 22663), False, 'import tensorflow\n'), ((22672, 22700), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (22686, 22700), True, 'import numpy as np\n'), ((22770, 22782), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (22780, 22782), False, 'from keras.models import Sequential\n'), ((23559, 23654), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': 'self.patience', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='loss', min_delta=0, patience=self.patience, verbose=\n 0, mode='auto')\n", (23572, 23654), False, 'from keras.callbacks import EarlyStopping\n'), ((25527, 25544), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (25535, 25544), True, 'import 
numpy as np\n'), ((27610, 27636), 'numpy.sum', 'np.sum', (['y_probs'], {'axis': 'axis'}), '(y_probs, axis=axis)\n', (27616, 27636), True, 'import numpy as np\n'), ((28898, 28919), 'numpy.max', 'np.max', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (28904, 28919), True, 'import numpy as np\n'), ((28968, 29005), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'preds'], {}), '(y_true, preds)\n', (28990, 29005), True, 'import sklearn.metrics as metrics\n'), ((32570, 32613), 'calibration.evaluation.evaluate', 'evaluate', (['probs_test', 'y_test'], {'verbose': '(False)'}), '(probs_test, y_test, verbose=False)\n', (32578, 32613), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((32739, 32780), 'calibration.evaluation.evaluate', 'evaluate', (['probs_val', 'y_val'], {'verbose': '(False)'}), '(probs_val, y_val, verbose=False)\n', (32747, 32780), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((34043, 34067), 'numpy.zeros_like', 'np.zeros_like', (['input_val'], {}), '(input_val)\n', (34056, 34067), True, 'import numpy as np\n'), ((34089, 34114), 'numpy.zeros_like', 'np.zeros_like', (['input_test'], {}), '(input_test)\n', (34102, 34114), True, 'import numpy as np\n'), ((35149, 35192), 'calibration.evaluation.evaluate', 'evaluate', (['probs_test', 'y_test'], {'verbose': '(False)'}), '(probs_test, y_test, verbose=False)\n', (35157, 35192), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((35318, 35359), 'calibration.evaluation.evaluate', 'evaluate', (['probs_val', 'y_val'], {'verbose': '(False)'}), '(probs_val, y_val, verbose=False)\n', (35326, 35359), False, 'from calibration.evaluation import ECE, MCE, Brier, evaluate\n'), ((36656, 36680), 'numpy.savetxt', 'np.savetxt', (['f', 'probs_val'], {}), '(f, probs_val)\n', (36666, 36680), True, 'import numpy as np\n'), ((36800, 36826), 'numpy.savetxt', 'np.savetxt', (['f2', 'probs_test'], {}), '(f2, probs_test)\n', (36810, 
36826), True, 'import numpy as np\n'), ((999, 1008), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1005, 1008), True, 'import numpy as np\n'), ((3842, 3882), 'numpy.searchsorted', 'np.searchsorted', (['self.upper_bounds', 'prob'], {}), '(self.upper_bounds, prob)\n', (3857, 3882), True, 'import numpy as np\n'), ((6820, 6842), 'numpy.zeros', 'np.zeros', (['self.classes'], {}), '(self.classes)\n', (6828, 6842), True, 'import numpy as np\n'), ((9281, 9351), 'keras.layers.Dense', 'Dense', (['classes'], {'use_bias': '(True)', 'input_dim': 'classes', 'activation': '"""softmax"""'}), "(classes, use_bias=True, input_dim=classes, activation='softmax')\n", (9286, 9351), False, 'from keras.layers import Dense\n'), ((12805, 12837), 'keras.regularizers.l2', 'keras.regularizers.l2', ([], {'l': 'self.l2'}), '(l=self.l2)\n', (12826, 12837), False, 'import keras\n'), ((15576, 15596), 'keras.utils.to_categorical', 'to_categorical', (['true'], {}), '(true)\n', (15590, 15596), False, 'from keras.utils import to_categorical\n'), ((18043, 18058), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (18051, 18058), True, 'import numpy as np\n'), ((18094, 18117), 'keras.backend.clip', 'K.clip', (['x', 'eps', '(1 - eps)'], {}), '(x, eps, 1 - eps)\n', (18100, 18117), True, 'import keras.backend as K\n'), ((18754, 18774), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['l2'], {}), '(l2)\n', (18770, 18774), True, 'import keras.backend as K\n'), ((20013, 20057), 'keras.layers.Lambda', 'Lambda', (['self._logFunc'], {'input_shape': '[classes]'}), '(self._logFunc, input_shape=[classes])\n', (20019, 20057), False, 'from keras.layers import Lambda\n'), ((20130, 20151), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (20140, 20151), False, 'from keras.layers import Activation\n'), ((21849, 21864), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (21857, 21864), True, 'import numpy as np\n'), ((21900, 21923), 'keras.backend.clip', 'K.clip', (['x', 
'eps', '(1 - eps)'], {}), '(x, eps, 1 - eps)\n', (21906, 21923), True, 'import keras.backend as K\n'), ((22877, 22898), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (22887, 22898), False, 'from keras.layers import Activation\n'), ((25355, 25384), 'keras.backend.np.multiply', 'K.np.multiply', (['x', 'self.kernel'], {}), '(x, self.kernel)\n', (25368, 25384), True, 'import keras.backend as K\n'), ((28775, 28796), 'numpy.max', 'np.max', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (28781, 28796), True, 'import numpy as np\n'), ((28799, 28820), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (28805, 28820), True, 'import numpy as np\n'), ((7496, 7522), 'numpy.repeat', 'np.repeat', (['(1)', 'self.classes'], {}), '(1, self.classes)\n', (7505, 7522), True, 'import numpy as np\n'), ((7524, 7550), 'numpy.repeat', 'np.repeat', (['(0)', 'self.classes'], {}), '(0, self.classes)\n', (7533, 7550), True, 'import numpy as np\n'), ((8386, 8408), 'numpy.dot', 'np.dot', (['logits', 'self.W'], {}), '(logits, self.W)\n', (8392, 8408), True, 'import numpy as np\n'), ((8507, 8524), 'numpy.dot', 'np.dot', (['logits', 'W'], {}), '(logits, W)\n', (8513, 8524), True, 'import numpy as np\n'), ((13547, 13591), 'keras.layers.Lambda', 'Lambda', (['self._logFunc'], {'input_shape': '[classes]'}), '(self._logFunc, input_shape=[classes])\n', (13553, 13591), False, 'from keras.layers import Lambda\n'), ((34270, 34303), 'numpy.array', 'np.array', (['(y_val == k)'], {'dtype': '"""int"""'}), "(y_val == k, dtype='int')\n", (34278, 34303), True, 'import numpy as np\n'), ((13701, 13736), 'keras.initializers.Identity', 'keras.initializers.Identity', ([], {'gain': '(1)'}), '(gain=1)\n', (13728, 13736), False, 'import keras\n'), ((13875, 13907), 'keras.regularizers.l2', 'keras.regularizers.l2', ([], {'l': 'self.mu'}), '(l=self.mu)\n', (13896, 13907), False, 'import keras\n'), ((14055, 14090), 'keras.initializers.Identity', 
'keras.initializers.Identity', ([], {'gain': '(1)'}), '(gain=1)\n', (14082, 14090), False, 'import keras\n'), ((14229, 14261), 'keras.regularizers.l2', 'keras.regularizers.l2', ([], {'l': 'self.mu'}), '(l=self.mu)\n', (14250, 14261), False, 'import keras\n'), ((19014, 19033), 'keras.backend.square', 'K.square', (['x[0:i, i]'], {}), '(x[0:i, i])\n', (19022, 19033), True, 'import keras.backend as K\n'), ((19074, 19096), 'keras.backend.square', 'K.square', (['x[i + 1:, i]'], {}), '(x[i + 1:, i])\n', (19082, 19096), True, 'import keras.backend as K\n')] |
# pythonpath modification to make hytra available
# for import without requiring it to be installed
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# standard imports
import logging
import time
import h5py
import numpy as np
import configargparse
import hytra.core.hypothesesgraph as hypothesesgraph
import hytra.core.ilastikhypothesesgraph as ilastikhypothesesgraph
from hytra.pluginsystem.plugin_manager import TrackingPluginManager
import hytra.core.jsongraph
def getConfigAndCommandLineArguments():
    """Build the configargparse parser for the hypotheses-graph script and parse argv.

    Every flag can alternatively be supplied through a config file (-c/--config).
    Returns:
        (options, unknown): the namespace of recognized options and the list
        of command-line tokens parse_known_args() could not match.
    """
    parser = configargparse.ArgumentParser(description="""
Given raw data, segmentation, and trained classifiers,
this script creates a hypotheses graph to be used with the tracking tools. """,
                                  formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path')
    parser.add_argument('--method', type=str, default='conservation',
                        help='conservation or conservation-dynprog')
    # --- anisotropy / voxel scaling ---
    parser.add_argument('--x-scale', type=float, dest='x_scale', default=1.)
    parser.add_argument('--y-scale', type=float, dest='y_scale', default=1.)
    parser.add_argument('--z-scale', type=float, dest='z_scale', default=1.)
    parser.add_argument('--random-forest', type=str, dest='rf_fn', default=None,
                        help='use cellness prediction instead of indicator function for (mis-)detection energy')
    parser.add_argument('-f', '--forbidden_cost', type=float, dest='forbidden_cost', default=0,
                        help='forbidden cost')
    # --- time range and object size filtering ---
    parser.add_argument('--min-ts', type=int, dest='mints', default=0)
    parser.add_argument('--max-ts', type=int, dest='maxts', default=-1)
    parser.add_argument('--min-size', type=int, dest='minsize', default=0,
                        help='minimal size of objects to be tracked')
    parser.add_argument('--max-size', type=int, dest='maxsize', default=100000,
                        help='maximal size of objects to be tracked')
    # --- traxelstore / raw data I/O ---
    parser.add_argument('--dump-traxelstore', type=str, dest='dump_traxelstore', default=None,
                        help='dump traxelstore to file')
    parser.add_argument('--load-traxelstore', type=str, dest='load_traxelstore', default=None,
                        help='load traxelstore from file')
    parser.add_argument('--raw-data-file', type=str, dest='raw_filename', default=None,
                        help='filename to the raw h5 file')
    parser.add_argument('--raw-data-path', type=str, dest='raw_path', default='volume/data',
                        help='Path inside the raw h5 file to the data')
    parser.add_argument("--raw-data-axes", dest='raw_axes', type=str, default='txyzc',
                        help="axes ordering of the produced raw image, e.g. xyztc.")
    parser.add_argument('--dump-hypotheses-graph', type=str, dest='hypotheses_graph_filename', default=None,
                        help='save hypotheses graph so it can be loaded later')
    parser.add_argument('--label-image-file', type=str, dest='label_image_file', default=None,
                        help='if a label image separate to the one in the ILP should be used, set it here')
    parser.add_argument('--label-image-path', dest='label_img_path', type=str,
                        default='/TrackingFeatureExtraction/LabelImage/0000/[[%d, 0, 0, 0, 0], [%d, %d, %d, %d, 1]]',
                        help='internal hdf5 path to label image')
    parser.add_argument('--image-provider', type=str, dest='image_provider_name', default="LocalImageLoader")
    parser.add_argument('--graph-json-file', type=str, required=True, dest='json_filename', default=None,
                        help='filename where to save the generated JSON file to')
    # --- hypotheses graph construction ---
    parser.add_argument('--max-number-objects', dest='max_num_objects', type=float, default=2,
                        help='Give maximum number of objects one connected component may consist of')
    parser.add_argument('--max-neighbor-distance', dest='mnd', type=float, default=200)
    parser.add_argument('--max-nearest-neighbors', dest='max_nearest_neighbors', type=int, default=1)
    parser.add_argument('--division-threshold', dest='division_threshold', type=float, default=0.1)
    # detection_rf_filename in general parser options
    parser.add_argument('--size-dependent-detection-prob', dest='size_dependent_detection_prob', action='store_true')
    # forbidden_cost in general parser options
    # --- optimization / tracking model parameters ---
    parser.add_argument('--ep_gap', type=float, dest='ep_gap', default=0.01,
                        help='stop optimization as soon as a feasible integer solution is found proved to be within the given percent of the optimal solution')
    parser.add_argument('--average-obj-size', dest='avg_obj_size', type=float, default=0)
    parser.add_argument('--without-tracklets', dest='without_tracklets', action='store_true')
    parser.add_argument('--motion-model-weight', dest='motionModelWeight', type=float, default=0.0,
                        help='motion model weight')
    parser.add_argument('--without-divisions', dest='without_divisions', action='store_true')
    parser.add_argument('--means', dest='means', type=float, default=0.0,
                        help='means for detection')
    parser.add_argument('--sigma', dest='sigma', type=float, default=0.0,
                        help='sigma for detection')
    parser.add_argument('--with-merger-resolution', dest='with_merger_resolution', action='store_true', default=False)
    parser.add_argument('--without-constraints', dest='woconstr', action='store_true', default=False)
    parser.add_argument('--trans-par', dest='trans_par', type=float, default=5.0,
                        help='alpha for the transition prior')
    parser.add_argument('--border-width', dest='border_width', type=float, default=10.0,
                        help='absolute border margin in which the appearance/disappearance costs are linearly decreased')
    parser.add_argument('--ext-probs', dest='ext_probs', type=str, default=None,
                        help='provide a path to hdf5 files containing detection probabilities')
    # --- classifier file locations (internal hdf5 paths and filenames) ---
    parser.add_argument('--object-count-classifier-path', dest='obj_count_path', type=str,
                        default='/CountClassification/Probabilities/0/',
                        help='internal hdf5 path to object count probabilities')
    parser.add_argument('--object-count-classifier-file', dest='obj_count_file', type=str, default=None,
                        help='Filename of the HDF file containing the object count classifier. If None, will be taken from ILP')
    parser.add_argument('--division-classifier-path', dest='div_prob_path', type=str, default='/DivisionDetection/Probabilities/0/',
                        help='internal hdf5 path to division probabilities')
    parser.add_argument('--division-classifier-file', dest='div_file', type=str, default=None,
                        help='Filename of the HDF file containing the division classifier. If None, will be taken from ILP')
    parser.add_argument('--featsPath', dest='feats_path', type=str,
                        default='/TrackingFeatureExtraction/RegionFeaturesVigra/0000/[[%d], [%d]]/Default features/%s',
                        help='internal hdf5 path to object features')
    parser.add_argument('--transition-classifier-file', dest='transition_classifier_filename', type=str,
                        default=None)
    parser.add_argument('--transition-classifier-path', dest='transition_classifier_path', type=str, default='/')
    # --- execution / misc ---
    parser.add_argument('--disable-multiprocessing', dest='disableMultiprocessing', action='store_true',
                        help='Do not use multiprocessing to speed up computation',
                        default=False)
    parser.add_argument('--turn-off-features', dest='turnOffFeatures', type=str, nargs='+', default=[])
    parser.add_argument('--skip-links', dest='skipLinks', type=int, default=1)
    parser.add_argument('--skip-links-bias', dest='skipLinksBias', type=int, default=20)
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Turn on verbose logging', default=False)
    # NOTE(review): the plugin-path default is resolved relative to the current
    # working directory at import time — confirm callers run from the expected dir.
    parser.add_argument('--plugin-paths', dest='pluginPaths', type=str, nargs='+',
                        default=[os.path.abspath('../hytra/plugins')],
                        help='A list of paths to search for plugins for the tracking pipeline.')
    # parse_known_args lets unrecognized flags pass through to the caller.
    options, unknown = parser.parse_known_args()
    return options, unknown
def generate_traxelstore(h5file,
options,
feature_path,
time_range,
x_range,
y_range,
z_range,
size_range,
x_scale=1.0,
y_scale=1.0,
z_scale=1.0,
with_div=True,
median_object_size=None,
max_traxel_id_at=None,
with_merger_prior=True,
max_num_mergers=1,
ext_probs=None
):
"""
Legacy way of creating the "traxelstore", that can handle the old drosophila and rapoport
ilastik project files and load the stored probabilities.
"""
logging.getLogger('hypotheses_graph_to_json.py').info("generating traxels")
logging.getLogger('hypotheses_graph_to_json.py').info("filling traxelstore")
try:
import pgmlink as track
ts = track.TraxelStore()
fs = track.FeatureStore()
max_traxel_id_at = track.VectorOfInt()
withPgmlink = True
except:
import hytra.core.probabilitygenerator as track
withPgmlink = False
ts, fs = None, None
max_traxel_id_at = []
logging.getLogger('hypotheses_graph_to_json.py').info("fetching region features and division probabilities")
logging.getLogger('hypotheses_graph_to_json.py').debug("region features path: {}".format(options.obj_count_path))
logging.getLogger('hypotheses_graph_to_json.py').debug("division features path: {}".format(options.div_prob_path))
logging.getLogger('hypotheses_graph_to_json.py').debug("{}, {}".format(h5file.filename, feature_path))
detection_probabilities = []
division_probabilities = []
if with_div:
logging.getLogger('hypotheses_graph_to_json.py').debug(options.div_prob_path)
divProbs = h5file[options.div_prob_path]
if with_merger_prior:
detProbs = h5file[options.obj_count_path]
if x_range is None:
x_range = [0, sys.maxint]
if y_range is None:
y_range = [0, sys.maxint]
if z_range is None:
z_range = [0, sys.maxint]
shape_t = len(h5file[options.obj_count_path].keys())
keys_sorted = range(shape_t)
if time_range is not None:
if time_range[1] == -1:
time_range[1] = shape_t
keys_sorted = [key for key in keys_sorted if time_range[0] <= int(key) < time_range[1]]
else:
time_range = (0, shape_t)
# use this as Traxelstore dummy if we're not using pgmlink
if not withPgmlink:
class TSDummy:
traxels = []
def bounding_box(self):
return [time_range[0], 0,0,0, time_range[1], 1,1,1]
def add(self, fs, traxel):
self.traxels.append(traxel)
ts = TSDummy()
filtered_labels = {}
obj_sizes = []
total_count = 0
empty_frame = False
for t in keys_sorted:
feats_name = options.feats_path % (t, t + 1, 'RegionCenter')
# region_centers = np.array(feats[t]['0']['RegionCenter'])
region_centers = np.array(h5file[feats_name])
feats_name = options.feats_path % (t, t + 1, 'Coord<Minimum>')
lower = np.array(h5file[feats_name])
feats_name = options.feats_path % (t, t + 1, 'Coord<Maximum>')
upper = np.array(h5file[feats_name])
if region_centers.size:
region_centers = region_centers[1:, ...]
lower = lower[1:, ...]
upper = upper[1:, ...]
feats_name = options.feats_path % (t, t + 1, 'Count')
# pixel_count = np.array(feats[t]['0']['Count'])
pixel_count = np.array(h5file[feats_name])
if pixel_count.size:
pixel_count = pixel_count[1:, ...]
logging.getLogger('hypotheses_graph_to_json.py').info("at timestep {}, {} traxels found".format(t, region_centers.shape[0]))
count = 0
filtered_labels[t] = []
for idx in range(region_centers.shape[0]):
if len(region_centers[idx]) == 2:
x, y = region_centers[idx]
z = 0
elif len(region_centers[idx]) == 3:
x, y, z = region_centers[idx]
else:
raise Exception("The RegionCenter feature must have dimensionality 2 or 3.")
size = pixel_count[idx]
if (x < x_range[0] or x >= x_range[1] or
y < y_range[0] or y >= y_range[1] or
z < z_range[0] or z >= z_range[1] or
size < size_range[0] or size >= size_range[1]):
filtered_labels[t].append(int(idx + 1))
continue
else:
count += 1
traxel = track.Traxel()
if withPgmlink:
traxel.set_feature_store(fs)
traxel.set_x_scale(x_scale)
traxel.set_y_scale(y_scale)
traxel.set_z_scale(z_scale)
traxel.Id = int(idx + 1)
traxel.Timestep = int(t)
traxel.add_feature_array("com", 3)
for i, v in enumerate([x, y, z]):
traxel.set_feature_value('com', i, float(v))
if with_div:
traxel.add_feature_array("divProb", 1)
prob = 0.0
prob = float(divProbs[str(t)][idx + 1][1])
# idx+1 because region_centers and pixel_count start from 1, divProbs starts from 0
traxel.set_feature_value("divProb", 0, prob)
division_probabilities.append(prob)
if with_merger_prior and ext_probs is None:
traxel.add_feature_array("detProb", max_num_mergers + 1)
probs = []
for i in range(len(detProbs[str(t)][idx + 1])):
probs.append(float(detProbs[str(t)][idx + 1][i]))
probs[max_num_mergers] = sum(probs[max_num_mergers:])
for i in range(max_num_mergers + 1):
traxel.set_feature_value("detProb", i, float(probs[i]))
detection_probabilities.append([traxel.get_feature_value("detProb", i) for i in range(max_num_mergers + 1)])
traxel.add_feature_array("count", 1)
traxel.set_feature_value("count", 0, float(size))
if median_object_size is not None:
obj_sizes.append(float(size))
ts.add(fs, traxel)
logging.getLogger('hypotheses_graph_to_json.py').info("at timestep {}, {} traxels passed filter".format(t, count))
max_traxel_id_at.append(int(region_centers.shape[0]))
if count == 0:
empty_frame = True
total_count += count
if median_object_size is not None:
median_object_size[0] = np.median(np.array(obj_sizes), overwrite_input=True)
logging.getLogger('hypotheses_graph_to_json.py').info('median object size = {}'.format(median_object_size[0]))
return ts, fs, max_traxel_id_at, division_probabilities, detection_probabilities
def getH5Dataset(h5group, ds_name):
    """Return dataset `ds_name` from an HDF5 group as a numpy array.

    Parameters
    ----------
    h5group : h5py.Group (or dict-like)
        Group to look the dataset up in.
    ds_name : str
        Name of the dataset within the group.

    Returns
    -------
    np.ndarray
        The dataset contents, or an empty array if the dataset is missing.
    """
    # membership test directly on the group — `in h5group.keys()` was redundant
    if ds_name in h5group:
        return np.array(h5group[ds_name])
    return np.array([])
def getRegionFeatures(ndim):
    """Return the list of (feature name, dimensionality) pairs extracted per
    region, for image data of the given spatial dimension `ndim`."""
    featureNames = [
        "RegionCenter", "Count", "Variance", "Sum", "Mean", "RegionRadii",
        "Central< PowerSum<2> >", "Central< PowerSum<3> >",
        "Central< PowerSum<4> >", "Kurtosis", "Maximum", "Minimum",
        "RegionAxes", "Skewness", "Weighted<PowerSum<0> >",
        "Coord< Minimum >", "Coord< Maximum >",
    ]
    # per-feature vector lengths, aligned with featureNames above
    featureSizes = [ndim, 1, 1, 1, 1, ndim, 1, 1, 1, 1, 1, 1,
                    ndim ** 2, 1, 1, ndim, ndim]
    return list(zip(featureNames, featureSizes))
def getTraxelStore(options, ilp_fn, time_range, shape):
    """Load (or build) the traxelstore for the given ilastik project file.

    Either unpickles a previously dumped traxelstore (options.load_traxelstore)
    or generates one from the probabilities stored in the project file via
    generate_traxelstore(). Optionally dumps the result to disk again
    (options.dump_traxelstore).

    Returns
    -------
    (ts, fs, max_traxel_id_at, ndim, t0, t1)
        Traxel store, feature store, per-frame maximum traxel ids, spatial
        dimensionality (2 or 3), and the first/last timestep found.
    """
    max_traxel_id_at = []
    with h5py.File(ilp_fn, 'r') as h5file:
        ndim = 3
        logging.getLogger('hypotheses_graph_to_json.py').debug('/'.join(options.label_img_path.strip('/').split('/')[:-1]))
        # a singleton 4th axis on the label image indicates 2D data
        if list(h5file['/'.join(options.label_img_path.strip('/').split('/')[:-1])].values())[0].shape[3] == 1:
            ndim = 2
        logging.getLogger('hypotheses_graph_to_json.py').debug('ndim={}'.format(ndim))
        logging.getLogger('hypotheses_graph_to_json.py').info("Time Range: {}".format(time_range))
        if options.load_traxelstore:
            logging.getLogger('hypotheses_graph_to_json.py').info('loading traxelstore from file')
            import pickle
            # the dump file holds three consecutive pickles: ts, fs, max ids
            with open(options.load_traxelstore, 'rb') as ts_in:
                ts = pickle.load(ts_in)
                fs = pickle.load(ts_in)
                max_traxel_id_at = pickle.load(ts_in)
                ts.set_feature_store(fs)
            info = [int(x) for x in ts.bounding_box()]
            t0, t1 = (info[0], info[4])
            # loaded store may cover a different time span than requested:
            # trim it down to the configured field of view
            if info[0] != options.mints or (options.maxts != -1 and info[4] != options.maxts - 1):
                if options.maxts == -1:
                    options.maxts = info[4] + 1
                logging.getLogger('hypotheses_graph_to_json.py').warning("Traxelstore has different time range than requested FOV. Trimming traxels...")
                fov = getFovFromOptions(options, shape, t0, t1)
                fov.set_time_bounds(options.mints, options.maxts - 1)
                # NOTE(review): `track` is not imported in this function's scope;
                # this branch would raise NameError unless pgmlink is importable
                # and bound elsewhere — verify before relying on load_traxelstore.
                new_ts = track.TraxelStore(turnOffFeatures=options.turnOffFeatures)
                ts.filter_by_fov(new_ts, fov)
                ts = new_ts
        else:
            max_num_mer = int(options.max_num_objects)
            # NOTE(review): feature_path, with_div, obj_size and
            # with_merger_prior are read from module-level globals set in the
            # __main__ block — this function is not usable standalone.
            ts, fs, max_traxel_id_at, division_probabilities, detection_probabilities = generate_traxelstore(
                h5file=h5file,
                options=options,
                feature_path=feature_path,
                time_range=time_range,
                x_range=None,
                y_range=None,
                z_range=None,
                size_range=[options.minsize, options.maxsize],
                x_scale=options.x_scale,
                y_scale=options.y_scale,
                z_scale=options.z_scale,
                with_div=with_div,
                median_object_size=obj_size,
                max_traxel_id_at=max_traxel_id_at,
                with_merger_prior=with_merger_prior,
                max_num_mergers=max_num_mer,
                ext_probs=options.ext_probs
            )
            info = [int(x) for x in ts.bounding_box()]
            t0, t1 = (info[0], info[4])
        logging.getLogger('hypotheses_graph_to_json.py').info("-> Traxelstore bounding box: " + str(info))
        if options.dump_traxelstore:
            logging.getLogger('hypotheses_graph_to_json.py').info('dumping traxelstore to file')
            import pickle
            with open(options.dump_traxelstore, 'wb') as ts_out:
                pickle.dump(ts, ts_out)
                pickle.dump(fs, ts_out)
                pickle.dump(max_traxel_id_at, ts_out)
    return ts, fs, max_traxel_id_at, ndim, t0, t1
def getFovFromOptions(options, shape, t0, t1):
    """Build a pgmlink FieldOfView covering frames [t0, t1] and the scaled
    spatial extent of the dataset given by `shape`."""
    import pgmlink as track
    xshape, yshape, zshape = shape
    # upper spatial corner in world coordinates (per-axis scale applied)
    upperX = options.x_scale * (xshape - 1)
    upperY = options.y_scale * (yshape - 1)
    upperZ = options.z_scale * (zshape - 1)
    return track.FieldOfView(t0, 0, 0, 0, t1, upperX, upperY, upperZ)
def getPythonFovFromOptions(options, shape, t0, t1):
    """Build a python-side (hytra) FieldOfView covering frames [t0, t1] and
    the scaled spatial extent of the dataset given by `shape`."""
    from hytra.core.fieldofview import FieldOfView
    xshape, yshape, zshape = shape
    # lower corner is the origin; upper corner is scaled per axis
    return FieldOfView(t0, 0, 0, 0, t1,
                       options.x_scale * (xshape - 1),
                       options.y_scale * (yshape - 1),
                       options.z_scale * (zshape - 1))
def initializeConservationTracking(options, shape, t0, t1):
    """Create and configure a pgmlink ConsTracking instance.

    The two supported methods ('conservation' and 'conservation-dynprog')
    differ only in the solver backend, so the eleven-argument constructor
    call is shared instead of duplicated per branch.

    Returns
    -------
    (tracker, fov) : tuple
        The configured tracker and the field of view it operates on.

    Raises
    ------
    ValueError
        If options.method is neither 'conservation' nor 'conservation-dynprog'.
    """
    import pgmlink as track
    # a singleton last axis means 2D data
    ndim = 2 if shape[-1] == 1 else 3
    rf_fn = 'none'
    if options.rf_fn:
        rf_fn = options.rf_fn
    fov = getFovFromOptions(options, shape, t0, t1)
    if ndim == 2:
        [xshape, yshape, zshape] = shape
        assert options.z_scale * (zshape - 1) == 0, "fov of z must be (0,0) if ndim == 2"
    # only the solver backend differs between the two methods
    if options.method == 'conservation':
        solverType = track.ConsTrackingSolverType.CplexSolver
    elif options.method == 'conservation-dynprog':
        solverType = track.ConsTrackingSolverType.DynProgSolver
    else:
        raise ValueError("Must be conservation or conservation-dynprog")
    tracker = track.ConsTracking(int(options.max_num_objects),
                                 bool(options.size_dependent_detection_prob),
                                 options.avg_obj_size[0],
                                 options.mnd,
                                 not bool(options.without_divisions),
                                 options.division_threshold,
                                 str(rf_fn),
                                 fov,
                                 str("none"),
                                 solverType,
                                 ndim)
    return tracker, fov
def loadProbabilityGenerator(options,
                             ilpFilename,
                             objectCountClassifierPath=None,
                             divisionClassifierPath=None,
                             time_range=None,
                             usePgmlink=True,
                             featuresOnly=False):
    """
    Set up a python side traxel store: compute all features, but do not evaluate classifiers.

    Parameters
    ----------
    options : namespace
        Parsed configuration (ilastik paths, size filter, plugin settings).
    ilpFilename : str
        Ilastik project file to read label images (and classifiers) from.
    objectCountClassifierPath, divisionClassifierPath : str, optional
        HDF5-internal group paths of the classifiers (ignored if featuresOnly).
    time_range : tuple, optional
        (first, last) timestep to restrict the generator to.
    usePgmlink : bool
        Whether fillTraxels should return pgmlink traxel/feature stores.
    featuresOnly : bool
        If True, compute features but attach no classifiers.

    Returns
    -------
    (probGenerator, t, f)
        The probability generator plus the traxel/feature store pair, which
        are both None unless usePgmlink is True.
    """
    import hytra.core.probabilitygenerator as traxelstore
    from hytra.core.ilastik_project_options import IlastikProjectOptions
    ilpOptions = IlastikProjectOptions()
    ilpOptions.labelImagePath = options.label_img_path
    ilpOptions.rawImagePath = options.raw_path
    ilpOptions.rawImageFilename = options.raw_filename
    ilpOptions.rawImageAxes = options.raw_axes
    ilpOptions.sizeFilter = [options.minsize, options.maxsize]
    if options.label_image_file is not None:
        ilpOptions.labelImageFilename = options.label_image_file
    else:
        ilpOptions.labelImageFilename = ilpFilename
    if featuresOnly:
        ilpOptions.objectCountClassifierFilename = None
        ilpOptions.divisionClassifierFilename = None
    else:
        ilpOptions.objectCountClassifierPath = objectCountClassifierPath
        ilpOptions.divisionClassifierPath = divisionClassifierPath
        # fall back to the project file itself when no separate file is given
        # (PEP 8: identity comparison against None, not `!=`)
        if options.obj_count_file is not None:
            ilpOptions.objectCountClassifierFilename = options.obj_count_file
        else:
            ilpOptions.objectCountClassifierFilename = ilpFilename
        if options.div_file is not None:
            ilpOptions.divisionClassifierFilename = options.div_file
        else:
            ilpOptions.divisionClassifierFilename = ilpFilename
    probGenerator = traxelstore.IlpProbabilityGenerator(ilpOptions,
                                                        turnOffFeatures=options.turnOffFeatures,
                                                        pluginPaths=options.pluginPaths,
                                                        useMultiprocessing=not options.disableMultiprocessing)
    if time_range is not None:
        probGenerator.timeRange = time_range
    a = probGenerator.fillTraxels(usePgmlink=usePgmlink, turnOffFeatures=options.turnOffFeatures)
    if usePgmlink:
        t, f = a
    else:
        t = None
        f = None
    return probGenerator, t, f
def loadTransitionClassifier(transitionClassifierFilename, transitionClassifierPath):
    """
    Load a transition classifier random forest from a HDF5 file.
    """
    import hytra.core.probabilitygenerator as probgen
    # note the argument order: (internal HDF5 path, file name)
    return probgen.RandomForestClassifier(transitionClassifierPath, transitionClassifierFilename)
def getDetectionFeatures(traxel, max_state):
    """Return the traxel's 'detProb' feature vector, one entry per possible
    object-count state (0..max_state-1)."""
    detFeatures = hypothesesgraph.getTraxelFeatureVector(traxel, "detProb", max_state)
    return detFeatures
def getDivisionFeatures(traxel):
    """Return [P(no division), P(division)] read from the traxel's
    'divProb' feature."""
    divProb = hypothesesgraph.getTraxelFeatureVector(traxel, "divProb", 1)[0]
    return [1.0 - divProb, divProb]
def getTransitionFeaturesDist(traxelA, traxelB, transitionParam, max_state):
    """
    Get the transition probabilities based on the object's distance
    """
    posA = np.array([traxelA.X(), traxelA.Y(), traxelA.Z()])
    posB = np.array([traxelB.X(), traxelB.Y(), traxelB.Z()])
    # exponential decay of the move probability with Euclidean distance
    distance = np.linalg.norm(posA - posB)
    moveProb = np.exp(-distance / transitionParam)
    # state 0 = no transition; all positive states share the same probability
    return [1.0 - moveProb] + [moveProb] * (max_state - 1)
def getTransitionFeaturesRF(traxelA, traxelB, transitionClassifier, probGenerator, max_state):
    """
    Get the transition probabilities by predicting them with the classifier
    """
    featsA = probGenerator.getTraxelFeatureDict(traxelA.Timestep, traxelA.Id)
    featsB = probGenerator.getTraxelFeatureDict(traxelB.Timestep, traxelB.Id)
    featVec = probGenerator.getTransitionFeatureVector(featsA, featsB, transitionClassifier.selectedFeatures)
    predicted = transitionClassifier.predictProbabilities(featVec)[0]
    # state 0 = no transition; all positive states share the same probability
    return [predicted[0]] + [predicted[1]] * (max_state - 1)
def getBoundaryCostMultiplier(traxel, fov, margin, t0, t1, forAppearance):
    """Scale factor for appearance/disappearance cost: 0 at the temporal
    borders, a linear ramp within `margin` of the spatial border, 1 elsewhere."""
    # appearing in the first frame / disappearing in the last frame is free
    atOpenTemporalBorder = ((forAppearance and traxel.Timestep <= t0) or
                            (not forAppearance and traxel.Timestep >= t1 - 1))
    if atOpenTemporalBorder:
        return 0.0
    distance = fov.spatial_distance_to_border(traxel.Timestep, traxel.X(), traxel.Y(), traxel.Z(), False)
    # outside the margin band (or with a non-positive margin) the full cost applies
    if margin <= 0 or distance > margin:
        return 1.0
    return float(distance) / margin
def getHypothesesGraphAndIterators(options, shape, t0, t1, ts, probGenerator, transitionClassifier=None, skipLinks=1, skipLinksBias=20):
    """
    Build the hypotheses graph either using pgmlink, or from the python traxelstore in python

    The python path is taken when `probGenerator` is not None; otherwise
    pgmlink builds the graph from the traxelstore `ts`.

    Returns
    -------
    (hypotheses_graph, n_it, a_it, fov)
        The (possibly tracklet-contracted) graph, its node and arc
        iterators, and the field of view it was built for.
    """
    if probGenerator is not None:
        logging.getLogger('hypotheses_graph_to_json.py').info("Building python hypotheses graph")
        fov = getPythonFovFromOptions(options, shape, t0, t1)
        maxNumObjects = int(options.max_num_objects)
        margin = float(options.border_width)
        hypotheses_graph = ilastikhypothesesgraph.IlastikHypothesesGraph(
            probGenerator,
            [t0, t1],
            maxNumObjects=maxNumObjects,
            numNearestNeighbors=options.max_nearest_neighbors,
            fieldOfView=fov,
            divisionThreshold=options.division_threshold,
            withDivisions=not options.without_divisions,
            borderAwareWidth=margin,
            maxNeighborDistance=options.mnd,
            transitionParameter=options.trans_par,
            transitionClassifier=transitionClassifier,
            skipLinks=skipLinks,
            skipLinksBias=skipLinksBias)
        if not options.without_tracklets:
            # contract chains of unambiguous links into tracklets
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()
        n_it = hypotheses_graph.nodeIterator()
        a_it = hypotheses_graph.arcIterator()
    else:
        import pgmlink as track
        logging.getLogger('hypotheses_graph_to_json.py').info("Building pgmlink hypotheses graph")
        # initialize tracker to get hypotheses graph
        tracker, fov = initializeConservationTracking(options, shape, t0, t1)
        hypotheses_graph = tracker.buildGraph(ts, options.max_nearest_neighbors)
        # create tracklet graph if necessary
        if not options.without_tracklets:
            hypotheses_graph = hypotheses_graph.generate_tracklet_graph()
        n_it = track.NodeIt(hypotheses_graph)
        a_it = track.ArcIt(hypotheses_graph)
    return hypotheses_graph, n_it, a_it, fov
def insertProbsIntoProbabilityGenerator(options, probGenerator, ts):
    """Copy detection (and, unless divisions are disabled, division)
    probabilities from the traxelstore into the probability generator's
    per-frame traxels."""
    # the feature list is the same for every traxel, so build it once
    featNames = ['detProb']
    if not options.without_divisions:
        featNames.append('divProb')
    for trx in ts.traxels:
        target = probGenerator.TraxelsPerFrame[trx.Timestep][trx.Id]
        for name in featNames:
            target.Features[name] = trx.Features[name]
def loadTraxelstoreAndTransitionClassifier(options, ilp_fn, time_range, shape):
    """
    Load traxelstore either from ilp or by computing raw features,
    loading random forests, and predicting for each object.
    Also load the transition classifier if raw features were computed and a transitionClassifierFile is given

    Returns
    -------
    (ts, fs, ndim, t0, t1, probGenerator, transitionClassifier)
        probGenerator and transitionClassifier are None when raw data /
        a classifier file are not configured.
    """
    transitionClassifier = None
    try:
        ts, fs, _, ndim, t0, t1 = getTraxelStore(options, ilp_fn, time_range, shape)
        time_range = t0, t1
        foundDetectionProbabilities = True
    except Exception as e:
        print("{}: {}".format(type(e), e))
        foundDetectionProbabilities = False
        logging.getLogger('hypotheses_graph_to_json.py').warning("could not load detection (and/or division) probabilities from ilastik project file")
    # PEP 8: identity comparison against None, not `!=`
    if options.raw_filename is not None:
        if foundDetectionProbabilities:
            # probabilities came from the traxelstore; only compute features
            probGenerator, _, _ = loadProbabilityGenerator(options, ilp_fn, time_range=time_range, usePgmlink=False, featuresOnly=True)
            insertProbsIntoProbabilityGenerator(options, probGenerator, ts)
        else:
            # warning: assuming that the classifiers are top-level groups in HDF5
            objectCountClassifierPath = '/' + [t for t in options.obj_count_path.split('/') if len(t) > 0][0]
            if not options.without_divisions:
                divisionClassifierPath = '/' + [t for t in options.div_prob_path.split('/') if len(t) > 0][0]
            else:
                divisionClassifierPath = None
            probGenerator, ts, fs = loadProbabilityGenerator(options,
                                                             ilp_fn,
                                                             objectCountClassifierPath=objectCountClassifierPath,
                                                             divisionClassifierPath=divisionClassifierPath,
                                                             time_range=time_range,
                                                             usePgmlink=False)
            t0, t1 = probGenerator.timeRange
            ndim = probGenerator.getNumDimensions()
            foundDetectionProbabilities = True
        if options.transition_classifier_filename is not None:
            transitionClassifier = loadTransitionClassifier(options.transition_classifier_filename,
                                                            options.transition_classifier_path)
    else:
        probGenerator = None
    # plain assert statement (the previous `assert(...)` parentheses were
    # merely grouping, not a call)
    assert foundDetectionProbabilities, "could not determine detection probabilities"
    return ts, fs, ndim, t0, t1, probGenerator, transitionClassifier
if __name__ == "__main__":
    """
    Main loop of script
    """
    options, unknown = getConfigAndCommandLineArguments()
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger('hypotheses_graph_to_json.py').debug("Ignoring unknown parameters: {}".format(unknown))
    ilp_fn = options.label_image_file
    # Do the tracking
    start = time.time()
    # these module-level names are read by getTraxelStore() further up
    feature_path = options.feats_path
    with_div = not bool(options.without_divisions)
    with_merger_prior = True
    # get selected time range
    time_range = [options.mints, options.maxts]
    if options.maxts == -1 and options.mints == 0:
        time_range = None
    try:
        # find shape of dataset
        pluginManager = TrackingPluginManager(verbose=options.verbose, pluginPaths=options.pluginPaths)
        shape = pluginManager.getImageProvider().getImageShape(ilp_fn, options.label_img_path)
        data_time_range = pluginManager.getImageProvider().getTimeRange(ilp_fn, options.label_img_path)
        # negative end-of-range is relative to the dataset's last frame
        if time_range is not None and time_range[1] < 0:
            time_range[1] += data_time_range[1]
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider `except Exception` — verify before changing behavior
        logging.warning("Could not read shape and time range from images")
        shape=None
    # set average object size if chosen
    obj_size = [0]
    if options.avg_obj_size != 0:
        obj_size[0] = options.avg_obj_size
    else:
        options.avg_obj_size = obj_size
    # load traxelstore
    ts, fs, ndim, t0, t1, probGenerator, transitionClassifier = loadTraxelstoreAndTransitionClassifier(options, ilp_fn,
                                                                                                       time_range,
                                                                                                       shape)
    # build hypotheses graph
    hypotheses_graph, n_it, a_it, fov = getHypothesesGraphAndIterators(options, shape, t0, t1, ts, probGenerator, transitionClassifier, options.skipLinks, options.skipLinksBias)
    if probGenerator is None:
        # legacy pgmlink path: convert the pgmlink graph to the JSON model
        import pgmlink
        numElements = pgmlink.countNodes(hypotheses_graph) + pgmlink.countArcs(hypotheses_graph)
        # get the map of node -> list(traxel) or just traxel
        if options.without_tracklets:
            traxelMap = hypotheses_graph.getNodeTraxelMap()
        else:
            traxelMap = hypotheses_graph.getNodeTrackletMap()
        maxNumObjects = int(options.max_num_objects)
        margin = float(options.border_width)
        # per-node / per-arc probability callbacks used by the converter below
        def detectionProbabilityFunc(traxel):
            return getDetectionFeatures(traxel, maxNumObjects + 1)
        def transitionProbabilityFunc(srcTraxel, destTraxel):
            if transitionClassifier is None:
                return getTransitionFeaturesDist(srcTraxel, destTraxel, options.trans_par, maxNumObjects + 1)
            else:
                return getTransitionFeaturesRF(srcTraxel, destTraxel, transitionClassifier, probGenerator, maxNumObjects + 1)
        def boundaryCostMultiplierFunc(traxel, forAppearance):
            return getBoundaryCostMultiplier(traxel, fov, margin, t0, t1, forAppearance)
        def divisionProbabilityFunc(traxel):
            # returns None (= no division hypothesis) below threshold or on error
            try:
                divisionFeatures = getDivisionFeatures(traxel)
                if divisionFeatures[0] > options.division_threshold:
                    divisionFeatures = list(divisionFeatures)
                else:
                    divisionFeatures = None
            except Exception as e:
                print(e)
                divisionFeatures = None
            return divisionFeatures
        trackingGraph = ilastikhypothesesgraph.convertLegacyHypothesesGraphToJsonGraph(
            hypotheses_graph,
            n_it,
            a_it,
            not options.without_tracklets,
            maxNumObjects,
            numElements,
            traxelMap,
            detectionProbabilityFunc,
            transitionProbabilityFunc,
            boundaryCostMultiplierFunc,
            divisionProbabilityFunc)
    else:
        # python path: energies are attached directly to the graph
        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    trackingGraph.model['settings']['optimizerEpGap'] = options.ep_gap
    # write everything to JSON
    hytra.core.jsongraph.writeToFormattedJSON(options.json_filename, trackingGraph.model)
| [
"logging.getLogger",
"hytra.core.ilastikhypothesesgraph.IlastikHypothesesGraph",
"hytra.pluginsystem.plugin_manager.TrackingPluginManager",
"numpy.array",
"pgmlink.countArcs",
"hytra.core.probabilitygenerator.ArcIt",
"hytra.core.fieldofview.FieldOfView",
"numpy.linalg.norm",
"configargparse.Argument... | [((141, 162), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (156, 162), False, 'import os\n'), ((533, 802), 'configargparse.ArgumentParser', 'configargparse.ArgumentParser', ([], {'description': '""" \n Given raw data, segmentation, and trained classifiers,\n this script creates a hypotheses graph to be used with the tracking tools. """', 'formatter_class': 'configargparse.ArgumentDefaultsHelpFormatter'}), '(description=\n """ \n Given raw data, segmentation, and trained classifiers,\n this script creates a hypotheses graph to be used with the tracking tools. """\n , formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n', (562, 802), False, 'import configargparse\n'), ((15821, 15833), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15829, 15833), True, 'import numpy as np\n'), ((19777, 19912), 'hytra.core.probabilitygenerator.FieldOfView', 'track.FieldOfView', (['t0', '(0)', '(0)', '(0)', 't1', '(options.x_scale * (xshape - 1))', '(options.y_scale * (yshape - 1))', '(options.z_scale * (zshape - 1))'], {}), '(t0, 0, 0, 0, t1, options.x_scale * (xshape - 1), options.\n y_scale * (yshape - 1), options.z_scale * (zshape - 1))\n', (19794, 19912), True, 'import hytra.core.probabilitygenerator as track\n'), ((20105, 20234), 'hytra.core.fieldofview.FieldOfView', 'FieldOfView', (['t0', '(0)', '(0)', '(0)', 't1', '(options.x_scale * (xshape - 1))', '(options.y_scale * (yshape - 1))', '(options.z_scale * (zshape - 1))'], {}), '(t0, 0, 0, 0, t1, options.x_scale * (xshape - 1), options.\n y_scale * (yshape - 1), options.z_scale * (zshape - 1))\n', (20116, 20234), False, 'from hytra.core.fieldofview import FieldOfView\n'), ((22755, 22778), 'hytra.core.ilastik_project_options.IlastikProjectOptions', 'IlastikProjectOptions', ([], {}), '()\n', (22776, 22778), False, 'from hytra.core.ilastik_project_options import IlastikProjectOptions\n'), ((23907, 24093), 'hytra.core.probabilitygenerator.IlpProbabilityGenerator', 
'traxelstore.IlpProbabilityGenerator', (['ilpOptions'], {'turnOffFeatures': 'options.turnOffFeatures', 'pluginPaths': 'options.pluginPaths', 'useMultiprocessing': '(not options.disableMultiprocessing)'}), '(ilpOptions, turnOffFeatures=options.\n turnOffFeatures, pluginPaths=options.pluginPaths, useMultiprocessing=\n not options.disableMultiprocessing)\n', (23942, 24093), True, 'import hytra.core.probabilitygenerator as traxelstore\n'), ((24739, 24833), 'hytra.core.probabilitygenerator.RandomForestClassifier', 'traxelstore.RandomForestClassifier', (['transitionClassifierPath', 'transitionClassifierFilename'], {}), '(transitionClassifierPath,\n transitionClassifierFilename)\n', (24773, 24833), True, 'import hytra.core.probabilitygenerator as traxelstore\n'), ((24902, 24970), 'hytra.core.hypothesesgraph.getTraxelFeatureVector', 'hypothesesgraph.getTraxelFeatureVector', (['traxel', '"""detProb"""', 'max_state'], {}), "(traxel, 'detProb', max_state)\n", (24940, 24970), True, 'import hytra.core.hypothesesgraph as hypothesesgraph\n'), ((25363, 25406), 'numpy.linalg.norm', 'np.linalg.norm', (['(positions[0] - positions[1])'], {}), '(positions[0] - positions[1])\n', (25377, 25406), True, 'import numpy as np\n'), ((25418, 25449), 'numpy.exp', 'np.exp', (['(-dist / transitionParam)'], {}), '(-dist / transitionParam)\n', (25424, 25449), True, 'import numpy as np\n'), ((31870, 31881), 'time.time', 'time.time', ([], {}), '()\n', (31879, 31881), False, 'import time\n'), ((9615, 9634), 'hytra.core.probabilitygenerator.TraxelStore', 'track.TraxelStore', ([], {}), '()\n', (9632, 9634), True, 'import hytra.core.probabilitygenerator as track\n'), ((9648, 9668), 'hytra.core.probabilitygenerator.FeatureStore', 'track.FeatureStore', ([], {}), '()\n', (9666, 9668), True, 'import hytra.core.probabilitygenerator as track\n'), ((9696, 9715), 'hytra.core.probabilitygenerator.VectorOfInt', 'track.VectorOfInt', ([], {}), '()\n', (9713, 9715), True, 'import hytra.core.probabilitygenerator as 
track\n'), ((11782, 11810), 'numpy.array', 'np.array', (['h5file[feats_name]'], {}), '(h5file[feats_name])\n', (11790, 11810), True, 'import numpy as np\n'), ((11899, 11927), 'numpy.array', 'np.array', (['h5file[feats_name]'], {}), '(h5file[feats_name])\n', (11907, 11927), True, 'import numpy as np\n'), ((12015, 12043), 'numpy.array', 'np.array', (['h5file[feats_name]'], {}), '(h5file[feats_name])\n', (12023, 12043), True, 'import numpy as np\n'), ((12342, 12370), 'numpy.array', 'np.array', (['h5file[feats_name]'], {}), '(h5file[feats_name])\n', (12350, 12370), True, 'import numpy as np\n'), ((15782, 15808), 'numpy.array', 'np.array', (['h5group[ds_name]'], {}), '(h5group[ds_name])\n', (15790, 15808), True, 'import numpy as np\n'), ((16526, 16548), 'h5py.File', 'h5py.File', (['ilp_fn', '"""r"""'], {}), "(ilp_fn, 'r')\n", (16535, 16548), False, 'import h5py\n'), ((25017, 25077), 'hytra.core.hypothesesgraph.getTraxelFeatureVector', 'hypothesesgraph.getTraxelFeatureVector', (['traxel', '"""divProb"""', '(1)'], {}), "(traxel, 'divProb', 1)\n", (25055, 25077), True, 'import hytra.core.hypothesesgraph as hypothesesgraph\n'), ((27044, 27519), 'hytra.core.ilastikhypothesesgraph.IlastikHypothesesGraph', 'ilastikhypothesesgraph.IlastikHypothesesGraph', (['probGenerator', '[t0, t1]'], {'maxNumObjects': 'maxNumObjects', 'numNearestNeighbors': 'options.max_nearest_neighbors', 'fieldOfView': 'fov', 'divisionThreshold': 'options.division_threshold', 'withDivisions': '(not options.without_divisions)', 'borderAwareWidth': 'margin', 'maxNeighborDistance': 'options.mnd', 'transitionParameter': 'options.trans_par', 'transitionClassifier': 'transitionClassifier', 'skipLinks': 'skipLinks', 'skipLinksBias': 'skipLinksBias'}), '(probGenerator, [t0, t1],\n maxNumObjects=maxNumObjects, numNearestNeighbors=options.\n max_nearest_neighbors, fieldOfView=fov, divisionThreshold=options.\n division_threshold, withDivisions=not options.without_divisions,\n borderAwareWidth=margin, 
maxNeighborDistance=options.mnd,\n transitionParameter=options.trans_par, transitionClassifier=\n transitionClassifier, skipLinks=skipLinks, skipLinksBias=skipLinksBias)\n', (27089, 27519), True, 'import hytra.core.ilastikhypothesesgraph as ilastikhypothesesgraph\n'), ((28391, 28421), 'hytra.core.probabilitygenerator.NodeIt', 'track.NodeIt', (['hypotheses_graph'], {}), '(hypotheses_graph)\n', (28403, 28421), True, 'import hytra.core.probabilitygenerator as track\n'), ((28437, 28466), 'hytra.core.probabilitygenerator.ArcIt', 'track.ArcIt', (['hypotheses_graph'], {}), '(hypotheses_graph)\n', (28448, 28466), True, 'import hytra.core.probabilitygenerator as track\n'), ((31587, 31627), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (31606, 31627), False, 'import logging\n'), ((31646, 31685), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (31665, 31685), False, 'import logging\n'), ((32227, 32306), 'hytra.pluginsystem.plugin_manager.TrackingPluginManager', 'TrackingPluginManager', ([], {'verbose': 'options.verbose', 'pluginPaths': 'options.pluginPaths'}), '(verbose=options.verbose, pluginPaths=options.pluginPaths)\n', (32248, 32306), False, 'from hytra.pluginsystem.plugin_manager import TrackingPluginManager\n'), ((35099, 35380), 'hytra.core.ilastikhypothesesgraph.convertLegacyHypothesesGraphToJsonGraph', 'ilastikhypothesesgraph.convertLegacyHypothesesGraphToJsonGraph', (['hypotheses_graph', 'n_it', 'a_it', '(not options.without_tracklets)', 'maxNumObjects', 'numElements', 'traxelMap', 'detectionProbabilityFunc', 'transitionProbabilityFunc', 'boundaryCostMultiplierFunc', 'divisionProbabilityFunc'], {}), '(hypotheses_graph\n , n_it, a_it, not options.without_tracklets, maxNumObjects, numElements,\n traxelMap, detectionProbabilityFunc, transitionProbabilityFunc,\n boundaryCostMultiplierFunc, divisionProbabilityFunc)\n', (35161, 35380), True, 'import 
hytra.core.ilastikhypothesesgraph as ilastikhypothesesgraph\n'), ((9404, 9452), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (9421, 9452), False, 'import logging\n'), ((9484, 9532), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (9501, 9532), False, 'import logging\n'), ((9902, 9950), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (9919, 9950), False, 'import logging\n'), ((10015, 10063), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (10032, 10063), False, 'import logging\n'), ((10133, 10181), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (10150, 10181), False, 'import logging\n'), ((10252, 10300), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (10269, 10300), False, 'import logging\n'), ((13428, 13442), 'hytra.core.probabilitygenerator.Traxel', 'track.Traxel', ([], {}), '()\n', (13440, 13442), True, 'import hytra.core.probabilitygenerator as track\n'), ((15447, 15466), 'numpy.array', 'np.array', (['obj_sizes'], {}), '(obj_sizes)\n', (15455, 15466), True, 'import numpy as np\n'), ((31691, 31739), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (31708, 31739), False, 'import logging\n'), ((32641, 32707), 'logging.warning', 'logging.warning', (['"""Could not read shape and time range from images"""'], {}), "('Could not read shape and time range from images')\n", (32656, 32707), False, 'import logging\n'), ((33567, 33603), 'pgmlink.countNodes', 'pgmlink.countNodes', (['hypotheses_graph'], {}), '(hypotheses_graph)\n', (33585, 33603), 
False, 'import pgmlink\n'), ((33606, 33641), 'pgmlink.countArcs', 'pgmlink.countArcs', (['hypotheses_graph'], {}), '(hypotheses_graph)\n', (33623, 33641), False, 'import pgmlink\n'), ((8317, 8352), 'os.path.abspath', 'os.path.abspath', (['"""../hytra/plugins"""'], {}), "('../hytra/plugins')\n", (8332, 8352), False, 'import os\n'), ((10447, 10495), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (10464, 10495), False, 'import logging\n'), ((12456, 12504), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (12473, 12504), False, 'import logging\n'), ((15104, 15152), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (15121, 15152), False, 'import logging\n'), ((15498, 15546), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (15515, 15546), False, 'import logging\n'), ((16586, 16634), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (16603, 16634), False, 'import logging\n'), ((16844, 16892), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (16861, 16892), False, 'import logging\n'), ((16932, 16980), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (16949, 16980), False, 'import logging\n'), ((17271, 17289), 'pickle.load', 'pickle.load', (['ts_in'], {}), '(ts_in)\n', (17282, 17289), False, 'import pickle\n'), ((17311, 17329), 'pickle.load', 'pickle.load', (['ts_in'], {}), '(ts_in)\n', (17322, 17329), False, 'import pickle\n'), ((17365, 17383), 'pickle.load', 'pickle.load', (['ts_in'], {}), '(ts_in)\n', (17376, 17383), False, 'import pickle\n'), ((18020, 
18078), 'hytra.core.probabilitygenerator.TraxelStore', 'track.TraxelStore', ([], {'turnOffFeatures': 'options.turnOffFeatures'}), '(turnOffFeatures=options.turnOffFeatures)\n', (18037, 18078), True, 'import hytra.core.probabilitygenerator as track\n'), ((19141, 19189), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (19158, 19189), False, 'import logging\n'), ((19483, 19506), 'pickle.dump', 'pickle.dump', (['ts', 'ts_out'], {}), '(ts, ts_out)\n', (19494, 19506), False, 'import pickle\n'), ((19523, 19546), 'pickle.dump', 'pickle.dump', (['fs', 'ts_out'], {}), '(fs, ts_out)\n', (19534, 19546), False, 'import pickle\n'), ((19563, 19600), 'pickle.dump', 'pickle.dump', (['max_traxel_id_at', 'ts_out'], {}), '(max_traxel_id_at, ts_out)\n', (19574, 19600), False, 'import pickle\n'), ((26767, 26815), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (26784, 26815), False, 'import logging\n'), ((27910, 27958), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (27927, 27958), False, 'import logging\n'), ((17072, 17120), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (17089, 17120), False, 'import logging\n'), ((19290, 19338), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (19307, 19338), False, 'import logging\n'), ((29530, 29578), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (29547, 29578), False, 'import logging\n'), ((17724, 17772), 'logging.getLogger', 'logging.getLogger', (['"""hypotheses_graph_to_json.py"""'], {}), "('hypotheses_graph_to_json.py')\n", (17741, 17772), False, 'import logging\n')] |
from typing import Tuple, Union, Iterable, List, Callable, Dict, Optional
import os
import warnings
import numpy as np
from nnuncert.models._network import MakeNet
from nnuncert.models._model_base import BaseModel
from nnuncert.models._pred_base import BasePredKDE
from nnuncert.models.dnnc._dnnc import DNNCRidgeIWLS, DNNCHorseshoeIWLS
from nnuncert.models.dnnc._eval import DNNCDensity, DNNCEvaluate
from nnuncert.utils.dist import Dist
from nnuncert.utils.io import load_obj, save_obj
class DNNCModel(BaseModel):
    """Neural network with a DNNC (Bayesian linear) head.

    The underlying NN is fit with an MSE loss; its last hidden layer output
    (the "psi" data matrix) then serves as the design matrix for a ridge or
    horseshoe regression sampled via IWLS.
    """

    def __init__(self,
                 net: MakeNet,
                 dnnc_type: Optional[str] = "ridge",
                 *args, **kwargs):
        """Create a DNNC model.

        Parameters
        ----------
        net : MakeNet
            Network definition; must be configured for MSE (pred_var False).
        dnnc_type : Optional[str]
            Prior for the DNNC head, either "ridge" or "horseshoe".
        """
        assert net.pred_var is False, "DNNC requires NN fit with MSE."
        assert dnnc_type in ["ridge", "horseshoe"], \
            "'dnnc_type' must be ridge or horseshoe"
        super().__init__(net, *args, **kwargs)
        self.dnnc_type = dnnc_type
        self._model_paras["dnnc_type"] = dnnc_type

    def save(self, path: str, *args, **kwargs):
        """Save NN weights and a slimmed-down DNNC object to 'path'."""
        # save NN weights
        super().save(path, *args, **kwargs)
        # remove memory expensive attributes
        self.dnnc._set_means_only()  # drop samples for parameters betas, ...
        self.B = None  # design matrix; can be retrieved easily with training x
        # save dnnc data
        save_obj(self.dnnc, os.path.join(path, "dnnc_" + self.dnnc_type))

    def fit(self,
            x_train: np.ndarray,
            y_train: np.ndarray,
            dist: Dist,
            verbose: Optional[int] = 0,
            fit_z_train: Optional[bool] = True,
            set_dnnc: Optional[bool] = True,
            path: Optional[str] = None,
            dnnc_kwargs: Optional[dict] = None,
            *args, **kwargs):
        """Fit DNNC to data.

        Parameters
        ----------
        x_train : np.ndarray
        y_train : np.ndarray
        dist : Dist
            Estimated KDE of response.
        verbose : Optional[int]
        fit_z_train : Optional[bool] = True
            Whether to fit NN on targets transformed by F_Y, True by default. If
            False, NN will be fit to standardized targets.
        set_dnnc : Optional[bool] = True
            Whether to automatically run DNNC after the NN fit.
        path : Optional[str]
            If given, NN and DNNC data will be loaded from 'path', no fitting.
        dnnc_kwargs : Optional[dict]
            Parameters to pass to DNNC sampling.
        """
        # BUG FIX: the former mutable default ({}) was mutated below ('dist',
        # 'verbose' keys), leaking state across calls; copy instead.
        dnnc_kwargs = dict(dnnc_kwargs) if dnnc_kwargs else {}
        self._model_paras["fit_z_train"] = fit_z_train
        dnnc_kwargs["dist"] = dist
        dnnc_kwargs["verbose"] = verbose
        kwargs["verbose"] = verbose
        # load weights
        if path is not None:
            p = os.path.join(*[path, "variables", "variables"])
            self.load_weights(p).expect_partial()
        # fit NN to data
        else:
            # fit data on transformed z = F_y^(-1)(y)
            if fit_z_train is True:
                self.fitted_on = "z = transformed y"
                # get transformed targets and fit NN to it
                z_train = dist.get_z(y_train)
                super().fit(x_train, z_train, *args, **kwargs)
            # fit data on standardized y = (y_train - y_train_mean) / y_train_std
            else:
                self.fitted_on = "standardized y"
                y_train_std = y_train.std()
                y_train_mean = y_train.mean()
                super().fit(x_train, (y_train - y_train_mean) / y_train_std,
                            *args, **kwargs)
        # predict data matrix and get indices to be cleaned
        self.psi_clean, self.psi_idx = self.predict_psi(x_train)
        # if path is given, load dnnc
        if path is not None:
            self.dnnc = load_obj(os.path.join(path, "dnnc_" + self.dnnc_type))
            self.dnnc.B = self.psi_clean
        # compute dnnc
        else:
            if set_dnnc is True:
                if self.dnnc_type == "ridge":
                    self.dnnc = DNNCRidgeIWLS(self.psi_clean, y_train, **dnnc_kwargs)
                else:
                    self.dnnc = DNNCHorseshoeIWLS(self.psi_clean, y_train,
                                                   **dnnc_kwargs)
                # keep track of model parameters
                self._model_paras["J"] = self.dnnc.J
                if self.dnnc_type == "ridge":
                    self._model_paras["theta"] = self.dnnc.theta
                    self._model_paras["tau2start"] = self.dnnc.tau2start
                else:
                    self._model_paras["taustart"] = self.dnnc.taustart

    def predict_psi_test(self, x: np.ndarray):
        """Predict psi values for some test data.

        Psi values will be cleaned the same way as training data.
        """
        # predict hidden output layer values
        psi = super().predict_psi(x, clean=False)[0]
        # use only columns identified in training process
        psi = psi[:, self.psi_idx]
        return psi

    def make_prediction(self, x: np.ndarray, *args, **kwargs):
        """Get prediction object for x."""
        return DNNCPred(self, x, *args, **kwargs)
class DNNCRidge(DNNCModel):
    """DNNCModel preconfigured with the ridge prior."""

    def __init__(self, net: MakeNet, *args, **kwargs):
        # Forward everything, pinning the head type to "ridge".
        super().__init__(net, dnnc_type="ridge", *args, **kwargs)
class DNNCHorseshoe(DNNCModel):
    """DNNCModel preconfigured with the horseshoe prior (experimental)."""

    def __init__(self, net: MakeNet, *args, **kwargs):
        # The horseshoe head is known to be unstable; warn every caller.
        warnings.warn("DNNC horseshoe may not function properly")
        super().__init__(net, dnnc_type="horseshoe", *args, **kwargs)
class DNNCPred(BasePredKDE):
    """Posterior predictive object produced by DNNCModel.make_prediction.

    For each test input, evaluates the DNNC posterior predictive density on
    the KDE grid and stores its mean and variance.
    """

    def __init__(self,
                 model: DNNCModel,
                 x: np.ndarray,
                 psi_test: Optional[np.ndarray] = None):
        """Evaluate the DNNC posterior predictive at inputs 'x'.

        Parameters
        ----------
        model : DNNCModel
            Fitted model holding the sampled DNNC head.
        x : np.ndarray
            Test inputs.
        psi_test : Optional[np.ndarray]
            Precomputed hidden-layer data matrix for 'x'; computed from the
            model when omitted.
        """
        super(DNNCPred, self).__init__(x.shape[0])
        # predict data matrix for test data (unless supplied by the caller)
        psi = psi_test
        if psi_test is None:
            psi = model.predict_psi_test(x)
        # compute densities, expected value, variance
        dnnc = model.dnnc
        if model.dnnc_type == "ridge":
            self.eval = DNNCEvaluate(psi, dnnc.dist, betas=dnnc.betahat,
                                     tau2s=dnnc.tau2hat,
                                     dnnc_type=model.dnnc_type)
            self.dens = DNNCDensity(psi, dnnc.dist, betas=dnnc.betahat,
                                    tau2s=dnnc.tau2hat,
                                    dnnc_type=model.dnnc_type)
        elif model.dnnc_type == "horseshoe":
            self.eval = DNNCEvaluate(psi, dnnc.dist, betas=dnnc.betahat,
                                     taus=dnnc.tauhat, lambdas=dnnc.lambdahat,
                                     dnnc_type=model.dnnc_type)
            self.dens = DNNCDensity(psi, dnnc.dist, betas=dnnc.betahat,
                                    taus=dnnc.tauhat, lambdas=dnnc.lambdahat,
                                    dnnc_type=model.dnnc_type)
        self.mean = np.array(self.eval.Ey)
        self.y_var = np.array(self.eval.Vary)
        # predictive densities on the KDE grid; NaNs from numerical
        # underflow are zeroed in place
        py = np.exp(self.dens.lpy)
        np.nan_to_num(py, copy=False)
        # create proper prob. dist. for densities
        self.dists = [Dist._from_fx(dnnc.dist.x_linspace, p) for p in py]

    def log_score(self, y):
        """Compute the mean log predictive density of observations 'y'."""
        assert len(y) == self.xlen, "x, y length missmatch."
        # evaluate
        self.eval.score_at(y)
        # get log scores from eval
        self._log_scores = self.eval.lpy.diagonal()
        return self._log_scores.mean()

    def log_score_x(self, y, frac=0.99):
        """Mean of the largest 'frac' share of pointwise log densities.

        A robust variant of log_score that drops the worst (1 - frac)
        fraction of points.
        """
        # BUG FIX: pandas was used here but never imported at module level,
        # so this method raised NameError at call time; import locally.
        import pandas as pd
        N = int(len(y)*frac)
        # NOTE(review): relies on self.logpdf, presumably inherited from
        # BasePredKDE — confirm.
        return pd.Series(self.logpdf(y)).nlargest(N).mean()

    @property
    def var_aleatoric(self):
        """Split of variance not available."""
        return None

    @property
    def var_epistemic(self):
        """Split of variance not available."""
        return None

    @property
    def var_total(self):
        """Get total predictive variance."""
        return self.y_var

    @property
    def pred_mean(self):
        """Get predictive mean."""
        return self.mean
| [
"nnuncert.models.dnnc._dnnc.DNNCRidgeIWLS",
"nnuncert.utils.dist.Dist._from_fx",
"nnuncert.models.dnnc._eval.DNNCDensity",
"os.path.join",
"nnuncert.models.dnnc._dnnc.DNNCHorseshoeIWLS",
"numpy.exp",
"numpy.array",
"warnings.warn",
"nnuncert.models.dnnc._eval.DNNCEvaluate",
"numpy.nan_to_num"
] | [((5365, 5422), 'warnings.warn', 'warnings.warn', (['"""DNNC horseshoe may not function properly"""'], {}), "('DNNC horseshoe may not function properly')\n", (5378, 5422), False, 'import warnings\n'), ((6881, 6903), 'numpy.array', 'np.array', (['self.eval.Ey'], {}), '(self.eval.Ey)\n', (6889, 6903), True, 'import numpy as np\n'), ((6925, 6949), 'numpy.array', 'np.array', (['self.eval.Vary'], {}), '(self.eval.Vary)\n', (6933, 6949), True, 'import numpy as np\n'), ((6963, 6984), 'numpy.exp', 'np.exp', (['self.dens.lpy'], {}), '(self.dens.lpy)\n', (6969, 6984), True, 'import numpy as np\n'), ((6993, 7022), 'numpy.nan_to_num', 'np.nan_to_num', (['py'], {'copy': '(False)'}), '(py, copy=False)\n', (7006, 7022), True, 'import numpy as np\n'), ((1389, 1433), 'os.path.join', 'os.path.join', (['path', "('dnnc_' + self.dnnc_type)"], {}), "(path, 'dnnc_' + self.dnnc_type)\n", (1401, 1433), False, 'import os\n'), ((2742, 2789), 'os.path.join', 'os.path.join', (["*[path, 'variables', 'variables']"], {}), "(*[path, 'variables', 'variables'])\n", (2754, 2789), False, 'import os\n'), ((6026, 6125), 'nnuncert.models.dnnc._eval.DNNCEvaluate', 'DNNCEvaluate', (['psi', 'dnnc.dist'], {'betas': 'dnnc.betahat', 'tau2s': 'dnnc.tau2hat', 'dnnc_type': 'model.dnnc_type'}), '(psi, dnnc.dist, betas=dnnc.betahat, tau2s=dnnc.tau2hat,\n dnnc_type=model.dnnc_type)\n', (6038, 6125), False, 'from nnuncert.models.dnnc._eval import DNNCDensity, DNNCEvaluate\n'), ((6220, 6318), 'nnuncert.models.dnnc._eval.DNNCDensity', 'DNNCDensity', (['psi', 'dnnc.dist'], {'betas': 'dnnc.betahat', 'tau2s': 'dnnc.tau2hat', 'dnnc_type': 'model.dnnc_type'}), '(psi, dnnc.dist, betas=dnnc.betahat, tau2s=dnnc.tau2hat,\n dnnc_type=model.dnnc_type)\n', (6231, 6318), False, 'from nnuncert.models.dnnc._eval import DNNCDensity, DNNCEvaluate\n'), ((7096, 7134), 'nnuncert.utils.dist.Dist._from_fx', 'Dist._from_fx', (['dnnc.dist.x_linspace', 'p'], {}), '(dnnc.dist.x_linspace, p)\n', (7109, 7134), False, 'from nnuncert.utils.dist 
import Dist\n'), ((3782, 3826), 'os.path.join', 'os.path.join', (['path', "('dnnc_' + self.dnnc_type)"], {}), "(path, 'dnnc_' + self.dnnc_type)\n", (3794, 3826), False, 'import os\n'), ((6456, 6578), 'nnuncert.models.dnnc._eval.DNNCEvaluate', 'DNNCEvaluate', (['psi', 'dnnc.dist'], {'betas': 'dnnc.betahat', 'taus': 'dnnc.tauhat', 'lambdas': 'dnnc.lambdahat', 'dnnc_type': 'model.dnnc_type'}), '(psi, dnnc.dist, betas=dnnc.betahat, taus=dnnc.tauhat, lambdas=\n dnnc.lambdahat, dnnc_type=model.dnnc_type)\n', (6468, 6578), False, 'from nnuncert.models.dnnc._eval import DNNCDensity, DNNCEvaluate\n'), ((6672, 6793), 'nnuncert.models.dnnc._eval.DNNCDensity', 'DNNCDensity', (['psi', 'dnnc.dist'], {'betas': 'dnnc.betahat', 'taus': 'dnnc.tauhat', 'lambdas': 'dnnc.lambdahat', 'dnnc_type': 'model.dnnc_type'}), '(psi, dnnc.dist, betas=dnnc.betahat, taus=dnnc.tauhat, lambdas=\n dnnc.lambdahat, dnnc_type=model.dnnc_type)\n', (6683, 6793), False, 'from nnuncert.models.dnnc._eval import DNNCDensity, DNNCEvaluate\n'), ((4018, 4071), 'nnuncert.models.dnnc._dnnc.DNNCRidgeIWLS', 'DNNCRidgeIWLS', (['self.psi_clean', 'y_train'], {}), '(self.psi_clean, y_train, **dnnc_kwargs)\n', (4031, 4071), False, 'from nnuncert.models.dnnc._dnnc import DNNCRidgeIWLS, DNNCHorseshoeIWLS\n'), ((4126, 4183), 'nnuncert.models.dnnc._dnnc.DNNCHorseshoeIWLS', 'DNNCHorseshoeIWLS', (['self.psi_clean', 'y_train'], {}), '(self.psi_clean, y_train, **dnnc_kwargs)\n', (4143, 4183), False, 'from nnuncert.models.dnnc._dnnc import DNNCRidgeIWLS, DNNCHorseshoeIWLS\n')] |
"""Utils for Pfam family classification experiments."""
import os
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import sklearn.metrics as metrics
from sklearn.neighbors import KNeighborsClassifier as knn
from pkg_resources import resource_filename
from fs_gcsfs import GCSFS
from google_research.protein_lm import domains
from contextual_lenses.train_utils import create_data_iterator
from contextual_lenses.loss_fns import cross_entropy_loss
# Data preprocessing.
# Original code source: https://www.kaggle.com/drewbryant/starter-pfam-seed-random-split.
def read_all_shards(partition, data_dir, bucket_name):
    """Concatenate every CSV shard of one partition into a single dataframe."""
    gcsfs = GCSFS(bucket_name)
    partition_dir = os.path.join(data_dir, partition)
    shards = []
    for shard_name in gcsfs.listdir(partition_dir):
        with gcsfs.open(os.path.join(partition_dir, shard_name)) as handle:
            shards.append(pd.read_csv(handle, index_col=None))
    return pd.concat(shards)
def mod_family_accession(family_accession):
    """Strip the version suffix: keep everything before the first '.'."""
    dot_pos = family_accession.index('.')
    return family_accession[:dot_pos]
# Pfam protein_lm domain: fixed-vocabulary encoder mapping amino-acid
# strings to integer index sequences (max length 512).
PFAM_PROTEIN_DOMAIN = domains.VariableLengthDiscreteDomain(
    vocab=domains.ProteinVocab(
        include_anomalous_amino_acids=True,
        include_bos=True,
        include_eos=True,
        include_pad=True,
        include_mask=True,
    ),
    length=512,
)
# Number of categories for one-hot encoding.
PFAM_NUM_CATEGORIES = 27
def residues_to_one_hot_inds(seq):
    """Encode a residue string as a sequence of vocabulary indices."""
    encoded_batch = PFAM_PROTEIN_DOMAIN.encode([seq])
    return encoded_batch[0]
def get_family_ids():
    """Read the packaged Pfam family id list.

    Returns the raw lines (trailing newlines included), matching the
    previous behavior.
    """
    path = resource_filename('contextual_lenses.resources',
                             'pfam_family_ids.txt')
    # BUG FIX: the original open(...).readlines() leaked the file handle;
    # use a context manager so it is always closed.
    with open(path, 'r') as f:
        family_ids = f.readlines()
    return family_ids
def get_family_id_to_index():
    """Build a dict mapping each (newline-stripped) family id to its index."""
    path = resource_filename('contextual_lenses.resources',
                             'pfam_family_ids.txt')
    # BUG FIX: close the file handle (previously leaked by a bare open()).
    with open(path, 'r') as f:
        family_ids = f.readlines()
    # Index order follows line order in the resource file.
    return {family_id.replace('\n', ''): i
            for i, family_id in enumerate(family_ids)}
def create_pfam_df(family_accessions,
                   test=False,
                   samples=None,
                   random_state=0,
                   data_partitions_dirpath='random_split/',
                   gcs_bucket='neuralblast_public'):
    """Load and featurize Pfam data for the given families.

    Reads the train (or test) shards, restricts rows to
    'family_accessions', attaches an integer class 'index' per family id,
    and encodes each truncated sequence as one-hot indices.  When 'samples'
    is given, keeps at most that many shuffled rows per family.
    """
    family_id_to_index = get_family_id_to_index()
    partition = 'test' if test else 'train'
    pfam_df = read_all_shards(partition=partition,
                              data_dir=data_partitions_dirpath,
                              bucket_name=gcs_bucket)
    # Drop version suffixes so accessions match the requested families.
    pfam_df['mod_family_accession'] = pfam_df.family_accession.apply(
        mod_family_accession)
    pfam_df = pfam_df[pfam_df.mod_family_accession.isin(family_accessions)]
    # Integer class label per family id.
    pfam_df['index'] = pfam_df.family_id.apply(
        lambda fid: family_id_to_index[fid])
    # Encode at most the first 512 residues of each sequence.
    pfam_df['one_hot_inds'] = pfam_df.sequence.apply(
        lambda seq: residues_to_one_hot_inds(seq[:512]))
    if samples is not None:
        shuffled = pfam_df.sample(frac=1,
                                  replace=False,
                                  random_state=random_state)
        pfam_df = shuffled.groupby('mod_family_accession').head(
            samples).reset_index()
    return pfam_df
def create_pfam_seq_batches(family_accessions,
                            batch_size,
                            test=False,
                            samples=None,
                            epochs=1,
                            drop_remainder=False,
                            buffer_size=None,
                            shuffle_seed=0,
                            sample_random_state=0,
                            data_partitions_dirpath='random_split/',
                            gcs_bucket='neuralblast_public',
                            as_numpy=False):
    """Build an iterator over Pfam sequences only (labels not attached)."""
    pfam_df = create_pfam_df(family_accessions,
                             test=test,
                             samples=samples,
                             random_state=sample_random_state,
                             data_partitions_dirpath=data_partitions_dirpath,
                             gcs_bucket=gcs_bucket)
    # add_outputs=False: yield inputs only (cf. create_pfam_batches).
    return create_data_iterator(df=pfam_df,
                                input_col='one_hot_inds',
                                output_col='index',
                                batch_size=batch_size,
                                epochs=epochs,
                                buffer_size=buffer_size,
                                seed=shuffle_seed,
                                drop_remainder=drop_remainder,
                                add_outputs=False,
                                as_numpy=as_numpy)
def create_pfam_batches(family_accessions,
                        batch_size,
                        test=False,
                        samples=None,
                        epochs=1,
                        drop_remainder=False,
                        buffer_size=None,
                        shuffle_seed=0,
                        sample_random_state=0,
                        data_partitions_dirpath='random_split/',
                        gcs_bucket='neuralblast_public',
                        as_numpy=True):
    """Build an iterator of (sequence, label) Pfam batches.

    Also returns the full array of class indices, aligned with the
    (unshuffled) dataframe rows.
    """
    pfam_df = create_pfam_df(family_accessions,
                             test=test,
                             samples=samples,
                             random_state=sample_random_state,
                             data_partitions_dirpath=data_partitions_dirpath,
                             gcs_bucket=gcs_bucket)
    labels = pfam_df['index'].values
    batches = create_data_iterator(df=pfam_df,
                                   input_col='one_hot_inds',
                                   output_col='index',
                                   batch_size=batch_size,
                                   epochs=epochs,
                                   buffer_size=buffer_size,
                                   seed=shuffle_seed,
                                   drop_remainder=drop_remainder,
                                   as_numpy=as_numpy)
    return batches, labels
# Model evaluation.
def pfam_evaluate(predict_fn,
                  test_family_accessions,
                  title,
                  loss_fn_kwargs,
                  batch_size=512,
                  data_partitions_dirpath='random_split/',
                  gcs_bucket='neuralblast_public'):
    """Evaluate 'predict_fn' on held-out Pfam families.

    Returns a results dict (title, summed cross entropy, accuracy) plus the
    predicted class index for every test example.
    """
    test_batches, test_indexes = create_pfam_batches(
        family_accessions=test_family_accessions,
        batch_size=batch_size,
        test=True,
        buffer_size=1,
        gcs_bucket=gcs_bucket,
        data_partitions_dirpath=data_partitions_dirpath)
    pred_indexes = []
    cross_entropy = 0.
    for X, Y in iter(test_batches):
        Y_hat = predict_fn(X)
        cross_entropy += cross_entropy_loss(Y, Y_hat, **loss_fn_kwargs)
        # Predicted family = argmax over per-class logits, row by row.
        pred_indexes.extend(jnp.argmax(Y_hat, axis=1))
    pred_indexes = np.array(pred_indexes)
    acc = metrics.accuracy_score(test_indexes, pred_indexes)
    results = {
        'title': title,
        'cross_entropy': cross_entropy,
        'accuracy': acc,
    }
    return results, pred_indexes
def compute_embeddings(encoder, data_batches):
    """Embed every input row of 'data_batches' with 'encoder'.

    Returns one np.ndarray stacking the embedding of each input row, in
    batch order.
    """
    vectors = []
    for inputs, _ in iter(data_batches):
        embedded = encoder(inputs)
        vectors.extend(np.array(embedded))
    return np.array(vectors)
def pfam_nearest_neighbors_classification(
        encoder,
        family_accessions,
        batch_size=512,
        n_neighbors=1,
        train_samples=None,
        test_samples=None,
        shuffle_seed=0,
        sample_random_state=0,
        data_partitions_dirpath='random_split/',
        gcs_bucket='neuralblast_public'):
    """k-NN classification of Pfam families in the encoder's embedding space."""
    # Shared batching arguments for the train and test partitions.
    common = dict(
        family_accessions=family_accessions,
        batch_size=batch_size,
        buffer_size=1,
        shuffle_seed=shuffle_seed,
        sample_random_state=sample_random_state,
        data_partitions_dirpath=data_partitions_dirpath,
        gcs_bucket=gcs_bucket)
    train_batches, train_indexes = create_pfam_batches(
        samples=train_samples, **common)
    test_batches, test_indexes = create_pfam_batches(
        test=True, samples=test_samples, **common)
    # Fit the classifier on embedded training sequences, then score the
    # embedded test sequences.
    train_vectors = compute_embeddings(encoder, train_batches)
    test_vectors = compute_embeddings(encoder, test_batches)
    knn_classifier = knn(n_neighbors=n_neighbors)
    knn_classifier.fit(train_vectors, train_indexes)
    knn_predictions = knn_classifier.predict(test_vectors)
    knn_accuracy = metrics.accuracy_score(test_indexes, knn_predictions)
    results = {
        str(n_neighbors) + "-nn accuracy": knn_accuracy,
        'train_samples': train_samples,
        'test_samples': test_samples
    }
    return results, knn_predictions, knn_classifier
| [
"pandas.read_csv",
"contextual_lenses.loss_fns.cross_entropy_loss",
"sklearn.neighbors.KNeighborsClassifier",
"os.path.join",
"pkg_resources.resource_filename",
"numpy.array",
"contextual_lenses.train_utils.create_data_iterator",
"google_research.protein_lm.domains.ProteinVocab",
"jax.numpy.argmax",... | [((816, 834), 'fs_gcsfs.GCSFS', 'GCSFS', (['bucket_name'], {}), '(bucket_name)\n', (821, 834), False, 'from fs_gcsfs import GCSFS\n'), ((1050, 1067), 'pandas.concat', 'pd.concat', (['shards'], {}), '(shards)\n', (1059, 1067), True, 'import pandas as pd\n'), ((4873, 5115), 'contextual_lenses.train_utils.create_data_iterator', 'create_data_iterator', ([], {'df': 'pfam_df', 'input_col': '"""one_hot_inds"""', 'output_col': '"""index"""', 'batch_size': 'batch_size', 'epochs': 'epochs', 'buffer_size': 'buffer_size', 'seed': 'shuffle_seed', 'drop_remainder': 'drop_remainder', 'add_outputs': '(False)', 'as_numpy': 'as_numpy'}), "(df=pfam_df, input_col='one_hot_inds', output_col=\n 'index', batch_size=batch_size, epochs=epochs, buffer_size=buffer_size,\n seed=shuffle_seed, drop_remainder=drop_remainder, add_outputs=False,\n as_numpy=as_numpy)\n", (4893, 5115), False, 'from contextual_lenses.train_utils import create_data_iterator\n'), ((6462, 6681), 'contextual_lenses.train_utils.create_data_iterator', 'create_data_iterator', ([], {'df': 'pfam_df', 'input_col': '"""one_hot_inds"""', 'output_col': '"""index"""', 'batch_size': 'batch_size', 'epochs': 'epochs', 'buffer_size': 'buffer_size', 'seed': 'shuffle_seed', 'drop_remainder': 'drop_remainder', 'as_numpy': 'as_numpy'}), "(df=pfam_df, input_col='one_hot_inds', output_col=\n 'index', batch_size=batch_size, epochs=epochs, buffer_size=buffer_size,\n seed=shuffle_seed, drop_remainder=drop_remainder, as_numpy=as_numpy)\n", (6482, 6681), False, 'from contextual_lenses.train_utils import create_data_iterator\n'), ((8030, 8052), 'numpy.array', 'np.array', (['pred_indexes'], {}), '(pred_indexes)\n', (8038, 8052), True, 'import numpy as np\n'), ((8064, 8114), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_indexes', 'pred_indexes'], {}), '(test_indexes, pred_indexes)\n', (8086, 8114), True, 'import sklearn.metrics as metrics\n'), ((8578, 8595), 'numpy.array', 'np.array', (['vectors'], 
{}), '(vectors)\n', (8586, 8595), True, 'import numpy as np\n'), ((9918, 9946), 'sklearn.neighbors.KNeighborsClassifier', 'knn', ([], {'n_neighbors': 'n_neighbors'}), '(n_neighbors=n_neighbors)\n', (9921, 9946), True, 'from sklearn.neighbors import KNeighborsClassifier as knn\n'), ((10079, 10132), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_indexes', 'knn_predictions'], {}), '(test_indexes, knn_predictions)\n', (10101, 10132), True, 'import sklearn.metrics as metrics\n'), ((863, 896), 'os.path.join', 'os.path.join', (['data_dir', 'partition'], {}), '(data_dir, partition)\n', (875, 896), False, 'import os\n'), ((1334, 1467), 'google_research.protein_lm.domains.ProteinVocab', 'domains.ProteinVocab', ([], {'include_anomalous_amino_acids': '(True)', 'include_bos': '(True)', 'include_eos': '(True)', 'include_pad': '(True)', 'include_mask': '(True)'}), '(include_anomalous_amino_acids=True, include_bos=True,\n include_eos=True, include_pad=True, include_mask=True)\n', (1354, 1467), False, 'from google_research.protein_lm import domains\n'), ((7855, 7901), 'contextual_lenses.loss_fns.cross_entropy_loss', 'cross_entropy_loss', (['Y', 'Y_hat'], {}), '(Y, Y_hat, **loss_fn_kwargs)\n', (7873, 7901), False, 'from contextual_lenses.loss_fns import cross_entropy_loss\n'), ((7919, 7944), 'jax.numpy.argmax', 'jnp.argmax', (['Y_hat'], {'axis': '(1)'}), '(Y_hat, axis=1)\n', (7929, 7944), True, 'import jax.numpy as jnp\n'), ((8510, 8530), 'numpy.array', 'np.array', (['X_embedded'], {}), '(X_embedded)\n', (8518, 8530), True, 'import numpy as np\n'), ((923, 960), 'os.path.join', 'os.path.join', (['data_dir', 'partition', 'fn'], {}), '(data_dir, partition, fn)\n', (935, 960), False, 'import os\n'), ((994, 1024), 'pandas.read_csv', 'pd.read_csv', (['f'], {'index_col': 'None'}), '(f, index_col=None)\n', (1005, 1024), True, 'import pandas as pd\n'), ((1938, 2009), 'pkg_resources.resource_filename', 'resource_filename', (['"""contextual_lenses.resources"""', 
'"""pfam_family_ids.txt"""'], {}), "('contextual_lenses.resources', 'pfam_family_ids.txt')\n", (1955, 2009), False, 'from pkg_resources import resource_filename\n'), ((2174, 2245), 'pkg_resources.resource_filename', 'resource_filename', (['"""contextual_lenses.resources"""', '"""pfam_family_ids.txt"""'], {}), "('contextual_lenses.resources', 'pfam_family_ids.txt')\n", (2191, 2245), False, 'from pkg_resources import resource_filename\n')] |
import matplotlib.pyplot as plt
import cv2
import numpy as np
# Number of Colors in color palette
N = 14
def map2img(canvas, colors):
    """Map a 2-D grid of palette indices to an RGB image.

    Parameters
    ----------
    canvas : np.ndarray
        (H, W) array of (possibly float-typed) palette indices.
    colors : np.ndarray
        (N, 3) palette; row k is the RGB triple for index k.

    Returns
    -------
    np.ndarray
        (H, W, 3) integer RGB image.
    """
    # NumPy fancy indexing replaces the original O(H*W) Python double loop:
    # each cell looks up its palette row in one vectorized operation.
    return colors[canvas.astype(int)].astype(int)
def setup(size=(256, 256)):
    """Initialise a random N-color palette and a random canvas.

    Parameters
    ----------
    size : tuple
        (height, width) of the canvas.

    Returns
    -------
    (canvas, colors)
        canvas: float array of shape 'size' with values in [0, N-1];
        colors: (N, 3) float array of RGB values in [0, 255].
    """
    # BUG FIX: the original called random.randint, but the 'random' module
    # is never imported in this file (NameError at runtime); use numpy's
    # RNG instead.  np.random.randint's high bound is exclusive, hence
    # 256 / N to match random.randint(0, 255) / random.randint(0, N-1).
    colors = np.random.randint(0, 256, size=(N, 3)).astype(float)
    canvas = np.random.randint(0, N, size=size).astype(float)
    return canvas, colors
def automate_cyclic(canvas, N, gnome):
    """Advance a cyclic cellular automaton by one step.

    A cell with value v becomes (v + 1) % N when any of its selected
    neighbors (entries of the 3x3 mask 'gnome' set to 1, with toroidal
    wraparound) already holds that successor value; otherwise it keeps v.
    """
    rows, cols = canvas.shape
    result = np.zeros(canvas.shape)
    # Hoist the 3x3 mask into a list of active (row, col) offsets.
    offsets = [(di, dj)
               for di in (-1, 0, 1)
               for dj in (-1, 0, 1)
               if gnome[di + 1, dj + 1] == 1]
    for r in range(rows):
        for c in range(cols):
            current = canvas[r, c]
            successor = int(current + 1) % N
            if any(canvas[(r + di) % rows, (c + dj) % cols] == successor
                   for di, dj in offsets):
                result[r, c] = successor
            else:
                result[r, c] = current
    return result
# Neighborhood mask: 1 = include that neighbor, 0 = ignore it (3x3, centered
# on the current cell).
chromosome = np.array([[0., 0., 1.], [1., 0., 0.], [1., 0., 0.]]) ## Replace the chromosome as per choice to check results
canvas,colors = setup()
# Run the cyclic automaton for 1000 steps, rendering every 50th frame.
for i in range(0,1000):
    canvas = automate_cyclic(canvas,N, chromosome)
    img = map2img(canvas, colors)
    if (i%50==0):
        plt.figure(figsize = (5,5))
        plt.imshow(img.astype(int))
        plt.show(block=False)  # non-blocking so the loop keeps running
        plt.pause(0.01)
        plt.close()
| [
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show"
] | [((1328, 1389), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])\n', (1336, 1389), True, 'import numpy as np\n'), ((147, 194), 'numpy.zeros', 'np.zeros', (['(canvas.shape[0], canvas.shape[1], 3)'], {}), '((canvas.shape[0], canvas.shape[1], 3))\n', (155, 194), True, 'import numpy as np\n'), ((390, 406), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (398, 406), True, 'import numpy as np\n'), ((529, 543), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (537, 543), True, 'import numpy as np\n'), ((728, 750), 'numpy.zeros', 'np.zeros', (['canvas.shape'], {}), '(canvas.shape)\n', (736, 750), True, 'import numpy as np\n'), ((1610, 1636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1620, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1695), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1682, 1695), True, 'import matplotlib.pyplot as plt\n'), ((1700, 1715), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (1709, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1731), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1729, 1731), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
import os
from pathlib import Path
import gpytorch
import matplotlib.pyplot as plt
import numpy as np
import torch
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.means import LinearMean
from sklearn.model_selection import train_test_split
from torchmetrics import MeanSquaredError, R2Score
from GPErks.constants import HEIGHT, WIDTH
from GPErks.gp.data.dataset import Dataset
from GPErks.gp.experiment import GPExperiment
from GPErks.log.logger import get_logger
from GPErks.perks.diagnostics import Diagnostics
from GPErks.perks.gsa import SobolGSA
from GPErks.perks.inference import Inference
from GPErks.serialization.labels import read_labels_from_file
from GPErks.serialization.path import posix_path
from GPErks.train.early_stop import (
GLEarlyStoppingCriterion,
PkEarlyStoppingCriterion,
PQEarlyStoppingCriterion,
SimpleEarlyStoppingCriterion,
UPEarlyStoppingCriterion,
)
from GPErks.train.emulator import GPEmulator
from GPErks.train.snapshot import EveryEpochSnapshottingCriterion
from GPErks.utils.random import set_seed
from GPErks.utils.test_functions import forrester
log = get_logger()
def main(state, early_stopping_criterion):
    """Train a GP emulator on the NK-model dataset; return its best epoch.

    Parameters
    ----------
    state : int
        Random state used to split the held-out data into test/validation.
    early_stopping_criterion :
        Early-stopping rule handed to GPEmulator.train.
    """
    seed = 8
    set_seed(seed)  # reproducible sampling
    ##========================================================================
    ## load dataset
    ##========================================================================
    # NOTE(review): assumes the data files exist under data/nkmodel/
    # relative to the working directory — confirm.
    path_to_data = "data/nkmodel/"
    X_train = np.loadtxt(path_to_data + "X_train.txt", dtype=float)
    y_train = np.loadtxt(path_to_data + "y_train.txt", dtype=float)
    X_ = np.loadtxt(path_to_data + "X_test.txt", dtype=float)
    y_ = np.loadtxt(path_to_data + "y_test.txt", dtype=float)
    # Split the held-out data 50/50 into test and validation sets.
    X_test, X_val, y_test, y_val = train_test_split(
        X_, y_, test_size=0.5, random_state=state
    )
    target_label_idx = 0
    xlabels = read_labels_from_file(path_to_data + "xlabels.txt")
    ylabel = read_labels_from_file(path_to_data + "ylabels.txt")[
        target_label_idx
    ]
    dataset = Dataset(
        X_train,
        y_train,
        X_val=X_val,
        y_val=y_val,
        X_test=X_test,
        y_test=y_test,
        x_labels=xlabels,
        y_label=ylabel,
    )
    ##========================================================================
    ## define experiment options
    ##========================================================================
    # Linear mean + ARD RBF kernel (one lengthscale per input dimension).
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    input_size = dataset.input_size
    mean_function = LinearMean(input_size=input_size)
    kernel = ScaleKernel(RBFKernel(ard_num_dims=input_size))
    metrics = [R2Score(), MeanSquaredError()]
    experiment = GPExperiment(
        dataset,
        likelihood,
        mean_function,
        kernel,
        n_restarts=1,
        metrics=metrics,
        seed=seed,  # reproducible training
    )
    ##========================================================================
    ## define training options
    ##========================================================================
    optimizer = torch.optim.Adam(experiment.model.parameters(), lr=0.1)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Snapshots go to ./snapshot/<script name>/restart_<k>/epoch_<e>.pth
    here = os.path.abspath(os.path.dirname(__file__))
    example_name = Path(__file__).name.replace(".py", "")
    snapshot_dir = posix_path(here, "snapshot", example_name)
    os.makedirs(snapshot_dir, exist_ok=True)
    snapshotting_criterion = EveryEpochSnapshottingCriterion(
        posix_path(snapshot_dir, "restart_{restart}"),
        "epoch_{epoch}.pth",
    )
    ##========================================================================
    ## train model
    ##========================================================================
    emul = GPEmulator(experiment, device)
    best_model, best_train_stats = emul.train(
        optimizer,
        early_stopping_criterion,
        snapshotting_criterion,
    )
    return best_train_stats.best_epoch
if __name__ == "__main__":
max_epochs = 1000
early_stopping_criteria = [
SimpleEarlyStoppingCriterion(max_epochs, patience=8),
GLEarlyStoppingCriterion(max_epochs, alpha=1.0, patience=8),
UPEarlyStoppingCriterion(
max_epochs, strip_length=5, successive_strips=4
),
PQEarlyStoppingCriterion(
max_epochs, alpha=1.0, patience=8, strip_length=5
),
]
n_splits = 5
keys = ["<KEY>"]
best_epochs = {}
for key, esc in zip(keys, early_stopping_criteria):
best_epochs[key] = []
for i in range(n_splits):
best_epochs[key].append(main(i, esc))
fig, axis = plt.subplots(1, 1, figsize=(2 * WIDTH, 2 * HEIGHT / 3))
for key in keys:
axis.scatter(np.arange(1, n_splits + 1), best_epochs[key], label=key)
axis.legend()
axis.set_xlabel("Experiment id", fontsize=12)
axis.set_ylabel("Best epoch", fontsize=12)
plt.show()
| [
"GPErks.train.early_stop.SimpleEarlyStoppingCriterion",
"gpytorch.means.LinearMean",
"gpytorch.kernels.RBFKernel",
"GPErks.gp.data.dataset.Dataset",
"GPErks.serialization.path.posix_path",
"torch.cuda.is_available",
"numpy.arange",
"GPErks.log.logger.get_logger",
"pathlib.Path",
"GPErks.serializat... | [((1153, 1165), 'GPErks.log.logger.get_logger', 'get_logger', ([], {}), '()\n', (1163, 1165), False, 'from GPErks.log.logger import get_logger\n'), ((1228, 1242), 'GPErks.utils.random.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (1236, 1242), False, 'from GPErks.utils.random import set_seed\n'), ((1497, 1550), 'numpy.loadtxt', 'np.loadtxt', (["(path_to_data + 'X_train.txt')"], {'dtype': 'float'}), "(path_to_data + 'X_train.txt', dtype=float)\n", (1507, 1550), True, 'import numpy as np\n'), ((1565, 1618), 'numpy.loadtxt', 'np.loadtxt', (["(path_to_data + 'y_train.txt')"], {'dtype': 'float'}), "(path_to_data + 'y_train.txt', dtype=float)\n", (1575, 1618), True, 'import numpy as np\n'), ((1629, 1681), 'numpy.loadtxt', 'np.loadtxt', (["(path_to_data + 'X_test.txt')"], {'dtype': 'float'}), "(path_to_data + 'X_test.txt', dtype=float)\n", (1639, 1681), True, 'import numpy as np\n'), ((1691, 1743), 'numpy.loadtxt', 'np.loadtxt', (["(path_to_data + 'y_test.txt')"], {'dtype': 'float'}), "(path_to_data + 'y_test.txt', dtype=float)\n", (1701, 1743), True, 'import numpy as np\n'), ((1780, 1839), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_', 'y_'], {'test_size': '(0.5)', 'random_state': 'state'}), '(X_, y_, test_size=0.5, random_state=state)\n', (1796, 1839), False, 'from sklearn.model_selection import train_test_split\n'), ((1894, 1945), 'GPErks.serialization.labels.read_labels_from_file', 'read_labels_from_file', (["(path_to_data + 'xlabels.txt')"], {}), "(path_to_data + 'xlabels.txt')\n", (1915, 1945), False, 'from GPErks.serialization.labels import read_labels_from_file\n'), ((2058, 2178), 'GPErks.gp.data.dataset.Dataset', 'Dataset', (['X_train', 'y_train'], {'X_val': 'X_val', 'y_val': 'y_val', 'X_test': 'X_test', 'y_test': 'y_test', 'x_labels': 'xlabels', 'y_label': 'ylabel'}), '(X_train, y_train, X_val=X_val, y_val=y_val, X_test=X_test, y_test=\n y_test, x_labels=xlabels, y_label=ylabel)\n', (2065, 2178), False, 'from 
GPErks.gp.data.dataset import Dataset\n'), ((2454, 2495), 'gpytorch.likelihoods.GaussianLikelihood', 'gpytorch.likelihoods.GaussianLikelihood', ([], {}), '()\n', (2493, 2495), False, 'import gpytorch\n'), ((2553, 2586), 'gpytorch.means.LinearMean', 'LinearMean', ([], {'input_size': 'input_size'}), '(input_size=input_size)\n', (2563, 2586), False, 'from gpytorch.means import LinearMean\n'), ((2713, 2815), 'GPErks.gp.experiment.GPExperiment', 'GPExperiment', (['dataset', 'likelihood', 'mean_function', 'kernel'], {'n_restarts': '(1)', 'metrics': 'metrics', 'seed': 'seed'}), '(dataset, likelihood, mean_function, kernel, n_restarts=1,\n metrics=metrics, seed=seed)\n', (2725, 2815), False, 'from GPErks.gp.experiment import GPExperiment\n'), ((3354, 3396), 'GPErks.serialization.path.posix_path', 'posix_path', (['here', '"""snapshot"""', 'example_name'], {}), "(here, 'snapshot', example_name)\n", (3364, 3396), False, 'from GPErks.serialization.path import posix_path\n'), ((3401, 3441), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {'exist_ok': '(True)'}), '(snapshot_dir, exist_ok=True)\n', (3412, 3441), False, 'import os\n'), ((3783, 3813), 'GPErks.train.emulator.GPEmulator', 'GPEmulator', (['experiment', 'device'], {}), '(experiment, device)\n', (3793, 3813), False, 'from GPErks.train.emulator import GPEmulator\n'), ((4670, 4725), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(2 * WIDTH, 2 * HEIGHT / 3)'}), '(1, 1, figsize=(2 * WIDTH, 2 * HEIGHT / 3))\n', (4682, 4725), True, 'import matplotlib.pyplot as plt\n'), ((4944, 4954), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4952, 4954), True, 'import matplotlib.pyplot as plt\n'), ((1959, 2010), 'GPErks.serialization.labels.read_labels_from_file', 'read_labels_from_file', (["(path_to_data + 'ylabels.txt')"], {}), "(path_to_data + 'ylabels.txt')\n", (1980, 2010), False, 'from GPErks.serialization.labels import read_labels_from_file\n'), ((2612, 2646), 'gpytorch.kernels.RBFKernel', 
'RBFKernel', ([], {'ard_num_dims': 'input_size'}), '(ard_num_dims=input_size)\n', (2621, 2646), False, 'from gpytorch.kernels import RBFKernel, ScaleKernel\n'), ((2664, 2673), 'torchmetrics.R2Score', 'R2Score', ([], {}), '()\n', (2671, 2673), False, 'from torchmetrics import MeanSquaredError, R2Score\n'), ((2675, 2693), 'torchmetrics.MeanSquaredError', 'MeanSquaredError', ([], {}), '()\n', (2691, 2693), False, 'from torchmetrics import MeanSquaredError, R2Score\n'), ((3185, 3210), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3208, 3210), False, 'import torch\n'), ((3250, 3275), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3265, 3275), False, 'import os\n'), ((3512, 3557), 'GPErks.serialization.path.posix_path', 'posix_path', (['snapshot_dir', '"""restart_{restart}"""'], {}), "(snapshot_dir, 'restart_{restart}')\n", (3522, 3557), False, 'from GPErks.serialization.path import posix_path\n'), ((4083, 4135), 'GPErks.train.early_stop.SimpleEarlyStoppingCriterion', 'SimpleEarlyStoppingCriterion', (['max_epochs'], {'patience': '(8)'}), '(max_epochs, patience=8)\n', (4111, 4135), False, 'from GPErks.train.early_stop import GLEarlyStoppingCriterion, PkEarlyStoppingCriterion, PQEarlyStoppingCriterion, SimpleEarlyStoppingCriterion, UPEarlyStoppingCriterion\n'), ((4145, 4204), 'GPErks.train.early_stop.GLEarlyStoppingCriterion', 'GLEarlyStoppingCriterion', (['max_epochs'], {'alpha': '(1.0)', 'patience': '(8)'}), '(max_epochs, alpha=1.0, patience=8)\n', (4169, 4204), False, 'from GPErks.train.early_stop import GLEarlyStoppingCriterion, PkEarlyStoppingCriterion, PQEarlyStoppingCriterion, SimpleEarlyStoppingCriterion, UPEarlyStoppingCriterion\n'), ((4214, 4287), 'GPErks.train.early_stop.UPEarlyStoppingCriterion', 'UPEarlyStoppingCriterion', (['max_epochs'], {'strip_length': '(5)', 'successive_strips': '(4)'}), '(max_epochs, strip_length=5, successive_strips=4)\n', (4238, 4287), False, 'from GPErks.train.early_stop import 
GLEarlyStoppingCriterion, PkEarlyStoppingCriterion, PQEarlyStoppingCriterion, SimpleEarlyStoppingCriterion, UPEarlyStoppingCriterion\n'), ((4319, 4394), 'GPErks.train.early_stop.PQEarlyStoppingCriterion', 'PQEarlyStoppingCriterion', (['max_epochs'], {'alpha': '(1.0)', 'patience': '(8)', 'strip_length': '(5)'}), '(max_epochs, alpha=1.0, patience=8, strip_length=5)\n', (4343, 4394), False, 'from GPErks.train.early_stop import GLEarlyStoppingCriterion, PkEarlyStoppingCriterion, PQEarlyStoppingCriterion, SimpleEarlyStoppingCriterion, UPEarlyStoppingCriterion\n'), ((4768, 4794), 'numpy.arange', 'np.arange', (['(1)', '(n_splits + 1)'], {}), '(1, n_splits + 1)\n', (4777, 4794), True, 'import numpy as np\n'), ((3296, 3310), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3300, 3310), False, 'from pathlib import Path\n')] |
from typing import Dict
import cv2
import numpy
from . import _debug
from ._params import Params as _Params
from ._types import DialData
from ._utils import float_point_to_int
# Module-level cache of per-dial data, keyed by id() of the Params object.
# Populated lazily by get_dial_data().
_dial_data_map: Dict[int, Dict[str, DialData]] = {}
def get_dial_data(params: _Params) -> Dict[str, DialData]:
    """Return the per-dial data for *params*, computing it on first use.

    Results are memoized in a module-level cache keyed by ``id(params)``.
    NOTE(review): id() values can be reused after garbage collection --
    this assumes Params objects live for the whole process lifetime.
    """
    cache_key = id(params)
    try:
        return _dial_data_map[cache_key]
    except KeyError:
        computed = _get_dial_data(params)
        _dial_data_map[cache_key] = computed
        return computed
def _get_dial_data(params: _Params) -> Dict[str, DialData]:
    """Build DialData (mask images) for every configured dial.

    For each dial an annulus is outlined at the configured needle distance
    from the dial center; ``circle_mask`` keeps only that filled ring, while
    ``mask`` additionally has the dial interior filled.
    """
    dial_data: Dict[str, DialData] = {}
    for name, dial_center in params.dial_centers.items():
        ring_img = numpy.zeros(
            shape=params.dials_template_size,
            dtype=numpy.uint8)
        radius = int(round(dial_center.diameter / 2.0))
        center_px = float_point_to_int(dial_center.center)
        needle_dist = params.needle_dists_from_dial_center[name]
        inner_radius = radius + needle_dist
        ring_width = params.needle_circle_mask_thickness[name]
        # Outline the annulus with its inner and outer circles.
        for offset in (0, ring_width - 1):
            cv2.circle(ring_img, center_px, inner_radius + offset, 255)
        # floodFill requires a helper mask two pixels larger than the image.
        flood_helper = numpy.zeros(
            (ring_img.shape[0] + 2, ring_img.shape[1] + 2), dtype=numpy.uint8)
        seed = (center_px[0] + inner_radius + 1, center_px[1])
        # Fill the band between the two circles and keep a copy of it.
        cv2.floodFill(ring_img, flood_helper, seed, 255)
        circle_mask = ring_img.copy()
        # Additionally fill the dial interior for the full mask.
        cv2.floodFill(ring_img, flood_helper, center_px, 255)
        dial_data[name] = DialData(name, dial_center.center, ring_img, circle_mask)
        if 'masks' in _debug.DEBUG:
            cv2.imshow('mask of ' + name, ring_img)
            cv2.imshow('circle_mask of ' + name, circle_mask)
    if 'masks' in _debug.DEBUG:
        cv2.waitKey(0)
    return dial_data
| [
"cv2.imshow",
"cv2.floodFill",
"cv2.circle",
"numpy.zeros",
"cv2.waitKey"
] | [((629, 693), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'params.dials_template_size', 'dtype': 'numpy.uint8'}), '(shape=params.dials_template_size, dtype=numpy.uint8)\n', (640, 693), False, 'import numpy\n'), ((1287, 1357), 'numpy.zeros', 'numpy.zeros', (['(mask.shape[0] + 2, mask.shape[1] + 2)'], {'dtype': 'numpy.uint8'}), '((mask.shape[0] + 2, mask.shape[1] + 2), dtype=numpy.uint8)\n', (1298, 1357), False, 'import numpy\n'), ((1442, 1489), 'cv2.floodFill', 'cv2.floodFill', (['mask', 'fill_mask', 'fill_point', '(255)'], {}), '(mask, fill_mask, fill_point, 255)\n', (1455, 1489), False, 'import cv2\n'), ((1589, 1632), 'cv2.floodFill', 'cv2.floodFill', (['mask', 'fill_mask', 'center', '(255)'], {}), '(mask, fill_mask, center, 255)\n', (1602, 1632), False, 'import cv2\n'), ((1897, 1911), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1908, 1911), False, 'import cv2\n'), ((1129, 1176), 'cv2.circle', 'cv2.circle', (['mask', 'center', '(start_radius + i)', '(255)'], {}), '(mask, center, start_radius + i, 255)\n', (1139, 1176), False, 'import cv2\n'), ((1759, 1794), 'cv2.imshow', 'cv2.imshow', (["('mask of ' + name)", 'mask'], {}), "('mask of ' + name, mask)\n", (1769, 1794), False, 'import cv2\n'), ((1807, 1856), 'cv2.imshow', 'cv2.imshow', (["('circle_mask of ' + name)", 'circle_mask'], {}), "('circle_mask of ' + name, circle_mask)\n", (1817, 1856), False, 'import cv2\n')] |
import numpy as np
def f(n):
    """Return exp(-n) * sum_{k=1}^{n-1} n**k / k!.

    This is P(1 <= X <= n-1) for X ~ Poisson(n), which tends to 1/2 as n
    grows.  The original implementation accumulated the raw terms
    n**k / k!, whose maximum is ~exp(n)/sqrt(2*pi*n); that overflows a
    float for n >~ 710 (f(1000) returned inf).  We instead work entirely
    in log space and reduce with a numerically stable log-sum-exp.

    Args:
        n: positive integer.
    Returns:
        The value of the sum as a float (0.0 for n < 2, where the sum is
        empty, matching the original's limiting behaviour).
    """
    if n < 2:
        return 0.0
    # log_terms[k-1] = log(n**k / k!) = sum_{j=1..k} log(n / j)
    log_terms = np.cumsum(np.log(n / np.arange(1.0, n)))
    m = log_terms.max()
    # log-sum-exp: log(sum exp(t)) = m + log(sum exp(t - m))
    log_sum = m + np.log(np.exp(log_terms - m).sum())
    return np.exp(log_sum - n)
print(f(10),f(100),f(1000)) | [
"numpy.log"
] | [((143, 157), 'numpy.log', 'np.log', (['logsum'], {}), '(logsum)\n', (149, 157), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf8 -*-
# ================================================================================
# Copyright 2022 Alibaba Inc. All Rights Reserved.
#
# History:
# 2022.03.01. Be created by xingzhang.rxz. Used for language identification.
# 2018.04.27. Be created by jiangshi.lxq. Forked and adatped from tensor2tensor.
# For internal use only. DON'T DISTRIBUTE.
# ================================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from config import *
from transformer import transformer_model_fn as model_fn
from data_reader import input_fn
import time
import tensorflow as tf
import numpy as np
import os, errno
import subprocess
import re
from functools import reduce
tf.logging.set_verbosity(tf.logging.INFO)
def cal_acc(output_file, dev_src, dev_trg, acc_log, trg_vocab):
    """Score decoded hypotheses against reference targets.

    Reads hypotheses from *output_file* (first tab-field is the predicted
    label), the parallel source/target reference files, and writes:
      - "<output_file>.res": one "src\\ttrg\\thypothesis" line per scored example
      - *acc_log*: one tab-joined accuracy row (overall + per-class, in %)

    Returns:
        The tab-joined accuracy string that was appended to *acc_log*.

    Fixes over the original: the bare ``except:`` around the source read no
    longer references possibly-unbound loop variables, and an empty
    evaluation set no longer raises ZeroDivisionError.
    """
    with tf.gfile.GFile(output_file, 'r') as fout, tf.gfile.GFile(output_file + ".res", 'w') as fres:
        hypotheses = fout.readlines()
        print('Num of decoded sentence:%d' % len(hypotheses))
        devsrcs = []
        with tf.gfile.GFile(dev_src, 'r') as fdevsrc:
            i, line = -1, None
            try:
                for i, line in enumerate(fdevsrc):
                    devsrcs.append(line)
            except Exception:
                # Best effort: a malformed line stops reading but not scoring.
                print("error:", i, line)
        with tf.gfile.GFile(dev_trg, 'r') as fdevtrg:
            devtrgs = fdevtrg.readlines()
        correct_total, scored_total = 0, 0
        correct_per_class = [0] * len(trg_vocab)
        seen_per_class = [0] * len(trg_vocab)
        for hypothes, src, trg in zip(hypotheses, devsrcs, devtrgs):
            hypothes, src, trg = hypothes.strip(), src.strip(), trg.strip()
            if trg not in trg_vocab:
                continue
            trg_index = trg_vocab.index(trg)
            scored_total += 1
            seen_per_class[trg_index] += 1
            print(src + "\t" + trg + "\t" + hypothes, file=fres)
            # The hypothesis line is "label\tscore\t..."; compare the label only.
            if hypothes.split("\t")[0] == trg:
                correct_total += 1
                correct_per_class[trg_index] += 1
        # Guard against an empty evaluation set (original raised ZeroDivisionError).
        acc_lt = [correct_total / scored_total if scored_total else 0.0]
        acc_lt.extend(r / (a + 0.0001) for r, a in zip(correct_per_class, seen_per_class))
    with tf.gfile.GFile(acc_log, 'a') as f:
        bleu = '\t'.join(format(x * 100, "0.2f") for x in acc_lt)
        print('ACC: %s' % bleu)
        print(bleu, file=f)
    return bleu
def get_dict_from_collection(collection_name):
    """Rebuild a dict stored as two parallel graph collections.

    The model function registers keys under "<name>_keys" and values under
    "<name>_values"; zip them back together here.
    """
    keys = tf.get_collection(collection_name + "_keys")
    values = tf.get_collection(collection_name + "_values")
    return {k: v for k, v in zip(keys, values)}
def extract_batches(tensors):
    """Yield per-example slices from a batched dict of arrays.

    A non-dict input is passed through element by element unchanged.
    For a dict, the batch size is taken from the first dimension of the
    first value and each yielded item maps every key to its b-th row.
    """
    if isinstance(tensors, dict):
        batch_size = None
        for value in tensors.values():
            batch_size = batch_size or value.shape[0]
        for b in range(batch_size):
            yield {key: value[b] for key, value in tensors.items()}
    else:
        yield from tensors
class SaveEvaluationPredictionHook(tf.train.SessionRunHook):
    """Session hook that writes the top-3 language predictions per example to
    "<output_file>.<global_step>" and logs overall accuracy when the run ends.

    NOTE(review): the hook matches predictions to reference lines purely by
    arrival order (self.dev_index) -- confirm the eval input pipeline does
    not shuffle.
    """
    def __init__(self, output_file, dev_src, dev_trg, trg_vocab_file, acc_log):
        # Output paths for per-step predictions and the accumulated accuracy log.
        self._output_file = output_file
        self._dev_src = dev_src
        self._dev_trg = dev_trg
        self._acc_log = acc_log
        # Target vocabulary: one language label per line, index = class id.
        trg_rvocab = [w.strip() for w in open(trg_vocab_file)]
        self._trg_vocab = trg_rvocab
        # Reference source/target lines, used for debug logging of mistakes.
        self.devsrcs = [line.strip() for line in open(dev_src, "r", encoding='utf-8', errors='ignore').readlines()]
        self.devtrgs = [line.strip() for line in open(dev_trg, "r", encoding='utf-8', errors='ignore').readlines()]
        self.dev_index = -1
    def begin(self):
        # Fetch tensors that the model function registered in graph collections.
        self._predictions = get_dict_from_collection("predictions")
        self._features = tf.get_collection("features")
        self._labels = tf.get_collection("labels")
        if use_user_lang_map: self._contexts = tf.get_collection("contexts")
        self._global_step = tf.train.get_global_step()
        self.start_time = time.mktime(time.localtime())
        self.count = 0
    def before_run(self, run_context):
        # Request predictions (and optionally user-language contexts) each step.
        if use_user_lang_map: return tf.train.SessionRunArgs([self._predictions, self._global_step, self._features, self._labels, self._contexts])
        else: return tf.train.SessionRunArgs([self._predictions, self._global_step, self._features, self._labels])
    def after_run(self, run_context, run_values):
        if use_user_lang_map: predictions, current_step, features, labels, contexts = run_values.results
        else: predictions, current_step, features, labels = run_values.results
        # Predictions for step N go to "<output_file>.N" (appended per batch).
        self._output_path = "{}.{}".format(self._output_file, current_step)
        if self.count % 10000 == 0: tf.logging.info("eval sample:"+str(self.count))
        with open(self._output_path,'a') as output_file:
            for index, prediction in enumerate(extract_batches(predictions)):
                self.count += 1
                self.dev_index += 1
                prob = prediction['predict_score']
                # Rank classes by descending score and keep the top 3.
                prob_index = np.argsort(prob)[::-1]
                prob_index_lang = [self._trg_vocab[item] for item in prob_index]
                prob_value = np.sort(prob)[::-1]
                prob_index_lang = prob_index_lang[:3]
                prob_value = prob_value[:3]
                # One line per example: "lang\tscore%\tlang\tscore%\tlang\tscore%"
                prob_str = "\t".join(['%s\t%.2f'%(i,v*100) for i,v in zip(prob_index_lang, prob_value)])
                print(prob_str, file=output_file)
                if showinfos:
                    if use_user_lang_map: feature, context = features[0], contexts[0]
                    else: feature = features[0]
                    # Wrap around in case the dataset repeats.
                    self.dev_index = self.dev_index % len(self.devsrcs)
                    devsrc, devtrg = self.devsrcs[self.dev_index], self.devtrgs[self.dev_index]
                    # Log details only for mispredicted examples.
                    if devtrg != prob_index_lang[0]:
                        tf.logging.info("text:"+devsrc+" lang:"+devtrg)
                        tf.logging.info("features:"+" ".join([str(item) for item in feature[index]]))
                        if use_user_lang_map: tf.logging.info("context:"+" ".join([str(item) for item in context[index]]))
                        tf.logging.info("predict:"+prob_str)
    def end(self, session):
        # Log throughput, then score the collected predictions.
        _ = self.count
        end_time = time.mktime(time.localtime())
        dur_sec = end_time - self.start_time
        tf.logging.info("Evaluation speed "+str((_+1)/dur_sec)+"e/s:"+str(dur_sec*1000/(_+1))+"ms/e")
        tf.logging.info("Evaluation predictions saved to %s", self._output_path)
        score = cal_acc(self._output_path, self._dev_src, self._dev_trg, self._acc_log, self._trg_vocab)
        # NOTE(review): score is a tab-joined string; [:5] logs only its first
        # five characters (roughly the overall accuracy) -- confirm intended.
        tf.logging.info("accuracy: %s", score[:5])
def cvt(checkpoint_dir, output_dir):
    """Convert a training checkpoint into a plain evaluation checkpoint.

    Loads every variable from *checkpoint_dir*, strips the model-name scope
    and the '/ExponentialMovingAverage' suffix from variable names, and
    saves the result as output_dir/model.ckpt-<global_step>.  Used so that
    evaluation runs on the EMA-shadow weights.
    """
    model_name = "NmtModel"
    with tf.Graph().as_default() as graph:
        with tf.Session(graph=graph) as sess:
            var_list = []
            var_sum = 0
            for var_name, var_shape in tf.contrib.framework.list_variables(checkpoint_dir):
                if 'ExponentialMovingAverage' in var_name or 'global_step' in var_name:
                    tf.logging.info("var_name: %s:%s", var_name, var_shape)
                    # Count parameters covered by EMA shadows (logging only).
                    if 'ExponentialMovingAverage' in var_name: var_sum += reduce(lambda x,y:x*y, var_shape)
                var_value = tf.contrib.framework.load_variable(checkpoint_dir, var_name)
                # Strip the (possibly nested) model scope and the EMA suffix.
                var_name = var_name.replace('/'+model_name+'/', '/').replace('/'+model_name+'/', '/').replace('/ExponentialMovingAverage','')
                if var_name == 'global_step':
                    step = str(var_value)
                var = tf.Variable(var_value, name=var_name)
                var_list.append(var)
            tf.logging.info("Params sum is %d", var_sum)
            saver = tf.train.Saver()
            sess.run(tf.variables_initializer(var_list))
            # NOTE(review): 'step' is unbound if the checkpoint has no
            # 'global_step' variable -- confirm it is always present.
            saver.save(sess, output_dir+'/model.ckpt-'+step)
            sess.close()
def sorted_dir(folder):
    """Return the '.index' checkpoint files in *folder*, oldest first.

    Ordering is by file modification time, ascending, so callers can walk
    checkpoints in the order they were written.
    """
    def modification_time(entry):
        return os.path.getmtime(os.path.join(folder, entry))
    index_files = [entry for entry in os.listdir(folder) if '.index' in entry]
    return sorted(index_files, key=modification_time, reverse=False)
def get_last_step(folder):
    """Return (as a string) the step suffix of the most recently produced
    'dev_out.<step>' prediction file, or '1' when none exist yet.

    Candidate steps are taken from files in *folder* whose names end in
    ".<digits>"; recency is judged by the mtime of the matching dev_out file.
    """
    def modification_time(step):
        return os.path.getmtime(dev_out + '.' + step)
    steps = [re.sub(r'^.*?(\d+)$', r'\1', f)
             for f in os.listdir(folder) if re.match(r'.*\.\d+$', f)]
    if not steps:
        return '1'
    return max(steps, key=modification_time)
def main(_):
    """Evaluate the language-identification model.

    With params['continuous_eval'] set, polls model_dir forever and
    evaluates every new checkpoint (newest last); otherwise evaluates the
    single checkpoint named in model_dir/checkpoint.  Predictions are
    written by SaveEvaluationPredictionHook and accuracy appended to
    acc_log.

    Fix over the original: the checkpoint-file rewrite used Python 2
    ``print >> file`` statements, which raise a TypeError under Python 3;
    they are replaced with ``print(..., file=...)``.
    """
    # Disable all dropout for evaluation.
    for k in params.keys():
        if 'dropout' in k:
            params[k] = 0.0
    eval_input_fn = lambda: input_fn(
        dev_src,
        dev_trg,
        vocab_src,
        vocab_trg,
        ulid_file=dev_ulid,
        batch_size=params["decode_batch_size"],
        is_train=False,
        use_script_embedding=None,
        use_word_embedding=None,
        script_vocab_file=None,
        src_word_vocab_file=None
    )
    eval_hooks = []
    eval_hooks.append(SaveEvaluationPredictionHook(dev_out, dev_src, dev_trg, vocab_trg, acc_log))
    gpu_options = tf.GPUOptions(allow_growth=True)
    # Kept from the original: this session is never used directly, but it is
    # created before the estimator and may pin GPU memory behaviour.
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    session_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, log_device_placement=False)
    eval_model_dir = model_dir.rstrip('\/')
    if params['ema_decay'] > 0.0:
        # Evaluate the EMA-shadow weights from a separate directory (see cvt).
        eval_model_dir = model_dir.rstrip('\/') + '/ema'
    transformer = tf.estimator.Estimator(model_fn=model_fn, model_dir=eval_model_dir, params=params, config=tf.estimator.RunConfig(session_config=session_config))
    try:
        os.makedirs(os.path.dirname(dev_out))
    except OSError as e:
        if e.errno != errno.EEXIST:
            tf.logging.info(e)
            raise
    if params['continuous_eval'] == True:
        current = 1
        if params["eval_from_step"] > 0:
            current = params["eval_from_step"] - 1
        while True:
            ckpts = sorted_dir(model_dir)
            for ckpt in ckpts:
                iteration = int(re.sub(r'^.*?(\d+).*', r'\1', ckpt))
                if iteration > current and os.path.isfile(model_dir.rstrip('\/') + '/' + ckpt):
                    if params['ema_decay'] > 0.0:
                        cvt(model_dir.rstrip('\/') + '/' + ckpt.replace('.index', ''), eval_model_dir)
                    else:
                        # Point the checkpoint file at this specific checkpoint.
                        with open(model_dir.rstrip('\/') + '/checkpoint') as ck:
                            old = ck.readline()  # previous pointer line, discarded
                            keep = ck.read()
                        with open(model_dir.rstrip('\/') + '/checkpoint', 'w') as newck:
                            # Fixed: original used Python 2 "print >>" syntax,
                            # a TypeError under Python 3.
                            print('model_checkpoint_path: "' + ckpt.replace('.index', '') + '"', file=newck)
                            print(keep, file=newck)
                    # Remove any stale prediction file for this step.
                    try:
                        os.remove(dev_out + '.' + str(iteration))
                    except OSError:
                        pass
                    tf.logging.info("start evaluating...")
                    # Pre-append a "<step>\t" row so the hook's accuracy line
                    # lands next to its step number in acc_log.
                    normal_bleu_lines = []
                    if tf.gfile.Exists(acc_log):
                        with tf.gfile.GFile(acc_log, 'r') as f:
                            bleu_lines = f.readlines()
                        normal_bleu_lines.extend(bleu_lines)
                    # add new steps first
                    normal_bleu_lines.append('%s\t' % iteration)
                    with tf.gfile.GFile(acc_log, 'w') as f:
                        f.write(''.join(normal_bleu_lines))
                    transformer.evaluate(eval_input_fn, hooks=eval_hooks)
                    current = iteration
            time.sleep(5)
    else:
        # One-shot evaluation of the checkpoint currently referenced.
        ckpt = open(model_dir.rstrip('\/') + '/checkpoint').readline().strip()
        ckpt = re.sub(r'^.*\: \"(.*?)\"', r'\1.index', ckpt)
        if params['ema_decay'] > 0.0:
            cvt(model_dir.rstrip('\/') + '/' + ckpt.replace('.index', ''), eval_model_dir)
        transformer.evaluate(eval_input_fn, hooks=eval_hooks)
if __name__ == '__main__':
    # tf.app.run parses flags and invokes main(_) defined above.
    tf.app.run()
| [
"tensorflow.contrib.framework.load_variable",
"tensorflow.logging.set_verbosity",
"time.sleep",
"tensorflow.contrib.framework.list_variables",
"numpy.argsort",
"tensorflow.gfile.GFile",
"tensorflow.GPUOptions",
"tensorflow.variables_initializer",
"tensorflow.app.run",
"tensorflow.Graph",
"os.lis... | [((828, 869), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (852, 869), True, 'import tensorflow as tf\n'), ((2632, 2665), 'tensorflow.get_collection', 'tf.get_collection', (['key_collection'], {}), '(key_collection)\n', (2649, 2665), True, 'import tensorflow as tf\n'), ((2677, 2712), 'tensorflow.get_collection', 'tf.get_collection', (['value_collection'], {}), '(value_collection)\n', (2694, 2712), True, 'import tensorflow as tf\n'), ((8902, 8934), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (8915, 8934), True, 'import tensorflow as tf\n'), ((9026, 9124), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, allow_soft_placement=True,\n log_device_placement=False)\n', (9040, 9124), True, 'import tensorflow as tf\n'), ((11983, 11995), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (11993, 11995), True, 'import tensorflow as tf\n'), ((961, 993), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_file', '"""r"""'], {}), "(output_file, 'r')\n", (975, 993), True, 'import tensorflow as tf\n'), ((1003, 1044), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (["(output_file + '.res')", '"""w"""'], {}), "(output_file + '.res', 'w')\n", (1017, 1044), True, 'import tensorflow as tf\n'), ((2296, 2324), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['acc_log', '"""a"""'], {}), "(acc_log, 'a')\n", (2310, 2324), True, 'import tensorflow as tf\n'), ((3789, 3818), 'tensorflow.get_collection', 'tf.get_collection', (['"""features"""'], {}), "('features')\n", (3806, 3818), True, 'import tensorflow as tf\n'), ((3838, 3865), 'tensorflow.get_collection', 'tf.get_collection', (['"""labels"""'], {}), "('labels')\n", (3855, 3865), True, 'import tensorflow as tf\n'), ((3963, 3989), 'tensorflow.train.get_global_step', 
'tf.train.get_global_step', ([], {}), '()\n', (3987, 3989), True, 'import tensorflow as tf\n'), ((6327, 6399), 'tensorflow.logging.info', 'tf.logging.info', (['"""Evaluation predictions saved to %s"""', 'self._output_path'], {}), "('Evaluation predictions saved to %s', self._output_path)\n", (6342, 6399), True, 'import tensorflow as tf\n'), ((6505, 6547), 'tensorflow.logging.info', 'tf.logging.info', (['"""accuracy: %s"""', 'score[:5]'], {}), "('accuracy: %s', score[:5])\n", (6520, 6547), True, 'import tensorflow as tf\n'), ((7858, 7884), 'os.path.join', 'os.path.join', (['folder', 'name'], {}), '(folder, name)\n', (7870, 7884), False, 'import os, errno\n'), ((7900, 7922), 'os.path.getmtime', 'os.path.getmtime', (['path'], {}), '(path)\n', (7916, 7922), False, 'import os, errno\n'), ((8100, 8138), 'os.path.getmtime', 'os.path.getmtime', (["(dev_out + '.' + name)"], {}), "(dev_out + '.' + name)\n", (8116, 8138), False, 'import os, errno\n'), ((8146, 8177), 're.sub', 're.sub', (['"""^.*?(\\\\d+)$"""', '"""\\\\1"""', 'f'], {}), "('^.*?(\\\\d+)$', '\\\\1', f)\n", (8152, 8177), False, 'import re\n'), ((8447, 8686), 'data_reader.input_fn', 'input_fn', (['dev_src', 'dev_trg', 'vocab_src', 'vocab_trg'], {'ulid_file': 'dev_ulid', 'batch_size': "params['decode_batch_size']", 'is_train': '(False)', 'use_script_embedding': 'None', 'use_word_embedding': 'None', 'script_vocab_file': 'None', 'src_word_vocab_file': 'None'}), "(dev_src, dev_trg, vocab_src, vocab_trg, ulid_file=dev_ulid,\n batch_size=params['decode_batch_size'], is_train=False,\n use_script_embedding=None, use_word_embedding=None, script_vocab_file=\n None, src_word_vocab_file=None)\n", (8455, 8686), False, 'from data_reader import input_fn\n'), ((11720, 11767), 're.sub', 're.sub', (['"""^.*\\\\: \\\\"(.*?)\\\\\\""""', '"""\\\\1.index"""', 'ckpt'], {}), '(\'^.*\\\\: \\\\"(.*?)\\\\"\', \'\\\\1.index\', ckpt)\n', (11726, 11767), False, 'import re\n'), ((1174, 1202), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', 
(['dev_src', '"""r"""'], {}), "(dev_src, 'r')\n", (1188, 1202), True, 'import tensorflow as tf\n'), ((1418, 1446), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['dev_trg', '"""r"""'], {}), "(dev_trg, 'r')\n", (1432, 1446), True, 'import tensorflow as tf\n'), ((3909, 3938), 'tensorflow.get_collection', 'tf.get_collection', (['"""contexts"""'], {}), "('contexts')\n", (3926, 3938), True, 'import tensorflow as tf\n'), ((4024, 4040), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4038, 4040), False, 'import time\n'), ((4132, 4246), 'tensorflow.train.SessionRunArgs', 'tf.train.SessionRunArgs', (['[self._predictions, self._global_step, self._features, self._labels, self.\n _contexts]'], {}), '([self._predictions, self._global_step, self.\n _features, self._labels, self._contexts])\n', (4155, 4246), True, 'import tensorflow as tf\n'), ((4259, 4357), 'tensorflow.train.SessionRunArgs', 'tf.train.SessionRunArgs', (['[self._predictions, self._global_step, self._features, self._labels]'], {}), '([self._predictions, self._global_step, self.\n _features, self._labels])\n', (4282, 4357), True, 'import tensorflow as tf\n'), ((6166, 6182), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6180, 6182), False, 'import time\n'), ((6669, 6692), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (6679, 6692), True, 'import tensorflow as tf\n'), ((6791, 6842), 'tensorflow.contrib.framework.list_variables', 'tf.contrib.framework.list_variables', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6826, 6842), True, 'import tensorflow as tf\n'), ((7569, 7613), 'tensorflow.logging.info', 'tf.logging.info', (['"""Params sum is %d"""', 'var_sum'], {}), "('Params sum is %d', var_sum)\n", (7584, 7613), True, 'import tensorflow as tf\n'), ((7634, 7650), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7648, 7650), True, 'import tensorflow as tf\n'), ((7945, 7963), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (7955, 7963), False, 
'import os, errno\n'), ((8186, 8204), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (8196, 8204), False, 'import os, errno\n'), ((8208, 8233), 're.match', 're.match', (['""".*\\\\.\\\\d+$"""', 'f'], {}), "('.*\\\\.\\\\d+$', f)\n", (8216, 8233), False, 'import re\n'), ((8964, 9003), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (8978, 9003), True, 'import tensorflow as tf\n'), ((9361, 9414), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'session_config': 'session_config'}), '(session_config=session_config)\n', (9383, 9414), True, 'import tensorflow as tf\n'), ((9445, 9469), 'os.path.dirname', 'os.path.dirname', (['dev_out'], {}), '(dev_out)\n', (9460, 9469), False, 'import os, errno\n'), ((11604, 11617), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (11614, 11617), False, 'import time\n'), ((6622, 6632), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6630, 6632), True, 'import tensorflow as tf\n'), ((7672, 7706), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['var_list'], {}), '(var_list)\n', (7696, 7706), True, 'import tensorflow as tf\n'), ((9544, 9562), 'tensorflow.logging.info', 'tf.logging.info', (['e'], {}), '(e)\n', (9559, 9562), True, 'import tensorflow as tf\n'), ((4991, 5007), 'numpy.argsort', 'np.argsort', (['prob'], {}), '(prob)\n', (5001, 5007), True, 'import numpy as np\n'), ((5116, 5129), 'numpy.sort', 'np.sort', (['prob'], {}), '(prob)\n', (5123, 5129), True, 'import numpy as np\n'), ((6952, 7007), 'tensorflow.logging.info', 'tf.logging.info', (['"""var_name: %s:%s"""', 'var_name', 'var_shape'], {}), "('var_name: %s:%s', var_name, var_shape)\n", (6967, 7007), True, 'import tensorflow as tf\n'), ((7149, 7209), 'tensorflow.contrib.framework.load_variable', 'tf.contrib.framework.load_variable', (['checkpoint_dir', 'var_name'], {}), '(checkpoint_dir, var_name)\n', (7183, 7209), True, 'import tensorflow as tf\n'), ((7478, 
7515), 'tensorflow.Variable', 'tf.Variable', (['var_value'], {'name': 'var_name'}), '(var_value, name=var_name)\n', (7489, 7515), True, 'import tensorflow as tf\n'), ((9861, 9896), 're.sub', 're.sub', (['"""^.*?(\\\\d+).*"""', '"""\\\\1"""', 'ckpt'], {}), "('^.*?(\\\\d+).*', '\\\\1', ckpt)\n", (9867, 9896), False, 'import re\n'), ((10771, 10809), 'tensorflow.logging.info', 'tf.logging.info', (['"""start evaluating..."""'], {}), "('start evaluating...')\n", (10786, 10809), True, 'import tensorflow as tf\n'), ((10877, 10901), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['acc_log'], {}), '(acc_log)\n', (10892, 10901), True, 'import tensorflow as tf\n'), ((5767, 5820), 'tensorflow.logging.info', 'tf.logging.info', (["('text:' + devsrc + ' lang:' + devtrg)"], {}), "('text:' + devsrc + ' lang:' + devtrg)\n", (5782, 5820), True, 'import tensorflow as tf\n'), ((6052, 6090), 'tensorflow.logging.info', 'tf.logging.info', (["('predict:' + prob_str)"], {}), "('predict:' + prob_str)\n", (6067, 6090), True, 'import tensorflow as tf\n'), ((7082, 7119), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'var_shape'], {}), '(lambda x, y: x * y, var_shape)\n', (7088, 7119), False, 'from functools import reduce\n'), ((11263, 11291), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['acc_log', '"""w"""'], {}), "(acc_log, 'w')\n", (11277, 11291), True, 'import tensorflow as tf\n'), ((10932, 10960), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['acc_log', '"""r"""'], {}), "(acc_log, 'r')\n", (10946, 10960), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# @Author: Bao
# @Date: 2021-12-11 08:47:12
# @Last Modified by: dorihp
# @Last Modified time: 2022-01-07 14:19:15
import json
import time
import cv2
import numpy as np
from onvif import ONVIFCamera
class Detector():
    """Thin wrapper around OpenCV's DNN DetectionModel (Darknet/YOLO weights)."""
    def __init__(self, cfg, weights, classes, input_size):
        """Load the network and its class-name list.

        Args:
            cfg: path to the Darknet .cfg file.
            weights: path to the .weights file.
            classes: path to a newline-separated class-name file.
            input_size: square network input; must be a multiple of 32.
        """
        super(Detector, self).__init__()
        assert input_size % 32 == 0, "Input size must be a multiple of 32!"
        # Init detector and its parameters (CPU backend, 1/255 scaling, RGB swap).
        self.net = cv2.dnn_DetectionModel(cfg, weights)
        self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        self.input_size = input_size
        self.net.setInputSize(input_size, input_size)
        self.net.setInputScale(1.0 / 255)
        self.net.setInputSwapRB(True)
        with open(classes, 'rt') as f:
            self.classes = f.read().rstrip('\n').split('\n')

    def detect(self, frame, cl_filter):
        """Run detection on *frame*, drawing boxes and centers in place.

        Returns (annotated frame, detected class ids, bounding boxes).
        NOTE(review): cl_filter is currently unused -- the class filter below
        is commented out, so every detection above the 0.1 confidence
        threshold is drawn; confirm whether filtering should be restored.
        """
        classes, _, boxes = self.net.detect(frame, confThreshold=0.1, nmsThreshold=0.4)
        cen_x = cen_y = False
        # print(classes, boxes)
        if len(classes):
            for _class, box in zip(classes.flatten(), boxes):
                # if _class != cl_filter:
                #     continue
                left, top, width, height = box
                # Box center, drawn as a small red dot.
                cen_x = int(left + width / 2)
                cen_y = int(top + height / 2)
                right = left + width
                bottom = top + height
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.circle(frame, (cen_x, cen_y), radius=0, color=(0, 0, 255), thickness=5)
                # break
        return frame, classes, boxes
        # return frame, cen_x, cen_y
# return frame, cen_x, cen_y
class Undistort():
    """Applies lens-distortion correction using saved calibration data."""

    def __init__(self, params):
        """Load camera matrix and distortion coefficients from an .npz file
        produced by the calibration script, and precompute the optimal new
        camera matrix for 640x480 frames."""
        super(Undistort, self).__init__()
        calibration = np.load(params)
        self.mtx = calibration["mtx"]
        self.dist = calibration["dist"]
        self.newcameramtx, _ = cv2.getOptimalNewCameraMatrix(self.mtx, self.dist, (640, 480), 1, (640, 480))

    def do_undistort(self, frame):
        """Return *frame* with lens distortion removed."""
        return cv2.undistort(frame, self.mtx, self.dist, None, self.newcameramtx)
def get_cur_position(ptz, token):
    """Read the camera's current Pan/Tilt position via the ONVIF PTZ service.

    http://www.onvif.org/onvif/ver20/ptz/wsdl/ptz.wsdl#op.GetStatus

    Args:
        ptz: ONVIF PTZ service object.
        token: media profile token identifying the camera profile.
    Returns:
        dict with 'x' (pan) and 'y' (tilt) floats.
    """
    pan_tilt = ptz.GetStatus({'ProfileToken': token}).Position.PanTilt
    return {'x': pan_tilt.x, 'y': pan_tilt.y}
def do_rel_move(ptz, token, pan, tilt, zoom=0):
    ''' Perform relative move
        http://www.onvif.org/onvif/ver20/ptz/wsdl/ptz.wsdl#op.RelativeMove
    Args:
        - pan, tilt, zoom: movement position value
    '''
    moverequest = ptz.create_type('RelativeMove')
    moverequest.ProfileToken = token
    if moverequest.Speed is None:
        # Seed Translation/Speed with defaults when the service reports none.
        moverequest.Translation = {'PanTilt': {'x': 0.0, 'y': 0.0}}
        moverequest.Speed = ptz.GetStatus({'ProfileToken': token}).Position
    moverequest.Translation = {'PanTilt': {'x': pan, 'y': tilt}, 'Zoom': {'x': zoom}}
    # NOTE(review): assigning a dict to Speed.Zoom.x looks wrong -- the ONVIF
    # schema expects a float speed here; confirm the intended speed payload.
    moverequest.Speed.Zoom.x = {'PanTilt': {'x': 1, 'y': 1}, 'Zoom': 1}
    ptz.RelativeMove(moverequest)
def undistort_point(x, y):
    """Map a single pixel coordinate through the undistortion transform.

    Loads the calibration from "parameters.npz" on every call and returns
    the undistorted point as produced by cv2.undistortPoints (a 1x1x2
    float array).
    """
    calibration = np.load("parameters.npz")
    mtx = calibration["mtx"]
    dist = calibration["dist"]
    rvecs = calibration["rvecs"]  # loaded but unused, kept for parity
    tvecs = calibration["tvecs"]  # loaded but unused, kept for parity
    # NOTE(review): (w, h) here resolves to (480, 640) while Undistort uses
    # (640, 480) -- confirm which orientation the frames actually have.
    h, w = 640, 480
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    point = np.asarray((x, y), dtype=np.float32)[np.newaxis, :]
    return cv2.undistortPoints(point, mtx, dist, P=newcameramtx)
def get_point(image):
    """Locate a green object in *image* by HSV color filtering.

    Returns:
        (center, diff): center is an (x, y) tuple for the largest green
        contour with area >= 50 px^2, or None when no such contour exists;
        diff is the grayscale masked image with the bounding box drawn (when
        a contour was found).

    Fix over the original: when contours existed but none passed the area
    threshold, cv2.rectangle was called with None coordinates and crashed.
    The rectangle/center computation is now guarded.
    """
    result = image.copy()
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Red object
    # lower = np.array([155, 25, 0])
    # upper = np.array([179, 255, 255])
    # Green object
    lower = np.array([36, 25, 25])
    upper = np.array([70, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    diff = cv2.bitwise_and(result, result, mask=mask)
    diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    cnts, _ = cv2.findContours(diff, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return_cnt = None
    com_x1 = com_y1 = com_x2 = com_y2 = None
    largest = 0
    # Keep the bounding box of the largest sufficiently-big contour.
    for cnt in cnts:
        if cv2.contourArea(cnt) < 50:
            continue
        x, y, w, h = cv2.boundingRect(cnt)
        if w * h > largest:
            largest = w * h
            com_x1, com_y1, com_x2, com_y2 = x, y, x + w, y + h
    if com_x1 is not None:
        cv2.rectangle(diff, (com_x1, com_y1), (com_x2, com_y2), (255, 255, 255), 2)
        return_cnt = (int((com_x1 + com_x2) / 2), int((com_y1 + com_y2) / 2))
    return return_cnt, diff
def main(detector, undistort):
    ''' In this function:
        - Do undistort on the first frame
        - Detect an object and get its location
        - Perform a relative moving
        - Do undistort on the second frame
        - Detect and get location of the previous object again
        The object must be the only one in context
    '''
    # Connect to the camera's ONVIF services using credentials from config.
    with open("../../config.json", "r") as f:
        data = json.load(f)
    mycam = ONVIFCamera(data["ip_camera"], 80, data["onvif_id"], data["onvif_pwd"])
    # Create media service object
    media = mycam.create_media_service()
    # Create ptz service object
    ptz = mycam.create_ptz_service()
    # Get target profile
    media_profile = media.GetProfiles()[0]
    token = media_profile.token
    cap = cv2.VideoCapture(data["rtsp_link"])
    frame_idx = 0
    # Frames 1..99 are read but discarded -- presumably to let the PTZ
    # movement settle before sampling the "after" frame; confirm timing.
    while 1:
        # Capture the first frame
        ret, frame = cap.read()
        if not ret:
            print("Cannot connect to camera!")
            return
        if frame_idx == 0:
            # NOTE(review): axis=0 flips the image vertically -- presumably
            # the camera is mounted upside down; confirm.
            frame = np.flip(frame, axis=0)
            frame = undistort.do_undistort(frame)
            # Get current camera position in Onvif coordinate system
            location = get_cur_position(ptz, token)
            print("Location before moving: Pan-%.5f \t Tilt-%.5f" %(location['x'], location['y']))
            # Detect object in this frame
            # frame_1, obj_x, obj_y = detector.detect(frame, 41)
            # print("Object's center coordinate before moving: ", obj_x, ":", obj_y)
            # Get object with color
            cnt, frame_1 = get_point(frame)
            print("Object's center coordinate before moving: ", cnt)
            # Do relative move
            move_p = 0.1
            move_t = 0.2
            do_rel_move(ptz, token, move_p, move_t)
        if frame_idx == 100:
            frame = np.flip(frame, axis=0)
            frame = undistort.do_undistort(frame)
            # Get new position of object
            # frame_2, _obj_x, _obj_y = detector.detect(frame, 1)
            # print("Object's center coordinate after moving: ", _obj_x, ":", _obj_x)
            _cnt, frame_2 = get_point(frame)
            print("Object's center coordinate after moving: ", _cnt)
            break
        frame_idx += 1
    # Show the before/after frames side by side until a key is pressed.
    cv2.imshow("Before", frame_1)
    cv2.imshow("After", frame_2)
    cv2.waitKey(0)
    cap.release()
if __name__ == '__main__':
detector = Detector("./yolov4/yolov4-tiny.cfg", "./yolov4/yolov4-tiny.weights", "./yolov4/coco.names", 416)
undistort = Undistort("./calibrate-undistort-camera/parameters.npz")
main(detector, undistort)
| [
"cv2.rectangle",
"onvif.ONVIFCamera",
"cv2.imshow",
"numpy.array",
"numpy.flip",
"cv2.undistort",
"numpy.asarray",
"cv2.contourArea",
"cv2.waitKey",
"cv2.getOptimalNewCameraMatrix",
"cv2.circle",
"cv2.cvtColor",
"cv2.undistortPoints",
"cv2.findContours",
"cv2.inRange",
"cv2.bitwise_and... | [((3469, 3494), 'numpy.load', 'np.load', (['"""parameters.npz"""'], {}), "('parameters.npz')\n", (3476, 3494), True, 'import numpy as np\n'), ((3672, 3731), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['mtx', 'dist', '(w, h)', '(1)', '(w, h)'], {}), '(mtx, dist, (w, h), 1, (w, h))\n', (3701, 3731), False, 'import cv2\n'), ((3853, 3909), 'cv2.undistortPoints', 'cv2.undistortPoints', (['in_point', 'mtx', 'dist'], {'P': 'newcameramtx'}), '(in_point, mtx, dist, P=newcameramtx)\n', (3872, 3909), False, 'import cv2\n'), ((4083, 4121), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (4095, 4121), False, 'import cv2\n'), ((4256, 4278), 'numpy.array', 'np.array', (['[36, 25, 25]'], {}), '([36, 25, 25])\n', (4264, 4278), True, 'import numpy as np\n'), ((4292, 4316), 'numpy.array', 'np.array', (['[70, 255, 255]'], {}), '([70, 255, 255])\n', (4300, 4316), True, 'import numpy as np\n'), ((4331, 4363), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (4342, 4363), False, 'import cv2\n'), ((4378, 4420), 'cv2.bitwise_and', 'cv2.bitwise_and', (['result', 'result'], {'mask': 'mask'}), '(result, result, mask=mask)\n', (4393, 4420), False, 'import cv2\n'), ((4433, 4471), 'cv2.cvtColor', 'cv2.cvtColor', (['diff', 'cv2.COLOR_BGR2GRAY'], {}), '(diff, cv2.COLOR_BGR2GRAY)\n', (4445, 4471), False, 'import cv2\n'), ((4487, 4553), 'cv2.findContours', 'cv2.findContours', (['diff', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(diff, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (4503, 4553), False, 'import cv2\n'), ((5307, 5382), 'cv2.rectangle', 'cv2.rectangle', (['diff', '(com_x1, com_y1)', '(com_x2, com_y2)', '(255, 255, 255)', '(2)'], {}), '(diff, (com_x1, com_y1), (com_x2, com_y2), (255, 255, 255), 2)\n', (5320, 5382), False, 'import cv2\n'), ((5916, 5987), 'onvif.ONVIFCamera', 'ONVIFCamera', (["data['ip_camera']", '(80)', 
"data['onvif_id']", "data['onvif_pwd']"], {}), "(data['ip_camera'], 80, data['onvif_id'], data['onvif_pwd'])\n", (5927, 5987), False, 'from onvif import ONVIFCamera\n'), ((6254, 6289), 'cv2.VideoCapture', 'cv2.VideoCapture', (["data['rtsp_link']"], {}), "(data['rtsp_link'])\n", (6270, 6289), False, 'import cv2\n'), ((7819, 7848), 'cv2.imshow', 'cv2.imshow', (['"""Before"""', 'frame_1'], {}), "('Before', frame_1)\n", (7829, 7848), False, 'import cv2\n'), ((7854, 7882), 'cv2.imshow', 'cv2.imshow', (['"""After"""', 'frame_2'], {}), "('After', frame_2)\n", (7864, 7882), False, 'import cv2\n'), ((7888, 7902), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7899, 7902), False, 'import cv2\n'), ((507, 543), 'cv2.dnn_DetectionModel', 'cv2.dnn_DetectionModel', (['cfg', 'weights'], {}), '(cfg, weights)\n', (529, 543), False, 'import cv2\n'), ((1945, 1960), 'numpy.load', 'np.load', (['params'], {}), '(params)\n', (1952, 1960), True, 'import numpy as np\n'), ((2076, 2153), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['self.mtx', 'self.dist', '(640, 480)', '(1)', '(640, 480)'], {}), '(self.mtx, self.dist, (640, 480), 1, (640, 480))\n', (2105, 2153), False, 'import cv2\n'), ((2208, 2274), 'cv2.undistort', 'cv2.undistort', (['frame', 'self.mtx', 'self.dist', 'None', 'self.newcameramtx'], {}), '(frame, self.mtx, self.dist, None, self.newcameramtx)\n', (2221, 2274), False, 'import cv2\n'), ((3788, 3826), 'numpy.asarray', 'np.asarray', (['in_point'], {'dtype': 'np.float32'}), '(in_point, dtype=np.float32)\n', (3798, 3826), True, 'import numpy as np\n'), ((5888, 5900), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5897, 5900), False, 'import json\n'), ((4832, 4853), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (4848, 4853), False, 'import cv2\n'), ((6535, 6557), 'numpy.flip', 'np.flip', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (6542, 6557), True, 'import numpy as np\n'), ((7377, 7399), 'numpy.flip', 'np.flip', 
(['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (7384, 7399), True, 'import numpy as np\n'), ((1558, 1624), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (1571, 1624), False, 'import cv2\n'), ((1642, 1717), 'cv2.circle', 'cv2.circle', (['frame', '(cen_x, cen_y)'], {'radius': '(0)', 'color': '(0, 0, 255)', 'thickness': '(5)'}), '(frame, (cen_x, cen_y), radius=0, color=(0, 0, 255), thickness=5)\n', (1652, 1717), False, 'import cv2\n'), ((4753, 4773), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4768, 4773), False, 'import cv2\n')] |
from tkinter import *
from tkinter.font import Font
from ttk import *
from tkinter.scrolledtext import ScrolledText
from tkinter import messagebox
import tensorflow as tf
import numpy as np
import re
import seq2seq
# preprocessed data
import Final_data
import data_utils
# load data from pickle and npy files
metadata, idx_q, idx_a = Final_data.load_data(PATH='./Final_META/')
(trainX, trainY), (testX, testY), (validX, validY) = data_utils.split_dataset(idx_q, idx_a)
# parameters
xseq_len = trainX.shape[-1]
yseq_len = trainY.shape[-1]
batch_size = 32
xvocab_size = len(metadata['idx2w'])
yvocab_size = xvocab_size
emb_dim = 1024
#Model created
model = seq2seq.Seq2Seq(xseq_len=xseq_len,
yseq_len=yseq_len,
xvocab_size=xvocab_size,
yvocab_size=yvocab_size,
ckpt_path='ckpt/',
emb_dim=emb_dim,
num_layers=3
)
#Batch generation
val_batch_gen = data_utils.rand_batch_gen(validX, validY, batch_size)
test_batch_gen = data_utils.rand_batch_gen(testX, testY, batch_size)
train_batch_gen = data_utils.rand_batch_gen(trainX, trainY, batch_size)
sess = model.restore_last_session()
#Create a window frame
window = Tk()
#Set Icon
window.iconbitmap(default='logo.ico')
#Set Size
window.geometry("500x600")
#Set Style
window.style = Style()
#('clam', 'alt', 'default', 'classic')
window.style.theme_use("clam")
#Give title to window
window.wm_title("demiBot")
window.option_add('*font', 'Helvetica 11')
#Menu Funtion Reset
def reset():
messages.config(state=NORMAL)
messages.delete('1.0', END)
messages.config(state=DISABLED)
#Menu Funtion About
def about():
messagebox.showinfo("About demiBot", "demiBot is a chatter bot trained on movie dialogues from over 3000 movies with over 290,000 dialouges. \n\nApart from being a movie aficionado it also has a little bit of twitter in him.\n\n\t\tEnjoy chatting with him!!!")
#Menubar
menubar = Menu(window)
menubar.add_command(label="Reset", command=reset)
menubar.add_command(label="About", command=about)
menubar.add_command(label="Quit!", command=window.quit)
window.config(menu=menubar)
#Message Box For Chat
messages = ScrolledText(window)
messages.pack(fill=X,padx=10, pady=10)
messages.config(state=DISABLED)
messages.tag_configure('tag-left', justify='left')
messages.tag_configure('tag-right', justify='right')
UserFont = Font(family="Malgun Gothic Semilight", size=12)
#Input Field For Typing
input_user = StringVar()
input_field = Entry(window, text=input_user)
input_field.pack(side=BOTTOM, fill=X, padx=15, pady=15)
def Enter_pressed(event):
quest = input_field.get()
if quest!='' :
messages.config(state=NORMAL)
messages.configure(font=UserFont)
messages.insert(END, 'User : \n%s\n' % quest,'tag-left')
messages.yview_moveto(messages.yview()[1])
input_user.set('')
print(quest)
else:
return "break"
quest = quest.lower()
quest = Final_data.filter_line(quest, Final_data.EN_WHITELIST)
que_tok = [w.strip() for w in quest.split(' ') if w]
#for q in zip(que_tok):
print(que_tok)
inp_idx = Final_data.pad_seq(que_tok,metadata['w2idx'],Final_data.limit['maxq'])
#for q in range(inp_idx):
#print(inp_idx)
inp_idx_arr = np.zeros([1, Final_data.limit['maxq']], dtype=np.int32)
inp_idx_arr[0] = np.array(inp_idx)
no = 0
#print(inp_idx_arr.shape)
input_ = test_batch_gen.__next__()[0]
#output = model.predict(sess, inp_idx_arr.T)
while True:
output = model.predict(sess, inp_idx_arr.T, no)
answ = ''
for ii, oi in zip(inp_idx_arr, output):
q = data_utils.decode(sequence=ii, lookup=metadata['idx2w'], separator=' ')
decoded = data_utils.decode(sequence=oi, lookup=metadata['idx2w'], separator=' ').split(' ')
if decoded.count('unk') <= 2:
print('q : [{0}]; a : [{1}]'.format(q, ' '.join(decoded)))
answ = ' '.join(decoded)
answ = re.sub("\s\s+", " ", answ)
#messages.configure(font=demiFont)
messages.insert(END, 'demiBot : \n%s\n' % answ, 'tag-right')
messages.yview_moveto(messages.yview()[1])
messages.config(state=DISABLED)
return
else:
#print('else : q : [{0}]; a : [{1}]'.format(q, ' '.join(decoded)))
pass
no = no + 1
messages.insert(END, 'demiBot : \n%s\n Sorry! I am not able to answer that\n' , 'tag-right')
messages.yview_moveto(messages.yview()[1])
messages.config(state=DISABLED)
return "break"
#frame window with bind value
frame = Frame(window) # , width=300, height=300)
input_field.bind("<Return>", Enter_pressed)
frame.pack()
window.mainloop()
| [
"data_utils.rand_batch_gen",
"seq2seq.Seq2Seq",
"Final_data.load_data",
"data_utils.split_dataset",
"data_utils.decode",
"Final_data.pad_seq",
"tkinter.font.Font",
"tkinter.scrolledtext.ScrolledText",
"numpy.zeros",
"numpy.array",
"re.sub",
"Final_data.filter_line",
"tkinter.messagebox.showi... | [((338, 380), 'Final_data.load_data', 'Final_data.load_data', ([], {'PATH': '"""./Final_META/"""'}), "(PATH='./Final_META/')\n", (358, 380), False, 'import Final_data\n'), ((434, 472), 'data_utils.split_dataset', 'data_utils.split_dataset', (['idx_q', 'idx_a'], {}), '(idx_q, idx_a)\n', (458, 472), False, 'import data_utils\n'), ((661, 824), 'seq2seq.Seq2Seq', 'seq2seq.Seq2Seq', ([], {'xseq_len': 'xseq_len', 'yseq_len': 'yseq_len', 'xvocab_size': 'xvocab_size', 'yvocab_size': 'yvocab_size', 'ckpt_path': '"""ckpt/"""', 'emb_dim': 'emb_dim', 'num_layers': '(3)'}), "(xseq_len=xseq_len, yseq_len=yseq_len, xvocab_size=\n xvocab_size, yvocab_size=yvocab_size, ckpt_path='ckpt/', emb_dim=\n emb_dim, num_layers=3)\n", (676, 824), False, 'import seq2seq\n'), ((1068, 1121), 'data_utils.rand_batch_gen', 'data_utils.rand_batch_gen', (['validX', 'validY', 'batch_size'], {}), '(validX, validY, batch_size)\n', (1093, 1121), False, 'import data_utils\n'), ((1139, 1190), 'data_utils.rand_batch_gen', 'data_utils.rand_batch_gen', (['testX', 'testY', 'batch_size'], {}), '(testX, testY, batch_size)\n', (1164, 1190), False, 'import data_utils\n'), ((1209, 1262), 'data_utils.rand_batch_gen', 'data_utils.rand_batch_gen', (['trainX', 'trainY', 'batch_size'], {}), '(trainX, trainY, batch_size)\n', (1234, 1262), False, 'import data_utils\n'), ((2305, 2325), 'tkinter.scrolledtext.ScrolledText', 'ScrolledText', (['window'], {}), '(window)\n', (2317, 2325), False, 'from tkinter.scrolledtext import ScrolledText\n'), ((2512, 2559), 'tkinter.font.Font', 'Font', ([], {'family': '"""Malgun Gothic Semilight"""', 'size': '(12)'}), "(family='Malgun Gothic Semilight', size=12)\n", (2516, 2559), False, 'from tkinter.font import Font\n'), ((1793, 2060), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""About demiBot"""', '"""demiBot is a chatter bot trained on movie dialogues from over 3000 movies with over 290,000 dialouges. 
\n\nApart from being a movie aficionado it also has a little bit of twitter in him.\n\n\t\tEnjoy chatting with him!!!"""'], {}), '(\'About demiBot\',\n """demiBot is a chatter bot trained on movie dialogues from over 3000 movies with over 290,000 dialouges. \n\nApart from being a movie aficionado it also has a little bit of twitter in him.\n\n\t\tEnjoy chatting with him!!!"""\n )\n', (1812, 2060), False, 'from tkinter import messagebox\n'), ((3104, 3158), 'Final_data.filter_line', 'Final_data.filter_line', (['quest', 'Final_data.EN_WHITELIST'], {}), '(quest, Final_data.EN_WHITELIST)\n', (3126, 3158), False, 'import Final_data\n'), ((3278, 3350), 'Final_data.pad_seq', 'Final_data.pad_seq', (['que_tok', "metadata['w2idx']", "Final_data.limit['maxq']"], {}), "(que_tok, metadata['w2idx'], Final_data.limit['maxq'])\n", (3296, 3350), False, 'import Final_data\n'), ((3418, 3473), 'numpy.zeros', 'np.zeros', (["[1, Final_data.limit['maxq']]"], {'dtype': 'np.int32'}), "([1, Final_data.limit['maxq']], dtype=np.int32)\n", (3426, 3473), True, 'import numpy as np\n'), ((3495, 3512), 'numpy.array', 'np.array', (['inp_idx'], {}), '(inp_idx)\n', (3503, 3512), True, 'import numpy as np\n'), ((3808, 3879), 'data_utils.decode', 'data_utils.decode', ([], {'sequence': 'ii', 'lookup': "metadata['idx2w']", 'separator': '""" """'}), "(sequence=ii, lookup=metadata['idx2w'], separator=' ')\n", (3825, 3879), False, 'import data_utils\n'), ((4166, 4194), 're.sub', 're.sub', (['"""\\\\s\\\\s+"""', '""" """', 'answ'], {}), "('\\\\s\\\\s+', ' ', answ)\n", (4172, 4194), False, 'import re\n'), ((3902, 3973), 'data_utils.decode', 'data_utils.decode', ([], {'sequence': 'oi', 'lookup': "metadata['idx2w']", 'separator': '""" """'}), "(sequence=oi, lookup=metadata['idx2w'], separator=' ')\n", (3919, 3973), False, 'import data_utils\n')] |
#!/usr/bin/env python
"""
train_SVM.py
VARPA, University of Coruna
<NAME>, <NAME>.
26 Oct 2017
"""
from sklearn import metrics
import numpy as np
class performance_measures:
def __init__(self, n):
self.n_classes = n
self.confusion_matrix = np.empty([])
self.Recall = np.empty(n)
self.Precision = np.empty(n)
self.Specificity = np.empty(n)
self.Acc = np.empty(n)
self.F_measure = np.empty(n)
self.gmean_se = 0.0
self.gmean_p = 0.0
self.Overall_Acc = 0.0
self.kappa = 0.0
self.Ij = 0.0
self.Ijk = 0.0
# Compute Cohen' kappa from a confussion matrix
# Kappa value:
# < 0.20 Poor
# 0.21-0.40 Fair
# 0.41-0.60 Moderate
# 0.61-0.80 Good
# 0.81-1.00 Very good
def compute_cohen_kappa(confusion_matrix):
prob_expectedA = np.empty(len(confusion_matrix))
prob_expectedB = np.empty(len(confusion_matrix))
prob_observed = 0
for n in range(0, len(confusion_matrix)):
prob_expectedA[n] = sum(confusion_matrix[n,:]) / sum(sum(confusion_matrix))
prob_expectedB[n] = sum(confusion_matrix[:,n]) / sum(sum(confusion_matrix))
prob_observed = prob_observed + confusion_matrix[n][n]
prob_expected = np.dot(prob_expectedA, prob_expectedB)
prob_observed = prob_observed / sum(sum(confusion_matrix))
kappa = (prob_observed - prob_expected) / (1 - prob_expected)
return kappa, prob_observed, prob_expected
# Compute the performance measures following the AAMI recommendations.
# Using sensivity (recall), specificity (precision) and accuracy
# for each class: (N, SVEB, VEB, F)
def compute_AAMI_performance_measures(predictions, gt_labels):
n_classes = 4 #5
pf_ms = performance_measures(n_classes)
# TODO If conf_mat no llega a clases 4 por gt_labels o predictions...
# hacer algo para que no falle el codigo...
# NOTE: added labels=[0,1,2,3])...
# Confussion matrix
conf_mat = metrics.confusion_matrix(gt_labels, predictions, labels=[0,1,2,3])
conf_mat = conf_mat.astype(float)
pf_ms.confusion_matrix = conf_mat
# Overall Acc
pf_ms.Overall_Acc = metrics.accuracy_score(gt_labels, predictions)
# AAMI: Sens, Spec, Acc
# N: 0, S: 1, V: 2, F: 3 # (Q: 4) not used
for i in range(0, n_classes):
TP = conf_mat[i,i]
FP = sum(conf_mat[:,i]) - conf_mat[i,i]
TN = sum(sum(conf_mat)) - sum(conf_mat[i,:]) - sum(conf_mat[:,i]) + conf_mat[i,i]
FN = sum(conf_mat[i,:]) - conf_mat[i,i]
if i == 2: # V
# Exceptions for AAMI recomendations:
# 1 do not reward or penalize a classifier for the classification of (F) as (V)
FP = FP - conf_mat[i][3]
pf_ms.Recall[i] = TP / (TP + FN)
pf_ms.Precision[i] = TP / (TP + FP)
pf_ms.Specificity[i] = TN / (TN + FP); # 1-FPR
pf_ms.Acc[i] = (TP + TN) / (TP + TN + FP + FN)
if TP == 0:
pf_ms.F_measure[i] = 0.0
else:
pf_ms.F_measure[i] = 2 * (pf_ms.Precision[i] * pf_ms.Recall[i] )/ (pf_ms.Precision[i] + pf_ms.Recall[i])
# Compute Cohen's Kappa
pf_ms.kappa, prob_obsv, prob_expect = compute_cohen_kappa(conf_mat)
# Compute Index-j recall_S + recall_V + precision_S + precision_V
pf_ms.Ij = pf_ms.Recall[1] + pf_ms.Recall[2] + pf_ms.Precision[1] + pf_ms.Precision[2]
# Compute Index-jk
w1 = 0.5
w2 = 0.125
pf_ms.Ijk = w1 * pf_ms.kappa + w2 * pf_ms.Ij
return pf_ms
# Export to filename.txt file the performance measure score
def write_AAMI_results(performance_measures, filename):
f = open(filename, "w")
f.write("Ijk: " + str(format(performance_measures.Ijk, '.4f')) + "\n")
f.write("Ij: " + str(format(performance_measures.Ij, '.4f'))+ "\n")
f.write("Cohen's Kappa: " + str(format(performance_measures.kappa, '.4f'))+ "\n\n")
# Conf matrix
f.write("Confusion Matrix:"+ "\n\n")
f.write("\n".join(str(elem) for elem in performance_measures.confusion_matrix.astype(int))+ "\n\n")
f.write("Overall ACC: " + str(format(performance_measures.Overall_Acc, '.4f'))+ "\n\n")
f.write("mean Acc: " + str(format(np.average(performance_measures.Acc[:]), '.4f'))+ "\n")
f.write("mean Recall: " + str(format(np.average(performance_measures.Recall[:]), '.4f'))+ "\n")
f.write("mean Precision: " + str(format(np.average(performance_measures.Precision[:]), '.4f'))+ "\n")
f.write("N:"+ "\n\n")
f.write("Sens: " + str(format(performance_measures.Recall[0], '.4f'))+ "\n")
f.write("Prec: " + str(format(performance_measures.Precision[0], '.4f'))+ "\n")
f.write("Acc: " + str(format(performance_measures.Acc[0], '.4f'))+ "\n")
f.write("SVEB:"+ "\n\n")
f.write("Sens: " + str(format(performance_measures.Recall[1], '.4f'))+ "\n")
f.write("Prec: " + str(format(performance_measures.Precision[1], '.4f'))+ "\n")
f.write("Acc: " + str(format(performance_measures.Acc[1], '.4f'))+ "\n")
f.write("VEB:"+ "\n\n")
f.write("Sens: " + str(format(performance_measures.Recall[2], '.4f'))+ "\n")
f.write("Prec: " + str(format(performance_measures.Precision[2], '.4f'))+ "\n")
f.write("Acc: " + str(format(performance_measures.Acc[2], '.4f'))+ "\n")
f.write("F:"+ "\n\n")
f.write("Sens: " + str(format(performance_measures.Recall[3], '.4f'))+ "\n")
f.write("Prec: " + str(format(performance_measures.Precision[3], '.4f'))+ "\n")
f.write("Acc: " + str(format(performance_measures.Acc[3], '.4f'))+ "\n")
f.close()
| [
"numpy.average",
"numpy.dot",
"numpy.empty",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((1394, 1432), 'numpy.dot', 'np.dot', (['prob_expectedA', 'prob_expectedB'], {}), '(prob_expectedA, prob_expectedB)\n', (1400, 1432), True, 'import numpy as np\n'), ((2117, 2186), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['gt_labels', 'predictions'], {'labels': '[0, 1, 2, 3]'}), '(gt_labels, predictions, labels=[0, 1, 2, 3])\n', (2141, 2186), False, 'from sklearn import metrics\n'), ((2303, 2349), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['gt_labels', 'predictions'], {}), '(gt_labels, predictions)\n', (2325, 2349), False, 'from sklearn import metrics\n'), ((279, 291), 'numpy.empty', 'np.empty', (['[]'], {}), '([])\n', (287, 291), True, 'import numpy as np\n'), ((326, 337), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (334, 337), True, 'import numpy as np\n'), ((372, 383), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (380, 383), True, 'import numpy as np\n'), ((418, 429), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (426, 429), True, 'import numpy as np\n'), ((464, 475), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (472, 475), True, 'import numpy as np\n'), ((510, 521), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (518, 521), True, 'import numpy as np\n'), ((4377, 4416), 'numpy.average', 'np.average', (['performance_measures.Acc[:]'], {}), '(performance_measures.Acc[:])\n', (4387, 4416), True, 'import numpy as np\n'), ((4474, 4516), 'numpy.average', 'np.average', (['performance_measures.Recall[:]'], {}), '(performance_measures.Recall[:])\n', (4484, 4516), True, 'import numpy as np\n'), ((4577, 4622), 'numpy.average', 'np.average', (['performance_measures.Precision[:]'], {}), '(performance_measures.Precision[:])\n', (4587, 4622), True, 'import numpy as np\n')] |
import numpy as np
class ClassificationMatrix:
"""Provides measures of performance for prediction against targets.
Members:
self.classSymbol:
self.classes: Unique list of objects that is found in 'targets'
and prediction'. It is lexicographically ordered
like Python set convention.
self.classes_lookup: A lookup of class symbols indices in
conf_matrix.
self.conf_matrix: Confusion matrix.
Rows are for targets (Observations) and
columns are for prediction (Predictions).
"""
def __init__(self, predictions, targets, class_symbol=None):
"""Construct a classification matrix for prediction against targets.
Args:
predictions (iterable): Classification predictions
for two or more classes.
targets (iterable): Classification targets (Observation)
for two or more classes.
classSymbol (object):The symbol that takes as true to find
true-positives in case of binary
classification.
Whenever it is 'None', the first item of
targets takes as the symbol.
"""
# Empty target or prediction
if len(targets) == 0:
raise ValueError("'targets' cannot be empty.")
if len(predictions) == 0:
raise ValueError("'predictions' cannot be empty.")
# In case, store the first element of the targets
# to use as the symbol for the true cases, anytime
# that the caller does not provide it
self.class_symbol = class_symbol
if self.class_symbol is None:
self.class_symbol = targets[0]
# Classes symbols
self.classes = set(targets).union(set(predictions))
# The confusion matrix for calculations
self.conf_matrix = np.zeros((len(self.classes), len(self.classes)))
# create a lookup for the position of symbols in 'conf_matrix'
self.classes_lookup = dict([(c, i) for i, c in enumerate(self.classes)])
for (output, target) in zip(predictions, targets):
# Find the indices for classes from the lookup
i = self.classes_lookup[target]
j = self.classes_lookup[output]
# Update the confusion matrix based on
# the comparision of target against output
self.conf_matrix[i, j] += 1
def _get_class_symbole_index(self, class_symbol):
if class_symbol is None:
return self.classes_lookup[self.class_symbol]
else:
try:
return self.classes_lookup[class_symbol]
except KeyError:
raise KeyError(
f"The class '{class_symbol}' is not defined "
+ "in predictions or targets."
)
def tp(self, class_symbol=None):
"""True Positives
For Binary classification
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
Int: True Positives
"""
i = self._get_class_symbole_index(class_symbol)
return self.conf_matrix[i, i]
def fp(self, class_symbol=None):
"""False Positives
For Binary classification
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
Int: False Positives
"""
j = self._get_class_symbole_index(class_symbol)
# Select the column of the class
column = self.conf_matrix[:, j]
return np.sum([v for k, v in enumerate(column) if k != j])
def fn(self, class_symbol=None):
"""False Negative
For Binary classification
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
Int: False Negative
"""
i = self._get_class_symbole_index(class_symbol)
# Select the row of the class
row = self.conf_matrix[i, :]
return np.sum([v for k, v in enumerate(row) if k != i])
def tn(self, class_symbol=None):
"""True Negative
For Binary classification
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
Int: True Negative
"""
i = self._get_class_symbole_index(class_symbol)
# The diagonal of the 'conf_matrix', except the Tap
# is TN
diagonals = np.diag(self.conf_matrix)
return sum([v for k, v in enumerate(diagonals) if k != i])
def accuracy(self):
"""Finds the accuracy of the correctly classified cases.
It is equal to the ratio of the trace divided by total sum.
For 'Binray Classification', it is
(#TP + #TN)/(#TP + #FP + #TN + #FN)
Returns:
float: accuracy of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
return np.trace(self.conf_matrix) / np.sum(self.conf_matrix)
def precision(self, class_symbol=None):
"""Finds the precision of the correctly classified cases.
For 'Binray Classification', it is
#TP /(#TP + #FP)
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: precision of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
true_positives = self.tp(class_symbol)
false_positives = self.fp(class_symbol)
if true_positives + false_positives == 0:
return 0
else:
return true_positives / (true_positives + false_positives)
def recall(self, class_symbol=None):
"""Finds the recall of the correctly classified cases.
For 'Binray Classification', it is
#TP /(#TP + #FN)
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: recall of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
true_positives = self.tp(class_symbol)
false_positives = self.fn(class_symbol)
if true_positives + false_positives == 0:
return 0
else:
return true_positives / (true_positives + false_positives)
def sensitivity(self, class_symbol=None):
"""Finds the sensitivity of the correctly classified cases.
For 'Binray Classification', it is
#TP /(#TP + #FN)
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: sensitivity of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
true_positives = self.tp(class_symbol)
false_positives = self.fn(class_symbol)
if true_positives + false_positives == 0:
return 0
else:
return true_positives / (true_positives + false_positives)
def specificity(self, class_symbol=None):
"""Finds the specificity of the correctly classified cases.
For 'Binray Classification', it is
#TN /(#TN + #FP)
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: specificity of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
true_negatives = self.tn(class_symbol)
false_positives = self.fp(class_symbol)
if true_negatives + false_positives == 0:
return 0
else:
return true_negatives / (true_negatives + false_positives)
def f1(self, class_symbol=None):
"""F1 score of correctly classified cases.
f1 = (2 * precision * recall)/ (precision + recall)
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: f1 score of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
return self.f_beta(beta=1, class_symbol=class_symbol)
def matthews_corrcoef(self, class_symbol=None):
"""Matthews correlation coefficient of correctly classified cases.
MCC = (TP*TN - FP*FN)/ (TP+FP)(TP+FN)(TN+FP)(TN+FN)
Args:
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: Matthews correlation coefficient of correctly classified
cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
true_positives = self.tp(class_symbol)
true_negatives = self.tn(class_symbol)
false_positives = self.fp(class_symbol)
false_negatives = self.fn(class_symbol)
denom = (
(true_positives + false_positives)
* (true_positives + false_negatives)
* (true_negatives + false_positives)
* (true_negatives + false_negatives)
)
if denom == 0:
return np.inf
else:
return (
true_positives * true_negatives - false_positives * false_negatives
) / denom
def f_beta(self, beta=1.0, class_symbol=None):
"""F-beta score of correctly classified cases.
f-beta =
((1+beta^2) * precision * recall)/ (beta^2 * precision + recall)
Args:
beta (float, optional): Weight factor to control the precision
or recall importance. The higher beta means more recall
importance. Defaults to 1.0.
class_symbol (object, optional): the object that is taken as
true positive.
Defaults to None and take the first element of the
'targets' as true or the one that is specified in
constructor.
Returns:
float: f-beta score of correctly classified cases.
| Prediction
| 0 | 1
------------------------
t | |
a 0| TP | FN
r-----------------------
g | |
e 1| FP | TN
t | |
"""
precision = self.precision(class_symbol)
recall = self.recall(class_symbol)
beta_2 = beta ** 2
if precision + recall == 0:
return 0
else:
return ((1 + beta_2) * precision * recall) / (beta_2 * precision + recall)
| [
"numpy.trace",
"numpy.sum",
"numpy.diag"
] | [((6648, 6673), 'numpy.diag', 'np.diag', (['self.conf_matrix'], {}), '(self.conf_matrix)\n', (6655, 6673), True, 'import numpy as np\n'), ((7382, 7408), 'numpy.trace', 'np.trace', (['self.conf_matrix'], {}), '(self.conf_matrix)\n', (7390, 7408), True, 'import numpy as np\n'), ((7411, 7435), 'numpy.sum', 'np.sum', (['self.conf_matrix'], {}), '(self.conf_matrix)\n', (7417, 7435), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*- #
'''
landsat时间序列数据分析子窗口:主要是进行时间序列数据的分析和处理
具体:1)landsat数据时间序列曲线获取
'''
from PyQt5 import QtCore, QtWidgets
from scipy.optimize import leastsq
import numpy as np
import data_manager as dm
import argrithms as ag
import scipy.io
import matplotlib.pyplot as plt
import matplotlib
import gdal
matplotlib.use("Qt5Agg") # 声明使用QT5
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
class landsatPIFsUI(QtWidgets.QWidget):
    '''
    Purpose: select pseudo-invariant features (PIFs), here chosen either by
    the per-pixel standard deviation of the time series or by the slope of
    the sorted time series.
    Input: *.mat (clipped data source)
    Output: PIF points (mask + time-series values)
    '''
    def __init__(self):
        super().__init__()
        self.doys = ['20171023', '20171108', '20171124', '20171210', '20171226', '20180111', '20180212',
                     '20180316', '20180417', '20180503', '20180519', '20180604', '20180620', '20180823',
                     '20180908', '20180924', '20181010', '20181026', '20181213', '20181229', '20190114']  # acquisition dates of the time series
        #
        self.initUI()
        #
        # key state variables
        self.clipValues = []  # band reflectance of the clipped region, e.g. [21, rows, cols]
        self.clipStds = []  # per-pixel standard deviation of the clipped region
        self.clipSlopes = []  # per-pixel slope of the clipped region
        self.pifValues = []  # time-series curves of all PIF points
        self.pifDetermine = []  # PIF mask, entries are 0 or 1
    def initUI(self):
        #
        # window setup
        self.setWindowTitle('PIFs Select')
        self.setWindowFlags(QtCore.Qt.Dialog)
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        #
        # create widgets
        self.groupbox_FeatureCal = QtWidgets.QGroupBox('Feature Calculation', self)
        self.button_inputMatDir = QtWidgets.QPushButton('Input_MatDir', self)
        self.lineEdit_inputMatDir = QtWidgets.QLineEdit(self)
        self.button_stds = QtWidgets.QPushButton('stds', self)
        self.button_slopes = QtWidgets.QPushButton('slopes', self)
        #
        self.groupbox_pifsExtract = QtWidgets.QGroupBox('PIFs Extraction', self)
        self.button_pifs = QtWidgets.QPushButton('PIFs Extract', self)
        self.botton_pifsMethods = QtWidgets.QPushButton('PIFs-Methods', self)
        self.cmobox_pifsMethod = QtWidgets.QComboBox(self)
        self.button_lower = QtWidgets.QPushButton('Lower', self)
        self.button_upper = QtWidgets.QPushButton('Upper', self)
        self.lineEdit_lower = QtWidgets.QLineEdit(self)
        self.lineEdit_upper = QtWidgets.QLineEdit(self)
        self.button_export = QtWidgets.QPushButton('Export', self)
        self.button_saveMatDir = QtWidgets.QPushButton('Save_MatDir', self)
        self.lineEdit_saveMatDir = QtWidgets.QLineEdit(self)
        self.view = myView(self)
        self.scene = QtWidgets.QGraphicsScene()
        #
        self.cmobox_mode = QtWidgets.QComboBox(self)
        self.button_showImg = QtWidgets.QPushButton('Show', self)
        #
        self.groupbox_pifsOtherBands = QtWidgets.QGroupBox('PIFs-Other Bands', self)
        self.button_inputOtherBandMat = QtWidgets.QPushButton('Input-OB', self)
        self.lineEdit_inputOtherBandMat = QtWidgets.QLineEdit(self)
        self.button_pifsImport = QtWidgets.QPushButton('PIFs-Import', self)
        self.button_exportOtherBand = QtWidgets.QPushButton('Export-Values', self)
        self.lineEdit_exportOtherBand = QtWidgets.QLineEdit(self)
        # Layout
        grid = QtWidgets.QGridLayout(self)
        grid_FeatureCal = QtWidgets.QGridLayout(self.groupbox_FeatureCal)
        grid_pifsExtract = QtWidgets.QGridLayout(self.groupbox_pifsExtract)
        grid_pifsOtherBands = QtWidgets.QGridLayout(self.groupbox_pifsOtherBands)
        #
        grid.addWidget(self.groupbox_FeatureCal, 0, 0, 2, 4)
        grid.addWidget(self.groupbox_pifsExtract, 2, 0, 6, 4)
        grid.addWidget(self.view, 0, 4, 10, 8)
        grid.addWidget(self.groupbox_pifsOtherBands, 8, 0, 2, 4)
        self.view.setFixedWidth(500)
        #
        grid_FeatureCal.addWidget(self.button_inputMatDir, 0, 0, 1, 1)
        grid_FeatureCal.addWidget(self.lineEdit_inputMatDir, 0, 1, 1, 3)
        grid_FeatureCal.addWidget(self.button_stds, 1, 2, 1, 1)
        grid_FeatureCal.addWidget(self.button_slopes, 1, 3, 1, 1)
        #
        grid_pifsExtract.addWidget(self.botton_pifsMethods, 0, 0, 1, 1)
        grid_pifsExtract.addWidget(self.cmobox_pifsMethod, 0, 1, 1, 3)
        grid_pifsExtract.addWidget(self.button_lower, 1, 0, 1, 1)
        grid_pifsExtract.addWidget(self.lineEdit_lower, 1, 1, 1, 3)
        grid_pifsExtract.addWidget(self.button_upper, 2, 0, 1, 1)
        grid_pifsExtract.addWidget(self.lineEdit_upper, 2, 1, 1, 3)
        grid_pifsExtract.addWidget(self.button_pifs, 3, 2, 1, 2)
        grid_pifsExtract.addWidget(self.button_saveMatDir, 4, 0, 1, 1)
        grid_pifsExtract.addWidget(self.lineEdit_saveMatDir, 4, 1, 1, 3)
        grid_pifsExtract.addWidget(self.cmobox_mode, 5, 0, 1, 1)
        grid_pifsExtract.addWidget(self.button_showImg, 5, 1, 1, 1)
        grid_pifsExtract.addWidget(self.button_export, 5, 2, 1, 2)
        #
        grid_pifsOtherBands.addWidget(self.button_inputOtherBandMat, 0, 0, 1, 1)
        grid_pifsOtherBands.addWidget(self.lineEdit_inputOtherBandMat, 0, 1, 1, 3)
        grid_pifsOtherBands.addWidget(self.button_pifsImport, 1, 0, 1, 1)
        grid_pifsOtherBands.addWidget(self.button_exportOtherBand, 1, 1, 1, 1)
        grid_pifsOtherBands.addWidget(self.lineEdit_exportOtherBand, 1, 2, 1, 2)
        #
        # initial widget state
        self.cmobox_pifsMethod.addItems(['std', 'slope'])
        self.cmobox_mode.addItems(['img-stds', 'img-pifsDerterMined', 'img-slopes'])
        self.botton_pifsMethods.setDisabled(True)
        self.button_lower.setDisabled(True)
        self.button_upper.setDisabled(True)
        self.button_pifs.setDisabled(True)
        self.button_exportOtherBand.setDisabled(True)
        self.button_pifs.setStyleSheet("background-color: blue")
        self.button_export.setStyleSheet("background-color: blue")
        self.button_pifsImport.setStyleSheet("background-color: blue")
        # signal/slot wiring
        self.button_inputMatDir.clicked.connect(self.slot_buttonInputMatDir) # choose input .mat path
        self.button_slopes.clicked.connect(self.slot_buttonSlope) # slope-based PIF feature
        self.button_stds.clicked.connect(self.slot_buttonStd) # std-dev of the clipped region
        self.button_pifs.clicked.connect(self.slot_buttonPifs) # build the 0/1 PIF mask
        self.button_showImg.clicked.connect(self.slot_buttonShowImg) # display image
        self.button_export.clicked.connect(self.slot_buttonExport) # export displayed data
        self.button_saveMatDir.clicked.connect(self.slot_buttonSaveMatDir) # choose save .mat path
        #
        self.button_inputOtherBandMat.clicked.connect(self.slot_buttonInputOtherBandMat) # load reflectance of another band
        self.button_pifsImport.clicked.connect(self.slot_buttonPIFsImport) # import a PIF mask
        self.button_exportOtherBand.clicked.connect(self.slot_buttonExportOtherBandsValues) # export PIF values for that band
    def slot_buttonInputMatDir(self):
        #
        # choose the input *.mat file
        open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
        self.lineEdit_inputMatDir.setText(open_filename)
        if 'S1000' in self.lineEdit_inputMatDir.text():
            self.clipValues = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['ref_Values']  # eg[21,1001,1001]
            print(np.shape(np.array(self.clipValues)))
        self.button_pifs.setDisabled(False)
    def slot_buttonSlope(self):
        # Compute (or load) the per-pixel slope of the sorted time series.
        self.cmobox_mode.setCurrentIndex(2)
        self.cmobox_pifsMethod.setCurrentIndex(1)
        if 'slope' in self.lineEdit_inputMatDir.text():
            self.clipSlopes = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['slopes']
        else:
            self.clipValues = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['ref_Values']  # eg[21,1001,1001]
            print(np.shape(np.array(self.clipValues)))
        #
        # least-squares slope of each pixel's sorted DN series
        # NOTE(review): this recomputation also runs when slopes were just
        # loaded from file above -- verify that is intentional
        arrays_clip = np.array(self.clipValues).astype(float)
        for i in range(np.shape(arrays_clip)[1]):
            col = []
            for j in range(np.shape(arrays_clip)[2]):
                #
                Yi = np.sort(arrays_clip[:, i, j])
                Xi = range(len(Yi))
                p0 = [1, 20]
                Para = leastsq(self.error, p0, args=(Xi, Yi))
                slope, _ = Para[0]  # least-squares fit of the sorted DN values
                col.append(slope)
            self.clipSlopes.append(col)
        #
        print(np.shape(np.array(self.clipSlopes)))  # [1001,1001]
        print(np.percentile(self.clipSlopes, 1), np.percentile(self.clipSlopes, 99))
        pass
    def func(self, p, x):  ## model to fit: a straight line k*x + b
        k, b = p
        return k * x + b
    def error(self, p, x, y):  ## residual for leastsq: model(x) - y, with x/y element-wise aligned
        return self.func(p, x) - y
    def slot_buttonStd(self):
        # Compute (or load) the per-pixel standard deviation of the time series.
        #
        # reset
        self.clipStds = []
        self.cmobox_mode.setCurrentIndex(0)
        self.cmobox_pifsMethod.setCurrentIndex(0)
        #
        # load input data
        if 'std' in self.lineEdit_inputMatDir.text():
            self.clipStds = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['stds']
        else:
            self.clipValues = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['ref_Values']  # eg[21,1001,1001]
            print(np.shape(np.array(self.clipValues)))
        #
        # std-dev of DN values, scaled [0,1] * 255 -> [0,255]
        # NOTE(review): this also runs after loading precomputed stds above,
        # and ndarray has no .append -- verify the 'std' input path
        arrays_clip = np.array(self.clipValues).astype(float)
        for i in range(np.shape(arrays_clip)[1]):
            col = []
            for j in range(np.shape(arrays_clip)[2]):
                std = np.round(np.std(255 * arrays_clip[:, i, j]), 5)
                col.append(std)
            self.clipStds.append(col)
        #
        print(np.shape(np.array(self.clipStds)))  # [1001,1001]
        print(np.percentile(self.clipStds, 1), np.percentile(self.clipStds, 99))
    def slot_buttonPifs(self):
        '''
        Purpose: build the PIF mask and collect the time-series values of the
        selected PIF points.
        '''
        #
        self.pifValues = []
        self.pifDetermine = []
        self.cmobox_mode.setCurrentIndex(1)
        flag = 0
        #
        if 'PIF' in self.lineEdit_inputMatDir.text():
            self.pifValues = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['PIFs_Refvalues']
            self.pifDetermine = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['PIFs_determine']
            #
        else:
            #
            arrays_values = np.array(self.clipValues).astype(float)
            #
            # std-based selection
            if self.cmobox_pifsMethod.currentIndex() == 0:
                #
                # thresholds from the UI
                lower_limit = float(self.lineEdit_lower.text())
                upper_limit = float(self.lineEdit_upper.text())
                #
                arrays_stds = np.array(self.clipStds).astype(float)
                for i in range(np.shape(arrays_stds)[0]):
                    col1 = []
                    for j in range(np.shape(arrays_stds)[1]):
                        # decide whether this pixel is a PIF
                        if arrays_stds[i, j] < upper_limit and arrays_stds[i, j] > lower_limit and arrays_values[
                            12, i, j] > 0.1 and np.max(arrays_values[:, i, j]) < 0.4:
                            flag = 1
                            row = []
                            row.extend(arrays_values[:, i, j])
                            # presumably the offset of the clip window within
                            # the full scene -- TODO confirm
                            row.extend([(i + 2572), (j + 2108)])
                            self.pifValues.append(row)
                        else:
                            flag = 0
                        #
                        col1.append(flag)
                    self.pifDetermine.append(col1)
            #
            # slope-based selection, after 'A Long Time-Series Radiometric
            # Normalization Method for Landsat Images'
            if self.cmobox_pifsMethod.currentIndex() == 1:
                #
                # thresholds from the UI
                lower_limit = float(self.lineEdit_lower.text())
                upper_limit = float(self.lineEdit_upper.text())
                #
                arrays_slopes = np.array(self.clipSlopes).astype(float)
                for i in range(np.shape(arrays_slopes)[0]):
                    col1 = []
                    for j in range(np.shape(arrays_slopes)[1]):
                        #
                        if arrays_slopes[i, j] <= upper_limit and arrays_slopes[i, j] >= lower_limit and arrays_values[
                            12, i, j] > 0.1 and np.max(arrays_values[:, i, j]) < 0.4:
                            flag = 1
                            row = []
                            row.extend(arrays_values[:, i, j])
                            row.extend([(i + 2572), (j + 2108)])
                            self.pifValues.append(row)
                        else:
                            flag = 0
                        col1.append(flag)
                    self.pifDetermine.append(col1)
        #
        print(np.shape(self.pifValues))
        pass
    def slot_buttonShowImg(self):
        '''
        Purpose: visualize the selected result (stds / PIF mask / slopes).
        '''
        self.scene = QtWidgets.QGraphicsScene()
        #
        if self.cmobox_mode.currentIndex() == 0:
            self.img = myFigure(width=3, height=3)
            pos = plt.imshow(self.clipStds, cmap='jet')
            plt.xticks(fontsize=3)
            plt.yticks(fontsize=3)
            cb = plt.colorbar(pos, shrink=0.8)
            cb.ax.tick_params(labelsize=3)
            self.scene.addWidget(self.img)
            self.view.setScene(self.scene)
        #
        if self.cmobox_mode.currentIndex() == 1:
            self.img = myFigure(width=3, height=3)
            pos = plt.imshow(self.pifDetermine, cmap='binary')
            plt.xticks(fontsize=3)
            plt.yticks(fontsize=3)
            cb = plt.colorbar(pos, shrink=0.8)
            cb.ax.tick_params(labelsize=3)
            self.scene.addWidget(self.img)
            self.view.setScene(self.scene)
        #
        if self.cmobox_mode.currentIndex() == 2:
            self.img = myFigure(width=3, height=3)
            pos = plt.imshow(self.clipSlopes, cmap='jet')
            plt.xticks(fontsize=3)
            plt.yticks(fontsize=3)
            cb = plt.colorbar(pos, shrink=0.8)
            cb.ax.tick_params(labelsize=3)
            self.scene.addWidget(self.img)
            self.view.setScene(self.scene)
    def slot_buttonExport(self):
        '''
        Purpose: save the currently displayed result to the chosen *.mat file.
        '''
        if self.lineEdit_saveMatDir.text():
            if self.cmobox_mode.currentIndex() == 0:
                scipy.io.savemat(self.lineEdit_saveMatDir.text(), {'stds': self.clipStds})
            if self.cmobox_mode.currentIndex() == 1:
                scipy.io.savemat(self.lineEdit_saveMatDir.text(),
                                 {'PIFs_determine': self.pifDetermine, 'PIFs_Refvalues': self.pifValues})
            if self.cmobox_mode.currentIndex() == 2:
                scipy.io.savemat(self.lineEdit_saveMatDir.text(), {'slopes': self.clipSlopes})
        pass
    def slot_buttonSaveMatDir(self):
        '''
        Purpose: choose the output *.mat path.
        '''
        matSaveDir = QtWidgets.QFileDialog.getSaveFileName(self, 'Save PIFs MatFile', './mat/', '*.mat')[0]
        self.lineEdit_saveMatDir.setText(matSaveDir)
        pass
    def slot_buttonInputOtherBandMat(self):
        '''
        Load the clipped reflectance of another band for PIF extraction.
        '''
        #
        # choose the input *.mat file
        open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
        self.lineEdit_inputOtherBandMat.setText(open_filename)
        if 'S1000' in self.lineEdit_inputOtherBandMat.text():
            self.clipValues = scipy.io.loadmat(self.lineEdit_inputOtherBandMat.text())['ref_Values']  # eg[21,1001,1001]
            print(np.shape(np.array(self.clipValues)))
        self.button_exportOtherBand.setDisabled(False)
        pass
    def slot_buttonPIFsImport(self):
        # Import an existing PIF mask from a *.mat file.
        open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
        self.pifDetermine = scipy.io.loadmat(open_filename)['PIFs_determine']
        #
        print(np.shape(np.array(self.pifDetermine)))
    def slot_buttonExportOtherBandsValues(self):
        # Apply the imported PIF mask to the loaded band and save the values.
        pifSaveDir = QtWidgets.QFileDialog.getSaveFileName(self, 'OtherBands-PIFs', './mat/', '*.mat')[0]
        self.lineEdit_exportOtherBand.setText(str(pifSaveDir))
        #
        shape = np.shape(np.array(self.pifDetermine))
        array_pifDetermine = np.array(self.pifDetermine)
        array_clipValues = np.array(self.clipValues)  # hoist np.array out of the loop to avoid re-allocating every iteration
        pifSamples = []
        for i in range(shape[0]):
            for j in range(shape[1]):
                if array_pifDetermine[i, j] == 1:
                    pifSamples.append(array_clipValues[:, i, j])
        #
        self.pifValues = pifSamples
        print(np.shape(np.array(self.pifValues)))
        #
        scipy.io.savemat(pifSaveDir, {'PIFs_determine': self.pifDetermine, 'PIFs_Refvalues': self.pifValues})
        pass
class landsatTSRRNormalizationUI(QtWidgets.QWidget):
'''
目的:进行时序遥感影像的相对校正
输入:1)时序反射率MAT数据;2)PIFS点的MAT数据
'''
def __init__(self):
super().__init__()
self.doys = ['20171023', '20171108', '20171124', '20171210', '20171226', '20180111', '20180212',
'20180316', '20180417', '20180503', '20180519', '20180604', '20180620', '20180823',
'20180908', '20180924', '20181010', '20181026', '20181213', '20181229', '20190114'] # 获取时间点
#
self.rData = [] # 仅仅是为了作图!!!
self.gData = []
self.bData = []
#
# Bp预测值
self.clipValues = [] # 输入的裁剪区的值
#
# Bp训练值
self.pifsValues = [] # 输入文件中PIFs的值[训练值]
self.pifsCorrValues = [] # PIFs校正后的值
#
# Bp测试值
self.pifsTestValues = [] # 输入测试的PIFs值
self.pifsTestCorrValues = [] # 模型运行后的PIFs结果
#
self.correctedValues = [] # 校正后的数据
self.keys = [] # 模型的参数
#
self.pifStds = [] # 原始数据的标准差
self.sortMap = [] # 升序排序映射
self.corrOrder = [] # 满足最小期望的映射关系
#
# BP 神经网络的必要参数
self.ih_w = []
self.ho_w = []
self.hide_b0 = []
self.out_b0 = []
#
self.initUi()
self.layoutUI()
self.single_slot()
def initUi(self):
#
self.setWindowTitle('NMAG METHOD')
self.setWindowModality(QtCore.Qt.ApplicationModal)
#
# 设置组件
self.groupbox_inputMat = QtWidgets.QGroupBox('Input-Mat', self)
self.groupbox_MethodChoose = QtWidgets.QGroupBox('RRN Method Choose', self)
self.groupbox_Train = QtWidgets.QGroupBox('Train', self)
self.groupbox_Test = QtWidgets.QGroupBox('Test', self)
self.groupbox_Pred = QtWidgets.QGroupBox('Pred-ClipValues', self)
# 设置控件
self.button_inputRefMatDir = QtWidgets.QPushButton('Input_ClipValues', self)
self.button_inputPIFsMatDir = QtWidgets.QPushButton('Input_PIFsDir', self)
self.lineEdit_inputRefMatDir = QtWidgets.QLineEdit(self)
self.lineEdit_inputPIFsMatDir = QtWidgets.QLineEdit(self)
self.button_pccs = QtWidgets.QPushButton('pcc')
#
# rrnChoose
self.button_rrnMethod = QtWidgets.QPushButton('Methods', self)
self.cmobox_rrnMethod = QtWidgets.QComboBox(self)
self.button_initValue = QtWidgets.QPushButton('InitValue', self)
self.cmobox_initValue = QtWidgets.QComboBox(self)
self.button_outydValue = QtWidgets.QPushButton('out_yd', self)
self.cmobox_outydValue = QtWidgets.QComboBox(self)
# BP-train
self.button_trainSample = QtWidgets.QPushButton('TrainSam', self)
self.lineEdit_trainSample = QtWidgets.QLineEdit(self)
self.table_trainModelKey = QtWidgets.QTableWidget(self)
self.view = myView(self)
self.button_keySave = QtWidgets.QPushButton('KeySave', self)
self.button_trainBP = QtWidgets.QPushButton('Train-BP', self)
self.lineEdit_corrOrder = QtWidgets.QLineEdit(self)
self.lineEdit_imgProcess = QtWidgets.QLineEdit(self)
self.button_imgProcessMap = QtWidgets.QPushButton('拟合过程', self)
self.lineEdit_KeySaveDir = QtWidgets.QLineEdit(self)
# BP-test
self.button_contrastMethod = QtWidgets.QPushButton('Method', self)
self.cmobox_contrastMethod = QtWidgets.QComboBox(self)
self.button_testSample = QtWidgets.QPushButton('TestSam', self)
self.lineEdit_testSample = QtWidgets.QLineEdit(self)
self.button_testImportKeys = QtWidgets.QPushButton('Import Keys', self)
self.button_testBp = QtWidgets.QPushButton('Test-Bp', self)
self.button_rmseCal = QtWidgets.QPushButton('RMSE', self)
self.lineEdit_rmseCal = QtWidgets.QLineEdit(self)
# BP-Pred
self.button_predSample = QtWidgets.QPushButton('PredSam', self)
self.lineEdit_predSample = QtWidgets.QLineEdit(self)
self.button_importKeys = QtWidgets.QPushButton('Import Keys', self)
self.button_predBP = QtWidgets.QPushButton('Pred-NMAG', self)
self.button_bpMatSaveDir = QtWidgets.QPushButton('NMAG-SaveDir', self)
self.lineEdit_bpMatSaveDir = QtWidgets.QLineEdit(self)
self.button_startBP = QtWidgets.QPushButton('Save-NMAG', self)
# 植被指数补充结果
self.button_viSaveDir = QtWidgets.QPushButton('VI-SaveDir', self)
self.lineEdit_viSaveDir = QtWidgets.QLineEdit(self)
self.button_viCal = QtWidgets.QPushButton('VI-Cal', self)
# RGB 可视化结果
self.groupbox_mat2rgb = QtWidgets.QGroupBox('mat-RGB-Combination', self)
self.button_rMatInput = QtWidgets.QPushButton('R-Mat', self)
self.button_gMatInput = QtWidgets.QPushButton('G-Mat', self)
self.button_bMatInput = QtWidgets.QPushButton('B-Mat', self)
self.lineEdit_rMatDir = QtWidgets.QLineEdit(self)
self.lineEdit_gMatDir = QtWidgets.QLineEdit(self)
self.lineEdit_bMatDir = QtWidgets.QLineEdit(self)
self.button_matMode = QtWidgets.QPushButton('mat-Mode', self) # 合成看是校正前还是校正后
self.cmobox_matMode = QtWidgets.QComboBox(self)
self.button_rgbSaveDir = QtWidgets.QPushButton('RGB-SaveDir', self)
self.lineEdit_rgbSaveDir = QtWidgets.QLineEdit(self)
self.button_startConvert = QtWidgets.QPushButton('Start-Convert', self)
# 初始化
self.lineEdit_inputRefMatDir.setText('./mat/B5测试样/B5S1000.mat')
self.lineEdit_inputPIFsMatDir.setText('./mat/B5测试样/PIFs-slopes-改.mat')
self.cmobox_rrnMethod.addItems(['NMAG'])
self.cmobox_initValue.addItems(['init', 'maxStds', 'maxMeans'])
self.cmobox_outydValue.addItems(['mean', 'minSSE'])
#
self.cmobox_contrastMethod.addItems(['NMAG'])
self.lineEdit_trainSample.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_testSample.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_predSample.setAlignment(QtCore.Qt.AlignCenter)
self.cmobox_matMode.addItems(['校正前(ref_Values)', '校正后(BP_CorrValues)'])
#
self.table_trainModelKey.setRowCount(4)
self.table_trainModelKey.setColumnCount(1)
self.table_trainModelKey.setHorizontalHeaderLabels(['Key'])
self.table_trainModelKey.setVerticalHeaderLabels(['BpInNode', 'HideLyNum', 'BpHideNode', 'BpOutNode'])
self.table_trainModelKey.setItem(0, 0, QtWidgets.QTableWidgetItem(str('1')))
self.table_trainModelKey.setItem(1, 0, QtWidgets.QTableWidgetItem(str('1')))
self.table_trainModelKey.setItem(2, 0, QtWidgets.QTableWidgetItem(str('10')))
self.table_trainModelKey.setItem(3, 0, QtWidgets.QTableWidgetItem(str('1')))
self.table_trainModelKey.setDisabled(True)
#
self.button_initValue.setDisabled(True)
self.cmobox_initValue.setDisabled(True)
self.button_outydValue.setDisabled(True)
self.cmobox_outydValue.setDisabled(True)
#
self.button_trainBP.setDisabled(True)
self.button_keySave.setDisabled(True)
self.button_imgProcessMap.setDisabled(True)
self.button_testBp.setDisabled(True)
self.button_predBP.setDisabled(True)
self.button_matMode.setDisabled(True)
self.button_startConvert.setDisabled(True)
self.button_viCal.setDisabled(True)
#
self.button_trainBP.setStyleSheet("background-color: blue")
self.button_testBp.setStyleSheet("background-color: blue")
self.button_predBP.setStyleSheet("background-color: blue")
self.button_startConvert.setStyleSheet("background-color: blue")
self.button_viCal.setStyleSheet("background-color: blue")
def layoutUI(self):
#
grid = QtWidgets.QGridLayout(self)
grid.addWidget(self.groupbox_inputMat, 0, 0, 2, 4)
grid.addWidget(self.groupbox_MethodChoose, 2, 0, 2, 4)
grid.addWidget(self.groupbox_Train, 4, 0, 5, 4)
grid.addWidget(self.groupbox_Test, 9, 0, 5, 4)
grid.addWidget(self.groupbox_Pred, 14, 0, 4, 4)
grid.addWidget(self.groupbox_mat2rgb, 0, 6, 6, 4)
grid.addWidget(self.view, 6, 6, 12 ,4)
self.view.setFixedWidth(500)
self.view.setFixedHeight(400)
self.groupbox_Test.setFixedWidth(500)
#
grid_inputMat = QtWidgets.QGridLayout(self.groupbox_inputMat)
grid_rrnChoose = QtWidgets.QGridLayout(self.groupbox_MethodChoose)
grid_train = QtWidgets.QGridLayout(self.groupbox_Train)
grid_test = QtWidgets.QGridLayout(self.groupbox_Test)
grid_pred = QtWidgets.QGridLayout(self.groupbox_Pred)
grid_mat2rgb = QtWidgets.QGridLayout(self.groupbox_mat2rgb)
#
grid_inputMat.addWidget(self.button_inputRefMatDir, 0, 0, 1, 1) # 输入文本框模式
grid_inputMat.addWidget(self.lineEdit_inputRefMatDir, 0, 1, 1, 5)
grid_inputMat.addWidget(self.button_inputPIFsMatDir, 1, 0, 1, 1)
grid_inputMat.addWidget(self.lineEdit_inputPIFsMatDir, 1, 1, 1, 5)
#
grid_rrnChoose.addWidget(self.button_rrnMethod, 0, 0, 1, 1) # 方法选择模块
grid_rrnChoose.addWidget(self.cmobox_rrnMethod, 0, 1, 1, 3)
grid_rrnChoose.addWidget(self.button_initValue, 1, 0, 1, 1)
grid_rrnChoose.addWidget(self.cmobox_initValue, 1, 1, 1, 1)
grid_rrnChoose.addWidget(self.button_outydValue, 1, 2, 1, 1)
grid_rrnChoose.addWidget(self.cmobox_outydValue, 1, 3, 1, 1)
#
grid_train.addWidget(self.button_trainSample, 0, 0, 1, 1) # BP训练模块
grid_train.addWidget(self.lineEdit_trainSample, 0, 1, 1, 1)
grid_train.addWidget(self.button_trainBP, 1, 0, 1, 1)
grid_train.addWidget(self.button_keySave, 1, 1, 1, 1)
grid_train.addWidget(self.lineEdit_KeySaveDir, 2, 0, 1, 2)
grid_train.addWidget(self.button_imgProcessMap, 3, 0, 1, 1)
grid_train.addWidget(self.lineEdit_imgProcess, 3, 1, 1, 1)
grid_train.addWidget(self.lineEdit_corrOrder, 4, 0, 1, 4)
grid_train.addWidget(self.table_trainModelKey, 0, 2, 4, 2)
#
grid_pred.addWidget(self.button_predSample, 0, 0, 1, 1) # BP预测模块
grid_pred.addWidget(self.lineEdit_predSample, 0, 1, 1, 1)
grid_pred.addWidget(self.button_importKeys, 0, 2, 1, 1)
grid_pred.addWidget(self.button_predBP, 0, 3, 1, 1)
grid_pred.addWidget(self.button_bpMatSaveDir, 1, 0, 1, 1)
grid_pred.addWidget(self.lineEdit_bpMatSaveDir, 1, 1, 1, 3)
grid_pred.addWidget(self.button_startBP, 2, 3, 1, 1)
#
grid_test.addWidget(self.button_contrastMethod, 0, 0, 1, 1) # BP测试模块
grid_test.addWidget(self.cmobox_contrastMethod, 0, 1, 1, 3)
grid_test.addWidget(self.button_testSample, 1, 0, 1, 1)
grid_test.addWidget(self.lineEdit_testSample, 1, 1, 1, 1)
grid_test.addWidget(self.button_testImportKeys, 1, 2, 1, 1)
grid_test.addWidget(self.button_testBp, 1, 3, 1, 1)
grid_test.addWidget(self.button_rmseCal, 2, 0, 1, 1)
grid_test.addWidget(self.lineEdit_rmseCal, 2, 1, 1, 3)
#
grid_mat2rgb.addWidget(self.button_matMode, 0, 0, 1, 1)
grid_mat2rgb.addWidget(self.cmobox_matMode, 0, 1, 1, 1)
grid_mat2rgb.addWidget(self.button_rMatInput, 1, 0, 1, 1)
grid_mat2rgb.addWidget(self.button_gMatInput, 2, 0, 1, 1)
grid_mat2rgb.addWidget(self.button_bMatInput, 3, 0, 1, 1)
grid_mat2rgb.addWidget(self.lineEdit_rMatDir, 1, 1, 1, 3)
grid_mat2rgb.addWidget(self.lineEdit_gMatDir, 2, 1, 1, 3)
grid_mat2rgb.addWidget(self.lineEdit_bMatDir, 3, 1, 1, 3)
grid_mat2rgb.addWidget(self.button_rgbSaveDir, 4, 0, 1, 1)
grid_mat2rgb.addWidget(self.lineEdit_rgbSaveDir, 4, 1, 1, 2)
grid_mat2rgb.addWidget(self.button_startConvert, 4, 3, 1, 1)
grid_mat2rgb.addWidget(self.button_viSaveDir, 5, 0, 1, 1)
grid_mat2rgb.addWidget(self.lineEdit_viSaveDir, 5, 1, 1, 2)
grid_mat2rgb.addWidget(self.button_viCal, 5, 3, 1, 1)
pass
def single_slot(self):
self.button_inputRefMatDir.clicked.connect(self.slot_buttonInputRefDir)
self.button_inputPIFsMatDir.clicked.connect(self.slot_buttonInputPIFsDir)
#
self.button_trainSample.clicked.connect(self.slot_buttonTrainSample) # 输入训练样本
self.button_trainBP.clicked.connect(self.slot_buttonTrainBp) # 模型训练
self.button_keySave.clicked.connect(self.slot_buttonKeySave) # 保存权重和阈值
self.button_imgProcessMap.clicked.connect(self.slot_buttonImgProcessMap) # 对拟合过程进行监测
#
self.button_testSample.clicked.connect(self.slot_buttonTestSample) # 输入测试样本
self.button_testImportKeys.clicked.connect(self.slot_buttonImportKeys) # 输入参数Bp
self.button_testBp.clicked.connect(self.slot_buttonTestBp) # 进行测试BP样本集的输出
self.button_rmseCal.clicked.connect(self.slot_buttonRMSECal) # RMSE计算
#
self.button_predSample.clicked.connect(self.slot_buttonPredSample) # 输入预测样本
self.button_importKeys.clicked.connect(self.slot_buttonImportKeys) # 输入参数【同test】
self.button_predBP.clicked.connect(self.slot_buttonPredBp) # 进行预测BP样本集的输出
self.button_startBP.clicked.connect(self.slot_buttonBpStartDir) # 保存结果
self.button_bpMatSaveDir.clicked.connect(self.slot_buttonBpMatSaveDir)
#
self.button_viSaveDir.clicked.connect(self.slot_buttonVIsaveDir) # 输入保存NDVI的路径
self.button_viCal.clicked.connect(self.slot_buttonVICal) # 保存NDVI的数值
#
self.button_rMatInput.clicked.connect(self.slot_buttonRMatInput) # 输入各个波段的Mat数据
self.button_gMatInput.clicked.connect(self.slot_buttonGMatInput)
self.button_bMatInput.clicked.connect(self.slot_buttonBMatInput)
self.button_rgbSaveDir.clicked.connect(self.slot_buttonRgbSaveDir) # 输入保存RGB的路径
self.button_startConvert.clicked.connect(self.slot_buttonStartConvert) # 开始进行转换
pass
def slot_buttonInputRefDir(self):
#
# 输入裁剪后的时间序列反射率数据
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
self.lineEdit_inputRefMatDir.setText(open_filename)
pass
def slot_buttonInputPIFsDir(self):
#
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
self.lineEdit_inputPIFsMatDir.setText(open_filename)
def slot_buttonLsmCorredSaveDir(self):
#
corredSaveDir = QtWidgets.QFileDialog.getSaveFileName(self, '校正后波段反射率数据', './mat/', '*.mat')[0]
self.lineEdit_lsmCorredSaveDir.setText(corredSaveDir)
'''
NMAG 槽函数
'''
def slot_buttonTrainSample(self):
self.pifsValues = scipy.io.loadmat(self.lineEdit_inputPIFsMatDir.text())['PIFs_Refvalues'] # [num,23]
print(np.shape(np.array(self.pifsValues)))
self.lineEdit_trainSample.setText(str(np.shape(np.array(self.pifsValues))[0]))
self.button_trainBP.setDisabled(False)
self.button_keySave.setDisabled(False)
self.table_trainModelKey.setDisabled(False)
def slot_buttonTrainBp(self):
#
# 变量初始化
bpInNode = int(self.table_trainModelKey.item(0, 0).text())
bpHideNode = int(self.table_trainModelKey.item(2, 0).text())
bpOutNode = int(self.table_trainModelKey.item(3, 0).text())
#
self.ih_w = []
self.ho_w = []
self.hide_b0 = []
self.out_b0 = []
#
# BP简单
# self.lossFig = myFigure()
# BP + 贪婪算法 (NMAG)
if self.cmobox_rrnMethod.currentIndex() == 0:
#
# 初始化
m_Outsample = []
self.corrOrder = [] # 映射
self.pifsCorrValues = []
#
array_ih_w = np.zeros([len(self.doys), bpInNode, bpHideNode])
array_ho_w = np.zeros([len(self.doys), bpHideNode, bpOutNode])
array_hide_b0 = np.zeros([len(self.doys), bpHideNode])
array_out_b0 = np.zeros([len(self.doys), bpOutNode])
#
# 选定初始影像
# 1.添加初始时间20171023
self.corrOrder.append(0)
#
m_Outsample.append(np.array(self.pifsValues)[:, self.corrOrder[0]]) # 起始参考值
#
print('First Map is : %s' % self.doys[int(self.corrOrder[0])])
print(np.array(m_Outsample)[0, :])
#
for i in range(1, len(self.doys)):
#
# 获取最小期望值的index
rmse_sum = np.zeros([len(self.doys)])
array_Outsample = np.array(m_Outsample) # 预测值集合的数组
for j in range(len(self.doys)):
if j in self.corrOrder:
rmse_sum[j] = 99999
else:
#
sum = 0.0
for p in range(np.shape(array_Outsample)[0]):
z0 = np.array(self.pifsValues)[:, j] - array_Outsample[p, :] # xi - f(xj)
sum += np.sum(z0 * z0) # sum((xi-f(xj))*(xi-(f(xj)))
rmse_sum[j] = sum
#
index = np.argsort(rmse_sum)[0]
print('\nNow input %s data' % self.doys[index])
self.corrOrder.append(index)
#
# 输入层
m_Insample = np.array(self.pifsValues)[:, index] # 待校正-输入值
print('Insam:', m_Insample)
#
train_bp = ag.bpNet(bpInNode, bpHideNode, bpOutNode, imgNum=i, Greedy=True) ## 直接构造损失函数,进行梯度下降
train_bp.bpInitNetFunc()
#
times = 0
err = []
err_time = []
while train_bp.totalErr > 0.0001 and times < 1000:
times += 1
train_bp.bpNetTrainFunc(m_Insample, m_Outsample, imgNum=i, Greedy=True)
if (times + 1) % 10 == 0:
print('Doys %s BP %5d DT:%10.5f\n' % (self.doys[index], (times + 1), train_bp.totalErr))
err.append(train_bp.totalErr)
err_time.append(times + 1)
#
# 绘制损失函数曲线
plt.plot(err_time, err)
#
# 储存误差矩阵
scipy.io.savemat(f'./mat/{self.doys[index]}_HideNode_{bpHideNode}.mat',{'err_time':err_time,'error':err})
#
# 加入计算结果
#
corrValue = []
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
corrValue.extend(train_bp.out_y0.tolist())
#
print('out_y0:', np.array(corrValue))
m_Outsample.append(corrValue) # 添加预测值作为参考值,保证局部最优
#
array_ih_w[index, :, :] = train_bp.ih_w # 期望结果:[21,1,10]
array_ho_w[index, :, :] = train_bp.ho_w # [21,10,1]
array_hide_b0[index, :] = train_bp.hide_b0 # [21,10]
array_out_b0[index, :] = train_bp.out_b0 # [21,1]
#
# 保存变量
self.ih_w = array_ih_w.tolist()
self.ho_w = array_ho_w.tolist()
self.hide_b0 = array_hide_b0.tolist()
self.out_b0 = array_out_b0.tolist()
self.pifsCorrValues = m_Outsample
print(np.shape(np.array(m_Outsample)))
print(self.corrOrder)
pass
#
# 显示图像
xlabel = [10, 50, 100, 300, 500, 1000]
plt.xticks(xlabel, fontsize=5,rotation=45)
ylabel = [0, 1, 2, 3,4,5]
plt.yticks(ylabel, fontsize=5)
self.lossFig.ax.set_title('loss')
self.scence = QtWidgets.QGraphicsScene()
self.scence.addWidget(self.lossFig)
self.view.setScene(self.scence)
def slot_buttonKeySave(self):
#
# 保存模型参数
#
saveDir = QtWidgets.QFileDialog.getSaveFileName(self, 'BPNet', './mat/', '*.mat')[0]
self.lineEdit_KeySaveDir.setText(saveDir)
#
if saveDir:
if self.cmobox_rrnMethod.currentIndex() == 1: # 针对两景遥感影像而言
scipy.io.savemat(saveDir,
{'ih_w': self.ih_w, 'ho_w': self.ho_w,
'hide_b0': self.hide_b0, 'out_b0': self.out_b0})
if self.cmobox_rrnMethod.currentIndex() == 3 or self.cmobox_rrnMethod.currentIndex() == 4 or self.cmobox_rrnMethod.currentIndex() == 5:
scipy.io.savemat(saveDir,
{'ih_w': self.ih_w, 'ho_w': self.ho_w,
'hide_b0': self.hide_b0, 'out_b0': self.out_b0, 'corrOrder': self.corrOrder,
'pifsCorrValues': self.pifsCorrValues})
else:
print('Wrong Input!')
def slot_buttonImgProcessMap(self): # 过程监视
#
bpInNode = int(self.table_trainModelKey.item(0, 0).text())
bpHideNode = int(self.table_trainModelKey.item(2, 0).text())
bpOutNode = int(self.table_trainModelKey.item(3, 0).text())
#
# 获取inSam
index = int(self.lineEdit_imgProcess.text()) # 横坐标Xi
inSam = np.array(self.pifsValues)[:, index]
#
# 获取out_y0
train_bp = ag.bpNet(bpInNode, bpHideNode, bpOutNode)
train_bp.bpInitNetFunc()
#
if self.cmobox_rrnMethod.currentIndex() == 3:
tt = index
#
out_yd = np.array(self.pifsValues)[:, 0] # 真值为初始值
outSam = []
train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # 定参数
train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
m_Insample = np.array(self.pifsValues)[:, index]
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
outSam.extend(train_bp.out_y0.tolist())
#
print('DOY:', self.doys[index])
print('Insam:', np.array(inSam))
print('out_yd:', np.array(out_yd))
print('out_y0:', np.array(outSam))
if self.cmobox_rrnMethod.currentIndex() == 5:
if self.corrOrder:
corrOrder = self.corrOrder
else:
corrOrder = [0, 1, 6, 17, 20, 7, 16, 2, 15, 5, 3, 4, 19, 18, 14, 9, 13, 8, 11, 12, 10]
self.lineEdit_corrOrder.setText(str(corrOrder))
#
m_Outsample = []
m_Outsample.append(np.array(self.pifsValues)[:, 0])
for i in range(1, len(corrOrder)):
#
# 获取out_yd:
if index == corrOrder[i]:
out_yd = np.mean(m_Outsample, axis=0) # 真值为均值
tt = corrOrder[i]
outSam = []
train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # 定参数
train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
m_Insample = np.array(self.pifsValues)[:, tt]
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
outSam.extend(train_bp.out_y0.tolist()) # 此处必须要加tolist,不然结果会随时变化
##
print('DOY:', self.doys[index])
print('Insam:', np.array(inSam))
print('out_yd:', np.array(out_yd))
print('out_y0:', np.array(outSam))
#
# lsm实际测试
p0 = [1, 20]
keys = leastsq(self.error, p0, args=(m_Insample, out_yd))[0]
#
outSam_lsm = np.round(m_Insample * keys[0] + keys[1], 5)
#
erro = np.sum((outSam_lsm - out_yd) * (outSam_lsm - out_yd)) / 2.0
print(erro)
break
#
tt = corrOrder[i]
corrValue = []
train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # 定参数
train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
m_Insample = np.array(self.pifsValues)[:, tt]
for h in range(np.shape(np.array(m_Insample))[0]):
train_bp.bpNetRecognizeFunc(m_Insample[h])
corrValue.extend(train_bp.out_y0.tolist()) # 此处必须要加tolist,不然结果会随时变化
#
m_Outsample.append(corrValue) # 添加预测值作为参考值,保证局部最优
#
# 画图
#
self.progressFig = myFigure()
m = []
m.append(inSam)
m.append(out_yd)
m.append(outSam)
if self.cmobox_rrnMethod.currentIndex() == 5:
m.append(outSam_lsm)
index222 = np.lexsort([np.array(m)[0, :]])
m = np.array(m)[:, index222]
plt.scatter(m[0, :], m[1, :], s=1, c='b')
plt.plot(m[0, :], m[2, :], c='r')
if self.cmobox_rrnMethod.currentIndex() == 5:
plt.plot(m[0, :], m[3, :], c='g')
self.progressFig.ax.set_title('DOY:%s' % self.doys[index])
#
self.scence = QtWidgets.QGraphicsScene()
self.scence.addWidget(self.progressFig)
self.view.setScene(self.scence)
def slot_buttonTestSample(self):
self.pifsTestValues = scipy.io.loadmat(self.lineEdit_inputPIFsMatDir.text())['PIFs_Refvalues'] # [num,23]
print(np.shape(np.array(self.pifsTestValues)))
self.lineEdit_testSample.setText(str(np.shape(np.array(self.pifsTestValues))[0]))
#
# 计算原始影像的RMSE
rmse = np.zeros([len(self.doys), len(self.doys)])
array_outSam = (np.array(self.pifsTestValues) + 0.1) / (2e-5)
print(np.shape(array_outSam)) # eg:[21,12515]
for mm in range(len(self.doys)):
for nn in range(len(self.doys)):
z0 = array_outSam[mm, :] - array_outSam[nn, :]
rmse[mm, nn] = np.sqrt(np.mean(z0 * z0))
#
mean_rmse = np.mean(rmse)
std_rmse = np.std(rmse)
print("mean:{},std:{}".format(mean_rmse, std_rmse))
#
self.button_testBp.setDisabled(False)
    def slot_buttonTestBp(self): # feed the test set through the trained BP networks
        """Propagate every PIFs test sample through the per-DOY BP networks.

        Fills ``self.pifsTestCorrValues`` with one row per DOY: column 0 of the
        raw test values is appended as-is, every later DOY's column is replaced
        by the corresponding network's output.
        """
        #
        self.pifsTestCorrValues = []
        #
        # Network topology (input/hidden/output node counts) read from the key table.
        bpInNode = int(self.table_trainModelKey.item(0, 0).text())
        bpHideNode = int(self.table_trainModelKey.item(2, 0).text())
        bpOutNode = int(self.table_trainModelKey.item(3, 0).text())
        #
        train_bp = ag.bpNet(bpInNode, bpHideNode, bpOutNode)
        train_bp.bpInitNetFunc()
        #
        # Collect the PIFs values after correction.
        if self.cmobox_contrastMethod.currentIndex() == 0:
            #
            outSam = []
            # Column 0 is passed through without running the network.
            outSam.append(np.array(self.pifsTestValues)[:, 0])
            for tt in range(1, len(self.doys)):
                #
                # Install the trained weights/biases for this DOY.
                train_bp.ih_w = np.array(self.ih_w[tt]).astype(float) # fix the parameters
                train_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
                train_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
                train_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
                #
                m_Insample = np.array(self.pifsTestValues)[:, tt] # input data
                col = []
                for h in range(np.shape(np.array(m_Insample))[0]):
                    train_bp.bpNetRecognizeFunc(m_Insample[h])
                    col.extend(train_bp.out_y0.tolist())
                #
                outSam.append(col)
            #
            self.pifsTestCorrValues = outSam
            print(np.shape(np.array(self.pifsTestCorrValues)))
        #
        pass
def slot_buttonRMSECal(self):
# 计算RMSE
rmseDir = QtWidgets.QFileDialog.getSaveFileName(self, 'BPNet', './mat/', '*.mat')[0]
self.lineEdit_rmseCal.setText(rmseDir)
#
if rmseDir:
rmse = np.zeros([len(self.doys), len(self.doys)])
array_outSam = (np.array(self.pifsTestCorrValues) + 0.1) / (2e-5) # ref to DN
print(np.shape(array_outSam)) # eg:[21,12515]
for mm in range(len(self.doys)):
for nn in range(len(self.doys)):
z0 = array_outSam[mm, :] - array_outSam[nn, :]
rmse[mm, nn] = np.sqrt(np.mean(z0 * z0))
#
mean_rmse = np.mean(rmse)
std_rmse = np.std(rmse)
print("mean:{},std:{}".format(mean_rmse, std_rmse))
#
# 保存路径
scipy.io.savemat(rmseDir,
{'pifsTestCorrValues': self.pifsTestCorrValues, 'rmse': rmse, 'mean': mean_rmse,
'std': std_rmse})
#
def slot_buttonPredSample(self):
#
# 导入测试样本数据
self.clipValues = scipy.io.loadmat(self.lineEdit_inputRefMatDir.text())['ref_Values'] # eg[21,1001,1001]
print(np.shape(np.array(self.clipValues)))
self.lineEdit_predSample.setText(str(np.shape(np.array(self.clipValues))))
#
self.button_predBP.setDisabled(False)
def slot_buttonImportKeys(self):
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
self.ih_w = scipy.io.loadmat(open_filename)['ih_w']
self.ho_w = scipy.io.loadmat(open_filename)['ho_w']
self.out_b0 = scipy.io.loadmat(open_filename)['out_b0']
self.hide_b0 = scipy.io.loadmat(open_filename)['hide_b0']
print(np.shape(np.array(self.ih_w)))
print(np.shape(np.array(self.ho_w)))
print(np.shape(np.array(self.out_b0)))
print(np.shape(np.array(self.hide_b0)))
#
self.button_imgProcessMap.setDisabled(False)
    def slot_buttonPredBp(self):
        """Run the whole clipped image stack through the trained BP networks.

        For each DOY, every pixel of ``self.clipValues[tt]`` is fed through the
        network for that DOY; the image chosen as the initial/reference epoch is
        copied through unchanged. Results accumulate in ``self.correctedValues``.
        """
        #
        # Variable initialisation: network topology from the key table.
        bpInNode = int(self.table_trainModelKey.item(0, 0).text())
        bpHideNode = int(self.table_trainModelKey.item(2, 0).text())
        bpOutNode = int(self.table_trainModelKey.item(3, 0).text())
        #
        #
        if self.cmobox_rrnMethod.currentIndex() == 0:
            #
            self.correctedValues = []  # corrected output stack
            pred_bp = ag.bpNet(bpInNode, bpHideNode, bpOutNode)
            initIndex = 0
            #
            # Pick the initial (reference) image.
            if self.cmobox_initValue.currentText() == '初始影像':
                initIndex = 0
            #
            # 'maxStds': reference is the DOY whose PIFs column has the largest std.
            if self.cmobox_initValue.currentText() == 'maxStds':
                pifStds = []
                for i in range(len(self.doys)):
                    pifValue = np.array(self.pifsValues)[:, i]
                    pifStd = np.std(pifValue)
                    pifStds.append(pifStd)
                maxStdIndex = np.argsort(pifStds)[-1]  # index of the largest std
                initIndex = maxStdIndex
            #
            print(initIndex)
            #
            for tt in range(len(self.doys)):
                #
                corrValue = []
                if tt == initIndex:
                    # Reference epoch: copied through without correction.
                    corrValue = np.array(self.clipValues).astype(float)[tt, :, :]
                else:
                    # Install the trained weights/biases for this DOY.
                    pred_bp.ih_w = np.array(self.ih_w[tt]).astype(float)  # fix the parameters
                    pred_bp.ho_w = np.array(self.ho_w[tt]).astype(float)
                    pred_bp.out_b0 = np.array(self.out_b0[tt]).astype(float)
                    pred_bp.hide_b0 = np.array(self.hide_b0[tt]).astype(float)
                    train_img = np.array(self.clipValues).astype(float)[tt, :, :]
                    #
                    # Pixel-by-pixel forward pass, row by row.
                    for i in range(np.shape(np.array(train_img))[0]):
                        col = []
                        for j in range(np.shape(np.array(train_img))[1]):
                            Ori_Values = train_img[i, j]
                            pred_bp.bpNetRecognizeFunc(Ori_Values)
                            col.extend(pred_bp.out_y0.tolist())
                        #
                        corrValue.append(col)
                #
                print(np.shape(np.array(corrValue)))
                #
                self.correctedValues.append(corrValue)
            #
            print(np.shape(np.array(self.correctedValues)))
#
def slot_buttonBpStartDir(self):
# 保存结果
scipy.io.savemat(self.lineEdit_bpMatSaveDir.text(),
{'BP_CorrValues': self.correctedValues})
pass
def slot_buttonBpMatSaveDir(self):
#
# 导入BP的文件保存路径
saveDir = QtWidgets.QFileDialog.getSaveFileName(self, 'BPNet', './mat/', '*.mat')[0]
self.lineEdit_bpMatSaveDir.setText(saveDir)
pass
#
def slot_buttonRMatInput(self):
#
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
#
if open_filename:
if self.cmobox_matMode.currentIndex() == 0: # 校正前(ref_Values)
self.rData = scipy.io.loadmat(open_filename)['ref_Values']
if self.cmobox_matMode.currentIndex() == 1: # 校正后(BP_CorrValues)
self.rData = scipy.io.loadmat(open_filename)['BP_CorrValues']
#
print(np.shape(np.array(self.rData)), np.array(self.rData)[0, 0, 0]) # eg: [21,1001,1001]
self.lineEdit_rMatDir.setText(open_filename)
def slot_buttonGMatInput(self):
#
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
#
if open_filename:
if self.cmobox_matMode.currentIndex() == 0: # 校正前(ref_Values)
self.gData = scipy.io.loadmat(open_filename)['ref_Values']
if self.cmobox_matMode.currentIndex() == 1: # 校正后(BP_CorrValues)
self.gData = scipy.io.loadmat(open_filename)['BP_CorrValues']
#
print(np.shape(np.array(self.gData)), np.array(self.gData)[0, 0, 0])
self.lineEdit_gMatDir.setText(open_filename)
else:
print('NO Data!!!')
def slot_buttonBMatInput(self):
#
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
#
if open_filename:
if self.cmobox_matMode.currentIndex() == 0: # 校正前(ref_Values)
self.bData = scipy.io.loadmat(open_filename)['ref_Values']
if self.cmobox_matMode.currentIndex() == 1: # 校正后(BP_CorrValues)
self.bData = scipy.io.loadmat(open_filename)['BP_CorrValues']
#
print(np.shape(np.array(self.bData)), np.array(self.bData)[0, 0, 0])
self.lineEdit_bMatDir.setText(open_filename)
else:
print('NO Data!!!')
def slot_buttonRgbSaveDir(self):
#
rgbSaveDir = QtWidgets.QFileDialog.getExistingDirectory(self)
dir = ("%s/" % rgbSaveDir)
self.lineEdit_rgbSaveDir.setText(dir)
#
self.button_startConvert.setDisabled(False)
def slot_buttonStartConvert(self):
#
rgbSaveDir = self.lineEdit_rgbSaveDir.text()
rDataSet = np.array(self.rData)
gDataSet = np.array(self.gData)
bDataSet = np.array(self.bData)
width = np.shape(rDataSet)[1]
height = np.shape(rDataSet)[2]
#
for tt in range(len(self.doys)):
rData = rDataSet[tt, :, :]
gData = gDataSet[tt, :, :]
bData = bDataSet[tt, :, :]
#
rData = dm.dataManagement().linearstretching(rData)
gData = dm.dataManagement().linearstretching(gData)
bData = dm.dataManagement().linearstretching(bData)
#
gtiff_driver = gdal.GetDriverByName('GTiff')
gtiff_name = "%s/%s-%s.tif" % (rgbSaveDir, str(self.doys[tt]), str(self.cmobox_matMode.currentIndex()))
out_ds = gtiff_driver.Create(gtiff_name,
width,
height,
3,
gdal.GDT_Byte)
#
# 存入1波段数据
out_ds.GetRasterBand(3).WriteArray(bData)
#
# 存入2波段数据
out_ds.GetRasterBand(2).WriteArray(gData)
#
# 存入3波段数据
out_ds.GetRasterBand(1).WriteArray(rData)
out_ds.FlushCache()
#
del out_ds
pass
#
def slot_buttonVIsaveDir(self):
viSaveDir = QtWidgets.QFileDialog.getSaveFileName(self, 'VI-Cal', './mat/', '*.mat')[0]
self.lineEdit_viSaveDir.setText(viSaveDir)
#
self.button_viCal.setDisabled(False)
pass
def slot_buttonVICal(self):
#
viSaveDir = self.lineEdit_viSaveDir.text()
NIR_refValues = np.array(self.gData) # eg:(21,1001,1001)
R_refValues = np.array(self.bData)
viDatas = (NIR_refValues - R_refValues) / (NIR_refValues + R_refValues)
print(np.shape(viDatas))
#
scipy.io.savemat(viSaveDir,{'NDVI_Values':viDatas})
class myView(QtWidgets.QGraphicsView):
    """Graphics view hosting the plot scene, with Ctrl-key zoom shortcuts.

    Ctrl+'='  zoom in       Ctrl+'-'  zoom out
    Ctrl+'0'  reset to 1:1  Ctrl+'O'  shrink to 40%
    """
    def __init__(self, *_args):
        super().__init__(*_args)
        self.zoomInFactor = 1.1
        self.zoomOutFactor = 1.0 / self.zoomInFactor
        self.factor = 1.0  # accumulated zoom, used to restore 1:1

    def keyPressEvent(self, event):
        if event.modifiers() != QtCore.Qt.ControlModifier:
            return
        key = event.key()
        if key == QtCore.Qt.Key_Equal:
            self.scale(self.zoomInFactor, self.zoomInFactor)
            self.factor = self.factor * self.zoomInFactor
        elif key == QtCore.Qt.Key_Minus:
            self.scale(self.zoomOutFactor, self.zoomOutFactor)
            self.factor = self.factor * self.zoomOutFactor
        elif key == QtCore.Qt.Key_0:
            # Zooming changes the view transform; undo the accumulated factor.
            self.scale(1 / self.factor, 1 / self.factor)
            self.factor = 1.0
        elif key == QtCore.Qt.Key_O:
            self.scale(0.4, 0.4)
class myFigure(FigureCanvas):
    '''
    A FigureCanvas wrapper so matplotlib figures can be embedded in a Qt
    graphics scene; exposes the figure as ``self.fig`` and a single axes
    as ``self.ax``.
    '''
    def __init__(self, width=2.5, height=2, dpi=100):
        # Step 1: create the Figure.
        self.fig = plt.figure(figsize=(width, height), dpi=dpi)
        # Step 2: activate the Figure window in the base class.
        super(myFigure, self).__init__(self.fig)  # required, otherwise nothing renders
        # Step 3: one subplot for all drawing; 111 is like MATLAB's subplot(1,1,1).
        self.ax = self.fig.add_subplot(111)
| [
"gdal.GetDriverByName",
"numpy.argsort",
"numpy.array",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"data_manager.dataManagement",
"matplotlib.pyplot.imshow",
"PyQt5.QtWidgets.QTableWidget",
"numpy.mean",
"PyQt5.QtWidgets.QComboBox",
"numpy.sort",
"matplotlib.pyplot.plot",
"numpy.max",
"s... | [((311, 335), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (325, 335), False, 'import matplotlib\n'), ((1470, 1518), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""Feature Calculation"""', 'self'], {}), "('Feature Calculation', self)\n", (1489, 1518), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1553, 1596), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Input_MatDir"""', 'self'], {}), "('Input_MatDir', self)\n", (1574, 1596), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1633, 1658), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (1652, 1658), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1686, 1721), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""stds"""', 'self'], {}), "('stds', self)\n", (1707, 1721), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1751, 1788), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""slopes"""', 'self'], {}), "('slopes', self)\n", (1772, 1788), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1835, 1879), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""PIFs Extraction"""', 'self'], {}), "('PIFs Extraction', self)\n", (1854, 1879), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1907, 1950), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""PIFs Extract"""', 'self'], {}), "('PIFs Extract', self)\n", (1928, 1950), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1985, 2028), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""PIFs-Methods"""', 'self'], {}), "('PIFs-Methods', self)\n", (2006, 2028), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2062, 2087), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (2081, 2087), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2116, 2152), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Lower"""', 'self'], {}), "('Lower', self)\n", (2137, 2152), False, 'from PyQt5 
import QtCore, QtWidgets\n'), ((2181, 2217), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Upper"""', 'self'], {}), "('Upper', self)\n", (2202, 2217), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2248, 2273), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (2267, 2273), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2304, 2329), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (2323, 2329), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2359, 2396), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Export"""', 'self'], {}), "('Export', self)\n", (2380, 2396), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2430, 2472), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Save_MatDir"""', 'self'], {}), "('Save_MatDir', self)\n", (2451, 2472), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2508, 2533), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (2527, 2533), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2588, 2614), 'PyQt5.QtWidgets.QGraphicsScene', 'QtWidgets.QGraphicsScene', ([], {}), '()\n', (2612, 2614), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2652, 2677), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (2671, 2677), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2708, 2743), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Show"""', 'self'], {}), "('Show', self)\n", (2729, 2743), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2793, 2838), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""PIFs-Other Bands"""', 'self'], {}), "('PIFs-Other Bands', self)\n", (2812, 2838), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2879, 2918), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Input-OB"""', 'self'], {}), "('Input-OB', self)\n", (2900, 2918), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((2961, 2986), 
'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (2980, 2986), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3020, 3062), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""PIFs-Import"""', 'self'], {}), "('PIFs-Import', self)\n", (3041, 3062), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3101, 3145), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Export-Values"""', 'self'], {}), "('Export-Values', self)\n", (3122, 3145), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3186, 3211), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (3205, 3211), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3244, 3271), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self'], {}), '(self)\n', (3265, 3271), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3298, 3345), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_FeatureCal'], {}), '(self.groupbox_FeatureCal)\n', (3319, 3345), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3373, 3421), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_pifsExtract'], {}), '(self.groupbox_pifsExtract)\n', (3394, 3421), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((3452, 3503), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_pifsOtherBands'], {}), '(self.groupbox_pifsOtherBands)\n', (3473, 3503), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((12977, 13003), 'PyQt5.QtWidgets.QGraphicsScene', 'QtWidgets.QGraphicsScene', ([], {}), '()\n', (13001, 13003), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((16314, 16341), 'numpy.array', 'np.array', (['self.pifDetermine'], {}), '(self.pifDetermine)\n', (16322, 16341), True, 'import numpy as np\n'), ((16369, 16394), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (16377, 16394), True, 'import numpy as np\n'), ((18399, 18437), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', 
(['"""Input-Mat"""', 'self'], {}), "('Input-Mat', self)\n", (18418, 18437), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18475, 18521), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""RRN Method Choose"""', 'self'], {}), "('RRN Method Choose', self)\n", (18494, 18521), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18552, 18586), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""Train"""', 'self'], {}), "('Train', self)\n", (18571, 18586), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18616, 18649), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""Test"""', 'self'], {}), "('Test', self)\n", (18635, 18649), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18679, 18723), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""Pred-ClipValues"""', 'self'], {}), "('Pred-ClipValues', self)\n", (18698, 18723), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18776, 18823), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Input_ClipValues"""', 'self'], {}), "('Input_ClipValues', self)\n", (18797, 18823), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18862, 18906), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Input_PIFsDir"""', 'self'], {}), "('Input_PIFsDir', self)\n", (18883, 18906), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((18946, 18971), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (18965, 18971), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19012, 19037), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (19031, 19037), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19065, 19093), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""pcc"""'], {}), "('pcc')\n", (19086, 19093), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19156, 19194), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Methods"""', 'self'], {}), "('Methods', self)\n", (19177, 19194), False, 'from PyQt5 import 
QtCore, QtWidgets\n'), ((19227, 19252), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (19246, 19252), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19285, 19325), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""InitValue"""', 'self'], {}), "('InitValue', self)\n", (19306, 19325), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19358, 19383), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (19377, 19383), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19417, 19454), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""out_yd"""', 'self'], {}), "('out_yd', self)\n", (19438, 19454), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19488, 19513), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (19507, 19513), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19567, 19606), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""TrainSam"""', 'self'], {}), "('TrainSam', self)\n", (19588, 19606), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19643, 19668), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (19662, 19668), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19704, 19732), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self'], {}), '(self)\n', (19726, 19732), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19796, 19834), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""KeySave"""', 'self'], {}), "('KeySave', self)\n", (19817, 19834), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19865, 19904), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Train-BP"""', 'self'], {}), "('Train-BP', self)\n", (19886, 19904), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((19939, 19964), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (19958, 19964), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20000, 20025), 
'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (20019, 20025), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20062, 20097), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""拟合过程"""', 'self'], {}), "('拟合过程', self)\n", (20083, 20097), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20133, 20158), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (20152, 20158), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20214, 20251), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Method"""', 'self'], {}), "('Method', self)\n", (20235, 20251), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20289, 20314), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (20308, 20314), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20348, 20386), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""TestSam"""', 'self'], {}), "('TestSam', self)\n", (20369, 20386), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20422, 20447), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (20441, 20447), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20485, 20527), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Import Keys"""', 'self'], {}), "('Import Keys', self)\n", (20506, 20527), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20557, 20595), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Test-Bp"""', 'self'], {}), "('Test-Bp', self)\n", (20578, 20595), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20626, 20661), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""RMSE"""', 'self'], {}), "('RMSE', self)\n", (20647, 20661), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20694, 20719), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (20713, 20719), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20771, 20809), 'PyQt5.QtWidgets.QPushButton', 
'QtWidgets.QPushButton', (['"""PredSam"""', 'self'], {}), "('PredSam', self)\n", (20792, 20809), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20845, 20870), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (20864, 20870), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20904, 20946), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Import Keys"""', 'self'], {}), "('Import Keys', self)\n", (20925, 20946), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((20976, 21016), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Pred-NMAG"""', 'self'], {}), "('Pred-NMAG', self)\n", (20997, 21016), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21052, 21095), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""NMAG-SaveDir"""', 'self'], {}), "('NMAG-SaveDir', self)\n", (21073, 21095), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21133, 21158), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (21152, 21158), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21189, 21229), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Save-NMAG"""', 'self'], {}), "('Save-NMAG', self)\n", (21210, 21229), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21281, 21322), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""VI-SaveDir"""', 'self'], {}), "('VI-SaveDir', self)\n", (21302, 21322), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21357, 21382), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (21376, 21382), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21411, 21448), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""VI-Cal"""', 'self'], {}), "('VI-Cal', self)\n", (21432, 21448), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21501, 21549), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""mat-RGB-Combination"""', 'self'], {}), "('mat-RGB-Combination', self)\n", (21520, 21549), False, 
'from PyQt5 import QtCore, QtWidgets\n'), ((21582, 21618), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""R-Mat"""', 'self'], {}), "('R-Mat', self)\n", (21603, 21618), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21651, 21687), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""G-Mat"""', 'self'], {}), "('G-Mat', self)\n", (21672, 21687), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21720, 21756), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""B-Mat"""', 'self'], {}), "('B-Mat', self)\n", (21741, 21756), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21789, 21814), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (21808, 21814), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21847, 21872), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (21866, 21872), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21905, 21930), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (21924, 21930), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((21961, 22000), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""mat-Mode"""', 'self'], {}), "('mat-Mode', self)\n", (21982, 22000), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((22047, 22072), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (22066, 22072), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((22106, 22148), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""RGB-SaveDir"""', 'self'], {}), "('RGB-SaveDir', self)\n", (22127, 22148), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((22184, 22209), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (22203, 22209), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((22245, 22289), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Start-Convert"""', 'self'], {}), "('Start-Convert', self)\n", (22266, 22289), False, 'from PyQt5 
import QtCore, QtWidgets\n'), ((24659, 24686), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self'], {}), '(self)\n', (24680, 24686), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((25236, 25281), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_inputMat'], {}), '(self.groupbox_inputMat)\n', (25257, 25281), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((25307, 25356), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_MethodChoose'], {}), '(self.groupbox_MethodChoose)\n', (25328, 25356), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((25378, 25420), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_Train'], {}), '(self.groupbox_Train)\n', (25399, 25420), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((25441, 25482), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_Test'], {}), '(self.groupbox_Test)\n', (25462, 25482), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((25503, 25544), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_Pred'], {}), '(self.groupbox_Pred)\n', (25524, 25544), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((25568, 25612), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupbox_mat2rgb'], {}), '(self.groupbox_mat2rgb)\n', (25589, 25612), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((36475, 36518), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xlabel'], {'fontsize': '(5)', 'rotation': '(45)'}), '(xlabel, fontsize=5, rotation=45)\n', (36485, 36518), True, 'import matplotlib.pyplot as plt\n'), ((36560, 36590), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ylabel'], {'fontsize': '(5)'}), '(ylabel, fontsize=5)\n', (36570, 36590), True, 'import matplotlib.pyplot as plt\n'), ((36655, 36681), 'PyQt5.QtWidgets.QGraphicsScene', 'QtWidgets.QGraphicsScene', ([], {}), '()\n', (36679, 36681), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((38230, 38271), 'argrithms.bpNet', 'ag.bpNet', 
(['bpInNode', 'bpHideNode', 'bpOutNode'], {}), '(bpInNode, bpHideNode, bpOutNode)\n', (38238, 38271), True, 'import argrithms as ag\n'), ((42265, 42306), 'matplotlib.pyplot.scatter', 'plt.scatter', (['m[0, :]', 'm[1, :]'], {'s': '(1)', 'c': '"""b"""'}), "(m[0, :], m[1, :], s=1, c='b')\n", (42276, 42306), True, 'import matplotlib.pyplot as plt\n'), ((42315, 42348), 'matplotlib.pyplot.plot', 'plt.plot', (['m[0, :]', 'm[2, :]'], {'c': '"""r"""'}), "(m[0, :], m[2, :], c='r')\n", (42323, 42348), True, 'import matplotlib.pyplot as plt\n'), ((42548, 42574), 'PyQt5.QtWidgets.QGraphicsScene', 'QtWidgets.QGraphicsScene', ([], {}), '()\n', (42572, 42574), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((43413, 43426), 'numpy.mean', 'np.mean', (['rmse'], {}), '(rmse)\n', (43420, 43426), True, 'import numpy as np\n'), ((43446, 43458), 'numpy.std', 'np.std', (['rmse'], {}), '(rmse)\n', (43452, 43458), True, 'import numpy as np\n'), ((43909, 43950), 'argrithms.bpNet', 'ag.bpNet', (['bpInNode', 'bpHideNode', 'bpOutNode'], {}), '(bpInNode, bpHideNode, bpOutNode)\n', (43917, 43950), True, 'import argrithms as ag\n'), ((51937, 51985), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QtWidgets.QFileDialog.getExistingDirectory', (['self'], {}), '(self)\n', (51979, 51985), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((52252, 52272), 'numpy.array', 'np.array', (['self.rData'], {}), '(self.rData)\n', (52260, 52272), True, 'import numpy as np\n'), ((52292, 52312), 'numpy.array', 'np.array', (['self.gData'], {}), '(self.gData)\n', (52300, 52312), True, 'import numpy as np\n'), ((52332, 52352), 'numpy.array', 'np.array', (['self.bData'], {}), '(self.bData)\n', (52340, 52352), True, 'import numpy as np\n'), ((53976, 53996), 'numpy.array', 'np.array', (['self.gData'], {}), '(self.gData)\n', (53984, 53996), True, 'import numpy as np\n'), ((54040, 54060), 'numpy.array', 'np.array', (['self.bData'], {}), '(self.bData)\n', (54048, 54060), True, 'import numpy as np\n'), ((55516, 55560), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (55526, 55560), True, 'import matplotlib.pyplot as plt\n'), ((6895, 6948), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (6932, 6948), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((8449, 8482), 'numpy.percentile', 'np.percentile', (['self.clipSlopes', '(1)'], {}), '(self.clipSlopes, 1)\n', (8462, 8482), True, 'import numpy as np\n'), ((8484, 8518), 'numpy.percentile', 'np.percentile', (['self.clipSlopes', '(99)'], {}), '(self.clipSlopes, 99)\n', (8497, 8518), True, 'import numpy as np\n'), ((9760, 9791), 'numpy.percentile', 'np.percentile', (['self.clipStds', '(1)'], {}), '(self.clipStds, 1)\n', (9773, 9791), True, 'import numpy as np\n'), ((9793, 9825), 'numpy.percentile', 'np.percentile', (['self.clipStds', '(99)'], {}), '(self.clipStds, 99)\n', (9806, 9825), True, 'import numpy as np\n'), ((12841, 12865), 'numpy.shape', 'np.shape', (['self.pifValues'], {}), '(self.pifValues)\n', (12849, 12865), True, 'import numpy as np\n'), ((13132, 13169), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.clipStds'], {'cmap': '"""jet"""'}), "(self.clipStds, cmap='jet')\n", (13142, 13169), True, 'import matplotlib.pyplot as plt\n'), ((13182, 13204), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (13192, 13204), True, 'import matplotlib.pyplot as plt\n'), ((13217, 13239), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (13227, 13239), True, 'import matplotlib.pyplot as plt\n'), ((13257, 13286), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pos'], {'shrink': '(0.8)'}), '(pos, shrink=0.8)\n', (13269, 13286), True, 'import matplotlib.pyplot as plt\n'), ((13544, 13588), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.pifDetermine'], {'cmap': '"""binary"""'}), 
"(self.pifDetermine, cmap='binary')\n", (13554, 13588), True, 'import matplotlib.pyplot as plt\n'), ((13601, 13623), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (13611, 13623), True, 'import matplotlib.pyplot as plt\n'), ((13636, 13658), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (13646, 13658), True, 'import matplotlib.pyplot as plt\n'), ((13676, 13705), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pos'], {'shrink': '(0.8)'}), '(pos, shrink=0.8)\n', (13688, 13705), True, 'import matplotlib.pyplot as plt\n'), ((13963, 14002), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.clipSlopes'], {'cmap': '"""jet"""'}), "(self.clipSlopes, cmap='jet')\n", (13973, 14002), True, 'import matplotlib.pyplot as plt\n'), ((14015, 14037), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (14025, 14037), True, 'import matplotlib.pyplot as plt\n'), ((14050, 14072), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (14060, 14072), True, 'import matplotlib.pyplot as plt\n'), ((14090, 14119), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pos'], {'shrink': '(0.8)'}), '(pos, shrink=0.8)\n', (14102, 14119), True, 'import matplotlib.pyplot as plt\n'), ((14999, 15086), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""Save PIFs MatFile"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, 'Save PIFs MatFile', './mat/',\n '*.mat')\n", (15036, 15086), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((15292, 15345), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (15329, 15345), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((15794, 15847), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", 
(15831, 15847), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((16073, 16158), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""OtherBands-PIFs"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, 'OtherBands-PIFs', './mat/',\n '*.mat')\n", (16110, 16158), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((16256, 16283), 'numpy.array', 'np.array', (['self.pifDetermine'], {}), '(self.pifDetermine)\n', (16264, 16283), True, 'import numpy as np\n'), ((30949, 31002), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (30986, 31002), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((31153, 31206), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (31190, 31206), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((31349, 31425), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""校正后波段反射率数据"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, '校正后波段反射率数据', './mat/', '*.mat')\n", (31386, 31425), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((36857, 36928), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""BPNet"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, 'BPNet', './mat/', '*.mat')\n", (36894, 36928), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((38146, 38171), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (38154, 38171), True, 'import numpy as np\n'), ((42232, 42243), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (42240, 42243), True, 'import numpy as np\n'), ((42415, 42448), 'matplotlib.pyplot.plot', 'plt.plot', (['m[0, :]', 'm[3, :]'], {'c': '"""g"""'}), "(m[0, :], m[3, :], c='g')\n", (42423, 42448), True, 'import matplotlib.pyplot as plt\n'), ((43136, 43158), 'numpy.shape', 'np.shape', 
(['array_outSam'], {}), '(array_outSam)\n', (43144, 43158), True, 'import numpy as np\n'), ((45100, 45171), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""BPNet"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, 'BPNet', './mat/', '*.mat')\n", (45137, 45171), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((45725, 45738), 'numpy.mean', 'np.mean', (['rmse'], {}), '(rmse)\n', (45732, 45738), True, 'import numpy as np\n'), ((45762, 45774), 'numpy.std', 'np.std', (['rmse'], {}), '(rmse)\n', (45768, 45774), True, 'import numpy as np\n'), ((46508, 46561), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (46545, 46561), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((47485, 47526), 'argrithms.bpNet', 'ag.bpNet', (['bpInNode', 'bpHideNode', 'bpOutNode'], {}), '(bpInNode, bpHideNode, bpOutNode)\n', (47493, 47526), True, 'import argrithms as ag\n'), ((49741, 49812), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""BPNet"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, 'BPNet', './mat/', '*.mat')\n", (49778, 49812), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((49958, 50011), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (49995, 50011), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((50602, 50655), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (50639, 50655), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((51270, 51323), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ([], {'filter': '"""*.mat"""'}), "(filter='*.mat')\n", (51307, 51323), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((52369, 52387), 'numpy.shape', 'np.shape', 
(['rDataSet'], {}), '(rDataSet)\n', (52377, 52387), True, 'import numpy as np\n'), ((52408, 52426), 'numpy.shape', 'np.shape', (['rDataSet'], {}), '(rDataSet)\n', (52416, 52426), True, 'import numpy as np\n'), ((52846, 52875), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (52866, 52875), False, 'import gdal\n'), ((53663, 53735), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""VI-Cal"""', '"""./mat/"""', '"""*.mat"""'], {}), "(self, 'VI-Cal', './mat/', '*.mat')\n", (53700, 53735), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((54155, 54172), 'numpy.shape', 'np.shape', (['viDatas'], {}), '(viDatas)\n', (54163, 54172), True, 'import numpy as np\n'), ((8392, 8417), 'numpy.array', 'np.array', (['self.clipSlopes'], {}), '(self.clipSlopes)\n', (8400, 8417), True, 'import numpy as np\n'), ((9705, 9728), 'numpy.array', 'np.array', (['self.clipStds'], {}), '(self.clipStds)\n', (9713, 9728), True, 'import numpy as np\n'), ((15962, 15989), 'numpy.array', 'np.array', (['self.pifDetermine'], {}), '(self.pifDetermine)\n', (15970, 15989), True, 'import numpy as np\n'), ((16708, 16732), 'numpy.array', 'np.array', (['self.pifValues'], {}), '(self.pifValues)\n', (16716, 16732), True, 'import numpy as np\n'), ((31694, 31719), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (31702, 31719), True, 'import numpy as np\n'), ((33442, 33463), 'numpy.array', 'np.array', (['m_Outsample'], {}), '(m_Outsample)\n', (33450, 33463), True, 'import numpy as np\n'), ((34375, 34439), 'argrithms.bpNet', 'ag.bpNet', (['bpInNode', 'bpHideNode', 'bpOutNode'], {'imgNum': 'i', 'Greedy': '(True)'}), '(bpInNode, bpHideNode, bpOutNode, imgNum=i, Greedy=True)\n', (34383, 34439), True, 'import argrithms as ag\n'), ((35116, 35139), 'matplotlib.pyplot.plot', 'plt.plot', (['err_time', 'err'], {}), '(err_time, err)\n', (35124, 35139), True, 'import matplotlib.pyplot as plt\n'), ((38427, 
38452), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (38435, 38452), True, 'import numpy as np\n'), ((38799, 38824), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (38807, 38824), True, 'import numpy as np\n'), ((39099, 39114), 'numpy.array', 'np.array', (['inSam'], {}), '(inSam)\n', (39107, 39114), True, 'import numpy as np\n'), ((39145, 39161), 'numpy.array', 'np.array', (['out_yd'], {}), '(out_yd)\n', (39153, 39161), True, 'import numpy as np\n'), ((39192, 39208), 'numpy.array', 'np.array', (['outSam'], {}), '(outSam)\n', (39200, 39208), True, 'import numpy as np\n'), ((42840, 42869), 'numpy.array', 'np.array', (['self.pifsTestValues'], {}), '(self.pifsTestValues)\n', (42848, 42869), True, 'import numpy as np\n'), ((43076, 43105), 'numpy.array', 'np.array', (['self.pifsTestValues'], {}), '(self.pifsTestValues)\n', (43084, 43105), True, 'import numpy as np\n'), ((44970, 45003), 'numpy.array', 'np.array', (['self.pifsTestCorrValues'], {}), '(self.pifsTestCorrValues)\n', (44978, 45003), True, 'import numpy as np\n'), ((45424, 45446), 'numpy.shape', 'np.shape', (['array_outSam'], {}), '(array_outSam)\n', (45432, 45446), True, 'import numpy as np\n'), ((46278, 46303), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (46286, 46303), True, 'import numpy as np\n'), ((46838, 46857), 'numpy.array', 'np.array', (['self.ih_w'], {}), '(self.ih_w)\n', (46846, 46857), True, 'import numpy as np\n'), ((46883, 46902), 'numpy.array', 'np.array', (['self.ho_w'], {}), '(self.ho_w)\n', (46891, 46902), True, 'import numpy as np\n'), ((46928, 46949), 'numpy.array', 'np.array', (['self.out_b0'], {}), '(self.out_b0)\n', (46936, 46949), True, 'import numpy as np\n'), ((46975, 46997), 'numpy.array', 'np.array', (['self.hide_b0'], {}), '(self.hide_b0)\n', (46983, 46997), True, 'import numpy as np\n'), ((7207, 7232), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (7215, 
7232), True, 'import numpy as np\n'), ((7723, 7748), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (7731, 7748), True, 'import numpy as np\n'), ((7818, 7843), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (7826, 7843), True, 'import numpy as np\n'), ((7885, 7906), 'numpy.shape', 'np.shape', (['arrays_clip'], {}), '(arrays_clip)\n', (7893, 7906), True, 'import numpy as np\n'), ((8042, 8071), 'numpy.sort', 'np.sort', (['arrays_clip[:, i, j]'], {}), '(arrays_clip[:, i, j])\n', (8049, 8071), True, 'import numpy as np\n'), ((8172, 8210), 'scipy.optimize.leastsq', 'leastsq', (['self.error', 'p0'], {'args': '(Xi, Yi)'}), '(self.error, p0, args=(Xi, Yi))\n', (8179, 8210), False, 'from scipy.optimize import leastsq\n'), ((9229, 9254), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (9237, 9254), True, 'import numpy as np\n'), ((9343, 9368), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (9351, 9368), True, 'import numpy as np\n'), ((9410, 9431), 'numpy.shape', 'np.shape', (['arrays_clip'], {}), '(arrays_clip)\n', (9418, 9431), True, 'import numpy as np\n'), ((10371, 10396), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (10379, 10396), True, 'import numpy as np\n'), ((15622, 15647), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (15630, 15647), True, 'import numpy as np\n'), ((33049, 33074), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (33057, 33074), True, 'import numpy as np\n'), ((33214, 33235), 'numpy.array', 'np.array', (['m_Outsample'], {}), '(m_Outsample)\n', (33222, 33235), True, 'import numpy as np\n'), ((34037, 34057), 'numpy.argsort', 'np.argsort', (['rmse_sum'], {}), '(rmse_sum)\n', (34047, 34057), True, 'import numpy as np\n'), ((34239, 34264), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (34247, 34264), True, 'import 
numpy as np\n'), ((35641, 35660), 'numpy.array', 'np.array', (['corrValue'], {}), '(corrValue)\n', (35649, 35660), True, 'import numpy as np\n'), ((36319, 36340), 'numpy.array', 'np.array', (['m_Outsample'], {}), '(m_Outsample)\n', (36327, 36340), True, 'import numpy as np\n'), ((38521, 38544), 'numpy.array', 'np.array', (['self.ih_w[tt]'], {}), '(self.ih_w[tt])\n', (38529, 38544), True, 'import numpy as np\n'), ((38594, 38617), 'numpy.array', 'np.array', (['self.ho_w[tt]'], {}), '(self.ho_w[tt])\n', (38602, 38617), True, 'import numpy as np\n'), ((38662, 38687), 'numpy.array', 'np.array', (['self.out_b0[tt]'], {}), '(self.out_b0[tt])\n', (38670, 38687), True, 'import numpy as np\n'), ((38733, 38759), 'numpy.array', 'np.array', (['self.hide_b0[tt]'], {}), '(self.hide_b0[tt])\n', (38741, 38759), True, 'import numpy as np\n'), ((39594, 39619), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (39602, 39619), True, 'import numpy as np\n'), ((39791, 39819), 'numpy.mean', 'np.mean', (['m_Outsample'], {'axis': '(0)'}), '(m_Outsample, axis=0)\n', (39798, 39819), True, 'import numpy as np\n'), ((40965, 41008), 'numpy.round', 'np.round', (['(m_Insample * keys[0] + keys[1])', '(5)'], {}), '(m_Insample * keys[0] + keys[1], 5)\n', (40973, 41008), True, 'import numpy as np\n'), ((41585, 41610), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (41593, 41610), True, 'import numpy as np\n'), ((42200, 42211), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (42208, 42211), True, 'import numpy as np\n'), ((43365, 43381), 'numpy.mean', 'np.mean', (['(z0 * z0)'], {}), '(z0 * z0)\n', (43372, 43381), True, 'import numpy as np\n'), ((44139, 44168), 'numpy.array', 'np.array', (['self.pifsTestValues'], {}), '(self.pifsTestValues)\n', (44147, 44168), True, 'import numpy as np\n'), ((44586, 44615), 'numpy.array', 'np.array', (['self.pifsTestValues'], {}), '(self.pifsTestValues)\n', (44594, 44615), True, 'import numpy as np\n'), ((45342, 
45375), 'numpy.array', 'np.array', (['self.pifsTestCorrValues'], {}), '(self.pifsTestCorrValues)\n', (45350, 45375), True, 'import numpy as np\n'), ((46360, 46385), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (46368, 46385), True, 'import numpy as np\n'), ((47928, 47944), 'numpy.std', 'np.std', (['pifValue'], {}), '(pifValue)\n', (47934, 47944), True, 'import numpy as np\n'), ((48018, 48037), 'numpy.argsort', 'np.argsort', (['pifStds'], {}), '(pifStds)\n', (48028, 48037), True, 'import numpy as np\n'), ((49420, 49450), 'numpy.array', 'np.array', (['self.correctedValues'], {}), '(self.correctedValues)\n', (49428, 49450), True, 'import numpy as np\n'), ((50398, 50418), 'numpy.array', 'np.array', (['self.rData'], {}), '(self.rData)\n', (50406, 50418), True, 'import numpy as np\n'), ((50421, 50441), 'numpy.array', 'np.array', (['self.rData'], {}), '(self.rData)\n', (50429, 50441), True, 'import numpy as np\n'), ((51042, 51062), 'numpy.array', 'np.array', (['self.gData'], {}), '(self.gData)\n', (51050, 51062), True, 'import numpy as np\n'), ((51065, 51085), 'numpy.array', 'np.array', (['self.gData'], {}), '(self.gData)\n', (51073, 51085), True, 'import numpy as np\n'), ((51710, 51730), 'numpy.array', 'np.array', (['self.bData'], {}), '(self.bData)\n', (51718, 51730), True, 'import numpy as np\n'), ((51733, 51753), 'numpy.array', 'np.array', (['self.bData'], {}), '(self.bData)\n', (51741, 51753), True, 'import numpy as np\n'), ((52632, 52651), 'data_manager.dataManagement', 'dm.dataManagement', ([], {}), '()\n', (52649, 52651), True, 'import data_manager as dm\n'), ((52696, 52715), 'data_manager.dataManagement', 'dm.dataManagement', ([], {}), '()\n', (52713, 52715), True, 'import data_manager as dm\n'), ((52760, 52779), 'data_manager.dataManagement', 'dm.dataManagement', ([], {}), '()\n', (52777, 52779), True, 'import data_manager as dm\n'), ((7968, 7989), 'numpy.shape', 'np.shape', (['arrays_clip'], {}), '(arrays_clip)\n', (7976, 7989), 
True, 'import numpy as np\n'), ((9493, 9514), 'numpy.shape', 'np.shape', (['arrays_clip'], {}), '(arrays_clip)\n', (9501, 9514), True, 'import numpy as np\n'), ((9555, 9589), 'numpy.std', 'np.std', (['(255 * arrays_clip[:, i, j])'], {}), '(255 * arrays_clip[:, i, j])\n', (9561, 9589), True, 'import numpy as np\n'), ((10725, 10748), 'numpy.array', 'np.array', (['self.clipStds'], {}), '(self.clipStds)\n', (10733, 10748), True, 'import numpy as np\n'), ((10794, 10815), 'numpy.shape', 'np.shape', (['arrays_stds'], {}), '(arrays_stds)\n', (10802, 10815), True, 'import numpy as np\n'), ((11974, 11999), 'numpy.array', 'np.array', (['self.clipSlopes'], {}), '(self.clipSlopes)\n', (11982, 11999), True, 'import numpy as np\n'), ((12045, 12068), 'numpy.shape', 'np.shape', (['arrays_slopes'], {}), '(arrays_slopes)\n', (12053, 12068), True, 'import numpy as np\n'), ((31777, 31802), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (31785, 31802), True, 'import numpy as np\n'), ((38871, 38891), 'numpy.array', 'np.array', (['m_Insample'], {}), '(m_Insample)\n', (38879, 38891), True, 'import numpy as np\n'), ((40245, 40270), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (40253, 40270), True, 'import numpy as np\n'), ((40617, 40632), 'numpy.array', 'np.array', (['inSam'], {}), '(inSam)\n', (40625, 40632), True, 'import numpy as np\n'), ((40671, 40687), 'numpy.array', 'np.array', (['out_yd'], {}), '(out_yd)\n', (40679, 40687), True, 'import numpy as np\n'), ((40726, 40742), 'numpy.array', 'np.array', (['outSam'], {}), '(outSam)\n', (40734, 40742), True, 'import numpy as np\n'), ((40856, 40906), 'scipy.optimize.leastsq', 'leastsq', (['self.error', 'p0'], {'args': '(m_Insample, out_yd)'}), '(self.error, p0, args=(m_Insample, out_yd))\n', (40863, 40906), False, 'from scipy.optimize import leastsq\n'), ((41058, 41111), 'numpy.sum', 'np.sum', (['((outSam_lsm - out_yd) * (outSam_lsm - out_yd))'], {}), '((outSam_lsm - out_yd) * 
(outSam_lsm - out_yd))\n', (41064, 41111), True, 'import numpy as np\n'), ((41291, 41314), 'numpy.array', 'np.array', (['self.ih_w[tt]'], {}), '(self.ih_w[tt])\n', (41299, 41314), True, 'import numpy as np\n'), ((41368, 41391), 'numpy.array', 'np.array', (['self.ho_w[tt]'], {}), '(self.ho_w[tt])\n', (41376, 41391), True, 'import numpy as np\n'), ((41440, 41465), 'numpy.array', 'np.array', (['self.out_b0[tt]'], {}), '(self.out_b0[tt])\n', (41448, 41465), True, 'import numpy as np\n'), ((41515, 41541), 'numpy.array', 'np.array', (['self.hide_b0[tt]'], {}), '(self.hide_b0[tt])\n', (41523, 41541), True, 'import numpy as np\n'), ((42926, 42955), 'numpy.array', 'np.array', (['self.pifsTestValues'], {}), '(self.pifsTestValues)\n', (42934, 42955), True, 'import numpy as np\n'), ((44274, 44297), 'numpy.array', 'np.array', (['self.ih_w[tt]'], {}), '(self.ih_w[tt])\n', (44282, 44297), True, 'import numpy as np\n'), ((44351, 44374), 'numpy.array', 'np.array', (['self.ho_w[tt]'], {}), '(self.ho_w[tt])\n', (44359, 44374), True, 'import numpy as np\n'), ((44423, 44448), 'numpy.array', 'np.array', (['self.out_b0[tt]'], {}), '(self.out_b0[tt])\n', (44431, 44448), True, 'import numpy as np\n'), ((44498, 44524), 'numpy.array', 'np.array', (['self.hide_b0[tt]'], {}), '(self.hide_b0[tt])\n', (44506, 44524), True, 'import numpy as np\n'), ((45669, 45685), 'numpy.mean', 'np.mean', (['(z0 * z0)'], {}), '(z0 * z0)\n', (45676, 45685), True, 'import numpy as np\n'), ((47867, 47892), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (47875, 47892), True, 'import numpy as np\n'), ((49284, 49303), 'numpy.array', 'np.array', (['corrValue'], {}), '(corrValue)\n', (49292, 49303), True, 'import numpy as np\n'), ((10886, 10907), 'numpy.shape', 'np.shape', (['arrays_stds'], {}), '(arrays_stds)\n', (10894, 10907), True, 'import numpy as np\n'), ((12139, 12162), 'numpy.shape', 'np.shape', (['arrays_slopes'], {}), '(arrays_slopes)\n', (12147, 12162), True, 'import numpy as 
np\n'), ((33906, 33921), 'numpy.sum', 'np.sum', (['(z0 * z0)'], {}), '(z0 * z0)\n', (33912, 33921), True, 'import numpy as np\n'), ((35437, 35457), 'numpy.array', 'np.array', (['m_Insample'], {}), '(m_Insample)\n', (35445, 35457), True, 'import numpy as np\n'), ((39935, 39958), 'numpy.array', 'np.array', (['self.ih_w[tt]'], {}), '(self.ih_w[tt])\n', (39943, 39958), True, 'import numpy as np\n'), ((40016, 40039), 'numpy.array', 'np.array', (['self.ho_w[tt]'], {}), '(self.ho_w[tt])\n', (40024, 40039), True, 'import numpy as np\n'), ((40092, 40117), 'numpy.array', 'np.array', (['self.out_b0[tt]'], {}), '(self.out_b0[tt])\n', (40100, 40117), True, 'import numpy as np\n'), ((40171, 40197), 'numpy.array', 'np.array', (['self.hide_b0[tt]'], {}), '(self.hide_b0[tt])\n', (40179, 40197), True, 'import numpy as np\n'), ((41658, 41678), 'numpy.array', 'np.array', (['m_Insample'], {}), '(m_Insample)\n', (41666, 41678), True, 'import numpy as np\n'), ((44696, 44716), 'numpy.array', 'np.array', (['m_Insample'], {}), '(m_Insample)\n', (44704, 44716), True, 'import numpy as np\n'), ((48416, 48439), 'numpy.array', 'np.array', (['self.ih_w[tt]'], {}), '(self.ih_w[tt])\n', (48424, 48439), True, 'import numpy as np\n'), ((48496, 48519), 'numpy.array', 'np.array', (['self.ho_w[tt]'], {}), '(self.ho_w[tt])\n', (48504, 48519), True, 'import numpy as np\n'), ((48571, 48596), 'numpy.array', 'np.array', (['self.out_b0[tt]'], {}), '(self.out_b0[tt])\n', (48579, 48596), True, 'import numpy as np\n'), ((48649, 48675), 'numpy.array', 'np.array', (['self.hide_b0[tt]'], {}), '(self.hide_b0[tt])\n', (48657, 48675), True, 'import numpy as np\n'), ((11104, 11134), 'numpy.max', 'np.max', (['arrays_values[:, i, j]'], {}), '(arrays_values[:, i, j])\n', (11110, 11134), True, 'import numpy as np\n'), ((12362, 12392), 'numpy.max', 'np.max', (['arrays_values[:, i, j]'], {}), '(arrays_values[:, i, j])\n', (12368, 12392), True, 'import numpy as np\n'), ((33737, 33762), 'numpy.shape', 'np.shape', 
(['array_Outsample'], {}), '(array_Outsample)\n', (33745, 33762), True, 'import numpy as np\n'), ((40322, 40342), 'numpy.array', 'np.array', (['m_Insample'], {}), '(m_Insample)\n', (40330, 40342), True, 'import numpy as np\n'), ((48309, 48334), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (48317, 48334), True, 'import numpy as np\n'), ((48722, 48747), 'numpy.array', 'np.array', (['self.clipValues'], {}), '(self.clipValues)\n', (48730, 48747), True, 'import numpy as np\n'), ((48838, 48857), 'numpy.array', 'np.array', (['train_img'], {}), '(train_img)\n', (48846, 48857), True, 'import numpy as np\n'), ((33801, 33826), 'numpy.array', 'np.array', (['self.pifsValues'], {}), '(self.pifsValues)\n', (33809, 33826), True, 'import numpy as np\n'), ((48945, 48964), 'numpy.array', 'np.array', (['train_img'], {}), '(train_img)\n', (48953, 48964), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import time
import math
import random
import ray
import copy, sys
from functools import partial
# Quaternion utility functions. Due to python relative imports and directory structure can't cleanly use cassie.quaternion_function
def inverse_quaternion(quaternion):
    """Return the conjugate (inverse for unit quaternions) of ``quaternion``.

    The quaternion is in [w, x, y, z] order; the vector part is negated.
    The input is not modified — a copy is returned.
    """
    conjugate = np.array(quaternion, copy=True)
    conjugate[1:4] *= -1
    return conjugate
def quaternion_product(q1, q2):
    """Return the Hamilton product ``q1 * q2`` for quaternions in [w, x, y, z] order."""
    w1, x1, y1, z1 = q1[0], q1[1], q1[2], q1[3]
    w2, x2, y2, z2 = q2[0], q2[1], q2[2], q2[3]
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + w2 * x1 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ], dtype=float)
def rotate_by_quaternion(vector, quaternion):
    """Rotate a 3-vector by ``quaternion`` via the sandwich product q * v * q^-1.

    The vector is embedded as a pure quaternion [0, x, y, z]; the rotated
    vector part is returned as a length-3 array.
    """
    pure = np.zeros(4)
    pure[1:4] = np.copy(vector)
    sandwiched = quaternion_product(pure, inverse_quaternion(quaternion))
    sandwiched = quaternion_product(np.copy(quaternion), sandwiched)
    return sandwiched[1:4]
def euler2quat(z=0, y=0, x=0):
    """Convert Euler angles (radians) to a quaternion [w, x, y, z].

    The sign is normalized so the scalar component is non-negative.
    """
    half_z, half_y, half_x = z / 2.0, y / 2.0, x / 2.0
    cz, sz = math.cos(half_z), math.sin(half_z)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cx, sx = math.cos(half_x), math.sin(half_x)
    quat = np.array([
        cx * cy * cz - sx * sy * sz,
        cx * sy * sz + cy * cz * sx,
        cx * cz * sy - sx * cy * sz,
        cx * cy * sz + sx * cz * sy,
    ])
    return -quat if quat[0] < 0 else quat
@ray.remote
class eval_worker(object):
    """Ray actor that rolls out a policy in its own copy of the environment.

    Each call to ``run_test`` applies a schedule of speed and heading
    (orientation) commands and reports whether the walker survived the
    whole schedule, and if not, where it failed.
    """
    def __init__(self, id_num, env_fn, policy, num_steps, max_speed, min_speed):
        # id_num: identifier returned with results so the driver can reuse this worker.
        # env_fn: zero-arg factory producing a fresh environment instance.
        # num_steps: env steps per command interval; speed changes at interval
        # boundaries, orientation changes halfway through each interval.
        self.id_num = id_num
        self.cassie_env = env_fn()
        # Deep-copy so this actor owns its policy weights independently.
        self.policy = copy.deepcopy(policy)
        self.num_steps = num_steps
        self.max_speed = max_speed
        self.min_speed = min_speed

    @torch.no_grad()  # pure evaluation rollout — no gradients needed
    def run_test(self, speed_schedule, orient_schedule):
        """Roll out the policy under the given command schedules.

        Returns (id_num, save_data, elapsed_seconds) where save_data is
        [passed, fail phase index, speed, orient_add, speed delta, orient delta];
        on success only save_data[0]=1 and save_data[1]=-1 are meaningful.
        """
        start_t = time.time()
        save_data = np.zeros(6)
        state = torch.Tensor(self.cassie_env.reset_for_test(full_reset=True))
        self.cassie_env.speed = 0.5
        self.cassie_env.side_speed = 0
        self.cassie_env.phase_add = 1
        num_commands = len(orient_schedule)
        count = 0
        orient_ind = 0
        # speed_schedule[0] is the initial speed (0.5), so speed commands start at index 1.
        speed_ind = 1
        orient_add = 0
        passed = 1
        # Loop until every speed and orientation command has been applied for a
        # full interval, or until the walker falls (passed == 0).
        while not (speed_ind == num_commands and orient_ind == num_commands and count == self.num_steps) and passed:
            # Update speed command
            if count == self.num_steps:
                count = 0
                self.cassie_env.speed = speed_schedule[speed_ind]
                self.cassie_env.speed = np.clip(self.cassie_env.speed, self.min_speed, self.max_speed)
                # Faster stepping frequency for high speed commands.
                if self.cassie_env.speed > 1.4:
                    self.cassie_env.phase_add = 1.5
                else:
                    self.cassie_env.phase_add = 1
                speed_ind += 1
            # Update orientation command (applied halfway through each interval)
            elif count == self.num_steps // 2:
                orient_add += orient_schedule[orient_ind]
                orient_ind += 1

            # Update orientation
            # TODO: Make update orientation function in each env to this will work with an abitrary environment
            # Rotate the observed orientation (state[1:5]) and translational
            # velocity (state[15:18]) into the commanded heading frame so the
            # policy always sees a "face forward" command.
            quaternion = euler2quat(z=orient_add, y=0, x=0)
            iquaternion = inverse_quaternion(quaternion)
            curr_orient = state[1:5]
            curr_transvel = state[15:18]
            new_orient = quaternion_product(iquaternion, curr_orient)
            # Keep the scalar part non-negative (canonical quaternion sign).
            if new_orient[0] < 0:
                new_orient = -new_orient
            new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
            state[1:5] = torch.FloatTensor(new_orient)
            state[15:18] = torch.FloatTensor(new_translationalVelocity)

            # Get action
            action = self.policy(state, True)
            action = action.data.numpy()
            state, reward, done, _ = self.cassie_env.step(action)
            state = torch.Tensor(state)
            # Height (qpos[2], presumably the pelvis) below 0.4 m is treated as a fall.
            if self.cassie_env.sim.qpos()[2] < 0.4:
                passed = 0
            count += 1
        if passed:
            save_data[0] = passed
            save_data[1] = -1
        else:
            # Record where it failed: half-interval phase index, active commands,
            # and the size of the last speed/orientation change.
            save_data[:] = np.array([passed, count//(self.num_steps//2), self.cassie_env.speed, orient_add,\
                        self.cassie_env.speed-speed_schedule[max(0, speed_ind-2)], orient_schedule[orient_ind-1]])

        return self.id_num, save_data, time.time() - start_t
def eval_commands_multi(env_fn, policy, num_steps=200, num_commands=4, max_speed=3, min_speed=0, num_iters=4, num_procs=4, filename="test_eval_command.npy"):
    """Run ``num_iters`` command-following tests in parallel using Ray eval workers.

    Pre-generates one random speed/orientation schedule per iteration, keeps
    ``num_procs`` persistent workers busy (as each finishes a test, it is
    immediately handed the next pending schedule), and saves the stacked
    (num_iters, 6) result array to ``filename``.
    """
    start_t1 = time.time()
    ray.init(num_cpus=num_procs)
    total_data = np.zeros((num_iters, 6))
    # Make all args
    all_speed_schedule = np.zeros((num_iters, num_commands))
    all_orient_schedule = np.zeros((num_iters, num_commands))
    for i in range(num_iters):
        all_speed_schedule[i, 0] = 0.5
        for j in range(num_commands-1):
            # Random speed delta; flip its sign if it would leave the allowed range.
            speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
            if all_speed_schedule[i, j] + speed_add < min_speed or all_speed_schedule[i, j] + speed_add > max_speed:
                speed_add *= -1
            all_speed_schedule[i, j+1] = all_speed_schedule[i, j] + speed_add
        orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
        orient_sign = np.random.choice((-1, 1), num_commands)
        all_orient_schedule[i, :] = orient_schedule * orient_sign
    # Make and start eval workers
    workers = [eval_worker.remote(i, env_fn, policy, num_steps, max_speed, min_speed) for i in range(num_procs)]
    eval_ids = [workers[i].run_test.remote(all_speed_schedule[i, :], all_orient_schedule[i, :]) for i in range(num_procs)]
    print("started workers")
    curr_arg_ind = num_procs   # index of the next schedule to dispatch
    curr_data_ind = 0          # index of the next result row to fill
    bar_width = 30
    sys.stdout.write(progress_bar(0, num_iters, bar_width, 0))
    sys.stdout.flush()
    eval_start = time.time()
    # Dynamic scheduling loop: wait for any worker, store its result, and
    # reuse that same worker for the next pending schedule.
    while curr_arg_ind < num_iters:
        done_id = ray.wait(eval_ids, num_returns=1, timeout=None)[0][0]
        worker_id, data, eval_time = ray.get(done_id)
        total_data[curr_data_ind, :] = data
        eval_ids.remove(done_id)
        eval_ids.append(workers[worker_id].run_test.remote(all_speed_schedule[curr_arg_ind, :], all_orient_schedule[curr_arg_ind, :]))
        curr_arg_ind += 1
        curr_data_ind += 1
        sys.stdout.write("\r{}".format(progress_bar(curr_data_ind, num_iters, bar_width, (time.time()-eval_start))))
        sys.stdout.flush()
    # Drain the final in-flight batch of tests.
    result = ray.get(eval_ids)
    for ret_tuple in result:
        total_data[curr_data_ind, :] = ret_tuple[1]
        curr_data_ind += 1
    sys.stdout.write("\r{}".format(progress_bar(num_iters, num_iters, bar_width, time.time()-eval_start)))
    print("")
    print("Got all results")
    np.save(filename, total_data)
    print("total time: ", time.time() - start_t1)
    ray.shutdown()
def progress_bar(curr_ind, total_ind, bar_width, elapsed_time):
    """Return a one-line textual progress bar with an ETA estimate.

    Args:
        curr_ind: number of completed iterations.
        total_ind: total number of iterations.
        bar_width: width of the bar in characters.
        elapsed_time: seconds elapsed so far (0 on the very first call).

    Returns:
        A string like ``"[-----     ] 50.00% complete 2.0 elapsed, 2.0 left"``.
    """
    num_bar = int((curr_ind / total_ind) // (1/bar_width))
    num_space = int(bar_width - num_bar)
    outstring = "[{}]".format("-"*num_bar + " "*num_space)
    outstring += " {:.2f}% complete".format(curr_ind / total_ind * 100)
    # Guard on curr_ind == 0 as well: the original only checked
    # elapsed_time == 0, so a first call with a nonzero elapsed time would
    # hit a ZeroDivisionError in the extrapolation below.
    if elapsed_time == 0 or curr_ind == 0:
        time_left = "N/A"
        outstring += " {:.1f} elapsed, {} left".format(elapsed_time, time_left)
    else:
        # Linear extrapolation from the average time per completed iteration.
        time_left = elapsed_time/curr_ind*(total_ind-curr_ind)
        outstring += " {:.1f} elapsed, {:.1f} left".format(elapsed_time, time_left)
    return outstring
def _mean_or_na(values):
    """Mean of a 1-D array, or the string "N/A" when it is empty."""
    return np.mean(values) if len(values) else "N/A"

def report_stats(filename):
    """Load saved eval results and print pass-rate / failure statistics.

    The file is expected to hold a (num_iters, 6) array as produced by
    eval_commands / eval_commands_multi, with columns
    [passed, failure phase, speed, orient_add, speed delta, orient delta].

    Changes vs. the original: the unused ``success_inds`` local was removed,
    and empty positive/negative subsets now print "N/A" (matching the
    existing convention) instead of a mean-of-empty-slice nan + warning.
    """
    data = np.load(filename)
    num_iters = data.shape[0]
    pass_rate = np.sum(data[:, 0]) / num_iters
    # Phase 0 failures occurred right after a speed command,
    # phase 1 failures right after an orientation command.
    speed_fail_inds = np.where(data[:, 1] == 0)[0]
    orient_fail_inds = np.where(data[:, 1] == 1)[0]
    print("pass rate: ", pass_rate)
    # Command delta that was active when the robot fell.
    speed_change = data[speed_fail_inds, 4]
    orient_change = data[orient_fail_inds, 5]
    print("Number of speed failures: ", len(speed_fail_inds))
    print("Number of orient failures: ", len(orient_fail_inds))
    # Average delta at failure, split by sign of the commanded change.
    avg_pos_speed = _mean_or_na(speed_change[speed_change > 0])
    avg_neg_speed = _mean_or_na(speed_change[speed_change < 0])
    avg_pos_orient = _mean_or_na(orient_change[orient_change > 0])
    avg_neg_orient = _mean_or_na(orient_change[orient_change < 0])
    print("avg pos speed failure: ", avg_pos_speed)
    print("avg neg speed failure: ", avg_neg_speed)
    print("avg pos orient failure: ", avg_pos_orient)
    print("avg neg orient failure: ", avg_neg_orient)
@torch.no_grad()
def eval_commands(cassie_env, policy, num_steps=200, num_commands=2, max_speed=3, min_speed=0, num_iters=1):
    """Serial (single-process) version of the command-following evaluation.

    For each of ``num_iters`` trials, generates a random speed/orientation
    schedule, rolls the policy out while applying commands on a fixed cadence,
    and records whether the robot stayed upright.

    Returns:
        (num_iters, 6) array; per row: [passed flag, failure phase (-1 if
        passed), speed at failure, orient_add at failure, speed delta at
        failure, orient delta at failure].
    """
    # save_data will hold whether passed or not (1 or 0), whether orient command or speed command caused failure (1, 0),
    # speed and orient command at failure, and speed and orient change at failure
    save_data = np.zeros((num_iters, 6))
    start_t = time.time()
    for j in range(num_iters):
        state = torch.Tensor(cassie_env.reset_for_test())
        cassie_env.speed = 0.5
        cassie_env.side_speed = 0
        cassie_env.phase_add = 1
        # Build a random walk of speed commands starting at 0.5; a delta is
        # sign-flipped when it would leave [min_speed, max_speed].
        speed_schedule = [0.5]
        for i in range(num_commands-1):
            speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
            if speed_schedule[i] + speed_add < min_speed or speed_schedule[i] + speed_add > max_speed:
                speed_add *= -1
            speed_schedule.append(speed_schedule[i] + speed_add)
        # Random heading changes between 30 and 60 degrees, random direction.
        orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
        orient_sign = np.random.choice((-1, 1), num_commands)
        orient_schedule = orient_schedule * orient_sign
        # print("Speed schedule: ", speed_schedule)
        # print("Orient schedule: ", orient_schedule)
        count = 0        # steps taken inside the current command window
        orient_ind = 0   # next orientation command to apply
        speed_ind = 1    # next speed command to apply (index 0 is the initial 0.5)
        orient_add = 0   # cumulative commanded heading offset
        passed = 1
        while not (speed_ind == num_commands and orient_ind == num_commands and count == num_steps) and passed:
            # New speed command at the start of each window; new orientation
            # command at the halfway point.
            if count == num_steps:
                count = 0
                cassie_env.speed = speed_schedule[speed_ind]
                cassie_env.speed = np.clip(cassie_env.speed, min_speed, max_speed)
                if cassie_env.speed > 1.4:
                    cassie_env.phase_add = 1.5
                else:
                    cassie_env.phase_add = 1
                speed_ind += 1
                # print("Current speed: ", cassie_env.speed, speed_ind)
            elif count == num_steps // 2:
                orient_add += orient_schedule[orient_ind]
                orient_ind += 1
                # print("Current orient add: ", orient_add, orient_ind)
            # Update orientation
            # Rotate observed orientation/velocity into the commanded heading frame.
            quaternion = euler2quat(z=orient_add, y=0, x=0)
            iquaternion = inverse_quaternion(quaternion)
            curr_orient = state[1:5]
            curr_transvel = state[15:18]
            new_orient = quaternion_product(iquaternion, curr_orient)
            if new_orient[0] < 0:
                new_orient = -new_orient
            new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
            state[1:5] = torch.FloatTensor(new_orient)
            state[15:18] = torch.FloatTensor(new_translationalVelocity)
            # Get action
            action = policy(state, True)
            action = action.data.numpy()
            state, reward, done, _ = cassie_env.step(action)
            state = torch.Tensor(state)
            # Pelvis height below 0.4 m counts as a fall.
            if cassie_env.sim.qpos()[2] < 0.4:
                # print("Failed")
                passed = 0
            count += 1
        if passed:
            # print("passed")
            save_data[j, 0] = passed
            save_data[j, 1] = -1
        else:
            # print("didnt pass")
            save_data[j, :] = np.array([passed, count//(num_steps//2), cassie_env.speed, orient_add,\
                        cassie_env.speed-speed_schedule[max(0, speed_ind-2)], orient_schedule[orient_ind-1]])
    print("time: ", time.time() - start_t)
    return save_data
def vis_commands(cassie_env, policy, num_steps=200, num_commands=4, max_speed=1, min_speed=0):
    """Interactively visualize the policy following a random command schedule.

    Same command logic as eval_commands, but rendered in the simulator
    viewer at roughly ``speedup``x real time. Exits the process when the
    schedule is exhausted or the robot falls.
    """
    state = torch.Tensor(cassie_env.reset_for_test())
    cassie_env.speed = 0.5
    cassie_env.side_speed = 0
    cassie_env.phase_add = 1
    # orient_schedule = np.pi/4*np.arange(8)
    # speed_schedule = np.random.uniform(-1.5, 1.5, 4)
    # Random walk of speed commands starting at 0.5; deltas are sign-flipped
    # when they would leave [min_speed, max_speed].
    speed_schedule = [0.5]
    for i in range(num_commands-1):
        speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
        if speed_schedule[i] + speed_add < min_speed or speed_schedule[i] + speed_add > max_speed:
            speed_add *= -1
        speed_schedule.append(speed_schedule[i] + speed_add)
    # Random heading changes between 30 and 60 degrees, random direction.
    orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
    orient_sign = np.random.choice((-1, 1), num_commands)
    orient_schedule = orient_schedule * orient_sign
    print("Speed schedule: ", speed_schedule)
    print("Orient schedule: ", orient_schedule)
    dt = 0.05      # nominal seconds per rendered frame
    speedup = 3    # playback speed multiplier
    count = 0
    orient_ind = 0
    speed_ind = 0
    orient_add = 0
    # print("Current orient add: ", orient_add)
    render_state = cassie_env.render()
    with torch.no_grad():
        while render_state:
            # Only advance the simulation while the viewer is not paused.
            if (not cassie_env.vis.ispaused()):
                # orient_add = orient_schedule[math.floor(count/num_steps)]
                if count == num_steps:
                    count = 0
                    speed_ind += 1
                    if speed_ind >= len(speed_schedule):
                        print("speed Done")
                        exit()
                    cassie_env.speed = speed_schedule[speed_ind]
                    cassie_env.speed = np.clip(cassie_env.speed, 0, 3)
                    if cassie_env.speed > 1.4:
                        cassie_env.phase_add = 1.5
                    print("Current speed: ", cassie_env.speed)
                elif count == num_steps // 2:
                    orient_ind += 1
                    if orient_ind >= len(orient_schedule):
                        print("orient Done")
                        exit()
                    orient_add += orient_schedule[orient_ind]
                    print("Current orient add: ", orient_add)
                # Update orientation
                # Rotate observed orientation/velocity into the commanded heading frame.
                quaternion = euler2quat(z=orient_add, y=0, x=0)
                iquaternion = inverse_quaternion(quaternion)
                curr_orient = state[1:5]
                curr_transvel = state[15:18]
                new_orient = quaternion_product(iquaternion, curr_orient)
                if new_orient[0] < 0:
                    new_orient = -new_orient
                new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
                state[1:5] = torch.FloatTensor(new_orient)
                state[15:18] = torch.FloatTensor(new_translationalVelocity)
                # Get action
                action = policy(state, True)
                action = action.data.numpy()
                state, reward, done, _ = cassie_env.step(action)
                # Pelvis height below 0.4 m counts as a fall.
                if cassie_env.sim.qpos()[2] < 0.4:
                    print("Failed")
                    exit()
                else:
                    state = torch.Tensor(state)
                count += 1
            # Render every frame (even while paused) and pace the playback.
            render_state = cassie_env.render()
            time.sleep(dt / speedup)
################################
##### DEPRACATED FUNCTIONS #####
################################
@ray.remote
@torch.no_grad()
def eval_commands_worker(env_fn, policy, num_steps, num_commands, max_speed, min_speed, num_iters):
    """Deprecated Ray task: run ``num_iters`` command tests in its own env.

    Unlike the eval_worker actor, this stateless task constructs its own
    environment and generates its own random schedules.

    Returns:
        ((num_iters, 6) result array, wall time in seconds).
    """
    cassie_env = env_fn()
    # save_data will hold whether passed or not (1 or 0), whether orient command or speed command caused failure (1, 0),
    # speed and orient command at failure, and speed and orient change at failure
    save_data = np.zeros((num_iters, 6))
    start_t = time.time()
    for j in range(num_iters):
        state = torch.Tensor(cassie_env.reset_for_test())
        cassie_env.speed = 0.5
        cassie_env.side_speed = 0
        cassie_env.phase_add = 1
        # Random walk of speed commands starting at 0.5; deltas are
        # sign-flipped when they would leave [min_speed, max_speed].
        speed_schedule = [0.5]
        for i in range(num_commands-1):
            speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
            if speed_schedule[i] + speed_add < min_speed or speed_schedule[i] + speed_add > max_speed:
                speed_add *= -1
            speed_schedule.append(speed_schedule[i] + speed_add)
        # Random heading changes between 30 and 60 degrees, random direction.
        orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
        orient_sign = np.random.choice((-1, 1), num_commands)
        orient_schedule = orient_schedule * orient_sign
        count = 0        # steps taken inside the current command window
        orient_ind = 0   # next orientation command to apply
        speed_ind = 1    # next speed command to apply (index 0 is the initial 0.5)
        orient_add = 0   # cumulative commanded heading offset
        passed = 1
        while not (speed_ind == num_commands and orient_ind == num_commands and count == num_steps) and passed:
            # New speed command at the start of each window; new orientation
            # command at the halfway point.
            if count == num_steps:
                count = 0
                cassie_env.speed = speed_schedule[speed_ind]
                cassie_env.speed = np.clip(cassie_env.speed, min_speed, max_speed)
                if cassie_env.speed > 1.4:
                    cassie_env.phase_add = 1.5
                else:
                    cassie_env.phase_add = 1
                speed_ind += 1
            elif count == num_steps // 2:
                orient_add += orient_schedule[orient_ind]
                orient_ind += 1
            # Update orientation
            # Rotate observed orientation/velocity into the commanded heading frame.
            quaternion = euler2quat(z=orient_add, y=0, x=0)
            iquaternion = inverse_quaternion(quaternion)
            curr_orient = state[1:5]
            curr_transvel = state[15:18]
            new_orient = quaternion_product(iquaternion, curr_orient)
            if new_orient[0] < 0:
                new_orient = -new_orient
            new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
            state[1:5] = torch.FloatTensor(new_orient)
            state[15:18] = torch.FloatTensor(new_translationalVelocity)
            # Get action
            action = policy(state, True)
            action = action.data.numpy()
            state, reward, done, _ = cassie_env.step(action)
            state = torch.Tensor(state)
            # Pelvis height below 0.4 m counts as a fall.
            if cassie_env.sim.qpos()[2] < 0.4:
                passed = 0
            count += 1
        if passed:
            save_data[j, 0] = passed
            save_data[j, 1] = -1
        else:
            save_data[j, :] = np.array([passed, count//(num_steps//2), cassie_env.speed, orient_add,\
                        cassie_env.speed-speed_schedule[max(0, speed_ind-2)], orient_schedule[orient_ind-1]])
        # if save_data[j, 1] == 0:
        # print("speed diff: ", speed_schedule[speed_ind-1]-speed_schedule[speed_ind-2])
        # print("curr speed: ", cassie_env.speed)
        # print("speed schedule: ", speed_schedule)
        # print("speed ind: ", speed_ind)
        # print("curr schedule: ", speed_schedule[speed_ind-1])
    return save_data, time.time() - start_t
# TODO: Change to create workers, then pass a single iter to each one. This way, in case a worker finishes before the others
# it can start running more iters. Can also add running stats of how many more tests to run, w/ loading bar
def eval_commands_multi_old(env_fn, policy, num_steps=200, num_commands=4, max_speed=3, min_speed=0, num_iters=4, num_procs=4, filename="test_eval_command.npy"):
    """Deprecated: statically split ``num_iters`` tests across ``num_procs`` Ray tasks.

    Each worker runs its whole share of iterations; results are concatenated
    into one (num_iters, 6) array and saved to ``filename``.
    """
    launch_time = time.time()
    ray.init(num_cpus=num_procs)
    base_share = num_iters // num_procs
    pending = []
    for proc_idx in range(num_procs):
        if proc_idx == num_procs - 1:
            # Last worker absorbs the remainder when num_iters does not
            # divide evenly across processes.
            share = num_iters - proc_idx * base_share
        else:
            share = base_share
        print("curr iters: ", share)
        print("Starting worker ", proc_idx)
        pending.append(eval_commands_worker.remote(
            env_fn, policy, num_steps, num_commands, max_speed, min_speed, share))
    results = ray.get(pending)
    print("Got all results")
    # Each result is (data, wall_time); keep only the data arrays.
    np.save(filename, np.concatenate([r[0] for r in results], axis=0))
    print("total time: ", time.time() - launch_time)
    ray.shutdown()
"numpy.clip",
"time.sleep",
"math.cos",
"numpy.array",
"copy.deepcopy",
"ray.init",
"numpy.save",
"numpy.load",
"numpy.mean",
"numpy.where",
"sys.stdout.flush",
"random.uniform",
"random.choice",
"ray.get",
"numpy.random.choice",
"torch.Tensor",
"ray.wait",
"time.time",
"numpy.co... | [((9054, 9069), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9067, 9069), False, 'import torch\n'), ((15957, 15972), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15970, 15972), False, 'import torch\n'), ((375, 394), 'numpy.copy', 'np.copy', (['quaternion'], {}), '(quaternion)\n', (382, 394), True, 'import numpy as np\n'), ((481, 492), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (489, 492), True, 'import numpy as np\n'), ((805, 824), 'numpy.copy', 'np.copy', (['quaternion'], {}), '(quaternion)\n', (812, 824), True, 'import numpy as np\n'), ((831, 842), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (839, 842), True, 'import numpy as np\n'), ((854, 869), 'numpy.copy', 'np.copy', (['vector'], {}), '(vector)\n', (861, 869), True, 'import numpy as np\n'), ((1085, 1096), 'math.cos', 'math.cos', (['z'], {}), '(z)\n', (1093, 1096), False, 'import math\n'), ((1106, 1117), 'math.sin', 'math.sin', (['z'], {}), '(z)\n', (1114, 1117), False, 'import math\n'), ((1127, 1138), 'math.cos', 'math.cos', (['y'], {}), '(y)\n', (1135, 1138), False, 'import math\n'), ((1148, 1159), 'math.sin', 'math.sin', (['y'], {}), '(y)\n', (1156, 1159), False, 'import math\n'), ((1169, 1180), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (1177, 1180), False, 'import math\n'), ((1190, 1201), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (1198, 1201), False, 'import math\n'), ((1216, 1346), 'numpy.array', 'np.array', (['[cx * cy * cz - sx * sy * sz, cx * sy * sz + cy * cz * sx, cx * cz * sy - \n sx * cy * sz, cx * cy * sz + sx * cz * sy]'], {}), '([cx * cy * cz - sx * sy * sz, cx * sy * sz + cy * cz * sx, cx * cz *\n sy - sx * cy * sz, cx * cy * sz + sx * cz * sy])\n', (1224, 1346), True, 'import numpy as np\n'), ((1766, 1781), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1779, 1781), False, 'import torch\n'), ((4589, 4600), 'time.time', 'time.time', ([], {}), '()\n', (4598, 4600), False, 'import time\n'), ((4605, 4633), 'ray.init', 'ray.init', ([], 
{'num_cpus': 'num_procs'}), '(num_cpus=num_procs)\n', (4613, 4633), False, 'import ray\n'), ((4651, 4675), 'numpy.zeros', 'np.zeros', (['(num_iters, 6)'], {}), '((num_iters, 6))\n', (4659, 4675), True, 'import numpy as np\n'), ((4721, 4756), 'numpy.zeros', 'np.zeros', (['(num_iters, num_commands)'], {}), '((num_iters, num_commands))\n', (4729, 4756), True, 'import numpy as np\n'), ((4783, 4818), 'numpy.zeros', 'np.zeros', (['(num_iters, num_commands)'], {}), '((num_iters, num_commands))\n', (4791, 4818), True, 'import numpy as np\n'), ((5868, 5886), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5884, 5886), False, 'import copy, sys\n'), ((5904, 5915), 'time.time', 'time.time', ([], {}), '()\n', (5913, 5915), False, 'import time\n'), ((6506, 6523), 'ray.get', 'ray.get', (['eval_ids'], {}), '(eval_ids)\n', (6513, 6523), False, 'import ray\n'), ((6786, 6815), 'numpy.save', 'np.save', (['filename', 'total_data'], {}), '(filename, total_data)\n', (6793, 6815), True, 'import numpy as np\n'), ((6870, 6884), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (6882, 6884), False, 'import ray\n'), ((7531, 7548), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (7538, 7548), True, 'import numpy as np\n'), ((8073, 8099), 'numpy.where', 'np.where', (['(speed_change < 0)'], {}), '(speed_change < 0)\n', (8081, 8099), True, 'import numpy as np\n'), ((8121, 8147), 'numpy.where', 'np.where', (['(speed_change > 0)'], {}), '(speed_change > 0)\n', (8129, 8147), True, 'import numpy as np\n'), ((8170, 8197), 'numpy.where', 'np.where', (['(orient_change < 0)'], {}), '(orient_change < 0)\n', (8178, 8197), True, 'import numpy as np\n'), ((8220, 8247), 'numpy.where', 'np.where', (['(orient_change > 0)'], {}), '(orient_change > 0)\n', (8228, 8247), True, 'import numpy as np\n'), ((9398, 9422), 'numpy.zeros', 'np.zeros', (['(num_iters, 6)'], {}), '((num_iters, 6))\n', (9406, 9422), True, 'import numpy as np\n'), ((9437, 9448), 'time.time', 'time.time', ([], {}), '()\n', 
(9446, 9448), False, 'import time\n'), ((13217, 13270), 'numpy.random.uniform', 'np.random.uniform', (['(np.pi / 6)', '(np.pi / 3)', 'num_commands'], {}), '(np.pi / 6, np.pi / 3, num_commands)\n', (13234, 13270), True, 'import numpy as np\n'), ((13285, 13324), 'numpy.random.choice', 'np.random.choice', (['(-1, 1)', 'num_commands'], {}), '((-1, 1), num_commands)\n', (13301, 13324), True, 'import numpy as np\n'), ((16318, 16342), 'numpy.zeros', 'np.zeros', (['(num_iters, 6)'], {}), '((num_iters, 6))\n', (16326, 16342), True, 'import numpy as np\n'), ((16357, 16368), 'time.time', 'time.time', ([], {}), '()\n', (16366, 16368), False, 'import time\n'), ((19878, 19889), 'time.time', 'time.time', ([], {}), '()\n', (19887, 19889), False, 'import time\n'), ((19894, 19922), 'ray.init', 'ray.init', ([], {'num_cpus': 'num_procs'}), '(num_cpus=num_procs)\n', (19902, 19922), False, 'import ray\n'), ((20435, 20454), 'ray.get', 'ray.get', (['result_ids'], {}), '(result_ids)\n', (20442, 20454), False, 'import ray\n'), ((20819, 20848), 'numpy.save', 'np.save', (['filename', 'total_data'], {}), '(filename, total_data)\n', (20826, 20848), True, 'import numpy as np\n'), ((20903, 20917), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (20915, 20917), False, 'import ray\n'), ((1633, 1654), 'copy.deepcopy', 'copy.deepcopy', (['policy'], {}), '(policy)\n', (1646, 1654), False, 'import copy, sys\n'), ((1857, 1868), 'time.time', 'time.time', ([], {}), '()\n', (1866, 1868), False, 'import time\n'), ((1889, 1900), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (1897, 1900), True, 'import numpy as np\n'), ((5254, 5307), 'numpy.random.uniform', 'np.random.uniform', (['(np.pi / 6)', '(np.pi / 3)', 'num_commands'], {}), '(np.pi / 6, np.pi / 3, num_commands)\n', (5271, 5307), True, 'import numpy as np\n'), ((5326, 5365), 'numpy.random.choice', 'np.random.choice', (['(-1, 1)', 'num_commands'], {}), '((-1, 1), num_commands)\n', (5342, 5365), True, 'import numpy as np\n'), ((6061, 6077), 
'ray.get', 'ray.get', (['done_id'], {}), '(done_id)\n', (6068, 6077), False, 'import ray\n'), ((6469, 6487), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6485, 6487), False, 'import copy, sys\n'), ((7595, 7613), 'numpy.sum', 'np.sum', (['data[:, 0]'], {}), '(data[:, 0])\n', (7601, 7613), True, 'import numpy as np\n'), ((7645, 7670), 'numpy.where', 'np.where', (['(data[:, 0] == 1)'], {}), '(data[:, 0] == 1)\n', (7653, 7670), True, 'import numpy as np\n'), ((7729, 7754), 'numpy.where', 'np.where', (['(data[:, 1] == 0)'], {}), '(data[:, 1] == 0)\n', (7737, 7754), True, 'import numpy as np\n'), ((7781, 7806), 'numpy.where', 'np.where', (['(data[:, 1] == 1)'], {}), '(data[:, 1] == 1)\n', (7789, 7806), True, 'import numpy as np\n'), ((8502, 8539), 'numpy.mean', 'np.mean', (['speed_change[speed_pos_inds]'], {}), '(speed_change[speed_pos_inds])\n', (8509, 8539), True, 'import numpy as np\n'), ((8564, 8601), 'numpy.mean', 'np.mean', (['speed_change[speed_neg_inds]'], {}), '(speed_change[speed_neg_inds])\n', (8571, 8601), True, 'import numpy as np\n'), ((8734, 8773), 'numpy.mean', 'np.mean', (['orient_change[orient_pos_inds]'], {}), '(orient_change[orient_pos_inds])\n', (8741, 8773), True, 'import numpy as np\n'), ((8799, 8838), 'numpy.mean', 'np.mean', (['orient_change[orient_neg_inds]'], {}), '(orient_change[orient_neg_inds])\n', (8806, 8838), True, 'import numpy as np\n'), ((10005, 10058), 'numpy.random.uniform', 'np.random.uniform', (['(np.pi / 6)', '(np.pi / 3)', 'num_commands'], {}), '(np.pi / 6, np.pi / 3, num_commands)\n', (10022, 10058), True, 'import numpy as np\n'), ((10077, 10116), 'numpy.random.choice', 'np.random.choice', (['(-1, 1)', 'num_commands'], {}), '((-1, 1), num_commands)\n', (10093, 10116), True, 'import numpy as np\n'), ((13669, 13684), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13682, 13684), False, 'import torch\n'), ((16925, 16978), 'numpy.random.uniform', 'np.random.uniform', (['(np.pi / 6)', '(np.pi / 3)', 
'num_commands'], {}), '(np.pi / 6, np.pi / 3, num_commands)\n', (16942, 16978), True, 'import numpy as np\n'), ((16997, 17036), 'numpy.random.choice', 'np.random.choice', (['(-1, 1)', 'num_commands'], {}), '((-1, 1), num_commands)\n', (17013, 17036), True, 'import numpy as np\n'), ((3610, 3639), 'torch.FloatTensor', 'torch.FloatTensor', (['new_orient'], {}), '(new_orient)\n', (3627, 3639), False, 'import torch\n'), ((3667, 3711), 'torch.FloatTensor', 'torch.FloatTensor', (['new_translationalVelocity'], {}), '(new_translationalVelocity)\n', (3684, 3711), False, 'import torch\n'), ((3911, 3930), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (3923, 3930), False, 'import torch\n'), ((6842, 6853), 'time.time', 'time.time', ([], {}), '()\n', (6851, 6853), False, 'import time\n'), ((11654, 11683), 'torch.FloatTensor', 'torch.FloatTensor', (['new_orient'], {}), '(new_orient)\n', (11671, 11683), False, 'import torch\n'), ((11711, 11755), 'torch.FloatTensor', 'torch.FloatTensor', (['new_translationalVelocity'], {}), '(new_translationalVelocity)\n', (11728, 11755), False, 'import torch\n'), ((11945, 11964), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (11957, 11964), False, 'import torch\n'), ((12495, 12506), 'time.time', 'time.time', ([], {}), '()\n', (12504, 12506), False, 'import time\n'), ((12959, 12981), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (12972, 12981), False, 'import random\n'), ((12982, 13006), 'random.uniform', 'random.uniform', (['(0.4)', '(1.3)'], {}), '(0.4, 1.3)\n', (12996, 13006), False, 'import random\n'), ((15818, 15842), 'time.sleep', 'time.sleep', (['(dt / speedup)'], {}), '(dt / speedup)\n', (15828, 15842), False, 'import time\n'), ((18324, 18353), 'torch.FloatTensor', 'torch.FloatTensor', (['new_orient'], {}), '(new_orient)\n', (18341, 18353), False, 'import torch\n'), ((18381, 18425), 'torch.FloatTensor', 'torch.FloatTensor', (['new_translationalVelocity'], {}), 
'(new_translationalVelocity)\n', (18398, 18425), False, 'import torch\n'), ((18615, 18634), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (18627, 18634), False, 'import torch\n'), ((19445, 19456), 'time.time', 'time.time', ([], {}), '()\n', (19454, 19456), False, 'import time\n'), ((20875, 20886), 'time.time', 'time.time', ([], {}), '()\n', (20884, 20886), False, 'import time\n'), ((2566, 2628), 'numpy.clip', 'np.clip', (['self.cassie_env.speed', 'self.min_speed', 'self.max_speed'], {}), '(self.cassie_env.speed, self.min_speed, self.max_speed)\n', (2573, 2628), True, 'import numpy as np\n'), ((4393, 4404), 'time.time', 'time.time', ([], {}), '()\n', (4402, 4404), False, 'import time\n'), ((4953, 4975), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (4966, 4975), False, 'import random\n'), ((4976, 5000), 'random.uniform', 'random.uniform', (['(0.4)', '(1.3)'], {}), '(0.4, 1.3)\n', (4990, 5000), False, 'import random\n'), ((5970, 6017), 'ray.wait', 'ray.wait', (['eval_ids'], {'num_returns': '(1)', 'timeout': 'None'}), '(eval_ids, num_returns=1, timeout=None)\n', (5978, 6017), False, 'import ray\n'), ((9731, 9753), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9744, 9753), False, 'import random\n'), ((9754, 9778), 'random.uniform', 'random.uniform', (['(0.4)', '(1.3)'], {}), '(0.4, 1.3)\n', (9768, 9778), False, 'import random\n'), ((10654, 10701), 'numpy.clip', 'np.clip', (['cassie_env.speed', 'min_speed', 'max_speed'], {}), '(cassie_env.speed, min_speed, max_speed)\n', (10661, 10701), True, 'import numpy as np\n'), ((15240, 15269), 'torch.FloatTensor', 'torch.FloatTensor', (['new_orient'], {}), '(new_orient)\n', (15257, 15269), False, 'import torch\n'), ((15301, 15345), 'torch.FloatTensor', 'torch.FloatTensor', (['new_translationalVelocity'], {}), '(new_translationalVelocity)\n', (15318, 15345), False, 'import torch\n'), ((16651, 16673), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', 
(16664, 16673), False, 'import random\n'), ((16674, 16698), 'random.uniform', 'random.uniform', (['(0.4)', '(1.3)'], {}), '(0.4, 1.3)\n', (16688, 16698), False, 'import random\n'), ((17468, 17515), 'numpy.clip', 'np.clip', (['cassie_env.speed', 'min_speed', 'max_speed'], {}), '(cassie_env.speed, min_speed, max_speed)\n', (17475, 17515), True, 'import numpy as np\n'), ((6713, 6724), 'time.time', 'time.time', ([], {}), '()\n', (6722, 6724), False, 'import time\n'), ((14178, 14209), 'numpy.clip', 'np.clip', (['cassie_env.speed', '(0)', '(3)'], {}), '(cassie_env.speed, 0, 3)\n', (14185, 14209), True, 'import numpy as np\n'), ((15695, 15714), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (15707, 15714), False, 'import torch\n'), ((6434, 6445), 'time.time', 'time.time', ([], {}), '()\n', (6443, 6445), False, 'import time\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import subprocess
import time
from pprint import pprint
import argparse
import numpy as np
import paddle.fluid.dygraph as dg
from paddle import fluid
from visualdl import LogWriter
import utils
from parakeet.utils import io
from waveflow import WaveFlow
def add_options_to_parser(parser):
    """Register the common command-line options for WaveFlow training.

    Args:
        parser: an ``argparse.ArgumentParser`` to extend in place.
    """
    # (flag, add_argument keyword arguments), registered in help-text order.
    option_table = [
        ("--model",
         dict(type=str, default='waveflow',
              help="general name of the model")),
        ("--name",
         dict(type=str,
              help="specific name of the training model")),
        ("--root",
         dict(type=str,
              help="root path of the LJSpeech dataset")),
        ("--use_gpu",
         dict(type=utils.str2bool, default=True,
              help="option to use gpu training")),
        ("--iteration",
         dict(type=int, default=None,
              help=("which iteration of checkpoint to load, "
                    "default to load the latest checkpoint"))),
        ("--checkpoint",
         dict(type=str, default=None,
              help="path of the checkpoint to load")),
    ]
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
def train(config):
    """Train a WaveFlow model, optionally in multi-process data-parallel mode.

    Rank 0 additionally prints the config, creates the checkpoint directory,
    owns the VisualDL logger, and saves checkpoints.

    Args:
        config: parsed argparse namespace merged with the yaml config
            (use_gpu, model, name, seed, max_iterations, test_every,
            save_every, ...).
    """
    use_gpu = config.use_gpu
    # Get the rank of the current training process.
    rank = dg.parallel.Env().local_rank
    nranks = dg.parallel.Env().nranks
    parallel = nranks > 1
    if rank == 0:
        # Print the whole config setting.
        pprint(vars(config))
    # Make checkpoint directory.
    run_dir = os.path.join("runs", config.model, config.name)
    checkpoint_dir = os.path.join(run_dir, "checkpoint")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Create tensorboard logger.
    # Only rank 0 writes logs; other ranks get None.
    vdl = LogWriter(os.path.join(run_dir, "logs")) \
        if rank == 0 else None
    # Configurate device
    place = fluid.CUDAPlace(rank) if use_gpu else fluid.CPUPlace()
    with dg.guard(place):
        # Fix random seed.
        seed = config.seed
        random.seed(seed)
        np.random.seed(seed)
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        print("Random Seed: ", seed)
        # Build model.
        model = WaveFlow(config, checkpoint_dir, parallel, rank, nranks, vdl)
        iteration = model.build()
        while iteration < config.max_iterations:
            # Run one single training step.
            model.train_step(iteration)
            iteration += 1
            if iteration % config.test_every == 0:
                # Run validation step.
                model.valid_step(iteration)
            if rank == 0 and iteration % config.save_every == 0:
                # Save parameters.
                model.save(iteration)
    # Close TensorBoard.
    if rank == 0:
        vdl.close()
if __name__ == "__main__":
    # Create parser.
    parser = argparse.ArgumentParser(description="Train WaveFlow model")
    #formatter_class='default_argparse')
    # Command-line options plus yaml-config options share one parser.
    add_options_to_parser(parser)
    utils.add_config_options_to_parser(parser)
    # Parse argument from both command line and yaml config file.
    # For conflicting updates to the same field,
    # the preceding update will be overwritten by the following one.
    config = parser.parse_args()
    config = io.add_yaml_config_to_args(config)
    # Force to use fp32 in model training
    vars(config)["use_fp16"] = False
    train(config)
| [
"os.path.exists",
"argparse.ArgumentParser",
"parakeet.utils.io.add_yaml_config_to_args",
"os.makedirs",
"paddle.fluid.dygraph.guard",
"paddle.fluid.default_startup_program",
"paddle.fluid.dygraph.parallel.Env",
"os.path.join",
"utils.add_config_options_to_parser",
"paddle.fluid.CPUPlace",
"rand... | [((2076, 2123), 'os.path.join', 'os.path.join', (['"""runs"""', 'config.model', 'config.name'], {}), "('runs', config.model, config.name)\n", (2088, 2123), False, 'import os\n'), ((2145, 2180), 'os.path.join', 'os.path.join', (['run_dir', '"""checkpoint"""'], {}), "(run_dir, 'checkpoint')\n", (2157, 2180), False, 'import os\n'), ((3460, 3519), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train WaveFlow model"""'}), "(description='Train WaveFlow model')\n", (3483, 3519), False, 'import argparse\n'), ((3599, 3641), 'utils.add_config_options_to_parser', 'utils.add_config_options_to_parser', (['parser'], {}), '(parser)\n', (3633, 3641), False, 'import utils\n'), ((3874, 3908), 'parakeet.utils.io.add_yaml_config_to_args', 'io.add_yaml_config_to_args', (['config'], {}), '(config)\n', (3900, 3908), False, 'from parakeet.utils import io\n'), ((1845, 1862), 'paddle.fluid.dygraph.parallel.Env', 'dg.parallel.Env', ([], {}), '()\n', (1860, 1862), True, 'import paddle.fluid.dygraph as dg\n'), ((1887, 1904), 'paddle.fluid.dygraph.parallel.Env', 'dg.parallel.Env', ([], {}), '()\n', (1902, 1904), True, 'import paddle.fluid.dygraph as dg\n'), ((2192, 2222), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2206, 2222), False, 'import os\n'), ((2232, 2259), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2243, 2259), False, 'import os\n'), ((2418, 2439), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['rank'], {}), '(rank)\n', (2433, 2439), False, 'from paddle import fluid\n'), ((2456, 2472), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2470, 2472), False, 'from paddle import fluid\n'), ((2483, 2498), 'paddle.fluid.dygraph.guard', 'dg.guard', (['place'], {}), '(place)\n', (2491, 2498), True, 'import paddle.fluid.dygraph as dg\n'), ((2562, 2579), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2573, 2579), False, 'import random\n'), ((2588, 2608), 
'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2602, 2608), True, 'import numpy as np\n'), ((2801, 2862), 'waveflow.WaveFlow', 'WaveFlow', (['config', 'checkpoint_dir', 'parallel', 'rank', 'nranks', 'vdl'], {}), '(config, checkpoint_dir, parallel, rank, nranks, vdl)\n', (2809, 2862), False, 'from waveflow import WaveFlow\n'), ((2314, 2343), 'os.path.join', 'os.path.join', (['run_dir', '"""logs"""'], {}), "(run_dir, 'logs')\n", (2326, 2343), False, 'import os\n'), ((2617, 2648), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (2646, 2648), False, 'from paddle import fluid\n'), ((2676, 2704), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (2702, 2704), False, 'from paddle import fluid\n')] |
#!/usr/bin/env python3
import torch
import os
import dgl
import torch.utils.data
import numpy as np
import networkx as nx
from glob import glob
import dgl.function as fn
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# changed configuration to this instead of argparse for easier interaction
CUDA = True
SEED = 1
BATCH_SIZE = 1
LOG_INTERVAL = 10
EPOCHS = 100
NSITES = 20 #number of sites
GDIM = 1521
HDIM1 = 1014
HDIM2 = 507
ZDIMS = 169
# Device handle and RNG seeding. `pin_memory=True` lets DataLoader batches
# be copied to the GPU faster (kwargs are passed to the loaders below).
cuda = torch.device('cuda')
torch.manual_seed(SEED)
if CUDA:
    torch.cuda.manual_seed(SEED)
kwargs = {'num_workers': 1, 'pin_memory': True} if CUDA else {}
#create customized dataset
class CustomDataset(Dataset):
    """Dataset of adjacency matrices stored as .npy files.

    Expects the layout <data_root>/<label>/<file>.npy; each item is
    returned as a DGL graph built from the stored 39x39 adjacency
    matrix, together with its (string) folder label.
    """
    def __init__(self, data_root):
        self.samples = []
        for label in os.listdir(data_root):
            label_dir = os.path.join(data_root, label)
            for fname in glob(os.path.join(label_dir, '*.npy')):
                self.samples.append((label, fname))
        print('data root: %s' % data_root)
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        label, name = self.samples[idx]
        print('label is %s' % label)
        print('name is %s' % name)
        # Rebuild the flattened adjacency matrix as a 39x39 array and
        # turn it into a networkx multigraph.
        flat = np.load(name)
        adj = np.reshape(flat, (39, 39), order='C')
        derived = nx.nx.convert.to_networkx_graph(adj, create_using=nx.MultiGraph)
        container = nx.MultiGraph()
        container.add_nodes_from(derived.nodes)
        container.add_edges_from(derived.edges)
        # Convert the networkx graph into a DGL graph.
        graph = dgl.DGLGraph()
        graph.from_networkx(container)
        return graph, label
def collate(samples):
    """Collate a list of (graph, label) pairs into a batched DGL graph
    and a float tensor of labels (labels must be numeric strings)."""
    graphs, labels = map(list, zip(*samples))
    batched = dgl.batch(graphs)
    label_arr = np.asarray(labels, dtype='float')
    return batched, torch.Tensor(label_arr)
# DGL message function: copy each source node's feature 'h' into the
# outgoing message field 'm' (consumed by `reduce` below... see GCN.forward).
msg = fn.copy_src(src='h', out='m')
def reduce(nodes):
    """Reduce step of the message passing: overwrite each node's
    feature 'h' with the mean of the messages 'm' in its mailbox."""
    mean_msg = torch.mean(nodes.mailbox['m'], 1)
    return {'h': mean_msg}
class NodeApplyModule(nn.Module):
    """Node-update module: h_v <- activation(W h_v + b)."""
    def __init__(self, in_feats, out_feats, activation):
        super(NodeApplyModule, self).__init__()
        self.linear = nn.Linear(in_feats, out_feats)
        self.activation = activation
    def forward(self, node):
        # Apply the affine map followed by the activation in one pass.
        transformed = self.activation(self.linear(node.data['h']))
        return {'h': transformed}
class GCN(nn.Module):
    """One graph-convolution layer: mean-aggregate neighbour features
    (via the module-level `msg`/`reduce` functions) and then apply a
    linear + activation node update (`NodeApplyModule`)."""
    def __init__(self, in_feats, out_feats, activation):
        super(GCN, self).__init__()
        self.apply_mod = NodeApplyModule(in_feats, out_feats, activation)
    def forward(self, g, feature):
        # Initialize the node features with h.
        g.ndata['h'] = feature
        # Message passing: copy 'h' along edges, mean-reduce into 'h'.
        g.update_all(msg, reduce)
        g.apply_nodes(func=self.apply_mod)
        # Detach 'h' from the graph and return it as the layer output.
        return g.ndata.pop('h')
#vae using gcn
class VAE(nn.Module):
    """Graph variational autoencoder built from GCN layers.

    NOTE(review): `forward` iterates `self.layers`, which is never
    defined in `__init__`, so calling the model raises AttributeError.
    NOTE(review): `encoder`/`decoder` call the GCN layers with a single
    argument, but `GCN.forward` takes `(g, feature)` — these paths look
    incomplete; confirm the intended wiring before use.
    NOTE(review): `F.linear` expects (input, weight); using it as a
    one-argument activation for fc31/fc32 would fail — confirm.
    """
    def __init__(self, g_dim, h_dim1, h_dim2, z_dim, n_classes):
        # NOTE(review): `n_classes` is accepted but never used.
        super(VAE, self).__init__()
        # encoder
        self.fc1 = GCN(g_dim, h_dim1, F.relu)
        self.fc2 = GCN(h_dim1, h_dim2, F.relu)
        self.fc31 = GCN(h_dim2, z_dim, F.linear) #mu
        self.fc32 = GCN(h_dim2, z_dim, F.linear) #logvar
        # decoder
        self.fc4 = GCN(z_dim, h_dim2, F.relu)
        self.fc5 = GCN(h_dim2, h_dim1, F.relu)
        self.fc6 = GCN(h_dim1, g_dim, F.sigmoid)
    def encoder(self, g):
        h = self.fc1(g)
        h = self.fc2(h)
        return self.fc31(h), self.fc32(h) # mu, log_var
    def sampling(self, mu, log_var):
        # Reparameterization trick: z = mu + sigma * eps.
        std = torch.exp(0.5*log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu) # return z sample
    def decoder(self, z):
        h = self.fc4(z)
        h = self.fc5(h)
        return self.fc6(h)
    def forward(self, g):
        # Use in-degrees as the initial scalar node feature.
        h = g.in_degrees().view(-1, 1).float()
        for conv in self.layers:
            h = conv(g, h)
        g.ndata['h'] = h
        # Graph-level readout: mean over node features.
        hg = dgl.mean_nodes(g, 'h')
        mu, log_var = self.encoder(hg)
        z = self.sampling(mu, log_var)
        return self.decoder(z), mu, log_var
# Instantiate the model from the module-level dimension constants.
# (n_classes is accepted but unused by VAE.__init__.)
model = VAE(g_dim=GDIM, h_dim1= HDIM1, h_dim2=HDIM2, z_dim=ZDIMS, n_classes=NSITES)
if CUDA:
    model.cuda()
def loss_function(recon_g, g, mu, log_var):
    """VAE objective: summed binary cross-entropy reconstruction term
    (target flattened to rows of 1521 features) plus the analytic KL
    divergence of N(mu, sigma^2) from N(0, 1)."""
    recon_term = F.binary_cross_entropy(recon_g, g.view(-1, 1521), reduction='sum')
    kl_term = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
    return recon_term + kl_term
# Adam optimizer over all model parameters.
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Build datasets/loaders; only the training set is shuffled, and
# `collate` batches the DGL graphs (kwargs pin memory for the GPU).
train_dir = './data01/train/'
test_dir = './data01/test/'
trainset = CustomDataset(train_dir)
testset = CustomDataset(test_dir)
train_loader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate, **kwargs)
test_loader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=collate, **kwargs)
#train and test
def train(epoch):
    """Run one optimization pass over the training set, printing
    progress every 100 batches and the epoch-average loss at the end."""
    model.train()
    running_loss = 0
    for step, (batch, _) in enumerate(train_loader):
        batch = batch.to(cuda)
        optimizer.zero_grad()
        recon, mu, log_var = model(batch)
        batch_loss = loss_function(recon, batch, mu, log_var)
        batch_loss.backward()
        running_loss += batch_loss.item()
        optimizer.step()
        if step % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(batch), len(train_loader.dataset),
                100. * step / len(train_loader), batch_loss.item() / len(batch)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, running_loss / len(train_loader.dataset)))
def test(epoch):
    """Evaluate the model on the held-out set (no gradients) and print
    the mean per-sample loss."""
    model.eval()
    total = 0
    with torch.no_grad():
        for batch, _ in test_loader:
            batch = batch.to(cuda)
            recon, mu, log_var = model(batch)
            total += loss_function(recon, batch, mu, log_var).item()
    avg = total / len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(avg))
if __name__ == "__main__":
    # Train/evaluate for EPOCHS epochs; after each epoch draw a random
    # latent sample and decode it as a quick generation sanity check.
    for epoch in range(1, EPOCHS + 1):
        train(epoch)
        test(epoch)
        sample = torch.randn(BATCH_SIZE, ZDIMS)
        with torch.no_grad():
            sample = sample.to(cuda)
            # BUG FIX: the model's method is `decoder`, not `decode`;
            # the original call raised AttributeError.
            sample = model.decoder(sample).cpu()
            #save_image(sample.data.view(BATCH_SIZE, 2, 39, 39),
            #           '/home/lussier/fMRI_VQ_VAE/results/practice/dglsample_' + str(epoch) + '.png')
| [
"torch.exp",
"dgl.mean_nodes",
"os.listdir",
"numpy.reshape",
"torch.mean",
"dgl.function.copy_src",
"networkx.nx.convert.to_networkx_graph",
"numpy.asarray",
"networkx.MultiGraph",
"dgl.DGLGraph",
"torch.randn",
"torch.Tensor",
"torch.randn_like",
"torch.device",
"torch.manual_seed",
... | [((566, 586), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (578, 586), False, 'import torch\n'), ((587, 610), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (604, 610), False, 'import torch\n'), ((2233, 2262), 'dgl.function.copy_src', 'fn.copy_src', ([], {'src': '"""h"""', 'out': '"""m"""'}), "(src='h', out='m')\n", (2244, 2262), True, 'import dgl.function as fn\n'), ((5109, 5201), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'collate_fn': 'collate'}), '(trainset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=\n collate, **kwargs)\n', (5119, 5201), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5211, 5303), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(False)', 'collate_fn': 'collate'}), '(testset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=\n collate, **kwargs)\n', (5221, 5303), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((624, 652), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (646, 652), False, 'import torch\n'), ((2081, 2098), 'dgl.batch', 'dgl.batch', (['graphs'], {}), '(graphs)\n', (2090, 2098), False, 'import dgl\n'), ((2110, 2143), 'numpy.asarray', 'np.asarray', (['labels'], {'dtype': '"""float"""'}), "(labels, dtype='float')\n", (2120, 2143), True, 'import numpy as np\n'), ((2411, 2444), 'torch.mean', 'torch.mean', (["nodes.mailbox['m']", '(1)'], {}), "(nodes.mailbox['m'], 1)\n", (2421, 2444), False, 'import torch\n'), ((944, 965), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (954, 965), False, 'import os\n'), ((1436, 1451), 'networkx.MultiGraph', 'nx.MultiGraph', ([], {}), '()\n', (1449, 1451), True, 'import networkx as nx\n'), ((1511, 1524), 'numpy.load', 'np.load', (['name'], {}), '(name)\n', (1518, 1524), True, 'import numpy as np\n'), ((1581, 1615), 'numpy.reshape', 'np.reshape', 
(['a', '(39, 39)'], {'order': '"""C"""'}), "(a, (39, 39), order='C')\n", (1591, 1615), True, 'import numpy as np\n'), ((1692, 1754), 'networkx.nx.convert.to_networkx_graph', 'nx.nx.convert.to_networkx_graph', (['b'], {'create_using': 'nx.MultiGraph'}), '(b, create_using=nx.MultiGraph)\n', (1723, 1754), True, 'import networkx as nx\n'), ((1883, 1897), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (1895, 1897), False, 'import dgl\n'), ((2170, 2190), 'torch.Tensor', 'torch.Tensor', (['labels'], {}), '(labels)\n', (2182, 2190), False, 'import torch\n'), ((2686, 2716), 'torch.nn.Linear', 'nn.Linear', (['in_feats', 'out_feats'], {}), '(in_feats, out_feats)\n', (2695, 2716), False, 'from torch import nn, optim\n'), ((4017, 4041), 'torch.exp', 'torch.exp', (['(0.5 * log_var)'], {}), '(0.5 * log_var)\n', (4026, 4041), False, 'import torch\n'), ((4054, 4075), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (4070, 4075), False, 'import torch\n'), ((4417, 4439), 'dgl.mean_nodes', 'dgl.mean_nodes', (['g', '"""h"""'], {}), "(g, 'h')\n", (4431, 4439), False, 'import dgl\n'), ((6130, 6145), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6143, 6145), False, 'import torch\n'), ((6579, 6609), 'torch.randn', 'torch.randn', (['BATCH_SIZE', 'ZDIMS'], {}), '(BATCH_SIZE, ZDIMS)\n', (6590, 6609), False, 'import torch\n'), ((1011, 1041), 'os.path.join', 'os.path.join', (['data_root', 'label'], {}), '(data_root, label)\n', (1023, 1041), False, 'import os\n'), ((6623, 6638), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6636, 6638), False, 'import torch\n'), ((1076, 1112), 'os.path.join', 'os.path.join', (['labels_folder', '"""*.npy"""'], {}), "(labels_folder, '*.npy')\n", (1088, 1112), False, 'import os\n')] |
import tempfile
import gzip
import shutil
import warnings
from itertools import chain
import numpy as np
from heparchy.data.event import ShowerData
from heparchy.utils import structure_pmu
class HepMC:
    """Returns an iterator over events in the given HepMC file.
    Event data is provided as a `heparchy.data.ShowerData` object.
    Parameters
    ----------
    path : string
        Location of the HepMC file.
        If this file is compressed with gzip, a temporary decompressed
        file will be created and cleaned up within the scope of the
        context manager.
    See also
    --------
    `heparchy.data.ShowerData` : Container for Monte-Carlo shower data.
    Examples
    --------
    >>> with HepMC('test.hepmc.gz') as hep_f:
    ...     for event in hep_f:
    ...         print(event.pmu)
    [( 0.        ,  0.        ,  6.49999993e+03, 6.50000000e+03)
     ( 1.42635177, -1.2172366 ,  1.36418624e+03, 1.36418753e+03)
     ( 0.21473153, -0.31874408,  1.04370539e+01, 1.04441276e+01) ...
     (-2.40079685,  1.56211274,  2.78633756e+00, 3.99596030e+00)
     (-0.34612959,  0.37377605,  5.25064994e-01, 7.31578753e-01)
     (-0.00765114,  0.00780012, -8.58527843e-03, 1.38956379e-02)]
    [( ...
    Notes
    -----
    If you wish to keep a given event in memory, you must use
    `ShowerData`'s `copy()` method.
    This is because the `HepMC` iterator avoids the substantial cost of
    repeated object instantiation by re-using a single `ShowerData`
    instance, with its efficient setter methods to update the data it
    contains.
    """
    import pyhepmc_ng as __hepmc
    import networkx as __nx
    def __init__(self, path):
        from typicle import Types
        self.__types = Types()
        self.path = path
        self.__gunz_f = None
        self.data = ShowerData.empty()
    # context manager
    def __enter__(self):
        try:
            self.__buffer = self.__hepmc.open(self.path, 'r')
        except UnicodeDecodeError:
            # Input is gzip-compressed: decompress into a temporary file
            # that lives for the duration of the context manager.
            # (The redundant bare `except: raise` clause was removed.)
            self.__gunz_f = tempfile.NamedTemporaryFile()
            with gzip.open(self.path, 'rb') as gz_f:
                shutil.copyfileobj(gz_f, self.__gunz_f)
            self.__buffer = self.__hepmc.open(self.__gunz_f.name, 'r')
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.__buffer.close()
        if self.__gunz_f is not None:
            self.__gunz_f.close()
    # iterable
    def __iter__(self):
        self.__iter = iter(self.__buffer)
        return self
    def __next__(self):
        # make contents of file available everywhere
        self.__content = next(self.__iter)
        # refresh the shared ShowerData instance in place (see Notes in
        # the class docstring regarding copy())
        self.data.flush_cache()
        (self.data.edges,
         self.data.pmu,
         self.data.pdg,
         self.data.final) = self.__pcl_data()
        return self.data
    def __pcl_data(self):
        """Extract edge indices, momenta, PDG ids and final-state flags
        for every particle in the current event."""
        pcls = self.__content.particles
        def node_id(obj):
            # IDIOM FIX: named def instead of a lambda assignment (E731).
            return int(obj.id)
        def pcl_data(pcl):
            edge_idxs = [pcl.production_vertex, pcl.end_vertex]
            # A missing vertex means the particle is a root/leaf of the
            # shower graph: fall back to the particle's own id.
            # (identity check `is not None` replaces `!= None`)
            edge_idxs = tuple(node_id(vtx) if vtx is not None
                              else node_id(pcl)
                              for vtx in edge_idxs)
            pmu, pdg, status = tuple(pcl.momentum), pcl.pid, pcl.status
            return edge_idxs, pmu, pdg, status
        edges, pmus, pdgs, statuses = zip(*map(pcl_data, pcls))
        edges = np.fromiter(chain.from_iterable(edges), dtype=self.__types.int)
        edges = edges.reshape((-1, 2))
        pmu = np.array(list(pmus), dtype=self.__types.pmu[0][1])
        pmu = structure_pmu(pmu)
        pdg = np.fromiter(pdgs, dtype=self.__types.int)
        # HepMC status code 1 marks final-state (leaf) particles.
        is_leaf = np.fromiter(
            map(lambda status: status == 1, statuses),
            dtype=self.__types.bool
        )
        return edges, pmu, pdg, is_leaf
| [
"heparchy.data.event.ShowerData.empty",
"numpy.fromiter",
"shutil.copyfileobj",
"gzip.open",
"heparchy.utils.structure_pmu",
"itertools.chain.from_iterable",
"typicle.Types",
"tempfile.NamedTemporaryFile"
] | [((1731, 1738), 'typicle.Types', 'Types', ([], {}), '()\n', (1736, 1738), False, 'from typicle import Types\n'), ((1813, 1831), 'heparchy.data.event.ShowerData.empty', 'ShowerData.empty', ([], {}), '()\n', (1829, 1831), False, 'from heparchy.data.event import ShowerData\n'), ((3619, 3637), 'heparchy.utils.structure_pmu', 'structure_pmu', (['pmu'], {}), '(pmu)\n', (3632, 3637), False, 'from heparchy.utils import structure_pmu\n'), ((3652, 3693), 'numpy.fromiter', 'np.fromiter', (['pdgs'], {'dtype': 'self.__types.int'}), '(pdgs, dtype=self.__types.int)\n', (3663, 3693), True, 'import numpy as np\n'), ((3449, 3475), 'itertools.chain.from_iterable', 'chain.from_iterable', (['edges'], {}), '(edges)\n', (3468, 3475), False, 'from itertools import chain\n'), ((2017, 2046), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2044, 2046), False, 'import tempfile\n'), ((2064, 2090), 'gzip.open', 'gzip.open', (['self.path', '"""rb"""'], {}), "(self.path, 'rb')\n", (2073, 2090), False, 'import gzip\n'), ((2116, 2155), 'shutil.copyfileobj', 'shutil.copyfileobj', (['gz_f', 'self.__gunz_f'], {}), '(gz_f, self.__gunz_f)\n', (2134, 2155), False, 'import shutil\n')] |
import matplotlib.pyplot as plt
import numpy as np
from SMYLEutils import colormap_utils as mycolors
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.util import add_cyclic_point
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import matplotlib.ticker as mticker
from matplotlib.colors import BoundaryNorm
def contourmap_bothoceans_robinson_pos(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                                       x1, x2, y1, y2, labels=True, cmap="blue2red",
                                       fontsize=15, centrallon=0):
    """ plot a contour map of 2D data dat with coordinates lon and lat
        on a Robinson projection
    Input:
      fig = the figure identifier
      dat = the data to be plotted
      lon = the longitude coordinate
      lat = the latitude coordinate
      ci = the contour interval
      cmin = the minimum of the contour range
      cmax = the maximum of the contour range
      titlestr = the title of the map
      x1 = position of the left edge
      x2 = position of the right edge
      y1 = position of the bottom edge
      y2 = position of the top edge
      labels = True/False (currently unused in this routine)
      cmap = "blue2red", "precip", or any matplotlib colormap
      fontsize = title font size
      centrallon = central longitude of the projection
    Returns the map axes.
    """
    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    elif (cmap == "precip"):
        mymap = mycolors.precip_cmap(nlevs)
    else:
        # BUG FIX: previously any other value of `cmap` left `mymap`
        # undefined and raised NameError; fall back to the given colormap
        # (consistent with map_contourf_global_subplot below).
        mymap = cmap
    ax = fig.add_axes([x1, y1, x2-x1, y2-y1], projection=ccrs.Robinson(central_longitude=centrallon))
    ax.set_aspect('auto')
    ax.add_feature(cfeature.COASTLINE)
    ax.set_title(titlestr, fontsize=fontsize)
    # add a cyclic (wraparound) longitude so there is no seam at 0/360
    dat, lon = add_cyclic_point(dat, coord=lon)
    ax.contourf(lon, lat, dat, levels=clevs, cmap = mymap, extend="max", transform=ccrs.PlateCarree())
    ax.set_global()
    return ax
def map_contourf_global_subplot(fig, dat, lon, lat, ci, cmin, cmax, titlestr, leftstr, rightstr,
 nrow,ncol,subplot, proj, labels=True, showland=True, extend='neither', grid="latlon", cmap="blue2red", fontsize=15):
    """ plot a contour map of 2D data dat with coordinates lon and lat
    Input:
      fig = the figure identifier
      dat = the data to be plotted
      lon = the longitude coordinate
      lat = the latitude coordinate
      ci = the contour interval
      cmin = the minimum of the contour range
      cmax = the maximum of the contour range
      titlestr = the (center) title of the map
      leftstr = title text placed at the top-left of the axes
      rightstr = title text placed at the top-right of the axes
      nrow = number of rows in multipanel plot
      ncol = number of columns in multipanel plot
      subplot = subplot number
      proj = cartopy map projection to use
      labels = True/False (currently unused in this routine)
      showland = True/False (if False, land is filled in grey)
      extend = contourf 'extend' option ('neither','min','max','both')
      grid = ('latlon','camfv','pop')
      cmap = "blue2red", "precip", or any matplotlib colormap
    Returns the axes and the contour set (for adding a colorbar).
    """
    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
        #mymap.set_over('pink')
        #mymap.set_under('cyan')
    elif (cmap == "precip"):
        mymap = mycolors.precip_cmap(nlevs)
    else:
        # any other value is assumed to be a usable matplotlib colormap
        mymap = cmap
    ax = fig.add_subplot(nrow,ncol,subplot, projection=proj)
    ax.set_aspect('auto')
    ax.set_title(titlestr, fontsize=fontsize)
    ax.set_title(leftstr, fontsize=fontsize,loc='left')
    ax.set_title(rightstr, fontsize=fontsize,loc='right')
    if showland:
        ax.add_feature(cfeature.COASTLINE)
    else:
        ax.add_feature(cfeature.LAND, edgecolor='black',linewidth=0.1, facecolor='grey', zorder=1)
    if grid=="latlon" or grid=="camfv":
        # add a cyclic longitude so there is no seam at 0/360
        dat, lon = add_cyclic_point(dat, coord=lon)
        cntr = ax.contourf(lon, lat, dat,
                 levels=clevs,
                 cmap = mymap, extend=extend, transform=ccrs.PlateCarree())
    elif grid=="pop":
        # regularize the POP tripole ocean grid before plotting
        lon, lat, dat = adjust_pop_grid(lon, lat, dat)
        cntr = ax.contourf(lon, lat, dat,
                 levels=clevs,
                 cmap = mymap, extend=extend, transform=ccrs.PlateCarree())
    else:
        raise ValueError('ERROR: unknown grid')
    ax.set_global()
    return ax, cntr
def adjust_pop_grid(tlon, tlat, field):
    """Regularize a POP tripole ocean grid for plotting.

    Wraps the longitudes into a monotonic range, duplicates the grid
    cyclically, recenters it, and appends a cyclic column to lon, lat
    and field so the plotted map has no seam. The hard-coded fixups
    guarded by `ni == 320` apply to the standard gx1v POP grid.
    Returns (lon, lat, field).
    """
    nj = tlon.shape[0]
    ni = tlon.shape[1]
    left = int(ni / 2 - 1)
    right = int(left + ni)
    # wrap longitudes at or above the first-column minimum down by 360
    col_min = tlon[:, 0].min()
    tlon = np.where(tlon >= col_min, tlon - 360., tlon)
    lon = np.concatenate((tlon, tlon + 360.), 1)[:, left:right]
    if ni == 320:
        lon[367:-3, 0] = lon[367:-3, 0] + 360.
    lon = lon - 360.
    lon = np.hstack((lon, lon[:, 0:1] + 360.))
    if ni == 320:
        lon[367:, -1] = lon[367:, -1] - 360.
    #-- trick cartopy into doing the right thing:
    #   it gets confused when the cyclic coords are identical
    lon[:, 0] = lon[:, 0] - 1e-8
    #-- periodicity
    lat = np.concatenate((tlat, tlat), 1)[:, left:right]
    lat = np.hstack((lat, lat[:, 0:1]))
    field = np.ma.concatenate((field, field), 1)[:, left:right]
    field = np.ma.hstack((field, field[:, 0:1]))
    return lon, lat, field
def map_pcolor_global_subplot(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                              nrow, ncol, subplot, proj, labels=True, showland=True,
                              grid="latlon", cmap="blue2red", facecolor="white",
                              fontsize=15, centrallon=0, tricontour=False, cutoff=0.5):
    """ pcolormesh map of 2D data dat with coordinates lon and lat
    Input:
      fig = the figure identifier
      dat = the data to be plotted
      lon = the longitude coordinate
      lat = the latitude coordinate
      ci = the color level interval
      cmin = the minimum of the color range
      cmax = the maximum of the color range
      titlestr = the title of the map
      nrow = number of rows in multipanel plot
      ncol = number of columns in multipanel plot
      subplot = subplot number
      proj = cartopy map projection to use
      labels = True/False (currently unused in this routine)
      showland = True/False (if False, land is filled in grey)
      grid = ('latlon','camfv','camse','pop')
      cmap = "blue2red", "precip", "blue2red_acc", or a matplotlib colormap name
      facecolor = background color of the axes
      centrallon, tricontour = accepted for API compatibility (unused here)
      cutoff = cutoff value passed to the "blue2red_acc" colormap
    Returns the axes and the mesh object (for adding a colorbar).
    """
    # set up discrete color levels and the color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        cmap = mycolors.blue2red_cmap(nlevs)
    elif (cmap == "precip"):
        cmap = mycolors.precip_cmap(nlevs)
    elif (cmap == "blue2red_acc"):
        cmap = mycolors.blue2red_acc_cmap(clevs,cutoff)
    else:
        # BUG FIX: this branch called `mpl.cm.get_cmap(cmap)` but `mpl`
        # is never imported in this module (only matplotlib.pyplot as
        # plt), so any other colormap name raised NameError.
        cmap = plt.get_cmap(cmap)
    norm = BoundaryNorm(clevs, ncolors=cmap.N, clip=True)
    ax = fig.add_subplot(nrow,ncol,subplot, projection=proj)
    ax.set_aspect('auto')
    ax.set_title(titlestr, fontsize=fontsize)
    ax.set_facecolor(facecolor)
    if showland:
        ax.add_feature(cfeature.COASTLINE)
    else:
        ax.add_feature(cfeature.LAND, edgecolor='black',linewidth=0.1, facecolor='grey', zorder=1)
    if grid=="latlon" or grid=="camfv":
        dat, lon = add_cyclic_point(dat, coord=lon)
        cntr = ax.pcolormesh(lon, lat, dat, shading='nearest',vmin=clevs.min(),vmax=clevs.max(), cmap = cmap, norm=norm, rasterized=True, transform=ccrs.PlateCarree())
    elif grid=="camse":
        # NOTE(review): `get_refined_triang` is not defined in this
        # module; confirm it is provided elsewhere before using "camse".
        tri, z = get_refined_triang(lon,lat, dat)
        cntr = ax.tripcolor(tri, z, shading='flat',vmin=clevs.min(),vmax=clevs.max(), cmap = cmap, norm=norm, rasterized=True, transform=ccrs.PlateCarree())
    elif grid=="pop":
        lon, lat, dat = adjust_pop_grid(lon, lat, dat)
        cntr = ax.pcolormesh(lon, lat, dat, shading='nearest',vmin=clevs.min(),vmax=clevs.max(), cmap = cmap, norm=norm, rasterized=True, transform=ccrs.PlateCarree())
    else:
        raise ValueError('ERROR: unknown grid')
    # ax.set_global()
    return ax,cntr
def map_pvalsig_global_subplot(axis, pvals, lon, lat, siglvl,
        facecolor='none', edgecolor='k', s=10, marker="."):
    """Overlay stippling on an existing map axis.

    Scatters a marker at every grid point whose `pvals` value exceeds
    `siglvl`; all other points are set to NaN so nothing is drawn there.
    Input:
      axis = the axes to draw on
      pvals = 2D array of p-values on the (lat, lon) grid
      lon, lat = 1D coordinate arrays
      siglvl = threshold; dots are drawn where pvals > siglvl
    """
    lon2d, lat2d = np.meshgrid(lon, lat)
    mask = pvals > siglvl
    sig_lon = np.where(mask, lon2d, np.nan)
    sig_lat = np.where(mask, lat2d, np.nan)
    axis.scatter(sig_lon, sig_lat, facecolor=facecolor, edgecolor=edgecolor, s=s, marker=marker)
| [
"numpy.hstack",
"numpy.where",
"SMYLEutils.colormap_utils.blue2red_cmap",
"cartopy.crs.PlateCarree",
"numpy.ma.hstack",
"matplotlib.colors.BoundaryNorm",
"numpy.concatenate",
"SMYLEutils.colormap_utils.blue2red_acc_cmap",
"numpy.meshgrid",
"numpy.ma.concatenate",
"cartopy.util.add_cyclic_point",... | [((1391, 1421), 'numpy.arange', 'np.arange', (['cmin', '(cmax + ci)', 'ci'], {}), '(cmin, cmax + ci, ci)\n', (1400, 1421), True, 'import numpy as np\n'), ((1799, 1831), 'cartopy.util.add_cyclic_point', 'add_cyclic_point', (['dat'], {'coord': 'lon'}), '(dat, coord=lon)\n', (1815, 1831), False, 'from cartopy.util import add_cyclic_point\n'), ((3196, 3226), 'numpy.arange', 'np.arange', (['cmin', '(cmax + ci)', 'ci'], {}), '(cmin, cmax + ci, ci)\n', (3205, 3226), True, 'import numpy as np\n'), ((4769, 4808), 'numpy.concatenate', 'np.concatenate', (['(tlon, tlon + 360.0)', '(1)'], {}), '((tlon, tlon + 360.0), 1)\n', (4783, 4808), True, 'import numpy as np\n'), ((4920, 4957), 'numpy.hstack', 'np.hstack', (['(lon, lon[:, 0:1] + 360.0)'], {}), '((lon, lon[:, 0:1] + 360.0))\n', (4929, 4957), True, 'import numpy as np\n'), ((5192, 5223), 'numpy.concatenate', 'np.concatenate', (['(tlat, tlat)', '(1)'], {}), '((tlat, tlat), 1)\n', (5206, 5223), True, 'import numpy as np\n'), ((5255, 5284), 'numpy.hstack', 'np.hstack', (['(lat, lat[:, 0:1])'], {}), '((lat, lat[:, 0:1]))\n', (5264, 5284), True, 'import numpy as np\n'), ((5296, 5332), 'numpy.ma.concatenate', 'np.ma.concatenate', (['(field, field)', '(1)'], {}), '((field, field), 1)\n', (5313, 5332), True, 'import numpy as np\n'), ((5370, 5406), 'numpy.ma.hstack', 'np.ma.hstack', (['(field, field[:, 0:1])'], {}), '((field, field[:, 0:1]))\n', (5382, 5406), True, 'import numpy as np\n'), ((6774, 6804), 'numpy.arange', 'np.arange', (['cmin', '(cmax + ci)', 'ci'], {}), '(cmin, cmax + ci, ci)\n', (6783, 6804), True, 'import numpy as np\n'), ((7169, 7215), 'matplotlib.colors.BoundaryNorm', 'BoundaryNorm', (['clevs'], {'ncolors': 'cmap.N', 'clip': '(True)'}), '(clevs, ncolors=cmap.N, clip=True)\n', (7181, 7215), False, 'from matplotlib.colors import BoundaryNorm\n'), ((8933, 8954), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (8944, 8954), True, 'import numpy as np\n'), 
((8968, 9007), 'numpy.where', 'np.where', (['(pvals > siglvl)', 'lon2d', 'np.nan'], {}), '(pvals > siglvl, lon2d, np.nan)\n', (8976, 9007), True, 'import numpy as np\n'), ((9017, 9056), 'numpy.where', 'np.where', (['(pvals > siglvl)', 'lat2d', 'np.nan'], {}), '(pvals > siglvl, lat2d, np.nan)\n', (9025, 9056), True, 'import numpy as np\n'), ((1466, 1495), 'SMYLEutils.colormap_utils.blue2red_cmap', 'mycolors.blue2red_cmap', (['nlevs'], {}), '(nlevs)\n', (1488, 1495), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((1540, 1567), 'SMYLEutils.colormap_utils.precip_cmap', 'mycolors.precip_cmap', (['nlevs'], {}), '(nlevs)\n', (1560, 1567), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((3271, 3300), 'SMYLEutils.colormap_utils.blue2red_cmap', 'mycolors.blue2red_cmap', (['nlevs'], {}), '(nlevs)\n', (3293, 3300), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((3957, 3989), 'cartopy.util.add_cyclic_point', 'add_cyclic_point', (['dat'], {'coord': 'lon'}), '(dat, coord=lon)\n', (3973, 3989), False, 'from cartopy.util import add_cyclic_point\n'), ((6848, 6877), 'SMYLEutils.colormap_utils.blue2red_cmap', 'mycolors.blue2red_cmap', (['nlevs'], {}), '(nlevs)\n', (6870, 6877), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((7612, 7644), 'cartopy.util.add_cyclic_point', 'add_cyclic_point', (['dat'], {'coord': 'lon'}), '(dat, coord=lon)\n', (7628, 7644), False, 'from cartopy.util import add_cyclic_point\n'), ((1626, 1669), 'cartopy.crs.Robinson', 'ccrs.Robinson', ([], {'central_longitude': 'centrallon'}), '(central_longitude=centrallon)\n', (1639, 1669), True, 'import cartopy.crs as ccrs\n'), ((1915, 1933), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1931, 1933), True, 'import cartopy.crs as ccrs\n'), ((3411, 3438), 'SMYLEutils.colormap_utils.precip_cmap', 'mycolors.precip_cmap', (['nlevs'], {}), '(nlevs)\n', (3431, 3438), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((6987, 7014), 
'SMYLEutils.colormap_utils.precip_cmap', 'mycolors.precip_cmap', (['nlevs'], {}), '(nlevs)\n', (7007, 7014), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((4150, 4168), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4166, 4168), True, 'import cartopy.crs as ccrs\n'), ((7065, 7106), 'SMYLEutils.colormap_utils.blue2red_acc_cmap', 'mycolors.blue2red_acc_cmap', (['clevs', 'cutoff'], {}), '(clevs, cutoff)\n', (7091, 7106), True, 'from SMYLEutils import colormap_utils as mycolors\n'), ((7794, 7812), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7810, 7812), True, 'import cartopy.crs as ccrs\n'), ((4417, 4435), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4433, 4435), True, 'import cartopy.crs as ccrs\n'), ((8035, 8053), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8051, 8053), True, 'import cartopy.crs as ccrs\n'), ((8289, 8307), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8305, 8307), True, 'import cartopy.crs as ccrs\n')] |
import numpy as np
import scipy.constants as const
import scipy.interpolate as si
import sys
import voigt
"""
This file contains functions related to atmospheric extinction.
extinction(): Function to calculate the extinction coefficients.
resamp(): Function to resample an array at different values.
downsamp(): Function to downsample an array to a lower resolution.
"""
def extinction(nu, P, T, molmass, gf, Elow, dia, dias, Z, atm, osamp,
               molind, ratio):
    """
    This function calculates the extinction coefficients.
    Inputs
    ------
    nu     : float. Central wavenumber of the line (in cm-1).
    P      : array of floats. Pressure of layer in the atmosphere in cgs units.
    T      : array of floats. Temperature of layer in the atmosphere.
    molmass: float. Mass of the line-producing species.
    gf     : float. Weighted oscillator strength of the line.
    Z      : array of floats. Partition function.
    Elow   : float. Energy of the lower state.
    dia    : float. Diameter of the molecule.
    dias   : array. Diameters of each molecule in the atmosphere.
    atm    : array. Contains information of layer in atmospheric file. Format
             is [[abundance1, molarmass1], [abundance2, molarmass2], etc]
    osamp  : int. Oversampling factor for the Voigt profile.
    molind : int. Index of line-producing molecule in `atm` array.
             NOTE(review): not referenced in the body of this function.
    ratio  : float. Ratio of isotope of line-producing molecule.
    Outputs
    -------
    K    : array of floats. Broadened opacity spectrum.
    nurng: array of floats. Wavenumbers associated with `K`.
    """
    # Constants converted from SI to CGS
    c = const.c * 100.
    e = const.e * 10. * const.c #Coulomb -> statC conv
    me = const.m_e * 1000.
    R = const.R * 1e7  # NOTE(review): defined but not used below
    h = const.h * 1e7
    k = const.k * 1e7
    amu = const.physical_constants['atomic mass constant'][0] * 1000.
    Nava = const.N_A
    # Molecule information:
    mass = molmass * amu # mass in g
    nd = P / (k * T) # number density in cm-3
    # Gaussian HWHM: Goody (1995), Cubillos et al. BART paper eqn 21
    alpha = nu / c * (2. * np.log(2.) * k * T / mass)**0.5
    # Lorentzian HWHM: Goody (1995), Cubillos et al. BART paper eqn 22
    # (collisional broadening summed over all species in the layer)
    gamma = np.sum((dia/2. + dias/2.)**2 * \
                   (2. * k * T / np.pi)**0.5 / c * \
                   nd * atm[:,0] * \
                   (1./mass + 1./(atm[:,1] * amu))**0.5)
    # Generate range of values to calculate the Voigt profile at:
    # a window of +/- 20 half-widths around the line center, using the
    # larger of the two HWHMs.
    if (alpha >= gamma):
        nurng = np.linspace(nu - 20.*alpha, nu + 20.*alpha, num=osamp)
    elif (gamma > alpha):
        nurng = np.linspace(nu - 20.*gamma, nu + 20.*gamma, num=osamp)
    # Calculate the broadened opacity, Cubillos et al. BART paper eqns 19, 20
    K = const.pi * e**2 / c**2 / me * \
        nd * ratio * \
        gf / Z * np.exp(-h*c*Elow/k/T) * \
        (1 - np.exp(-h*c*nu/k/T)) * \
        voigt.V(nurng, alpha, gamma, shift=nu)
    # Divide out the density
    K /= (P * molmass / k / T / Nava)
    return K, nurng
def resamp(K, nurng, nuspec, shift=False):
    """
    Resample the opacity computed by extinction() onto the wavenumber
    grid of the desired output spectrum.
    Inputs
    ------
    K     : array of floats. Broadened opacity spectrum.
    nurng : array of floats. Wavenumbers associated with `K`.
    nuspec: array of floats. Wavenumbers of the desired output spectrum.
    shift : float (default False, i.e. 0). Numeric wavenumber offset
            subtracted from `nurng` before interpolation; can be used to
            move the line peak onto a `nuspec` grid point.
    Outputs
    -------
    Kspec: array of floats. `K` resampled to correspond to `nuspec`.
    """
    # DOC FIX: scipy's interp1d defaults to *linear* interpolation (the
    # old comment claimed a cubic spline). Points outside the (shifted)
    # `nurng` range evaluate to 0 rather than raising.
    opa = si.interp1d(nurng - shift, K, bounds_error=False, fill_value=0)
    return opa(nuspec)
def downsamp(K, nurng, nuspec):
    """
    Downsample the opacity computed by extinction() to a coarser grid by
    summing each fine-grid value into the output bin it falls in.
    Inputs
    ------
    K     : array of floats. Broadened opacity spectrum.
    nurng : array of floats. Wavenumbers associated with `K`.
    nuspec: array of floats. Wavenumbers of the desired output spectrum.
    Outputs
    -------
    Kspec: array of floats. `K` resampled to correspond to `nuspec`.
    """
    Kspec = np.zeros(nuspec.shape)
    # Bin index of each fine-grid wavenumber within the coarse grid;
    # values outside the coarse grid (index 0 or len) are dropped.
    bin_inds = np.digitize(nurng, nuspec)
    for val, wn in zip(K, bin_inds):
        if 0 < wn < len(nuspec):
            Kspec[wn] += val
    return Kspec
| [
"voigt.V",
"numpy.digitize",
"numpy.log",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.linspace"
] | [((2288, 2429), 'numpy.sum', 'np.sum', (['((dia / 2.0 + dias / 2.0) ** 2 * (2.0 * k * T / np.pi) ** 0.5 / c * nd *\n atm[:, 0] * (1.0 / mass + 1.0 / (atm[:, 1] * amu)) ** 0.5)'], {}), '((dia / 2.0 + dias / 2.0) ** 2 * (2.0 * k * T / np.pi) ** 0.5 / c *\n nd * atm[:, 0] * (1.0 / mass + 1.0 / (atm[:, 1] * amu)) ** 0.5)\n', (2294, 2429), True, 'import numpy as np\n'), ((4042, 4105), 'scipy.interpolate.interp1d', 'si.interp1d', (['(nurng - shift)', 'K'], {'bounds_error': '(False)', 'fill_value': '(0)'}), '(nurng - shift, K, bounds_error=False, fill_value=0)\n', (4053, 4105), True, 'import scipy.interpolate as si\n'), ((4679, 4701), 'numpy.zeros', 'np.zeros', (['nuspec.shape'], {}), '(nuspec.shape)\n', (4687, 4701), True, 'import numpy as np\n'), ((4714, 4740), 'numpy.digitize', 'np.digitize', (['nurng', 'nuspec'], {}), '(nurng, nuspec)\n', (4725, 4740), True, 'import numpy as np\n'), ((2643, 2703), 'numpy.linspace', 'np.linspace', (['(nu - 20.0 * alpha)', '(nu + 20.0 * alpha)'], {'num': 'osamp'}), '(nu - 20.0 * alpha, nu + 20.0 * alpha, num=osamp)\n', (2654, 2703), True, 'import numpy as np\n'), ((3103, 3141), 'voigt.V', 'voigt.V', (['nurng', 'alpha', 'gamma'], {'shift': 'nu'}), '(nurng, alpha, gamma, shift=nu)\n', (3110, 3141), False, 'import voigt\n'), ((2741, 2801), 'numpy.linspace', 'np.linspace', (['(nu - 20.0 * gamma)', '(nu + 20.0 * gamma)'], {'num': 'osamp'}), '(nu - 20.0 * gamma, nu + 20.0 * gamma, num=osamp)\n', (2752, 2801), True, 'import numpy as np\n'), ((3002, 3031), 'numpy.exp', 'np.exp', (['(-h * c * Elow / k / T)'], {}), '(-h * c * Elow / k / T)\n', (3008, 3031), True, 'import numpy as np\n'), ((3053, 3080), 'numpy.exp', 'np.exp', (['(-h * c * nu / k / T)'], {}), '(-h * c * nu / k / T)\n', (3059, 3080), True, 'import numpy as np\n'), ((2172, 2183), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2178, 2183), True, 'import numpy as np\n')] |
"""
@Author : TeJas.Lotankar
Description:
------------
Various filters to apply on image for applying changes
- Blur filter
- Edge Filter
-
//TODO Add More filters as per necessity
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
import streamlit as st
from PIL import Image
def blur_filter_img(img_obj, kernel_size=5):
	"""
	Description:
	------------
	Blur filter (normalized box filter).
	Blurs the edges and grains in the image, can be used for reducing noise.
	Params:
	-------
	img_obj : numpy.ndarray
		image array to be processed (the image itself, not a file path)
	kernel_size : int (Default : 5)
		size of the square kernel to use for applying filter on the image.
	Returns:
	--------
	Processed image (numpy.ndarray, same shape as input)
	"""
	# Box filter: each output pixel is the mean of the
	# kernel_size x kernel_size neighbourhood (cv2.blur normalizes the kernel).
	img = img_obj
	kernel = (kernel_size, kernel_size)
	filteredImg = cv2.blur(img, kernel)
	return filteredImg
#--------------------------------------------
# def laplacian_filter_img(img_obj, kernel_size=7):
# """
# Description:
# ------------
# Edge detection filter, laplacian filter used after Guassian blur
# Params:
# -------
# img_path : str
# path to the image to be processed
# kernel_size : int (Default : 7)
# size of the kernel to use for applying filter on the image.
# Returns:
# --------
# Processed image
# """
# #Edge detection filter, laplacian filter used after Guassian blur
# img = img_obj
# kernel = (kernel_size, kernel_size)
# blur = cv2.GaussianBlur(img,kernel,0)
# filteredImg = cv2.Laplacian(blur,cv2.CV_64F, ksize=kernel_size)
# filteredImg = ((filteredImg/filteredImg.max())*255).astype(np.uint8)
# return filteredImg
def canny_edge_filter_img(img_obj, minVal=100, maxVal=200):
	"""
	Description:
	------------
	Edge detection filter, Canny Edge filter is used.
	Params:
	-------
	img_obj : numpy.ndarray
		image array to be processed (the image itself, not a file path)
	minVal : int (Default : 100)
		Minimum threshold value for the Canny hysteresis step.
	maxVal : int (Default : 200)
		Maximum threshold value for the Canny hysteresis step.
	Returns:
	--------
	Processed image (single-channel edge map from cv2.Canny)
	"""
	filteredImg = cv2.Canny(img_obj, minVal, maxVal)
	return filteredImg
def filtering_app():
	"""Streamlit page: upload an image and apply the selected filter.

	Flow: header/description -> file uploader -> filter selector ->
	per-filter parameter sliders -> filtered result display.
	"""
	st.header("Filtering Techniques")
	st.markdown("> An image filter is a technique through which size, colors, \
	shading and other characteristics of an image are altered. \
	An image filter is used to transform the image using different graphical editing techniques.")
	st.markdown("---")
	inp_img = st.file_uploader("Upload your image, (jpg, png)", type=['jpg','png', 'jpeg'])
	if inp_img:
		img_pil = Image.open(inp_img)
		# The filter functions operate on the numpy view of the PIL image.
		img_cv2 = np.array(img_pil)
		st.image(img_pil, caption = "Original Image", use_column_width=True)
		st.markdown("---")
		filter_type = st.selectbox("Select Filter Type",
			("Blur_Filter", "Canny_Edge_Filter")) #, "Laplacian_Filter"))
		if filter_type == "Blur_Filter":
			kernel_size = st.slider("kernel Size", 1, 11, 5, 1)
			out_img = blur_filter_img(img_cv2, kernel_size)
			out_img_pil = Image.fromarray(out_img)
			st.subheader("Filtered Image : Blur/Box Filter")
			st.image(out_img_pil, caption = "Image after Filtering", use_column_width=True)
		# Laplacian filter option is currently disabled:
		# elif filter_type == "Laplacian_Filter":
		# 	st.info("Kernel value should be odd")
		# 	kernel_size = st.slider("kernel Size", 1, 11, 7, 1)
		# 	out_img = edge_filter_img(img_cv2, kernel_size)
		# 	out_img_pil = Image.fromarray(out_img)
		# 	st.subheader("Filtered Image : Laplacian Filter")
		# 	st.image(out_img_pil, caption = "Image after Filtering", use_column_width=True)
		elif filter_type == "Canny_Edge_Filter":
			minVal = st.slider("Minimum Thresold Value", 1, 300, 100, 1)
			maxVal = st.slider("Maximum Thresold Value", 1, 300, 200, 1)
			out_img = canny_edge_filter_img(img_cv2, minVal, maxVal)
			out_img_pil = Image.fromarray(out_img)
			st.subheader("Filtered Image : Canny Edge Filter")
			st.image(out_img_pil, caption = "Image after Filtering", use_column_width=True)
"streamlit.markdown",
"PIL.Image.open",
"streamlit.image",
"PIL.Image.fromarray",
"streamlit.file_uploader",
"streamlit.slider",
"numpy.array",
"streamlit.subheader",
"streamlit.selectbox",
"cv2.Canny",
"streamlit.header",
"cv2.blur"
] | [((818, 839), 'cv2.blur', 'cv2.blur', (['img', 'kernel'], {}), '(img, kernel)\n', (826, 839), False, 'import cv2\n'), ((2025, 2059), 'cv2.Canny', 'cv2.Canny', (['img_obj', 'minVal', 'maxVal'], {}), '(img_obj, minVal, maxVal)\n', (2034, 2059), False, 'import cv2\n'), ((2106, 2139), 'streamlit.header', 'st.header', (['"""Filtering Techniques"""'], {}), "('Filtering Techniques')\n", (2115, 2139), True, 'import streamlit as st\n'), ((2141, 2386), 'streamlit.markdown', 'st.markdown', (['"""> An image filter is a technique through which size, colors, \t\tshading and other characteristics of an image are altered. \t\tAn image filter is used to transform the image using different graphical editing techniques."""'], {}), "(\n '> An image filter is a technique through which size, colors, \\t\\tshading and other characteristics of an image are altered. \\t\\tAn image filter is used to transform the image using different graphical editing techniques.'\n )\n", (2152, 2386), True, 'import streamlit as st\n'), ((2378, 2396), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (2389, 2396), True, 'import streamlit as st\n'), ((2409, 2487), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your image, (jpg, png)"""'], {'type': "['jpg', 'png', 'jpeg']"}), "('Upload your image, (jpg, png)', type=['jpg', 'png', 'jpeg'])\n", (2425, 2487), True, 'import streamlit as st\n'), ((2513, 2532), 'PIL.Image.open', 'Image.open', (['inp_img'], {}), '(inp_img)\n', (2523, 2532), False, 'from PIL import Image\n'), ((2545, 2562), 'numpy.array', 'np.array', (['img_pil'], {}), '(img_pil)\n', (2553, 2562), True, 'import numpy as np\n'), ((2568, 2634), 'streamlit.image', 'st.image', (['img_pil'], {'caption': '"""Original Image"""', 'use_column_width': '(True)'}), "(img_pil, caption='Original Image', use_column_width=True)\n", (2576, 2634), True, 'import streamlit as st\n'), ((2639, 2657), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (2650, 2657), 
True, 'import streamlit as st\n'), ((2675, 2747), 'streamlit.selectbox', 'st.selectbox', (['"""Select Filter Type"""', "('Blur_Filter', 'Canny_Edge_Filter')"], {}), "('Select Filter Type', ('Blur_Filter', 'Canny_Edge_Filter'))\n", (2687, 2747), True, 'import streamlit as st\n'), ((2828, 2865), 'streamlit.slider', 'st.slider', (['"""kernel Size"""', '(1)', '(11)', '(5)', '(1)'], {}), "('kernel Size', 1, 11, 5, 1)\n", (2837, 2865), True, 'import streamlit as st\n'), ((2934, 2958), 'PIL.Image.fromarray', 'Image.fromarray', (['out_img'], {}), '(out_img)\n', (2949, 2958), False, 'from PIL import Image\n'), ((2966, 3014), 'streamlit.subheader', 'st.subheader', (['"""Filtered Image : Blur/Box Filter"""'], {}), "('Filtered Image : Blur/Box Filter')\n", (2978, 3014), True, 'import streamlit as st\n'), ((3018, 3095), 'streamlit.image', 'st.image', (['out_img_pil'], {'caption': '"""Image after Filtering"""', 'use_column_width': '(True)'}), "(out_img_pil, caption='Image after Filtering', use_column_width=True)\n", (3026, 3095), True, 'import streamlit as st\n'), ((3540, 3591), 'streamlit.slider', 'st.slider', (['"""Minimum Thresold Value"""', '(1)', '(300)', '(100)', '(1)'], {}), "('Minimum Thresold Value', 1, 300, 100, 1)\n", (3549, 3591), True, 'import streamlit as st\n'), ((3604, 3655), 'streamlit.slider', 'st.slider', (['"""Maximum Thresold Value"""', '(1)', '(300)', '(200)', '(1)'], {}), "('Maximum Thresold Value', 1, 300, 200, 1)\n", (3613, 3655), True, 'import streamlit as st\n'), ((3733, 3757), 'PIL.Image.fromarray', 'Image.fromarray', (['out_img'], {}), '(out_img)\n', (3748, 3757), False, 'from PIL import Image\n'), ((3765, 3815), 'streamlit.subheader', 'st.subheader', (['"""Filtered Image : Canny Edge Filter"""'], {}), "('Filtered Image : Canny Edge Filter')\n", (3777, 3815), True, 'import streamlit as st\n'), ((3819, 3896), 'streamlit.image', 'st.image', (['out_img_pil'], {'caption': '"""Image after Filtering"""', 'use_column_width': '(True)'}), "(out_img_pil, 
caption='Image after Filtering', use_column_width=True)\n", (3827, 3896), True, 'import streamlit as st\n')] |
# -*- coding: utf-8 -*-
"""
"""
# import standard libraries
import os
import subprocess
from itertools import product
from math import ceil
# import third-party libraries
import numpy as np
import cv2
from colour import RGB_to_RGB, RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_xyY
# import my libraries
import test_pattern_generator2 as tpg
import ty_utility as util
import font_control as fc
import color_space as cs
import transfer_functions as tf
import plot_utility as pu
from create_gamut_booundary_lut import make_jzazbz_gb_lut_fname_method_c,\
TyLchLut
from jzazbz import jzczhz_to_jzazbz
from ty_utility import add_suffix_to_filename
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2021 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def modify_to_n_times(x, n):
    """Round *x* up to the nearest multiple of *n*.

    Parameters
    ----------
    x : int
        non-negative value to round up.
    n : int
        positive multiple to round to.

    Returns
    -------
    int
        smallest multiple of ``n`` that is >= ``x``.
    """
    # Integer ceiling division avoids the float round-off that
    # int(ceil(x / n)) suffers for very large x (x / n is a binary float).
    return -(-x // n) * n
def dot_mesh_pattern(
        width_org=1920, height_org=1080, dot_size=2, color=[1, 1, 0]):
    """
    Create a dot-mesh (checkerboard-like) test pattern and save it twice:
    once as a plain 16 bit PNG and once with an ICC profile embedded via
    ImageMagick's ``convert`` command.

    Parameters
    ----------
    width_org : int
        output image width in pixels.
    height_org : int
        output image height in pixels.
    dot_size : int
        dot size control; the horizontal period is 2**(dot_size-1) px and
        the vertical period is dot_size px (identical for dot_size 1 and 2).
    color : list
        RGB mask of the bright dots, e.g. [1, 1, 0]. The list is only read,
        so the mutable default is harmless here.
    """
    # Work on a canvas that is a multiple of (dot_size * 2) in both
    # directions so the boolean masks tile evenly; crop back at the end.
    width = modify_to_n_times(width_org, dot_size * 2)
    height = modify_to_n_times(height_org, dot_size * 2)
    img = np.ones((height, width, 3)) * np.array(color)
    fname = f"./img/{width_org}x{height_org}_dotsize-{dot_size}_rgbmask-"
    fname += f"{color[0]}{color[1]}{color[2]}.png"
    # One scanline: True marks pixels to black out on the "even" rows.
    zero_idx_h = ((np.arange(width) // (2**(dot_size-1))) % 2) == 0
    # Stack dot_size copies of the scanline, then dot_size inverted copies,
    # and tile that 2*dot_size-row unit over the full canvas height.
    idx_even = np.hstack([zero_idx_h for x in range(dot_size)])
    idx_odd = np.hstack([~zero_idx_h for x in range(dot_size)])
    idx_even_odd = np.hstack([idx_even, idx_odd])
    idx_all_line = np.tile(
        idx_even_odd, height//(2 * dot_size)).reshape(height, width)
    img[idx_all_line] = 0
    img = img[:height_org, :width_org]  # crop to the requested size
    print(fname)
    tpg.img_wirte_float_as_16bit_int(fname, img)
    # Embed the Gamma2.4 / DCI-P3 / D65 ICC profile with ImageMagick.
    fname_icc = util.add_suffix_to_filename(fname=fname, suffix="_with_icc")
    icc_profile = './icc_profile/Gamma2.4_DCI-P3_D65.icc'
    cmd = ['convert', fname, '-profile', icc_profile, fname_icc]
    subprocess.run(cmd)
def create_dot_pattern():
    """Render a dot-mesh pattern for every panel / dot-size / color combo."""
    panel_sizes = [[2778, 1284], [2532, 1170]]
    dot_sizes = [1, 2]
    colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]
    for (panel_w, panel_h), dot, rgb in product(panel_sizes, dot_sizes, colors):
        dot_mesh_pattern(
            width_org=panel_w, height_org=panel_h, dot_size=dot, color=rgb)
def create_abl_check_pattern(width_panel=2778, height_panel=1284):
    """
    Render an image sequence (60 fps x 8 s) for checking ABL
    (Automatic Brightness Limiter) behaviour: a centered white rectangle
    whose area grows from 0% to 100% following a half sine period, with
    the current area percentage drawn in the corner.

    Parameters
    ----------
    width_panel : int
        physical panel width in pixels (controls aspect ratio and naming).
    height_panel : int
        physical panel height in pixels.
    """
    fps = 60
    sec = 8
    frame = fps * sec
    width_total = 1920
    height_total = 1080
    # Full-size white window keeps the panel's aspect ratio.
    width = width_total
    height = int(round(height_panel/width_panel * width))
    for idx in range(frame):
        # rate sweeps 0 -> 1 over the whole sequence (shifted sine).
        rate = (np.sin(np.pi/(frame - 1)*idx - np.pi/2) + 1) / 2
        img = np.zeros((height_total, width_total, 3))
        local_width = int(round(width * rate))
        local_height = int(round(height * rate))
        st_pos = (
            (width_total//2) - (local_width//2),
            (height_total//2) - (local_height//2))
        ed_pos = (st_pos[0]+local_width, st_pos[1]+local_height)
        cv2.rectangle(img, st_pos, ed_pos, (1.0, 1.0, 1.0), -1)
        # Rectangle area relative to the full-size window, in percent.
        percent = (local_width * local_height)\
            / (width * height) * 100
        text_drawer = fc.TextDrawer(
            img, text=f"{percent:.02f}%",
            pos=(int(width_total*0.04), int(width_total*0.08)),
            font_color=(0.25, 0.25, 0.25), font_size=30)
        text_drawer.draw()
        fname = "/work/overuse/2021/00_iphone_movie/img_seq/"
        fname += f"iPhone_abl_{width_panel}x{height_panel}_{idx:04d}.png"
        print(fname)
        tpg.img_wirte_float_as_16bit_int(fname, img)
def create_patch_specific_area(
        panel_width=2778, panel_height=1284, area_rate=20.0,
        color_st2084=[0.8, 0.5, 0.2], luminance=1000, src_cs=cs.BT709):
    """
    Save a 1920x1080 ST2084 image with one centered square color patch whose
    area is ``area_rate`` percent of a virtual panel with the given aspect
    ratio. The patch's xyY value is annotated on the image.

    Parameters
    ----------
    panel_width, panel_height : int
        target panel resolution (used only for the aspect ratio / naming).
    area_rate : float
        patch area as a percentage of the virtual panel area.
    color_st2084 : list
        RGB code value (0..1, ST2084 encoded); only read, so the mutable
        default is harmless here.
    luminance : int
        peak luminance the color is scaled to [nits].
    src_cs : str
        color space name used for the xyY calculation and the file name.
    """
    width = 1920
    height = 1080
    tf_str = tf.ST2084
    img = np.zeros((height, width, 3))
    # Virtual panel: same height as the canvas, width from the panel aspect.
    width_vertual = panel_width/panel_height*height
    height_vertual = height
    block_size = int(
        round((area_rate/100 * width_vertual * height_vertual) ** 0.5))
    st_pos = ((width//2) - (block_size//2), (height//2) - (block_size//2))
    ed_pos = (st_pos[0]+block_size, st_pos[1]+block_size)
    color_with_lumiannce = calc_linear_color_from_primary(
        color=color_st2084, luminance=luminance)
    cv2.rectangle(img, st_pos, ed_pos, color_with_lumiannce, -1)
    # Chromaticity of the patch, for the on-image annotation.
    large_xyz = RGB_to_XYZ(
        color_with_lumiannce, cs.D65, cs.D65,
        RGB_COLOURSPACES[src_cs].matrix_RGB_to_XYZ)
    xyY = XYZ_to_xyY(large_xyz)
    img = tf.oetf(np.clip(img, 0.0, 1.0), tf_str)
    text = f"for_{panel_width}x{panel_height}, {src_cs}, "
    text += f"xyY=({xyY[0]:.03f}, "
    text += f"{xyY[1]:.03f}, {xyY[2]*10000:.1f})"
    text_drawer = fc.TextDrawer(
        img, text=text, pos=(10, 10),
        font_color=(0.25, 0.25, 0.25), font_size=20)
    text_drawer.draw()
    fname = f"./img/iPhone13_color_patch_for_{panel_width}x{panel_height}_"
    fname += f"{src_cs}_"
    fname += f"rgb_{color_with_lumiannce[0]:.2f}-"
    fname += f"{color_with_lumiannce[1]:.2f}-"
    fname += f"{color_with_lumiannce[2]:.2f}_{tf_str}_{luminance}-nits.png"
    print(fname)
    tpg.img_wirte_float_as_16bit_int(fname, img)
def calc_linear_color_from_primary(color=[1, 0, 0], luminance=1000):
    """Decode an ST2084 code value to linear light scaled to *luminance*.

    Only the SMPTE ST2084 transfer characteristic is supported.
    """
    linear = tf.eotf(np.array(color), tf.ST2084)
    normalized = linear / tf.PEAK_LUMINANCE[tf.ST2084]
    return normalized * luminance
def create_iphone_13_primary_patch(area_rate=0.4*100):
    """Emit primary/white patch images for every panel, luminance and gamut."""
    colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]
    panels = [[2778, 1284], [2532, 1170]]
    luminances = [100, 1000, 4000, 10000]
    gamuts = [cs.BT709, cs.P3_D65, cs.BT2020]
    for (p_width, p_height), lumi, rgb, gamut in product(
            panels, luminances, colors, gamuts):
        create_patch_specific_area(
            panel_width=p_width, panel_height=p_height, area_rate=area_rate,
            color_st2084=rgb, luminance=lumi, src_cs=gamut)
def conv_img_from_bt2020_to_bt709_using_3x3_matrix():
    """
    Read an ST2084-encoded BT.2020 image, convert its gamut with a linear
    3x3 matrix, re-encode with ST2084 and save with a suffixed file name.

    NOTE(review): despite the function name and the "_bt709" suffix, the
    output colourspace actually passed to RGB_to_RGB is P3-D65 — confirm
    whether the name or the target colourspace is the intended one.
    """
    # in_fname = "./img/bt2020_bt709_hue_chroma_1920x1080_h_num-32.png"
    in_fname = "./img/iPhone13_color_patch_for_2778x1284_P3-D65-on-ITU-R "
    in_fname += "BT.2020_rgb_0.10-0.00-0.00_SMPTE ST2084_1000-nits.png"
    out_fname = util.add_suffix_to_filename(
        fname=in_fname, suffix="_bt709_with_matrix")
    tf_str = tf.ST2084
    img_non_linear = tpg.img_read_as_float(in_fname)
    img_linear_2020 = tf.eotf(img_non_linear, tf_str)
    # Gamut conversion is done in linear light.
    img_linear_709 = RGB_to_RGB(
        RGB=img_linear_2020,
        input_colourspace=RGB_COLOURSPACES[cs.BT2020],
        output_colourspace=RGB_COLOURSPACES[cs.P3_D65])
    # Clip out-of-gamut values before re-encoding.
    img_non_linear_709 = tf.oetf(np.clip(img_linear_709, 0.0, 1.0), tf_str)
    tpg.img_wirte_float_as_16bit_int(out_fname, img_non_linear_709)
def plot_bt2020_vs_dci_p3():
    """
    Plot the BT.2020 and DCI-P3 gamut triangles over the spectral locus,
    zoomed in on the red-primary corner, and save it to ./img/p3_gamut.png.
    """
    bt2020 = tpg.get_primaries(cs.BT2020)[0]
    p3_d65 = tpg.get_primaries(cs.P3_D65)[0]
    cmf_xy = tpg._get_cmfs_xy()
    fig, ax1 = pu.plot_1_graph(
        fontsize=20,
        figsize=(8, 9),
        bg_color=(0.90, 0.90, 0.90),
        graph_title="Chromaticity Diagram",
        graph_title_size=None,
        xlabel="x", ylabel="y",
        axis_label_size=None,
        legend_size=17,
        xlim=[0.65, 0.72],
        ylim=[0.28, 0.34],
        xtick=None,
        ytick=None,
        xtick_size=None, ytick_size=None,
        linewidth=3,
        minor_xtick_num=None,
        minor_ytick_num=None)
    # Spectral locus ...
    ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=4)
    # ... closed by the purple line (last point back to first).
    ax1.plot(
        (cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
        '-k', lw=4)
    ax1.plot(
        bt2020[..., 0], bt2020[..., 1], '-', color=pu.RED, label="BT.2020")
    ax1.plot(
        p3_d65[..., 0], p3_d65[..., 1], '-', color=pu.SKY, label="DCI-P3")
    pu.show_and_save(
        fig=fig, legend_loc='upper right', save_fname="./img/p3_gamut.png")
def create_gray_patch_core(
        width, height, panel_width, panel_height,
        st_pos, ed_pos, cv_float):
    """Draw one gray patch on a black canvas, annotate it with its 10-bit
    code value, and save it as a 16 bit PNG."""
    canvas = np.zeros((height, width, 3))
    gray = [cv_float, cv_float, cv_float]
    cv2.rectangle(canvas, st_pos, ed_pos, gray, -1)
    caption = f"for_{panel_width}x{panel_height}, {cv_float*1023:.0f} CV"
    drawer = fc.TextDrawer(
        canvas, text=caption, pos=(10, 10),
        font_color=(0.25, 0.25, 0.25), font_size=20)
    drawer.draw()
    fname = (
        f"./img/iPhone13_color_patch_for_{panel_width}x{panel_height}_"
        + f"{int(cv_float*1023):04d}-CV.png")
    print(fname)
    tpg.img_wirte_float_as_16bit_int(fname, canvas)
def create_gray_patch(panel_width=2778, panel_height=1284, area_rate=0.4*100):
    """Save one gray patch image per sampled 10-bit code value.

    Samples every 16th code value (plus 1023) and renders each as a
    centered square covering area_rate percent of a virtual panel with
    the given aspect ratio.
    """
    width, height = 1920, 1080
    step_num = 33  # 33 or 65
    stride = 1024 // (step_num - 1)
    # 0, 16, 32, ... 1008, and 1023 as the final value.
    code_values = [min(idx * stride, 1023) for idx in range(step_num)]
    virtual_w = panel_width / panel_height * height
    virtual_h = height
    patch_edge = int(round((area_rate / 100 * virtual_w * virtual_h) ** 0.5))
    st_pos = (width // 2 - patch_edge // 2, height // 2 - patch_edge // 2)
    ed_pos = (st_pos[0] + patch_edge, st_pos[1] + patch_edge)
    for code_value in code_values:
        create_gray_patch_core(
            width=width, height=height,
            panel_width=panel_width, panel_height=panel_height,
            st_pos=st_pos, ed_pos=ed_pos, cv_float=code_value/1023)
def get_rgb_st2084_cv_from_luminance(luminance):
    """Return a gray RGB triplet holding the ST2084 code value of *luminance*."""
    code_value = tf.oetf_from_luminance(luminance, tf.ST2084)
    return np.array([code_value] * 3)
def careate_tone_mapping_check_pattern(bg_luminance_start=600):
    """
    Render a 3840x2160 tone-mapping check pattern: a 4x4 grid of tile
    patterns whose background luminance increases by 100 nits per cell
    (starting at ``bg_luminance_start``) while the foreground tiles stay
    at 10000 nits, everything ST2084 encoded.

    NOTE: the typo in the function name ("careate") is kept because callers
    reference it by this name.

    Parameters
    ----------
    bg_luminance_start : int
        background luminance of the first (top-left) cell [nits].
    """
    g_width = 3840
    g_height = 2160
    b_h_num = 4
    b_v_num = 4
    tile_num = 6
    font_size = 28
    fg_luminance = 10000
    # The tile grid occupies a square of 2/3 of the canvas height.
    height = int(round(g_height / 1.5))
    width = height
    b_width = width // b_h_num
    b_height = height // b_v_num
    # Top-left corner that centers the grid on the canvas.
    comp_st_pos = [
        (g_width // 2) - int(b_width * (b_h_num/2)),
        (g_height // 2) - int(b_height * (b_v_num/2))
    ]
    img = np.zeros((g_height, g_width, 3))
    v_img_buf = []
    for v_idx in range(b_v_num):
        h_img_buf = []
        for h_idx in range(b_h_num):
            idx = v_idx * b_h_num + h_idx
            bg_luminance = bg_luminance_start + 100 * idx
            print(f"bg_luminance = {bg_luminance}")
            b_img_nonlinear = tpg.make_tile_pattern(
                width=b_width, height=b_height,
                h_tile_num=tile_num, v_tile_num=tile_num,
                low_level=get_rgb_st2084_cv_from_luminance(bg_luminance),
                high_level=get_rgb_st2084_cv_from_luminance(fg_luminance),
                dtype=np.float32)
            # Label each cell with its background luminance.
            text = f"bg_luminance = {bg_luminance} nit"
            _, text_height = fc.get_text_width_height(
                text, fc.NOTO_SANS_MONO_BOLD, font_size)
            text_drawer = fc.TextDrawer(
                b_img_nonlinear, text=text,
                pos=(int(text_height*0.2), int(text_height*0.2)),
                font_color=(0, 0, 0),
                font_size=font_size,
                bg_transfer_functions=tf.ST2084,
                fg_transfer_functions=tf.ST2084,
                font_path=fc.NOTO_SANS_MONO_BOLD)
            text_drawer.draw()
            tpg.draw_outline(b_img_nonlinear, np.array([0, 0, 0]), 1)
            h_img_buf.append(b_img_nonlinear)
        v_img_buf.append(np.hstack(h_img_buf))
    tp_img = np.vstack(v_img_buf)
    tpg.merge(img, tp_img, comp_st_pos)
    # Foreground luminance caption just above the grid.
    text = f"fg_luminance = {fg_luminance} nit"
    _, text_height = fc.get_text_width_height(
        text, fc.NOTO_SANS_MONO_BOLD, font_size)
    text_drawer = fc.TextDrawer(
        img, text=text,
        pos=(comp_st_pos[0], comp_st_pos[1] - int(text_height * 1.2)),
        font_color=(0.5, 0.5, 0.5),
        font_size=font_size,
        bg_transfer_functions=tf.ST2084,
        fg_transfer_functions=tf.ST2084,
        font_path=fc.NOTO_SANS_MONO_BOLD)
    text_drawer.draw()
    fname = f"./img/tone_map_tp_{bg_luminance_start}.png"
    tpg.img_wirte_float_as_16bit_int(fname, img)
def create_tone_mapping_check_pattern_all():
    """Render the tone-mapping check pattern for every base luminance.

    Two series (starting at 600 and 100 nits), each stepped by 1600 nits.
    """
    for base in (600, 100):
        for step in range(3):
            careate_tone_mapping_check_pattern(
                bg_luminance_start=base + 1600 * step)
def calc_cusp_rgb_value(hue_num, color_space_name, luminance):
    """
    Return the RGB values of the gamut-boundary cusp for ``hue_num``
    equally spaced Jzazbz hues, looked up from the precomputed
    gamut-boundary LUT for the given color space and peak luminance.
    """
    lut_name = make_jzazbz_gb_lut_fname_method_c(
        color_space_name=color_space_name, luminance=luminance)
    lut = TyLchLut(np.load(lut_name))
    # Hues in degrees, endpoint excluded so 0 and 360 are not duplicated.
    hue_list = np.linspace(0, 360, hue_num, endpoint=False)
    cusp_list = np.zeros((hue_num, 3))
    for h_idx, hue in enumerate(hue_list):
        cusp_list[h_idx] = lut.get_cusp_without_intp(hue)
    # Cusps come back as JzCzhz; convert to Jzazbz, then to RGB.
    jzazbz = jzczhz_to_jzazbz(cusp_list)
    rgb = cs.jzazbz_to_rgb(
        jzazbz=jzazbz, color_space_name=color_space_name)
    return rgb
def create_per_nit_patch_img(
        b_width_array, b_height_array, rgb_array):
    """Tile constant-color patches into one image.

    ``rgb_array`` is indexed as [luminance][hue]; each patch is filled with
    its RGB value, outlined in black, then the grid is stitched together.
    """
    rows = []
    for row_height, row_rgb in zip(b_height_array, rgb_array):
        cells = []
        for cell_width, rgb in zip(b_width_array, row_rgb):
            patch = np.ones((row_height, cell_width, 3)) * rgb
            tpg.draw_outline(patch, [0, 0, 0], 1)
            cells.append(patch)
        rows.append(np.hstack(cells))
    return np.vstack(rows)
def create_cusp_per_nit_image_without_text(
        width=1600, height=1080, color_space_name=cs.P3_D65,
        luminance_list=[108, 643, 1143, 2341, 6470]):
    """Build the cusp-color patch grid (no text) and return it together
    with the per-row heights and per-column widths.

    One row per luminance; the column count follows the width/height
    aspect ratio so the patches stay roughly square.
    """
    row_num = len(luminance_list)
    col_num = int(round(width / height * row_num))
    row_heights = tpg.equal_devision(height, row_num)
    col_widths = tpg.equal_devision(width, col_num)
    rgb_array = np.zeros((row_num, col_num, 3))
    for row_idx, luminance in enumerate(luminance_list):
        rgb_array[row_idx] = calc_cusp_rgb_value(
            col_num, color_space_name, luminance)
    patch_img = create_per_nit_patch_img(
        b_width_array=col_widths, b_height_array=row_heights,
        rgb_array=rgb_array)
    return patch_img, row_heights, col_widths
def font_v_size_determinater(font, text="sample", limit_height=48):
    """Largest font size (searching down from 100) whose rendered *text*
    is shorter than *limit_height* pixels."""
    chosen = 100
    for candidate in range(100, -1, -1):
        chosen = candidate
        _, text_height = fc.get_text_width_height(
            text=text, font_path=font, font_size=candidate)
        if text_height < limit_height:
            break
    return chosen
def font_h_size_determinater(font, text="sample", limit_width=48):
    """Largest font size (searching down from 100) whose rendered *text*
    is narrower than *limit_width* pixels."""
    chosen = 100
    for candidate in range(100, -1, -1):
        chosen = candidate
        text_width, _ = fc.get_text_width_height(
            text=text, font_path=font, font_size=candidate)
        if text_width < limit_width:
            break
    return chosen
def font_size_determinater(font, text, limit_width, limit_heigt):
    """Pick the largest font size fitting both the width and height limits.

    Returns (font_size, rendered_width, rendered_height). The parameter
    name ``limit_heigt`` (sic) is kept for caller compatibility.
    """
    size_for_height = font_v_size_determinater(
        font=font, text=text, limit_height=limit_heigt)
    size_for_width = font_h_size_determinater(
        font=font, text=text, limit_width=limit_width)
    font_size = min(size_for_height, size_for_width)
    width, height = fc.get_text_width_height(
        text=text, font_path=font, font_size=font_size)
    return font_size, width, height
def calc_luminance_list(min_lumi=100, max_lumi=2000):
    """ST2084 luminances of every 16th 10-bit code value (final value is
    1023 instead of 1024), keeping only those in [min_lumi, max_lumi]."""
    code_values = list(range(0, 1024, 16)) + [1023]
    luminances = (
        int(round(tf.eotf_to_luminance(cv / 1023, tf.ST2084)))
        for cv in code_values)
    return [lumi for lumi in luminances if min_lumi <= lumi <= max_lumi]
def add_luminance_text_information(
        img, luminance_list, height_list, v_idx, font_size, font, font_height,
        font_color):
    """Draw a " Peak Luminance ... nits " label vertically centered in the
    v_idx-th row of the patch grid (drawn in place on *img*)."""
    row_top = int(np.sum(height_list[:v_idx]))
    centering = height_list[v_idx] // 2 - font_height // 2
    label = f" Peak Luminance {luminance_list[v_idx]:5d} nits "
    drawer = fc.TextDrawer(
        img=img, text=label, pos=[0, row_top + centering],
        font_color=font_color, font_size=font_size,
        bg_transfer_functions=tf.ST2084,
        fg_transfer_functions=tf.ST2084, font_path=font)
    drawer.draw()
def cusp_per_nit_pattern(min_lumi=100, max_lumi=2000, width=1920, height=1080):
    """
    Render the "Jzazbz Cusp Pattern": a grid of gamut-boundary cusp colors
    (one row per sampled peak luminance) on the right side of the canvas,
    with per-row luminance labels on the left and an information footer at
    the bottom. Output is ST2084 encoded and saved under ./img/.

    Parameters
    ----------
    min_lumi, max_lumi : int
        luminance range sampled by calc_luminance_list [nits].
    width, height : int
        output image resolution.
    """
    width_rate = 0.8  # fraction of the width used by the patch grid
    bg_luminance = 0.5
    bg_linear = tf.eotf(
        tf.oetf_from_luminance(bg_luminance, tf.ST2084), tf.ST2084)
    text_luminance = 20
    text_cv = tf.oetf_from_luminance(text_luminance, tf.ST2084)
    font_color = (text_cv, text_cv, text_cv)
    total_block_width = int(width * width_rate)
    rate = height // 1080  # integer scale factor relative to 1080p
    font = fc.NOTO_SANS_MONO_REGULAR
    info_font_size = 24 * rate
    _, info_text_height = fc.get_text_width_height(
        text="sample", font_path=font, font_size=info_font_size)
    info_text_margin = int(info_text_height * 0.4)
    info_height = info_text_height + info_text_margin * 2
    total_block_height = height - info_height
    print(f"total_block_height={total_block_height}")
    color_space_name = cs.P3_D65
    font_size = None
    luminance_list = calc_luminance_list(min_lumi=min_lumi, max_lumi=max_lumi)
    print(luminance_list)
    patch_img, height_array, _ = create_cusp_per_nit_image_without_text(
        width=total_block_width, height=total_block_height,
        color_space_name=color_space_name, luminance_list=luminance_list)
    # Size the labels against the widest possible text so every row fits.
    max_text = " Peak Luminance 10000 nits "
    font_size, _, font_height = font_size_determinater(
        font=font, text=max_text,
        limit_width=int((width-total_block_width)*0.9),
        limit_heigt=int(np.min(height_array)*0.9))
    print(f"font_size={font_size}")
    img = np.ones((height, width, 3)) * bg_linear
    # Right-align the patch grid on the canvas.
    tpg.merge(img, patch_img, (width - patch_img.shape[1], 0))
    img_non_linear = tf.oetf(np.clip(img, 0.0, 1.0), tf.ST2084)
    # add luminance information (one label per grid row)
    for v_idx in range(len(height_array)):
        add_luminance_text_information(
            img=img_non_linear, luminance_list=luminance_list,
            height_list=height_array, v_idx=v_idx, font_color=font_color,
            font_size=font_size, font=font, font_height=font_height)
    # add basic text information (footer strip at the bottom)
    info_img = np.zeros((info_height, width, 3))
    text = f" Jzazbz Cusp Pattern, ST2084, {color_space_name}, "
    text += f"{width}x{height}, Revision 2"
    st_pos = [0, info_text_margin]
    font_drawer = fc.TextDrawer(
        img=info_img, text=text, pos=st_pos, font_color=font_color,
        font_size=font_size, bg_transfer_functions=tf.ST2084,
        fg_transfer_functions=tf.ST2084, font_path=font)
    font_drawer.draw()
    tpg.merge(img_non_linear, info_img, [0, height - info_height])
    fname = f"./img/jzazbz_cusp_{width}x{height}_"
    fname += f"{color_space_name}_{min_lumi}-{max_lumi}_nits.png"
    tpg.img_wirte_float_as_16bit_int(fname, img_non_linear)
def add_hard_clipping_core(src_fname, clip_luminance=1000):
    """Clip the source image at *clip_luminance* (ST2084 code-value domain),
    annotate it, and save it under a "_w_clip_<lumi>-nits" suffixed name."""
    out_fname = add_suffix_to_filename(
        fname=src_fname, suffix=f"_w_clip_{clip_luminance}-nits")
    img = tpg.img_read_as_float(src_fname)
    clip_cv = tf.oetf_from_luminance(clip_luminance, tf.ST2084)
    # Hard clip: everything above the code value of clip_luminance.
    img = np.minimum(img, clip_cv)
    label = f"w/ clipping ({clip_luminance} nits)"
    drawer = fc.TextDrawer(
        img=img, text=label, pos=(0, 0), font_color=(0, 0.6, 0.6),
        font_size=60, bg_transfer_functions=tf.ST2084,
        fg_transfer_functions=tf.ST2084, font_path=fc.NOTO_SANS_MONO_BLACK)
    drawer.draw()
    tpg.img_wirte_float_as_16bit_int(out_fname, img)
def add_without_hard_clipping_core(src_fname):
    """Annotate the source image with a "w/o clipping" label and save it
    under a "_wo_clip" suffixed name (no luminance clipping applied)."""
    out_fname = add_suffix_to_filename(fname=src_fname, suffix="_wo_clip")
    img = tpg.img_read_as_float(src_fname)
    drawer = fc.TextDrawer(
        img=img, text="w/o clipping", pos=(0, 0), font_color=(0.0, 0.6, 0.6),
        font_size=60, bg_transfer_functions=tf.ST2084,
        fg_transfer_functions=tf.ST2084, font_path=fc.NOTO_SANS_MONO_BLACK)
    drawer.draw()
    tpg.img_wirte_float_as_16bit_int(out_fname, img)
def add_hard_clipping_to_cusp_pattern():
    """Create clipped (1000/1600/2100 nits) and unclipped variants of the
    4K, 100-10000 nits Jzazbz cusp pattern image."""
    width, height = 3840, 2160
    color_space_name = cs.P3_D65
    min_lumi, max_lumi = 100, 10000
    src_fname = f"./img/jzazbz_cusp_{width}x{height}_"
    src_fname += f"{color_space_name}_{min_lumi}-{max_lumi}_nits.png"
    for clip_luminance in (1000, 1600, 2100):
        add_hard_clipping_core(
            src_fname=src_fname, clip_luminance=clip_luminance)
    add_without_hard_clipping_core(src_fname=src_fname)
if __name__ == '__main__':
    # Run from the script's own directory so the relative "./img/..." and
    # "./icc_profile/..." paths used throughout this module resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # create_dot_pattern()
    # create_abl_check_pattern(width_panel=2778, height_panel=1284)
    # create_abl_check_pattern(width_panel=2532, height_panel=1170)
    # create_patch_specific_area(
    #     panel_width=2778, panel_height=1284, area_rate=0.4*100,
    #     color_linear=[1, 0, 0],
    #     luminance=1000, src_cs=cs.BT709, dst_cs=cs.BT2020, tf_str=tf.ST2084)
    # create_iphone_13_primary_patch(area_rate=0.4*100)
    # conv_img_from_bt2020_to_bt709_using_3x3_matrix()
    # plot_bt2020_vs_dci_p3()
    # create_gray_patch(panel_width=2778, panel_height=1284, area_rate=0.4*100)
    # cusp_per_nit_pattern(width=3840, height=2160, min_lumi=100, max_lumi=1000)
    # cusp_per_nit_pattern(width=3840, height=2160, min_lumi=100, max_lumi=2000)
    # cusp_per_nit_pattern(width=3840, height=2160, min_lumi=100, max_lumi=10000)
    add_hard_clipping_to_cusp_pattern()
| [
"cv2.rectangle",
"numpy.clip",
"jzazbz.jzczhz_to_jzazbz",
"numpy.hstack",
"colour.XYZ_to_xyY",
"font_control.TextDrawer",
"font_control.get_text_width_height",
"transfer_functions.eotf_to_luminance",
"numpy.array",
"color_space.jzazbz_to_rgb",
"numpy.sin",
"plot_utility.plot_1_graph",
"numpy... | [((1615, 1645), 'numpy.hstack', 'np.hstack', (['[idx_even, idx_odd]'], {}), '([idx_even, idx_odd])\n', (1624, 1645), True, 'import numpy as np\n'), ((1831, 1875), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['fname', 'img'], {}), '(fname, img)\n', (1863, 1875), True, 'import test_pattern_generator2 as tpg\n'), ((1893, 1953), 'ty_utility.add_suffix_to_filename', 'util.add_suffix_to_filename', ([], {'fname': 'fname', 'suffix': '"""_with_icc"""'}), "(fname=fname, suffix='_with_icc')\n", (1920, 1953), True, 'import ty_utility as util\n'), ((2081, 2100), 'subprocess.run', 'subprocess.run', (['cmd'], {}), '(cmd)\n', (2095, 2100), False, 'import subprocess\n'), ((2308, 2359), 'itertools.product', 'product', (['resolution_list', 'dot_size_list', 'color_list'], {}), '(resolution_list, dot_size_list, color_list)\n', (2315, 2359), False, 'from itertools import product\n'), ((4040, 4068), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {}), '((height, width, 3))\n', (4048, 4068), True, 'import numpy as np\n'), ((4489, 4549), 'cv2.rectangle', 'cv2.rectangle', (['img', 'st_pos', 'ed_pos', 'color_with_lumiannce', '(-1)'], {}), '(img, st_pos, ed_pos, color_with_lumiannce, -1)\n', (4502, 4549), False, 'import cv2\n'), ((4567, 4664), 'colour.RGB_to_XYZ', 'RGB_to_XYZ', (['color_with_lumiannce', 'cs.D65', 'cs.D65', 'RGB_COLOURSPACES[src_cs].matrix_RGB_to_XYZ'], {}), '(color_with_lumiannce, cs.D65, cs.D65, RGB_COLOURSPACES[src_cs].\n matrix_RGB_to_XYZ)\n', (4577, 4664), False, 'from colour import RGB_to_RGB, RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_xyY\n'), ((4687, 4708), 'colour.XYZ_to_xyY', 'XYZ_to_xyY', (['large_xyz'], {}), '(large_xyz)\n', (4697, 4708), False, 'from colour import RGB_to_RGB, RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_xyY\n'), ((4924, 5016), 'font_control.TextDrawer', 'fc.TextDrawer', (['img'], {'text': 'text', 'pos': '(10, 10)', 'font_color': '(0.25, 0.25, 0.25)', 'font_size': '(20)'}), '(img, text=text, 
pos=(10, 10), font_color=(0.25, 0.25, 0.25),\n font_size=20)\n', (4937, 5016), True, 'import font_control as fc\n'), ((5351, 5395), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['fname', 'img'], {}), '(fname, img)\n', (5383, 5395), True, 'import test_pattern_generator2 as tpg\n'), ((6001, 6066), 'itertools.product', 'product', (['resolution_list', 'luminance_list', 'color_list', 'src_cs_list'], {}), '(resolution_list, luminance_list, color_list, src_cs_list)\n', (6008, 6066), False, 'from itertools import product\n'), ((6610, 6682), 'ty_utility.add_suffix_to_filename', 'util.add_suffix_to_filename', ([], {'fname': 'in_fname', 'suffix': '"""_bt709_with_matrix"""'}), "(fname=in_fname, suffix='_bt709_with_matrix')\n", (6637, 6682), True, 'import ty_utility as util\n'), ((6737, 6768), 'test_pattern_generator2.img_read_as_float', 'tpg.img_read_as_float', (['in_fname'], {}), '(in_fname)\n', (6758, 6768), True, 'import test_pattern_generator2 as tpg\n'), ((6791, 6822), 'transfer_functions.eotf', 'tf.eotf', (['img_non_linear', 'tf_str'], {}), '(img_non_linear, tf_str)\n', (6798, 6822), True, 'import transfer_functions as tf\n'), ((6844, 6975), 'colour.RGB_to_RGB', 'RGB_to_RGB', ([], {'RGB': 'img_linear_2020', 'input_colourspace': 'RGB_COLOURSPACES[cs.BT2020]', 'output_colourspace': 'RGB_COLOURSPACES[cs.P3_D65]'}), '(RGB=img_linear_2020, input_colourspace=RGB_COLOURSPACES[cs.\n BT2020], output_colourspace=RGB_COLOURSPACES[cs.P3_D65])\n', (6854, 6975), False, 'from colour import RGB_to_RGB, RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_xyY\n'), ((7077, 7140), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['out_fname', 'img_non_linear_709'], {}), '(out_fname, img_non_linear_709)\n', (7109, 7140), True, 'import test_pattern_generator2 as tpg\n'), ((7275, 7293), 'test_pattern_generator2._get_cmfs_xy', 'tpg._get_cmfs_xy', ([], {}), '()\n', (7291, 7293), True, 'import test_pattern_generator2 as 
tpg\n'), ((7310, 7671), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(8, 9)', 'bg_color': '(0.9, 0.9, 0.9)', 'graph_title': '"""Chromaticity Diagram"""', 'graph_title_size': 'None', 'xlabel': '"""x"""', 'ylabel': '"""y"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.65, 0.72]', 'ylim': '[0.28, 0.34]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None'}), "(fontsize=20, figsize=(8, 9), bg_color=(0.9, 0.9, 0.9),\n graph_title='Chromaticity Diagram', graph_title_size=None, xlabel='x',\n ylabel='y', axis_label_size=None, legend_size=17, xlim=[0.65, 0.72],\n ylim=[0.28, 0.34], xtick=None, ytick=None, xtick_size=None, ytick_size=\n None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None)\n", (7325, 7671), True, 'import plot_utility as pu\n'), ((8131, 8220), 'plot_utility.show_and_save', 'pu.show_and_save', ([], {'fig': 'fig', 'legend_loc': '"""upper right"""', 'save_fname': '"""./img/p3_gamut.png"""'}), "(fig=fig, legend_loc='upper right', save_fname=\n './img/p3_gamut.png')\n", (8147, 8220), True, 'import plot_utility as pu\n'), ((8350, 8378), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {}), '((height, width, 3))\n', (8358, 8378), True, 'import numpy as np\n'), ((8426, 8471), 'cv2.rectangle', 'cv2.rectangle', (['img', 'st_pos', 'ed_pos', 'color', '(-1)'], {}), '(img, st_pos, ed_pos, color, -1)\n', (8439, 8471), False, 'import cv2\n'), ((8562, 8654), 'font_control.TextDrawer', 'fc.TextDrawer', (['img'], {'text': 'text', 'pos': '(10, 10)', 'font_color': '(0.25, 0.25, 0.25)', 'font_size': '(20)'}), '(img, text=text, pos=(10, 10), font_color=(0.25, 0.25, 0.25),\n font_size=20)\n', (8575, 8654), True, 'import font_control as fc\n'), ((8837, 8881), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['fname', 'img'], {}), '(fname, img)\n', (8869, 8881), True, 
'import test_pattern_generator2 as tpg\n'), ((9725, 9769), 'transfer_functions.oetf_from_luminance', 'tf.oetf_from_luminance', (['luminance', 'tf.ST2084'], {}), '(luminance, tf.ST2084)\n', (9747, 9769), True, 'import transfer_functions as tf\n'), ((9781, 9806), 'numpy.array', 'np.array', (['[val, val, val]'], {}), '([val, val, val])\n', (9789, 9806), True, 'import numpy as np\n'), ((10274, 10306), 'numpy.zeros', 'np.zeros', (['(g_height, g_width, 3)'], {}), '((g_height, g_width, 3))\n', (10282, 10306), True, 'import numpy as np\n'), ((11664, 11684), 'numpy.vstack', 'np.vstack', (['v_img_buf'], {}), '(v_img_buf)\n', (11673, 11684), True, 'import numpy as np\n'), ((11690, 11725), 'test_pattern_generator2.merge', 'tpg.merge', (['img', 'tp_img', 'comp_st_pos'], {}), '(img, tp_img, comp_st_pos)\n', (11699, 11725), True, 'import test_pattern_generator2 as tpg\n'), ((11796, 11861), 'font_control.get_text_width_height', 'fc.get_text_width_height', (['text', 'fc.NOTO_SANS_MONO_BOLD', 'font_size'], {}), '(text, fc.NOTO_SANS_MONO_BOLD, font_size)\n', (11820, 11861), True, 'import font_control as fc\n'), ((12274, 12318), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['fname', 'img'], {}), '(fname, img)\n', (12306, 12318), True, 'import test_pattern_generator2 as tpg\n'), ((12853, 12946), 'create_gamut_booundary_lut.make_jzazbz_gb_lut_fname_method_c', 'make_jzazbz_gb_lut_fname_method_c', ([], {'color_space_name': 'color_space_name', 'luminance': 'luminance'}), '(color_space_name=color_space_name,\n luminance=luminance)\n', (12886, 12946), False, 'from create_gamut_booundary_lut import make_jzazbz_gb_lut_fname_method_c, TyLchLut\n'), ((13005, 13049), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'hue_num'], {'endpoint': '(False)'}), '(0, 360, hue_num, endpoint=False)\n', (13016, 13049), True, 'import numpy as np\n'), ((13066, 13088), 'numpy.zeros', 'np.zeros', (['(hue_num, 3)'], {}), '((hue_num, 3))\n', (13074, 13088), True, 
'import numpy as np\n'), ((13204, 13231), 'jzazbz.jzczhz_to_jzazbz', 'jzczhz_to_jzazbz', (['cusp_list'], {}), '(cusp_list)\n', (13220, 13231), False, 'from jzazbz import jzczhz_to_jzazbz\n'), ((13242, 13308), 'color_space.jzazbz_to_rgb', 'cs.jzazbz_to_rgb', ([], {'jzazbz': 'jzazbz', 'color_space_name': 'color_space_name'}), '(jzazbz=jzazbz, color_space_name=color_space_name)\n', (13258, 13308), True, 'import color_space as cs\n'), ((13982, 13998), 'numpy.vstack', 'np.vstack', (['v_buf'], {}), '(v_buf)\n', (13991, 13998), True, 'import numpy as np\n'), ((14327, 14366), 'test_pattern_generator2.equal_devision', 'tpg.equal_devision', (['height', 'v_block_num'], {}), '(height, v_block_num)\n', (14345, 14366), True, 'import test_pattern_generator2 as tpg\n'), ((14387, 14425), 'test_pattern_generator2.equal_devision', 'tpg.equal_devision', (['width', 'h_block_num'], {}), '(width, h_block_num)\n', (14405, 14425), True, 'import test_pattern_generator2 as tpg\n'), ((14443, 14482), 'numpy.zeros', 'np.zeros', (['(v_block_num, h_block_num, 3)'], {}), '((v_block_num, h_block_num, 3))\n', (14451, 14482), True, 'import numpy as np\n'), ((15871, 15943), 'font_control.get_text_width_height', 'fc.get_text_width_height', ([], {'text': 'text', 'font_path': 'font', 'font_size': 'font_size'}), '(text=text, font_path=font, font_size=font_size)\n', (15895, 15943), True, 'import font_control as fc\n'), ((16699, 16878), 'font_control.TextDrawer', 'fc.TextDrawer', ([], {'img': 'img', 'text': 'text', 'pos': 'st_pos', 'font_color': 'font_color', 'font_size': 'font_size', 'bg_transfer_functions': 'tf.ST2084', 'fg_transfer_functions': 'tf.ST2084', 'font_path': 'font'}), '(img=img, text=text, pos=st_pos, font_color=font_color,\n font_size=font_size, bg_transfer_functions=tf.ST2084,\n fg_transfer_functions=tf.ST2084, font_path=font)\n', (16712, 16878), True, 'import font_control as fc\n'), ((17176, 17225), 'transfer_functions.oetf_from_luminance', 'tf.oetf_from_luminance', (['text_luminance', 
'tf.ST2084'], {}), '(text_luminance, tf.ST2084)\n', (17198, 17225), True, 'import transfer_functions as tf\n'), ((17439, 17525), 'font_control.get_text_width_height', 'fc.get_text_width_height', ([], {'text': '"""sample"""', 'font_path': 'font', 'font_size': 'info_font_size'}), "(text='sample', font_path=font, font_size=\n info_font_size)\n", (17463, 17525), True, 'import font_control as fc\n'), ((18439, 18497), 'test_pattern_generator2.merge', 'tpg.merge', (['img', 'patch_img', '(width - patch_img.shape[1], 0)'], {}), '(img, patch_img, (width - patch_img.shape[1], 0))\n', (18448, 18497), True, 'import test_pattern_generator2 as tpg\n'), ((18933, 18966), 'numpy.zeros', 'np.zeros', (['(info_height, width, 3)'], {}), '((info_height, width, 3))\n', (18941, 18966), True, 'import numpy as np\n'), ((19134, 19318), 'font_control.TextDrawer', 'fc.TextDrawer', ([], {'img': 'info_img', 'text': 'text', 'pos': 'st_pos', 'font_color': 'font_color', 'font_size': 'font_size', 'bg_transfer_functions': 'tf.ST2084', 'fg_transfer_functions': 'tf.ST2084', 'font_path': 'font'}), '(img=info_img, text=text, pos=st_pos, font_color=font_color,\n font_size=font_size, bg_transfer_functions=tf.ST2084,\n fg_transfer_functions=tf.ST2084, font_path=font)\n', (19147, 19318), True, 'import font_control as fc\n'), ((19364, 19426), 'test_pattern_generator2.merge', 'tpg.merge', (['img_non_linear', 'info_img', '[0, height - info_height]'], {}), '(img_non_linear, info_img, [0, height - info_height])\n', (19373, 19426), True, 'import test_pattern_generator2 as tpg\n'), ((19549, 19604), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['fname', 'img_non_linear'], {}), '(fname, img_non_linear)\n', (19581, 19604), True, 'import test_pattern_generator2 as tpg\n'), ((19683, 19768), 'ty_utility.add_suffix_to_filename', 'add_suffix_to_filename', ([], {'fname': 'src_fname', 'suffix': 'f"""_w_clip_{clip_luminance}-nits"""'}), "(fname=src_fname, 
suffix=f'_w_clip_{clip_luminance}-nits'\n )\n", (19705, 19768), False, 'from ty_utility import add_suffix_to_filename\n'), ((19783, 19815), 'test_pattern_generator2.img_read_as_float', 'tpg.img_read_as_float', (['src_fname'], {}), '(src_fname)\n', (19804, 19815), True, 'import test_pattern_generator2 as tpg\n'), ((19830, 19879), 'transfer_functions.oetf_from_luminance', 'tf.oetf_from_luminance', (['clip_luminance', 'tf.ST2084'], {}), '(clip_luminance, tf.ST2084)\n', (19852, 19879), True, 'import transfer_functions as tf\n'), ((20067, 20246), 'font_control.TextDrawer', 'fc.TextDrawer', ([], {'img': 'img', 'text': 'text', 'pos': '(0, 0)', 'font_color': 'font_color', 'font_size': 'font_size', 'bg_transfer_functions': 'tf.ST2084', 'fg_transfer_functions': 'tf.ST2084', 'font_path': 'font'}), '(img=img, text=text, pos=(0, 0), font_color=font_color,\n font_size=font_size, bg_transfer_functions=tf.ST2084,\n fg_transfer_functions=tf.ST2084, font_path=font)\n', (20080, 20246), True, 'import font_control as fc\n'), ((20292, 20340), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['dst_fname', 'img'], {}), '(dst_fname, img)\n', (20324, 20340), True, 'import test_pattern_generator2 as tpg\n'), ((20406, 20464), 'ty_utility.add_suffix_to_filename', 'add_suffix_to_filename', ([], {'fname': 'src_fname', 'suffix': '"""_wo_clip"""'}), "(fname=src_fname, suffix='_wo_clip')\n", (20428, 20464), False, 'from ty_utility import add_suffix_to_filename\n'), ((20475, 20507), 'test_pattern_generator2.img_read_as_float', 'tpg.img_read_as_float', (['src_fname'], {}), '(src_fname)\n', (20496, 20507), True, 'import test_pattern_generator2 as tpg\n'), ((20640, 20819), 'font_control.TextDrawer', 'fc.TextDrawer', ([], {'img': 'img', 'text': 'text', 'pos': '(0, 0)', 'font_color': 'font_color', 'font_size': 'font_size', 'bg_transfer_functions': 'tf.ST2084', 'fg_transfer_functions': 'tf.ST2084', 'font_path': 'font'}), '(img=img, text=text, pos=(0, 0), 
font_color=font_color,\n font_size=font_size, bg_transfer_functions=tf.ST2084,\n fg_transfer_functions=tf.ST2084, font_path=font)\n', (20653, 20819), True, 'import font_control as fc\n'), ((20865, 20913), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['dst_fname', 'img'], {}), '(dst_fname, img)\n', (20897, 20913), True, 'import test_pattern_generator2 as tpg\n'), ((1227, 1254), 'numpy.ones', 'np.ones', (['(height, width, 3)'], {}), '((height, width, 3))\n', (1234, 1254), True, 'import numpy as np\n'), ((1257, 1272), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (1265, 1272), True, 'import numpy as np\n'), ((2895, 2935), 'numpy.zeros', 'np.zeros', (['(height_total, width_total, 3)'], {}), '((height_total, width_total, 3))\n', (2903, 2935), True, 'import numpy as np\n'), ((3225, 3280), 'cv2.rectangle', 'cv2.rectangle', (['img', 'st_pos', 'ed_pos', '(1.0, 1.0, 1.0)', '(-1)'], {}), '(img, st_pos, ed_pos, (1.0, 1.0, 1.0), -1)\n', (3238, 3280), False, 'import cv2\n'), ((3760, 3804), 'test_pattern_generator2.img_wirte_float_as_16bit_int', 'tpg.img_wirte_float_as_16bit_int', (['fname', 'img'], {}), '(fname, img)\n', (3792, 3804), True, 'import test_pattern_generator2 as tpg\n'), ((4728, 4750), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (4735, 4750), True, 'import numpy as np\n'), ((7029, 7062), 'numpy.clip', 'np.clip', (['img_linear_709', '(0.0)', '(1.0)'], {}), '(img_linear_709, 0.0, 1.0)\n', (7036, 7062), True, 'import numpy as np\n'), ((7185, 7213), 'test_pattern_generator2.get_primaries', 'tpg.get_primaries', (['cs.BT2020'], {}), '(cs.BT2020)\n', (7202, 7213), True, 'import test_pattern_generator2 as tpg\n'), ((7230, 7258), 'test_pattern_generator2.get_primaries', 'tpg.get_primaries', (['cs.P3_D65'], {}), '(cs.P3_D65)\n', (7247, 7258), True, 'import test_pattern_generator2 as tpg\n'), ((9081, 9100), 'numpy.arange', 'np.arange', (['step_num'], {}), '(step_num)\n', (9090, 9100), 
True, 'import numpy as np\n'), ((12971, 12988), 'numpy.load', 'np.load', (['lut_name'], {}), '(lut_name)\n', (12978, 12988), True, 'import numpy as np\n'), ((14943, 14970), 'numpy.arange', 'np.arange', (['(st_font_size + 1)'], {}), '(st_font_size + 1)\n', (14952, 14970), True, 'import numpy as np\n'), ((15035, 15107), 'font_control.get_text_width_height', 'fc.get_text_width_height', ([], {'text': 'text', 'font_path': 'font', 'font_size': 'font_size'}), '(text=text, font_path=font, font_size=font_size)\n', (15059, 15107), True, 'import font_control as fc\n'), ((15308, 15335), 'numpy.arange', 'np.arange', (['(st_font_size + 1)'], {}), '(st_font_size + 1)\n', (15317, 15335), True, 'import numpy as np\n'), ((15399, 15471), 'font_control.get_text_width_height', 'fc.get_text_width_height', ([], {'text': 'text', 'font_path': 'font', 'font_size': 'font_size'}), '(text=text, font_path=font, font_size=font_size)\n', (15423, 15471), True, 'import font_control as fc\n'), ((17078, 17125), 'transfer_functions.oetf_from_luminance', 'tf.oetf_from_luminance', (['bg_luminance', 'tf.ST2084'], {}), '(bg_luminance, tf.ST2084)\n', (17100, 17125), True, 'import transfer_functions as tf\n'), ((18394, 18421), 'numpy.ones', 'np.ones', (['(height, width, 3)'], {}), '((height, width, 3))\n', (18401, 18421), True, 'import numpy as np\n'), ((18527, 18549), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (18534, 18549), True, 'import numpy as np\n'), ((1665, 1712), 'numpy.tile', 'np.tile', (['idx_even_odd', '(height // (2 * dot_size))'], {}), '(idx_even_odd, height // (2 * dot_size))\n', (1672, 1712), True, 'import numpy as np\n'), ((10999, 11064), 'font_control.get_text_width_height', 'fc.get_text_width_height', (['text', 'fc.NOTO_SANS_MONO_BOLD', 'font_size'], {}), '(text, fc.NOTO_SANS_MONO_BOLD, font_size)\n', (11023, 11064), True, 'import font_control as fc\n'), ((11629, 11649), 'numpy.hstack', 'np.hstack', (['h_img_buf'], {}), '(h_img_buf)\n', (11638, 11649), 
True, 'import numpy as np\n'), ((13755, 13786), 'numpy.ones', 'np.ones', (['(b_height, b_width, 3)'], {}), '((b_height, b_width, 3))\n', (13762, 13786), True, 'import numpy as np\n'), ((13857, 13897), 'test_pattern_generator2.draw_outline', 'tpg.draw_outline', (['temp_img', '[0, 0, 0]', '(1)'], {}), '(temp_img, [0, 0, 0], 1)\n', (13873, 13897), True, 'import test_pattern_generator2 as tpg\n'), ((13954, 13970), 'numpy.hstack', 'np.hstack', (['h_buf'], {}), '(h_buf)\n', (13963, 13970), True, 'import numpy as np\n'), ((21513, 21538), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (21528, 21538), False, 'import os\n'), ((954, 965), 'math.ceil', 'ceil', (['(x / n)'], {}), '(x / n)\n', (958, 965), False, 'from math import ceil\n'), ((1419, 1435), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (1428, 1435), True, 'import numpy as np\n'), ((2832, 2877), 'numpy.sin', 'np.sin', (['(np.pi / (frame - 1) * idx - np.pi / 2)'], {}), '(np.pi / (frame - 1) * idx - np.pi / 2)\n', (2838, 2877), True, 'import numpy as np\n'), ((5577, 5592), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (5585, 5592), True, 'import numpy as np\n'), ((11534, 11553), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (11542, 11553), True, 'import numpy as np\n'), ((16167, 16208), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['(x / 1023)', 'tf.ST2084'], {}), '(x / 1023, tf.ST2084)\n', (16187, 16208), True, 'import transfer_functions as tf\n'), ((16576, 16603), 'numpy.sum', 'np.sum', (['height_list[:v_idx]'], {}), '(height_list[:v_idx])\n', (16582, 16603), True, 'import numpy as np\n'), ((18321, 18341), 'numpy.min', 'np.min', (['height_array'], {}), '(height_array)\n', (18327, 18341), True, 'import numpy as np\n')] |
from typing import Union, Any, List, Tuple, Optional, Dict
import xarray as xr
from tqdm import tqdm
from pathlib import Path
import numpy as np
from rrmpg.models import GR4J
import pandas as pd
def get_data_dir() -> Path:
    """Return the machine-specific data directory.

    The directory is chosen by matching the current user's home
    directory against a set of known machines.

    Returns:
        Path to the existing data directory for this machine.

    Raises:
        RuntimeError: if the machine is not one of the known ones.
        FileNotFoundError: if the expected data directory is missing.
    """
    # `Path(".").absolute().home()` ignored its receiver and was just the
    # user's home directory; `Path.home()` says so directly.
    home = Path.home().as_posix()
    # Known machines -> data directory roots.
    known_machines = {
        "/home/leest": Path("/DataDrive200/data"),
        "/home/tommy": Path("/datadrive/data"),
        "/soge-home/users/chri4118": Path("/lustre/soge1/projects/crop_yield/"),
        "/Users/tommylees": Path("/Users/tommylees/Downloads"),
    }
    if home not in known_machines:
        # Real exception instead of `assert False`: asserts are silently
        # stripped when Python runs with the -O flag.
        raise RuntimeError(f"What machine are you on? (home={home})")
    data_dir = known_machines[home]
    if not data_dir.exists():
        raise FileNotFoundError(
            f"Expect data_dir: {data_dir} to exist. "
            f"Current Working Directory: {Path('.').absolute()}"
        )
    return data_dir
def _fill_gaps_da(da: xr.DataArray, fill: Optional[str] = None, per_station: bool = True) -> xr.DataArray:
    """Fill missing values in a single DataArray.

    Args:
        da: the array to fill; must be a DataArray, not a Dataset.
        fill: gap-filling strategy -- None (no-op), "median", or
            "interpolate".  Any other value silently returns ``da``
            unchanged.
        per_station: with fill="median", use a per-station median over
            time (True) or a single global median (False).

    Returns:
        The gap-filled DataArray (the input itself when fill is None).
    """
    assert isinstance(da, xr.DataArray), "Expect da to be DataArray (not dataset)"
    variable = da.name
    if fill is None:
        return da
    else:
        # fill gaps
        if fill == "median":
            # fill median: median over the time dimension (per station)
            # or over everything, then used as the NaN fill value.
            if per_station:
                median = da.median(dim="time")
            else:
                median = da.median()
            da = da.fillna(median)
        elif fill == "interpolate":
            # fill interpolation: interpolate via pandas, then rebuild the
            # DataArray.  The non-data columns are re-attached as coords
            # from the first dataframe row -- presumably because the
            # to_dataframe()/to_xarray() round-trip loses them (TODO confirm).
            da_df = da.to_dataframe().interpolate()
            coords = [c for c in da_df.columns if c != variable]
            da = da_df.to_xarray().assign_coords(
                dict(zip(coords, da_df[coords].iloc[0].values))
            )[variable]
    return da
def fill_gaps(
    ds: Union[xr.DataArray, xr.Dataset], fill: Optional[str] = None, per_station: bool = True,
) -> Union[xr.DataArray, xr.Dataset]:
    """Fill missing values in a DataArray, or in every variable of a Dataset.

    Args:
        ds: the DataArray or Dataset to fill.
        fill: gap-filling strategy passed through to ``_fill_gaps_da``
            (None is a no-op).
        per_station: passed through to ``_fill_gaps_da``.

    Returns:
        The gap-filled object (Datasets are updated variable by variable,
        with a progress bar).
    """
    if fill is None:
        return ds
    # Single DataArray: delegate directly.
    if not isinstance(ds, xr.Dataset):
        return _fill_gaps_da(ds, fill=fill, per_station=per_station)
    # Dataset: fill each variable in turn, showing progress.
    progress = tqdm(ds.data_vars, desc=f"Filling gaps with method {fill}")
    for var_name in progress:
        progress.set_postfix_str(var_name)
        ds[var_name] = _fill_gaps_da(ds[var_name], fill=fill, per_station=per_station)
    return ds
def initialise_stores(ds: xr.Dataset) -> Tuple[np.ndarray]:
v = [v for v in ds.data_vars][0]
qsim_data = np.empty(ds[v].shape)
s_store_data = np.empty(ds[v].shape)
r_store_data = np.empty(ds[v].shape)
return (qsim_data, s_store_data, r_store_data)
def simulate_gr4j_for_one_station(ds: xr.Dataset, param_df: pd.DataFrame, station_id: int) -> xr.Dataset:
    """Run the GR4J rainfall-runoff model for one station.

    Args:
        ds: forcing dataset with "precipitation" and "pet" variables,
            indexed by (time, station_id).
        param_df: GR4J parameters, one row per station_id.
        station_id: the station to simulate.

    Returns:
        Dataset with "gr4j" (simulated discharge), "s_store" and
        "r_store" variables of shape (time, 1) for this station.
    """
    station_params: Dict[str, float] = param_df.loc[station_id].to_dict()
    station_data = ds.sel(station_id=station_id)
    gr4j_model = GR4J(params=station_params)
    simulated = gr4j_model.simulate(
        prec=station_data["precipitation"].values,
        etp=station_data["pet"].values,
        s_init=0,
        r_init=0,
        return_storage=True,
    )

    def _as_column(arr):
        # Flatten to a single (time, 1) column for this one station.
        return arr.flatten().reshape(-1, 1)

    qsim, s_store, r_store = (_as_column(out) for out in simulated)
    return xr.Dataset(
        {
            "gr4j": (["time", "station_id"], qsim),
            "s_store": (["time", "station_id"], s_store),
            "r_store": (["time", "station_id"], r_store),
        },
        coords={"time": ds["time"], "station_id": [station_id]},
    )
| [
"pathlib.Path",
"rrmpg.models.GR4J",
"tqdm.tqdm",
"xarray.Dataset",
"numpy.empty"
] | [((2413, 2434), 'numpy.empty', 'np.empty', (['ds[v].shape'], {}), '(ds[v].shape)\n', (2421, 2434), True, 'import numpy as np\n'), ((2454, 2475), 'numpy.empty', 'np.empty', (['ds[v].shape'], {}), '(ds[v].shape)\n', (2462, 2475), True, 'import numpy as np\n'), ((2495, 2516), 'numpy.empty', 'np.empty', (['ds[v].shape'], {}), '(ds[v].shape)\n', (2503, 2516), True, 'import numpy as np\n'), ((2801, 2820), 'rrmpg.models.GR4J', 'GR4J', ([], {'params': 'params'}), '(params=params)\n', (2805, 2820), False, 'from rrmpg.models import GR4J\n'), ((3164, 3373), 'xarray.Dataset', 'xr.Dataset', (["{'gr4j': (['time', 'station_id'], qsim), 's_store': (['time', 'station_id'],\n s_store), 'r_store': (['time', 'station_id'], r_store)}"], {'coords': "{'time': ds['time'], 'station_id': [station_id]}"}), "({'gr4j': (['time', 'station_id'], qsim), 's_store': (['time',\n 'station_id'], s_store), 'r_store': (['time', 'station_id'], r_store)},\n coords={'time': ds['time'], 'station_id': [station_id]})\n", (3174, 3373), True, 'import xarray as xr\n'), ((310, 336), 'pathlib.Path', 'Path', (['"""/DataDrive200/data"""'], {}), "('/DataDrive200/data')\n", (314, 336), False, 'from pathlib import Path\n'), ((2012, 2071), 'tqdm.tqdm', 'tqdm', (['ds.data_vars'], {'desc': 'f"""Filling gaps with method {fill}"""'}), "(ds.data_vars, desc=f'Filling gaps with method {fill}')\n", (2016, 2071), False, 'from tqdm import tqdm\n'), ((422, 445), 'pathlib.Path', 'Path', (['"""/datadrive/data"""'], {}), "('/datadrive/data')\n", (426, 445), False, 'from pathlib import Path\n'), ((545, 587), 'pathlib.Path', 'Path', (['"""/lustre/soge1/projects/crop_yield/"""'], {}), "('/lustre/soge1/projects/crop_yield/')\n", (549, 587), False, 'from pathlib import Path\n'), ((678, 712), 'pathlib.Path', 'Path', (['"""/Users/tommylees/Downloads"""'], {}), "('/Users/tommylees/Downloads')\n", (682, 712), False, 'from pathlib import Path\n'), ((887, 896), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (891, 896), False, 'from 
pathlib import Path\n'), ((234, 243), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (238, 243), False, 'from pathlib import Path\n'), ((346, 355), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (350, 355), False, 'from pathlib import Path\n'), ((455, 464), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (459, 464), False, 'from pathlib import Path\n'), ((597, 606), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (601, 606), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:46:40 2019
@author: KemenczkyP
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
print(tf.__version__)
from kerasmodel import MyModel
from heatmap import HeatMap
#register Guided ReLU function
@tf.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
    """Guided-backprop gradient for ReLU.

    Passes the incoming gradient through only where the forward
    activation was positive AND the incoming gradient is positive.
    """
    forward_positive = tf.cast(op.outputs[0] > 0, "float32")  # f^l > 0
    grad_positive = tf.cast(grad > 0, "float32")  # R^{l+1} > 0
    return grad * forward_positive * grad_positive
#%%
# Load dataset: Fashion-MNIST, 28x28 grayscale images in 10 classes.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Human-readable names for the 10 integer class labels (index == label).
class_names = ['T-shirt/top',
               'Trouser',
               'Pullover',
               'Dress',
               'Coat',
               'Sandal',
               'Shirt',
               'Sneaker',
               'Bag',
               'Ankle boot']
# scale data to [0,1] (pixel values arrive as uint8 in [0, 255])
train_images = (train_images / 255.0).astype(np.float32)
test_images = (test_images / 255.0).astype(np.float32)
#%%
#TRAIN
# Register the Guided-ReLU gradient override for every 'Relu' op built
# inside this context.  NOTE(review): gradient_override_map is a TF1
# graph-mode mechanism; confirm it takes effect under TF2 eager execution.
with tf.compat.v1.get_default_graph().gradient_override_map({'Relu': 'GuidedRelu'}): #use Guided ReLU
    model = MyModel()
    loss_fn = keras.losses.SparseCategoricalCrossentropy()
    model.compile(optimizer='adam',
                  loss=loss_fn,
                  metrics=['accuracy'])
    # One epoch is enough for this Grad-CAM demo.
    model.fit(train_images, train_labels, epochs=1,
              batch_size = 100)
    test_loss, test_acc = model.evaluate(test_images, test_labels)
#%%
#COMPUTE GRADIENTS AND VISUALIZE
#source:
#   Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization
#   doi: 10.1109/iccv.2017.74
# Pick one random test image, kept as a batch of size 1.
random_sample = np.random.randint(0, test_images.shape[0], 1)[0]
image = test_images[random_sample:random_sample+1,:,:] #
image = tf.convert_to_tensor(image)
label = test_labels[random_sample:random_sample+1]
# NOTE(review): `im` (the NHWC-reshaped copy) is never used below --
# `model(image)` is called on the 3-D `image` tensor instead; confirm
# whether `im` was meant to be fed to the model.
im = tf.convert_to_tensor(np.reshape(image,(-1,image.shape[1], image.shape[2],1)), tf.float32)
# Two tapes: one for d(logits)/d(last conv) (the Grad-CAM weights) and
# one for d(loss)/d(input) (the guided-backprop image used by HeatMap).
with tf.GradientTape() as logits_tape, tf.GradientTape() as loss_tape:
    loss_tape.watch(image)
    logits = model(image)
    loss_value = loss_fn(label, logits)
    lastc = model.last_conv_value
guide = loss_tape.gradient(loss_value, image)
grads = logits_tape.gradient(logits, lastc)
# Global-average-pool the gradients over the spatial dims: one scalar
# weight per feature map of the last conv layer.
GAP_pool = tf.keras.layers.AveragePooling2D((lastc.shape[1], lastc.shape[2]),
                                 padding= 'valid',
                                 strides = (1,1))(grads)
# Accumulate the weighted feature maps, applying ReLU as they are summed.
grad_c=tf.zeros(lastc.shape[1:3], tf.float32)
for idx in range(0,GAP_pool.shape[3]):
    grad_c=tf.nn.relu(grad_c+lastc[0,:,:,idx]*GAP_pool[0,:,:,idx])
grad_cam, heatmap = HeatMap(grad_c,guide, dims = 2)
#show with heatmap
image = image.numpy() * 255 #rescale to original
image = np.squeeze(np.uint8(image))
RGB_img = cv.cvtColor(image, cv.COLOR_GRAY2BGR) #convert to "RGB" (size)
heatmap_img = cv.applyColorMap(np.uint8(heatmap), cv.COLORMAP_JET)
# Overlay: 70% heatmap, 30% original image.
fin = cv.addWeighted(heatmap_img, 0.7, RGB_img, 0.3, 0)
plt.imshow(fin)
#cv.imshow('image_w_heatmap', fin)
| [
"matplotlib.pyplot.imshow",
"numpy.uint8",
"numpy.reshape",
"tensorflow.nn.relu",
"kerasmodel.MyModel",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.GradientTape",
"cv2.addWeighted",
"numpy.random.randint",
"cv2.cvtColor",
"he... | [((458, 491), 'tensorflow.RegisterGradient', 'tf.RegisterGradient', (['"""GuidedRelu"""'], {}), "('GuidedRelu')\n", (477, 491), True, 'import tensorflow as tf\n'), ((2963, 2993), 'heatmap.HeatMap', 'HeatMap', (['grad_c', 'guide'], {'dims': '(2)'}), '(grad_c, guide, dims=2)\n', (2970, 2993), False, 'from heatmap import HeatMap\n'), ((3111, 3148), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_GRAY2BGR'], {}), '(image, cv.COLOR_GRAY2BGR)\n', (3122, 3148), True, 'import cv2 as cv\n'), ((3249, 3298), 'cv2.addWeighted', 'cv.addWeighted', (['heatmap_img', '(0.7)', 'RGB_img', '(0.3)', '(0)'], {}), '(heatmap_img, 0.7, RGB_img, 0.3, 0)\n', (3263, 3298), True, 'import cv2 as cv\n'), ((3300, 3315), 'matplotlib.pyplot.imshow', 'plt.imshow', (['fin'], {}), '(fin)\n', (3310, 3315), True, 'import matplotlib.pyplot as plt\n'), ((535, 572), 'tensorflow.cast', 'tf.cast', (['(op.outputs[0] > 0)', '"""float32"""'], {}), "(op.outputs[0] > 0, 'float32')\n", (542, 572), True, 'import tensorflow as tf\n'), ((598, 626), 'tensorflow.cast', 'tf.cast', (['(grad > 0)', '"""float32"""'], {}), "(grad > 0, 'float32')\n", (605, 626), True, 'import tensorflow as tf\n'), ((1344, 1353), 'kerasmodel.MyModel', 'MyModel', ([], {}), '()\n', (1351, 1353), False, 'from kerasmodel import MyModel\n'), ((1368, 1412), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (1410, 1412), False, 'from tensorflow import keras\n'), ((2013, 2040), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (2033, 2040), True, 'import tensorflow as tf\n'), ((2789, 2827), 'tensorflow.zeros', 'tf.zeros', (['lastc.shape[1:3]', 'tf.float32'], {}), '(lastc.shape[1:3], tf.float32)\n', (2797, 2827), True, 'import tensorflow as tf\n'), ((3083, 3098), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (3091, 3098), True, 'import numpy as np\n'), ((3206, 3223), 'numpy.uint8', 'np.uint8', (['heatmap'], {}), 
'(heatmap)\n', (3214, 3223), True, 'import numpy as np\n'), ((1890, 1935), 'numpy.random.randint', 'np.random.randint', (['(0)', 'test_images.shape[0]', '(1)'], {}), '(0, test_images.shape[0], 1)\n', (1907, 1935), True, 'import numpy as np\n'), ((2131, 2189), 'numpy.reshape', 'np.reshape', (['image', '(-1, image.shape[1], image.shape[2], 1)'], {}), '(image, (-1, image.shape[1], image.shape[2], 1))\n', (2141, 2189), True, 'import numpy as np\n'), ((2209, 2226), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2224, 2226), True, 'import tensorflow as tf\n'), ((2243, 2260), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2258, 2260), True, 'import tensorflow as tf\n'), ((2568, 2672), 'tensorflow.keras.layers.AveragePooling2D', 'tf.keras.layers.AveragePooling2D', (['(lastc.shape[1], lastc.shape[2])'], {'padding': '"""valid"""', 'strides': '(1, 1)'}), "((lastc.shape[1], lastc.shape[2]), padding=\n 'valid', strides=(1, 1))\n", (2600, 2672), True, 'import tensorflow as tf\n'), ((2886, 2951), 'tensorflow.nn.relu', 'tf.nn.relu', (['(grad_c + lastc[0, :, :, idx] * GAP_pool[0, :, :, idx])'], {}), '(grad_c + lastc[0, :, :, idx] * GAP_pool[0, :, :, idx])\n', (2896, 2951), True, 'import tensorflow as tf\n'), ((1235, 1267), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (1265, 1267), True, 'import tensorflow as tf\n')] |
from factualaudio.data import noise
from factualaudio.decibel import to_decibels
import numpy as np
def waveform(axes, wave, sample_rate, *args, **kwargs):
    """Plot *wave* against time in milliseconds on *axes*."""
    ms_per_sample = 1000 / sample_rate
    times_ms = np.arange(0, wave.size) * ms_per_sample
    return axes.plot(times_ms, wave, *args, **kwargs)
# Equivalent to axes.amplitude_spectrum(), but plots on an RMS amplitude scale.
# (i.e. an input sine wave of RMS amplitude X will show up as X on the plot)
def rms_amplitude_spectrum(axes, wave, noise_level=1e-14, *args, **kwargs):
    """Plot the amplitude spectrum of *wave* on an RMS amplitude scale.

    Equivalent to axes.magnitude_spectrum(), but scaled so that an input
    sine wave of RMS amplitude X shows up as X on the plot.

    Args:
        axes: the matplotlib-style axes to plot on.
        wave: the input signal; NOT modified (the original implementation
            mutated it in place).
        noise_level: amplitude of the dither added before plotting.
    """
    # Default to a rectangular window and a dB scale unless overridden.
    kwargs.setdefault("window", np.ones(wave.size))
    kwargs.setdefault("scale", "dB")
    # Add some noise to avoid numerical issues when converting to dB.
    # BUG FIX: use out-of-place addition -- `wave += ...` mutated the
    # caller's array.
    wave = wave + noise(wave.size) * noise_level
    # sqrt(2) converts peak amplitude to RMS amplitude for a sine.
    return axes.magnitude_spectrum(wave * np.sqrt(2), *args, **kwargs)
def transfer_function_gain(axes, transfer_function, corner_frequency=1000):
    """Plot the gain (dB) of *transfer_function* on a log-frequency axis."""
    frequencies = np.linspace(0, 20000, num=1000)
    # Evaluate at s = j*f/fc (frequency normalised to the corner).
    magnitudes = np.absolute(transfer_function(frequencies * (1j / corner_frequency)))
    return axes.semilogx(frequencies, to_decibels(magnitudes))
def transfer_function_phase(axes, transfer_function, corner_frequency=1000):
    """Plot the phase (degrees) of *transfer_function* on a log-frequency axis."""
    frequencies = np.linspace(0, 20000, num=1000)
    # Evaluate at s = j*f/fc and take the phase angle in degrees.
    phases_deg = np.angle(transfer_function(frequencies * (1j / corner_frequency)), deg=True)
    return axes.semilogx(frequencies, phases_deg)
| [
"numpy.sqrt",
"numpy.ones",
"factualaudio.data.noise",
"numpy.linspace",
"numpy.arange"
] | [((840, 871), 'numpy.linspace', 'np.linspace', (['(0)', '(20000)'], {'num': '(1000)'}), '(0, 20000, num=1000)\n', (851, 871), True, 'import numpy as np\n'), ((1060, 1091), 'numpy.linspace', 'np.linspace', (['(0)', '(20000)'], {'num': '(1000)'}), '(0, 20000, num=1000)\n', (1071, 1091), True, 'import numpy as np\n'), ((515, 533), 'numpy.ones', 'np.ones', (['wave.size'], {}), '(wave.size)\n', (522, 533), True, 'import numpy as np\n'), ((653, 669), 'factualaudio.data.noise', 'noise', (['wave.size'], {}), '(wave.size)\n', (658, 669), False, 'from factualaudio.data import noise\n'), ((178, 201), 'numpy.arange', 'np.arange', (['(0)', 'wave.size'], {}), '(0, wave.size)\n', (187, 201), True, 'import numpy as np\n'), ((726, 736), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (733, 736), True, 'import numpy as np\n')] |
import example_cy
import numpy as np
import mbx_fortran
import optimized_cy
import time
# Benchmark inputs: a 9x9 Gaussian patch, identical for every backend.
size = 9
height = 1
center_x = 4
center_y = 4
width_x = 2
width_y = 2
data = np.zeros((size, size), dtype=np.uint16)

num_loop = 10000
loops = list(range(0, num_loop))

# --- Cython baseline ---------------------------------------------------
start = time.time()
for loop in loops:
    res_c = example_cy.gaussian(height, center_x, center_y, width_x, width_y, size, data)
res_c2 = np.reshape(res_c, (size, size))
print('Time taken base c: ' + str(round(time.time() - start, 3)) + ' s. Loops: ' + str(len(loops)))

# --- Fortran implementation --------------------------------------------
start = time.time()
for loop in loops:
    res_f = mbx_fortran.gaussian(height, center_x, center_y, width_x, width_y, size, data)
res_f2 = np.reshape(res_f, (size, size))
# BUG FIX: the label previously said 'base c' for all three timings.
print('Time taken fortran: ' + str(round(time.time() - start, 3)) + ' s. Loops: ' + str(len(loops)))

# --- Optimized Cython --------------------------------------------------
start = time.time()
for loop in loops:
    res_c_opt = optimized_cy.gaussian(height, center_x, center_y, width_x, width_y, size, data)
res_c_opt2 = np.reshape(res_c_opt, (size, size))
print('Time taken optimized c: ' + str(round(time.time() - start, 3)) + ' s. Loops: ' + str(len(loops)))
| [
"numpy.reshape",
"example_cy.gaussian",
"optimized_cy.gaussian",
"mbx_fortran.gaussian",
"numpy.zeros",
"time.time"
] | [((167, 206), 'numpy.zeros', 'np.zeros', (['(size, size)'], {'dtype': 'np.uint16'}), '((size, size), dtype=np.uint16)\n', (175, 206), True, 'import numpy as np\n'), ((268, 279), 'time.time', 'time.time', ([], {}), '()\n', (277, 279), False, 'import time\n'), ((400, 431), 'numpy.reshape', 'np.reshape', (['res_c', '(size, size)'], {}), '(res_c, (size, size))\n', (410, 431), True, 'import numpy as np\n'), ((541, 552), 'time.time', 'time.time', ([], {}), '()\n', (550, 552), False, 'import time\n'), ((674, 705), 'numpy.reshape', 'np.reshape', (['res_f', '(size, size)'], {}), '(res_f, (size, size))\n', (684, 705), True, 'import numpy as np\n'), ((815, 826), 'time.time', 'time.time', ([], {}), '()\n', (824, 826), False, 'import time\n'), ((958, 993), 'numpy.reshape', 'np.reshape', (['res_c_opt', '(size, size)'], {}), '(res_c_opt, (size, size))\n', (968, 993), True, 'import numpy as np\n'), ((312, 389), 'example_cy.gaussian', 'example_cy.gaussian', (['height', 'center_x', 'center_y', 'width_x', 'width_y', 'size', 'data'], {}), '(height, center_x, center_y, width_x, width_y, size, data)\n', (331, 389), False, 'import example_cy\n'), ((585, 663), 'mbx_fortran.gaussian', 'mbx_fortran.gaussian', (['height', 'center_x', 'center_y', 'width_x', 'width_y', 'size', 'data'], {}), '(height, center_x, center_y, width_x, width_y, size, data)\n', (605, 663), False, 'import mbx_fortran\n'), ((863, 942), 'optimized_cy.gaussian', 'optimized_cy.gaussian', (['height', 'center_x', 'center_y', 'width_x', 'width_y', 'size', 'data'], {}), '(height, center_x, center_y, width_x, width_y, size, data)\n', (884, 942), False, 'import optimized_cy\n'), ((472, 483), 'time.time', 'time.time', ([], {}), '()\n', (481, 483), False, 'import time\n'), ((746, 757), 'time.time', 'time.time', ([], {}), '()\n', (755, 757), False, 'import time\n'), ((1034, 1045), 'time.time', 'time.time', ([], {}), '()\n', (1043, 1045), False, 'import time\n')] |
import csv
import os
import numpy as np
from foods3 import util
from gurobipy import *
county_size = 3109
def optimize_gurobi(supply_code, supply_corn, demand_code, demand_corn, dist_mat):
    """Solve the corn-flow transportation LP with Gurobi.

    Minimises total distance-weighted flow subject to:
      * each flow is non-negative and capped by min(supply, demand),
      * total outflow from each supply county <= its production,
      * total inflow into each demand county == its demand.

    Args:
        supply_code: county codes of the supply counties.
        supply_corn: corn production per supply county (same order).
        demand_code: county codes of the demand counties.
        demand_corn: corn demand per demand county (same order).
        dist_mat: dist_mat[i][j] = distance from supply i to demand j.

    Returns:
        Flat numpy array of length len(supply)*len(demand); the optimal
        flow from supply i to demand j is at index i*no_of_demand + j.
    """
    # NOTE(review): `env` is created only for its side effect of setting
    # up the Gurobi log file; it is not passed to the Model.
    env = Env("gurobi_spatial_lca.log")
    model = Model("lp_for_spatiallca")
    no_of_supply = len(supply_code)
    no_of_demand = len(demand_code)
    # One decision variable per (supply, demand) pair, bounded by
    # 0 <= flow <= min(supply, demand).  (The original initialised `var`
    # twice; the redundant dead assignment has been removed.)
    var = []
    sol = np.zeros(no_of_supply * no_of_demand)
    for i, vs in enumerate(supply_code):
        for j, vd in enumerate(demand_code):
            var.append(model.addVar(0.0, min(supply_corn[i], demand_corn[j]), 0.0, GRB.CONTINUOUS, "S_s[{:d},{:d}]".format(i, j)))
    model.update()
    print("corn flow constraint = all number positive")
    # Objective: minimise total transport distance.
    expr = LinExpr()
    for i, vs in enumerate(supply_code):
        for j, vd in enumerate(demand_code):
            expr.addTerms(dist_mat[i][j], var[i * no_of_demand + j])
    model.setObjective(expr, GRB.MINIMIZE)
    # Supply constraints: each row sum bounded by the county's production.
    for i, vs in enumerate(supply_code):
        expr = LinExpr()
        for j, vd in enumerate(demand_code):
            expr.addTerms(1.0, var[i * no_of_demand + j])
        model.addConstr(expr, GRB.LESS_EQUAL, supply_corn[i], "c{:d}".format(i + 1))
    print("sum of corn flow from specific county smaller than total product of that county")
    # Demand constraints: each column sum equals the county's demand.
    for j, vd in enumerate(demand_code):
        expr = LinExpr()
        for i, vs in enumerate(supply_code):
            expr.addTerms(1.0, var[i * no_of_demand + j])
        model.addConstr(expr, GRB.EQUAL, demand_corn[j], "d{:d}".format(j + 1))
    print("all constraints are set.")
    # Optimize model, then copy the solved flows into the flat array.
    model.optimize()
    for i, vs in enumerate(supply_code):
        for j, vd in enumerate(demand_code):
            sol[i * no_of_demand + j] = var[i * no_of_demand + j].x
    return sol
def read_csv_int(filename, col_idx):
values = []
with open(filename, "r", encoding='utf-8') as f:
csv_reader = csv.reader(f)
next(csv_reader)
for row in csv_reader:
v = row[col_idx]
values.append(int(v))
return values
def read_csv_float(filename, col_idx):
values = []
with open(filename, "r", encoding='utf-8') as f:
csv_reader = csv.reader(f)
next(csv_reader)
for row in csv_reader:
v = row[col_idx]
v = v.replace(",", "")
# print(v)
if v is None or v == "" or v.strip() == "-":
values.append(0)
else:
values.append(float(v))
return values
def read_csv_float_range(filename, col_idx, col_idx_end):
values = []
with open(filename, "r", encoding='utf-8') as f:
csv_reader = csv.reader(f)
next(csv_reader)
for row in csv_reader:
sum_value = 0.
for col in range(col_idx, col_idx_end):
v = row[col]
v = v.replace(",", "")
if v is None or v == "" or v.strip() == "-":
v = 0
else:
v = float(v)
sum_value += v
values.append(sum_value)
return values
def read_dist_matrix(filename):
matrix = np.zeros((county_size, county_size))
with open(filename, "r") as f:
csv_reader = csv.reader(f)
for i, row in enumerate(csv_reader):
for c in range(county_size):
matrix[i][c] = float(row[c])
return matrix
def expand_list(corn_demand_file, input_file, output_file):
demand = {}
with open(corn_demand_file, "r") as f:
reader = csv.reader(f)
next(reader)
for row in reader:
demand[row[0]] = [float(row[8]), float(row[9]), float(row[10]),
float(row[11]), float(row[12]), float(row[13]),
float(row[7])]
sub_sector = ["layer", "pullet", "turkey", "milkcow", "wetmill", "export", "others"]
data_list = []
with open(input_file, "r") as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
data_list.append(row)
expanded_list = []
for row in data_list:
if row[0] == "others":
weighted_col_idx = [3,]
target_county = row[1]
total_demand = sum(demand[target_county])
for ss in range(len(sub_sector)):
if total_demand == 0:
weight = 1
else:
weight = demand[target_county][ss] / total_demand
split_row = [row[x] if x not in weighted_col_idx else float(row[x])*weight for x in range(len(row))]
split_row[0] = sub_sector[ss]
if split_row[3] != 0:
expanded_list.append(split_row)
else:
expanded_list.append(row)
with open(output_file, "w") as f:
f.write(",".join(header))
f.write("\n")
for row in expanded_list:
f.write(",".join([str(x) for x in row]))
f.write("\n")
def main(output_filename, demand_filename):
county_code = read_csv_int("../input/county_FIPS.csv", 0)
supply_code = county_code[:]
supply_amount = read_csv_float(demand_filename, 1)
demand_code = []
for i in range(5):
demand_code.extend(county_code)
demand_amount = []
# cattle(0), poultry(1), ethanol(2), hog(3), others(4)
demand_amount.extend(read_csv_float(demand_filename, 3))
demand_amount.extend(read_csv_float(demand_filename, 5))
demand_amount.extend(read_csv_float(demand_filename, 6))
demand_amount.extend(read_csv_float(demand_filename, 4))
demand_amount.extend(read_csv_float_range(demand_filename, 7, 14))
print(sum(supply_amount))
print(sum(demand_amount))
all_imp_filename = "../input/allDist_imp.csv"
dist_imp_all_matrix = read_dist_matrix(all_imp_filename)
dist_mat = np.zeros((len(supply_code), len(demand_code)))
print("making distance matrix")
dist_mat[0:3109, 0 + 0 * 3109:3109 * 1] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 1 * 3109:3109 * 2] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 2 * 3109:3109 * 3] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 3 * 3109:3109 * 4] = dist_imp_all_matrix
dist_mat[0:3109, 0 + 4 * 3109:3109 * 5] = dist_imp_all_matrix
print("run simulation model")
sol = optimize_gurobi(supply_code, supply_amount, demand_code, demand_amount, dist_mat)
no_of_demand = len(demand_code)
sector_name = ("cattle", "broiler", "ethanol", "hog", "others")
with open(output_filename, "w") as f:
headline = [
"sector", "demand_county", "corn_county", "corn_bu",
]
f.write(",".join(headline))
f.write("\n")
for i, v in enumerate(sol):
if v > 0:
sector = (i % no_of_demand) // county_size
src_county_idx = i // no_of_demand
des_county_idx = i % no_of_demand % county_size
supply_corn_bu = v
src_county_fips = county_code[src_county_idx]
des_county_fips = county_code[des_county_idx]
f.write("{},{},{},{}\n".format(sector_name[sector], des_county_fips, src_county_fips, supply_corn_bu))
if __name__ == '__main__':
ROOT_DIR = util.get_project_root()
output_dir = ROOT_DIR / "output"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
corn_flow_filename = "../output/corn_flow_county_scale_major_category.csv"
corn_demand_filename = "../input/corn_demand_2012.csv"
main(corn_flow_filename, corn_demand_filename)
expand_list(corn_demand_filename,
corn_flow_filename,
"../output/impacts_scale_county_all_category.csv")
| [
"os.path.exists",
"foods3.util.get_project_root",
"numpy.zeros",
"os.mkdir",
"csv.reader"
] | [((463, 500), 'numpy.zeros', 'np.zeros', (['(no_of_supply * no_of_demand)'], {}), '(no_of_supply * no_of_demand)\n', (471, 500), True, 'import numpy as np\n'), ((3460, 3496), 'numpy.zeros', 'np.zeros', (['(county_size, county_size)'], {}), '((county_size, county_size))\n', (3468, 3496), True, 'import numpy as np\n'), ((7580, 7603), 'foods3.util.get_project_root', 'util.get_project_root', ([], {}), '()\n', (7601, 7603), False, 'from foods3 import util\n'), ((2213, 2226), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2223, 2226), False, 'import csv\n'), ((2495, 2508), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2505, 2508), False, 'import csv\n'), ((2968, 2981), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2978, 2981), False, 'import csv\n'), ((3553, 3566), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3563, 3566), False, 'import csv\n'), ((3854, 3867), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3864, 3867), False, 'import csv\n'), ((4283, 4296), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4293, 4296), False, 'import csv\n'), ((7653, 7679), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (7667, 7679), False, 'import os\n'), ((7689, 7709), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (7697, 7709), False, 'import os\n')] |
from numpy import array
from pandas import DataFrame, Series
from unittest.case import TestCase
from probability.distributions import Beta
from tests.test_questions.question_factories import make_likert_question
class TestLikertQuestion(TestCase):
def setUp(self) -> None:
self.question = make_likert_question()
def test_count(self):
self.assertEqual(15, self.question.count())
self.assertEqual(2, self.question.count('1 - strongly disagree'))
self.assertEqual(6, self.question.count(['1 - strongly disagree',
'2 - disagree']))
def test_value_counts(self):
self.assertTrue(Series({
'1 - strongly disagree': 2,
'2 - disagree': 4,
'3 - neither agree nor disagree': 6,
'4 - agree': 0,
'5 - strongly agree': 3
}).equals(self.question.value_counts()))
self.assertTrue(Series({
'1 - strongly disagree': 2,
}).equals(self.question.value_counts('1 - strongly disagree')))
self.assertTrue(Series({
'1 - strongly disagree': 2,
'2 - disagree': 4,
}).equals(self.question.value_counts(['1 - strongly disagree',
'2 - disagree'])))
def test_distribution_table__no_significance(self):
expected = DataFrame(data=[
('1 - strongly disagree', 2),
('2 - disagree', 4),
('3 - neither agree nor disagree', 6),
('4 - agree', 0),
('5 - strongly agree', 3)
], columns=['Value', 'Count'])
actual = self.question.distribution_table()
self.assertTrue(expected.equals(actual))
def test_distribution_table__significance(self):
a = array([2, 4, 6, 0, 3])
n = a.sum()
b = n - a
a_others = array([
(a.sum() - a[i]) / (len(a) - 1)
for i in range(len(a))
])
b_others = n - a_others
expected = DataFrame(data=[
('1 - strongly disagree', a[0],
Beta(1 + a[0], 1 + b[0]) > Beta(1 + a_others[0], 1 + b_others[0])),
('2 - disagree', a[1],
Beta(1 + a[1], 1 + b[1]) > Beta(1 + a_others[1], 1 + b_others[1])),
('3 - neither agree nor disagree', a[2],
Beta(1 + a[2], 1 + b[2]) > Beta(1 + a_others[2], 1 + b_others[2])),
('4 - agree', a[3],
Beta(1 + a[3], 1 + b[3]) > Beta(1 + a_others[3], 1 + b_others[3])),
('5 - strongly agree', a[4],
Beta(1 + a[4], 1 + b[4]) > Beta(1 + a_others[4], 1 + b_others[4]))
], columns=['Value', 'Count', 'Significance'])
actual = self.question.distribution_table(significance=True)
self.assertTrue(expected.equals(actual))
| [
"pandas.Series",
"probability.distributions.Beta",
"tests.test_questions.question_factories.make_likert_question",
"numpy.array",
"pandas.DataFrame"
] | [((306, 328), 'tests.test_questions.question_factories.make_likert_question', 'make_likert_question', ([], {}), '()\n', (326, 328), False, 'from tests.test_questions.question_factories import make_likert_question\n'), ((1387, 1576), 'pandas.DataFrame', 'DataFrame', ([], {'data': "[('1 - strongly disagree', 2), ('2 - disagree', 4), (\n '3 - neither agree nor disagree', 6), ('4 - agree', 0), (\n '5 - strongly agree', 3)]", 'columns': "['Value', 'Count']"}), "(data=[('1 - strongly disagree', 2), ('2 - disagree', 4), (\n '3 - neither agree nor disagree', 6), ('4 - agree', 0), (\n '5 - strongly agree', 3)], columns=['Value', 'Count'])\n", (1396, 1576), False, 'from pandas import DataFrame, Series\n'), ((1805, 1827), 'numpy.array', 'array', (['[2, 4, 6, 0, 3]'], {}), '([2, 4, 6, 0, 3])\n', (1810, 1827), False, 'from numpy import array\n'), ((683, 824), 'pandas.Series', 'Series', (["{'1 - strongly disagree': 2, '2 - disagree': 4,\n '3 - neither agree nor disagree': 6, '4 - agree': 0,\n '5 - strongly agree': 3}"], {}), "({'1 - strongly disagree': 2, '2 - disagree': 4,\n '3 - neither agree nor disagree': 6, '4 - agree': 0,\n '5 - strongly agree': 3})\n", (689, 824), False, 'from pandas import DataFrame, Series\n'), ((949, 985), 'pandas.Series', 'Series', (["{'1 - strongly disagree': 2}"], {}), "({'1 - strongly disagree': 2})\n", (955, 985), False, 'from pandas import DataFrame, Series\n'), ((1094, 1149), 'pandas.Series', 'Series', (["{'1 - strongly disagree': 2, '2 - disagree': 4}"], {}), "({'1 - strongly disagree': 2, '2 - disagree': 4})\n", (1100, 1149), False, 'from pandas import DataFrame, Series\n'), ((2109, 2133), 'probability.distributions.Beta', 'Beta', (['(1 + a[0])', '(1 + b[0])'], {}), '(1 + a[0], 1 + b[0])\n', (2113, 2133), False, 'from probability.distributions import Beta\n'), ((2136, 2174), 'probability.distributions.Beta', 'Beta', (['(1 + a_others[0])', '(1 + b_others[0])'], {}), '(1 + a_others[0], 1 + b_others[0])\n', (2140, 2174), False, 'from 
probability.distributions import Beta\n'), ((2225, 2249), 'probability.distributions.Beta', 'Beta', (['(1 + a[1])', '(1 + b[1])'], {}), '(1 + a[1], 1 + b[1])\n', (2229, 2249), False, 'from probability.distributions import Beta\n'), ((2252, 2290), 'probability.distributions.Beta', 'Beta', (['(1 + a_others[1])', '(1 + b_others[1])'], {}), '(1 + a_others[1], 1 + b_others[1])\n', (2256, 2290), False, 'from probability.distributions import Beta\n'), ((2359, 2383), 'probability.distributions.Beta', 'Beta', (['(1 + a[2])', '(1 + b[2])'], {}), '(1 + a[2], 1 + b[2])\n', (2363, 2383), False, 'from probability.distributions import Beta\n'), ((2386, 2424), 'probability.distributions.Beta', 'Beta', (['(1 + a_others[2])', '(1 + b_others[2])'], {}), '(1 + a_others[2], 1 + b_others[2])\n', (2390, 2424), False, 'from probability.distributions import Beta\n'), ((2472, 2496), 'probability.distributions.Beta', 'Beta', (['(1 + a[3])', '(1 + b[3])'], {}), '(1 + a[3], 1 + b[3])\n', (2476, 2496), False, 'from probability.distributions import Beta\n'), ((2499, 2537), 'probability.distributions.Beta', 'Beta', (['(1 + a_others[3])', '(1 + b_others[3])'], {}), '(1 + a_others[3], 1 + b_others[3])\n', (2503, 2537), False, 'from probability.distributions import Beta\n'), ((2594, 2618), 'probability.distributions.Beta', 'Beta', (['(1 + a[4])', '(1 + b[4])'], {}), '(1 + a[4], 1 + b[4])\n', (2598, 2618), False, 'from probability.distributions import Beta\n'), ((2621, 2659), 'probability.distributions.Beta', 'Beta', (['(1 + a_others[4])', '(1 + b_others[4])'], {}), '(1 + a_others[4], 1 + b_others[4])\n', (2625, 2659), False, 'from probability.distributions import Beta\n')] |
## File for training and evaluation of model
import os
import time
from tqdm import tqdm
import torch
import math
import numpy as np
from torch.utils.data import DataLoader
from torch.nn import DataParallel
from nets.attention_model import set_decode_type
from utils.log_utils import log_values
from utils import move_to
# Load model based on the architecture of machine
def get_inner_model(model):
return model.module if isinstance(model, DataParallel) else model
# Validating the model based on validation dataset and reporting average distance
def validate(model, dataset, opts):
# Validate
print('Validating...')
cost = rollout(model, dataset, opts)
avg_cost = cost.mean()
print('Validation overall avg_cost: {} +- {}'.format(
avg_cost, torch.std(cost) / math.sqrt(len(cost))))
return avg_cost
def rollout(model, dataset, opts):
# Put in greedy evaluation mode!
set_decode_type(model, "greedy")
model.eval()
def eval_model_bat(bat):
with torch.no_grad():
cost, _ = model(move_to(bat, opts.device))
return cost.data.cpu()
return torch.cat([
eval_model_bat(bat)
for bat
in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar)
], 0)
def clip_grad_norms(param_groups, max_norm=math.inf):
"""
Clips the norms for all param groups to max_norm and returns gradient norms before clipping
:param optimizer: Adams Optimizer with variable learning rate
:param max_norm: The maximum value of gradient, given by opts.max_grad_norm
:param gradient_norms_log:
:return: grad_norms, clipped_grad_norms: list with (clipped) gradient norms per group
"""
grad_norms = [
torch.nn.utils.clip_grad_norm_(
group['params'],
max_norm if max_norm > 0 else math.inf, # Inf so no clipping but still call to calc
norm_type=2
)
for group in param_groups
]
grad_norms_clipped = [min(g_norm, max_norm) for g_norm in grad_norms] if max_norm > 0 else grad_norms
return grad_norms, grad_norms_clipped
def train_epoch(model, optimizer, baseline, lr_scheduler, epoch, val_dataset, problem, tb_logger, opts, old_log_likelihood = None):
"""
Training of model for given number of epochs
:param model: Attention model selected in run.py
:param optimizer: Adams Optimizer with variable learning rate
:param baseline: Baseline or Critic chosen to perform experiment
:param lr_scheduler: Change the learning rate after each epoch
:param epoch: Number of training epochs for each experiment
:param val_dataset: Validation dataset used to validate the model after every epoch
:param problem: Problem to solve (CVRP)
:param tb_logger: Logger to save important value after every few steps
:param opts: Configuration established in the run command or by default values
:param old_log_likelihood: Policy for previous iterations
:return: Likelihood values(Policy) of the current epoch
"""
print("Start train epoch {}, lr={} for run {}".format(epoch, optimizer.param_groups[0]['lr'], opts.run_name))
# Calculate number of times we train a model on problem instance in each epoch [Parameter Used: opts.epoch_size, opts.batch_size]
step = epoch * (opts.epoch_size // opts.batch_size)
start_time = time.time()
# Connecting model to tensorboard
if not opts.no_tensorboard:
tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'], step)
# Generate new training data for each epoch, each trains on a different problem instance for 'step' times
training_dataset = baseline.wrap_dataset(problem.make_dataset(
size=opts.graph_size, num_samples=opts.epoch_size, distribution=opts.data_distribution))
training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size, num_workers=1)
# Put model in train mode!
model.train()
# Training happens in sampling mode to allow the model to explore different paths
"""
Different Sampling modes
:sample: Use multinomial distribution to sample node
:greedy: Use max probability to sample node
"""
set_decode_type(model, "sampling")
collect_old_log_likelihoods = []
# Run a particular dataset 'step' times in each epochs
for batch_id, batch in enumerate(tqdm(training_dataloader, disable=opts.no_progress_bar)):
# Run each step in an epoch
old_log_like = train_batch(
model,
optimizer,
baseline,
epoch,
batch_id,
step,
batch,
tb_logger,
opts,
old_log_likelihood
)
collect_old_log_likelihoods.append(old_log_like)
step += 1
# print(collect_old_log_likelihoods[-1], len(collect_old_log_likelihoods))
epoch_duration = time.time() - start_time
print("Finished epoch {}, took {} s".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
# Save model after every epoch, allows to load model again if it fails at any given epoch
if (opts.checkpoint_epochs != 0 and epoch % opts.checkpoint_epochs == 0) or epoch == opts.n_epochs - 1:
print('Saving model and state...')
torch.save(
{
'model': get_inner_model(model).state_dict(),
'optimizer': optimizer.state_dict(),
'rng_state': torch.get_rng_state(),
'cuda_rng_state': torch.cuda.get_rng_state_all(),
'baseline': baseline.state_dict()
},
os.path.join(opts.save_dir, 'epoch-{}.pt'.format(epoch))
)
# Validate the model after completion of epoch on validation datasets
avg_reward = validate(model, val_dataset, opts)
if not opts.no_tensorboard:
tb_logger.log_value('val_avg_reward', avg_reward, step)
# Update the baseline with the newly trained model (only for baseline = rollout)
baseline.epoch_callback(model, epoch)
# lr_scheduler should be called at end of epoch
lr_scheduler.step()
# Select the final policy on a given dataset
last_iter_log_prob = move_to(collect_old_log_likelihoods[-1], opts.device)
return last_iter_log_prob
def train_batch(
model,
optimizer,
baseline,
epoch,
batch_id,
step,
batch,
tb_logger,
opts,
old_log_likelihood = None
):
"""
Training of model for each step in epoch
:param model: Attention model selected in run.py
:param optimizer: Adams Optimizer with variable learning rate
:param baseline: Baseline or Critic chosen to perform experiment
:param epoch: Number of training epochs for each experiment
:param step: Number of times we train a model on problem instance in each epoch
:param batch_id: Batch id of each batch
:param batch: Problem instances in a given batch (default 512)
:param tb_logger: Logger to save important value after every few steps
:param opts: Configuration established in the run command or by default values
:param old_log_likelihood: Policy for previous iterations
:return: Likelihood values(Policy) of the current step
"""
# Seperate the inputvalue of batch size and any inital value of cost
x, bl_val = baseline.unwrap_batch(batch)
# Move the variable to device based on whether GPU is available or not [Parameter used: opts.device]
x = move_to(x, opts.device)
bl_val = move_to(bl_val, opts.device) if bl_val is not None else None
# Evaluate model, get costs and log likelihood
cost, log_likelihood = model(x)
# For the first epoch we do not have any old policy, therefore we use the average value of current likelihood as subsitute
shapeOfLikelihood = log_likelihood.shape
if old_log_likelihood == None:
# Getting the mean value of likelihood for a given batch
mean_likelihood = log_likelihood.mean().item()
old_log_likelihood = np.full(shapeOfLikelihood[0], mean_likelihood)
old_log_likelihood = torch.as_tensor(old_log_likelihood)
old_log_likelihood = move_to(old_log_likelihood, opts.device)
log_likelihood = move_to(log_likelihood, opts.device)
# Evaluate baseline/critic, get baseline/critic loss if any
bl_val, bl_loss = baseline.eval(x, cost) if bl_val is None else (bl_val, 0)
# Reshape critic output to for faster operations
bl_val_1 = torch.reshape(bl_val, (-1,))
# Calculate Advantage function, this enable algorithm to compare action loss with critic or baseline loss
advantage = cost - bl_val
# Clipping values for PPO loss (default 0.2, selected in paper)
clip_param = 0.2
# Define the PPO loss function
# Note: Dividing likelihoods can lead to unexpected behavior, therefore we subtract log likelihoods and then exponentiate them
ratio = log_likelihood - old_log_likelihood
ratio = torch.exp(ratio)
surr1 = ratio
# Clipping the gradients
surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
# Selecting the minimum values between original and clipped gradients
actor_loss = torch.min(surr1, surr2)
# Combining all the values
# Note: We add an extra 0.5xbl_loss to allow the LSTM model to update its parameters using backpropogation
loss = 0.5 * bl_loss + (-(bl_val_1 - cost) * actor_loss).mean() #Added negative sign
# Perform backward pass and optimization step
optimizer.zero_grad()
loss.backward()
# Clip gradient norms and get (clipped) gradient norms for logging
grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
optimizer.step()
# Logging values after every few steps [parameter used: opts.log_step]
if step % int(opts.log_step) == 0:
log_values(cost, grad_norms, epoch, batch_id, step,
log_likelihood, loss, bl_loss, tb_logger, opts)
# Retunr Likelihood values after each step
return log_likelihood | [
"torch.cuda.get_rng_state_all",
"torch.as_tensor",
"torch.nn.utils.clip_grad_norm_",
"tqdm.tqdm",
"torch.exp",
"torch.reshape",
"torch.min",
"utils.log_utils.log_values",
"nets.attention_model.set_decode_type",
"torch.no_grad",
"time.gmtime",
"utils.move_to",
"torch.get_rng_state",
"torch.... | [((917, 949), 'nets.attention_model.set_decode_type', 'set_decode_type', (['model', '"""greedy"""'], {}), "(model, 'greedy')\n", (932, 949), False, 'from nets.attention_model import set_decode_type\n'), ((3530, 3541), 'time.time', 'time.time', ([], {}), '()\n', (3539, 3541), False, 'import time\n'), ((3998, 4069), 'torch.utils.data.DataLoader', 'DataLoader', (['training_dataset'], {'batch_size': 'opts.batch_size', 'num_workers': '(1)'}), '(training_dataset, batch_size=opts.batch_size, num_workers=1)\n', (4008, 4069), False, 'from torch.utils.data import DataLoader\n'), ((4375, 4409), 'nets.attention_model.set_decode_type', 'set_decode_type', (['model', '"""sampling"""'], {}), "(model, 'sampling')\n", (4390, 4409), False, 'from nets.attention_model import set_decode_type\n'), ((6371, 6424), 'utils.move_to', 'move_to', (['collect_old_log_likelihoods[-1]', 'opts.device'], {}), '(collect_old_log_likelihoods[-1], opts.device)\n', (6378, 6424), False, 'from utils import move_to\n'), ((7807, 7830), 'utils.move_to', 'move_to', (['x', 'opts.device'], {}), '(x, opts.device)\n', (7814, 7830), False, 'from utils import move_to\n'), ((8493, 8533), 'utils.move_to', 'move_to', (['old_log_likelihood', 'opts.device'], {}), '(old_log_likelihood, opts.device)\n', (8500, 8533), False, 'from utils import move_to\n'), ((8555, 8591), 'utils.move_to', 'move_to', (['log_likelihood', 'opts.device'], {}), '(log_likelihood, opts.device)\n', (8562, 8591), False, 'from utils import move_to\n'), ((8806, 8834), 'torch.reshape', 'torch.reshape', (['bl_val', '(-1,)'], {}), '(bl_val, (-1,))\n', (8819, 8834), False, 'import torch\n'), ((9301, 9317), 'torch.exp', 'torch.exp', (['ratio'], {}), '(ratio)\n', (9310, 9317), False, 'import torch\n'), ((9378, 9432), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - clip_param)', '(1.0 + clip_param)'], {}), '(ratio, 1.0 - clip_param, 1.0 + clip_param)\n', (9389, 9432), False, 'import torch\n'), ((9525, 9548), 'torch.min', 'torch.min', (['surr1', 
'surr2'], {}), '(surr1, surr2)\n', (9534, 9548), False, 'import torch\n'), ((1773, 1877), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (["group['params']", '(max_norm if max_norm > 0 else math.inf)'], {'norm_type': '(2)'}), "(group['params'], max_norm if max_norm > 0 else\n math.inf, norm_type=2)\n", (1803, 1877), False, 'import torch\n'), ((4544, 4599), 'tqdm.tqdm', 'tqdm', (['training_dataloader'], {'disable': 'opts.no_progress_bar'}), '(training_dataloader, disable=opts.no_progress_bar)\n', (4548, 4599), False, 'from tqdm import tqdm\n'), ((5073, 5084), 'time.time', 'time.time', ([], {}), '()\n', (5082, 5084), False, 'import time\n'), ((7844, 7872), 'utils.move_to', 'move_to', (['bl_val', 'opts.device'], {}), '(bl_val, opts.device)\n', (7851, 7872), False, 'from utils import move_to\n'), ((8355, 8401), 'numpy.full', 'np.full', (['shapeOfLikelihood[0]', 'mean_likelihood'], {}), '(shapeOfLikelihood[0], mean_likelihood)\n', (8362, 8401), True, 'import numpy as np\n'), ((8431, 8466), 'torch.as_tensor', 'torch.as_tensor', (['old_log_likelihood'], {}), '(old_log_likelihood)\n', (8446, 8466), False, 'import torch\n'), ((10176, 10279), 'utils.log_utils.log_values', 'log_values', (['cost', 'grad_norms', 'epoch', 'batch_id', 'step', 'log_likelihood', 'loss', 'bl_loss', 'tb_logger', 'opts'], {}), '(cost, grad_norms, epoch, batch_id, step, log_likelihood, loss,\n bl_loss, tb_logger, opts)\n', (10186, 10279), False, 'from utils.log_utils import log_values\n'), ((1010, 1025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1023, 1025), False, 'import torch\n'), ((777, 792), 'torch.std', 'torch.std', (['cost'], {}), '(cost)\n', (786, 792), False, 'import torch\n'), ((1055, 1080), 'utils.move_to', 'move_to', (['bat', 'opts.device'], {}), '(bat, opts.device)\n', (1062, 1080), False, 'from utils import move_to\n'), ((5179, 5206), 'time.gmtime', 'time.gmtime', (['epoch_duration'], {}), '(epoch_duration)\n', (5190, 5206), False, 'import time\n'), ((5634, 
5655), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (5653, 5655), False, 'import torch\n'), ((5691, 5721), 'torch.cuda.get_rng_state_all', 'torch.cuda.get_rng_state_all', ([], {}), '()\n', (5719, 5721), False, 'import torch\n'), ((1197, 1249), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'opts.eval_batch_size'}), '(dataset, batch_size=opts.eval_batch_size)\n', (1207, 1249), False, 'from torch.utils.data import DataLoader\n')] |
"""
utils.py
Author: <NAME>
This script provides coordinate transformations from Geodetic -> ECEF, ECEF -> ENU
and Geodetic -> ENU (the composition of the two previous functions). Running the script
by itself runs tests.
based on https://gist.github.com/govert/1b373696c9a27ff4c72a.
It also provides some other useful functions.
"""
import math
from sklearn.utils import shuffle
import numpy as np
def train_test_split(data, fraction_val):
data_shuffled = shuffle(data)
n_val = int(data.shape[0]*fraction_val)
return data_shuffled[n_val:], data_shuffled[:n_val]
a = 6378137
b = 6356752.3142
f = (a - b) / a
e_sq = f * (2-f)
def geodetic_to_ecef(lat, lon, h):
# (lat, lon) in WSG-84 degrees
# h in meters
lamb = math.radians(lat)
phi = math.radians(lon)
s = math.sin(lamb)
N = a / math.sqrt(1 - e_sq * s * s)
sin_lambda = math.sin(lamb)
cos_lambda = math.cos(lamb)
sin_phi = math.sin(phi)
cos_phi = math.cos(phi)
x = (h + N) * cos_lambda * cos_phi
y = (h + N) * cos_lambda * sin_phi
z = (h + (1 - e_sq) * N) * sin_lambda
return x, y, z
def ecef_to_enu(x, y, z, lat0, lon0, h0):
lamb = math.radians(lat0)
phi = math.radians(lon0)
s = math.sin(lamb)
N = a / math.sqrt(1 - e_sq * s * s)
sin_lambda = math.sin(lamb)
cos_lambda = math.cos(lamb)
sin_phi = math.sin(phi)
cos_phi = math.cos(phi)
x0 = (h0 + N) * cos_lambda * cos_phi
y0 = (h0 + N) * cos_lambda * sin_phi
z0 = (h0 + (1 - e_sq) * N) * sin_lambda
xd = x - x0
yd = y - y0
zd = z - z0
xEast = -sin_phi * xd + cos_phi * yd
yNorth = -cos_phi * sin_lambda * xd - sin_lambda * sin_phi * yd + cos_lambda * zd
zUp = cos_lambda * cos_phi * xd + cos_lambda * sin_phi * yd + sin_lambda * zd
return xEast, yNorth, zUp
def geodetic_to_enu(lat, lon, h, lat_ref, lon_ref, h_ref):
x, y, z = geodetic_to_ecef(lat, lon, h)
return ecef_to_enu(x, y, z, lat_ref, lon_ref, h_ref)
def log_probability(means, covs, cluster_probs, X_val):
# Iterate over validation data and calculate probability of each one under model
n_clusters = len(means)
cov_pinvs = []
for j in range(n_clusters):
u, s, v = covs[j]
pinv = np.matmul(v.T, np.matmul(np.diag(1./s), u.T))
cov_pinvs.append(pinv)
x = 0.0
for traj in X_val:
possibilities = []
for j in range(n_clusters):
u, s, v = covs[j]
mean = means[j]
cov_pinv = cov_pinvs[j]
dev = (traj - mean)[:, np.newaxis]
log_2pi_pseudo_det = np.sum(np.log(2*np.pi*s))
maha = np.dot(traj.T, np.dot(cov_pinv, traj))
prob = -.5 * (log_2pi_pseudo_det + maha) + np.log(cluster_probs[j])
# prob = cluster_probs[j] * np.exp(log_2pi_pseudo_det)**(-.5)*np.exp(-.5*maha)
possibilities.append(prob)
x += np.max(possibilities)
return x
def generate(mean, cov, r=1.):
u, s, vt = cov
z = np.random.normal(size=vt.shape[1])
traj = mean + np.dot(vt.T, np.dot(np.diag(r*s**.5), np.dot(vt, z)))
return traj
if __name__ == '__main__':
def are_close(a, b):
return abs(a-b) < 1e-4
latLA = 34.00000048
lonLA = -117.3335693
hLA = 251.702
x0, y0, z0 = geodetic_to_ecef(latLA, lonLA, hLA)
x = x0 + 1
y = y0
z = z0
xEast, yNorth, zUp = ecef_to_enu(x, y, z, latLA, lonLA, hLA)
assert are_close(0.88834836, xEast)
assert are_close(0.25676467, yNorth)
assert are_close(-0.38066927, zUp)
x = x0
y = y0 + 1
z = z0
xEast, yNorth, zUp = ecef_to_enu(x, y, z, latLA, lonLA, hLA)
assert are_close(-0.45917011, xEast)
assert are_close(0.49675810, yNorth)
assert are_close(-0.73647416, zUp)
x = x0
y = y0
z = z0 + 1
xEast, yNorth, zUp = ecef_to_enu(x, y, z, latLA, lonLA, hLA)
assert are_close(0.00000000, xEast)
assert are_close(0.82903757, yNorth)
assert are_close(0.55919291, zUp) | [
"numpy.random.normal",
"sklearn.utils.shuffle",
"numpy.log",
"math.sqrt",
"math.radians",
"math.cos",
"numpy.max",
"numpy.diag",
"numpy.dot",
"math.sin"
] | [((463, 476), 'sklearn.utils.shuffle', 'shuffle', (['data'], {}), '(data)\n', (470, 476), False, 'from sklearn.utils import shuffle\n'), ((741, 758), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (753, 758), False, 'import math\n'), ((769, 786), 'math.radians', 'math.radians', (['lon'], {}), '(lon)\n', (781, 786), False, 'import math\n'), ((795, 809), 'math.sin', 'math.sin', (['lamb'], {}), '(lamb)\n', (803, 809), False, 'import math\n'), ((868, 882), 'math.sin', 'math.sin', (['lamb'], {}), '(lamb)\n', (876, 882), False, 'import math\n'), ((900, 914), 'math.cos', 'math.cos', (['lamb'], {}), '(lamb)\n', (908, 914), False, 'import math\n'), ((929, 942), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (937, 942), False, 'import math\n'), ((957, 970), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (965, 970), False, 'import math\n'), ((1166, 1184), 'math.radians', 'math.radians', (['lat0'], {}), '(lat0)\n', (1178, 1184), False, 'import math\n'), ((1195, 1213), 'math.radians', 'math.radians', (['lon0'], {}), '(lon0)\n', (1207, 1213), False, 'import math\n'), ((1222, 1236), 'math.sin', 'math.sin', (['lamb'], {}), '(lamb)\n', (1230, 1236), False, 'import math\n'), ((1295, 1309), 'math.sin', 'math.sin', (['lamb'], {}), '(lamb)\n', (1303, 1309), False, 'import math\n'), ((1327, 1341), 'math.cos', 'math.cos', (['lamb'], {}), '(lamb)\n', (1335, 1341), False, 'import math\n'), ((1356, 1369), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (1364, 1369), False, 'import math\n'), ((1384, 1397), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (1392, 1397), False, 'import math\n'), ((2996, 3030), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'vt.shape[1]'}), '(size=vt.shape[1])\n', (3012, 3030), True, 'import numpy as np\n'), ((822, 849), 'math.sqrt', 'math.sqrt', (['(1 - e_sq * s * s)'], {}), '(1 - e_sq * s * s)\n', (831, 849), False, 'import math\n'), ((1249, 1276), 'math.sqrt', 'math.sqrt', (['(1 - e_sq * s * s)'], {}), '(1 - e_sq * s * s)\n', 
(1258, 1276), False, 'import math\n'), ((2902, 2923), 'numpy.max', 'np.max', (['possibilities'], {}), '(possibilities)\n', (2908, 2923), True, 'import numpy as np\n'), ((2268, 2284), 'numpy.diag', 'np.diag', (['(1.0 / s)'], {}), '(1.0 / s)\n', (2275, 2284), True, 'import numpy as np\n'), ((2601, 2622), 'numpy.log', 'np.log', (['(2 * np.pi * s)'], {}), '(2 * np.pi * s)\n', (2607, 2622), True, 'import numpy as np\n'), ((2654, 2676), 'numpy.dot', 'np.dot', (['cov_pinv', 'traj'], {}), '(cov_pinv, traj)\n', (2660, 2676), True, 'import numpy as np\n'), ((2733, 2757), 'numpy.log', 'np.log', (['cluster_probs[j]'], {}), '(cluster_probs[j])\n', (2739, 2757), True, 'import numpy as np\n'), ((3069, 3090), 'numpy.diag', 'np.diag', (['(r * s ** 0.5)'], {}), '(r * s ** 0.5)\n', (3076, 3090), True, 'import numpy as np\n'), ((3087, 3100), 'numpy.dot', 'np.dot', (['vt', 'z'], {}), '(vt, z)\n', (3093, 3100), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
The grid class for swarm framework.
Grid: base grid, a simple dictionary.
"""
import math
import numpy as np
import re
class Grid:
    """Grid class.
    Class that defines a grid structure with the attributes
    width, height and grid size.
    Cells are numbered row-major starting from 1 (bottom-left to
    top-right); ``self.grid`` maps a cell's ((x1, y1), (x2, y2)) corner
    pair to its id and ``self.grid_objects`` tracks the environment
    objects registered in each cell.
    """
    # pylint: disable=too-many-instance-attributes
    # Nine is reasonable in this grid class
    def __init__(self, width, height, grid_size=10):
        """Constructors for grid.
        Args:
            width: total width
            height: total height
            grid_size: granularity of the size of grid
        Attributes:
            x_limit: x-axis length in both direction
            y_limit: y-axis length in both direction
            grid: dictionary object which value is adjacent points
            of grid and its value is the grid name
            grid_objects: dictionary object which value is the grid name
            and its value is the list of environment objects
        """
        self.width = width
        self.height = height
        self.x_limit = width / 2
        self.y_limit = height / 2
        self.grid_size = grid_size
        self.grid = dict()
        # Inverse mapping: cell id -> ((x1, y1), (x2, y2)) corner pair.
        self.grid_reverse = dict()
        self.grid_objects = dict()
        # self.width_fix = int(self.x_limit % self.grid_size)
        # self.height_fix = int(self.y_limit % self.grid_size)
        # If the width or height is not comptiable with grid size
        # NOTE(review): exits the whole process on a bad configuration;
        # raising ValueError would be friendlier to callers.
        if self.x_limit % self.grid_size != 0 \
                or self.y_limit % self.grid_size != 0:
            print("Grid size invalid")
            exit(1)
        # Create list for x cordinate & y cordinate to create grid
        list_xcords = np.arange(
            -self.width / 2, self.width / 2, self.grid_size).tolist()
        list_ycords = np.arange(
            -self.height / 2, self.height / 2, self.grid_size).tolist()
        # Enumerate the cells row by row; ids start at 1.
        indx = 1
        for ycord in list_ycords:
            for xcord in list_xcords:
                x_1 = xcord
                y_1 = ycord
                x_2 = xcord + self.grid_size
                y_2 = ycord + self.grid_size
                self.grid[(x_1, y_1), (x_2, y_2)] = indx
                self.grid_reverse[indx] = (x_1, y_1), (x_2, y_2)
                self.grid_objects[indx] = []
                indx += 1
        # Total number of cells in the grid.
        self.grid_len = indx - 1
    def modify_points(self, point):
        """Modify poitns if the location line in the grid line.
        Nudges coordinates that lie exactly on a grid line (or at/beyond
        the positive limits) into the interior of a cell so the cell
        lookup in find_grid is unambiguous.
        """
        x, y = point[0], point[1]
        if point[0] % self.grid_size == 0:
            x = point[0] + 1
        if point[1] % self.grid_size == 0:
            y = point[1] + 1
        if point[0] >= self.x_limit:
            x = point[0] - self.grid_size + 1
        if point[1] >= self.y_limit:
            y = point[1] - self.grid_size + 1
        return (x, y)
    def find_lowerbound(self, point):
        """Find the lower bound from the point.
        Lower-left corner of the cell containing ``point`` — one grid
        step below the upper-right corner.
        """
        point = self.find_upperbound(point)
        return (point[0] - self.grid_size, point[1] - self.grid_size)
    def find_upperbound(self, point):
        """Find the upper bound from the point.
        Upper-right corner of the containing cell: each coordinate is
        rounded up to the next multiple of ``grid_size``.
        """
        point = self.modify_points(point)
        return (point[0] + self.grid_size - 1 * (
            point[0] % self.grid_size), point[1] + self.grid_size - 1 * (
                point[1] % self.grid_size))
    def find_grid(self, point):
        """Find the grid based on the point passed.
        Returns the (corner-pair key, cell id) tuple for ``point``.
        """
        grid_key = (self.find_lowerbound(point), self.find_upperbound(point))
        try:
            return grid_key, self.grid[grid_key]
        except KeyError:
            # NOTE(review): terminates the process for an out-of-range
            # point; consider propagating the error instead.
            print('KeyError', 'No grid key for ', grid_key)
            exit()
    def get_horizontal_neighbours(self, center_grid, scale, width_scale):
        """Get the neighboring horizontal grids.
        Expands ``scale`` cells to each side of ``center_grid`` but
        clamps the range to the row containing it, so neighbours never
        wrap around to an adjacent row.
        """
        valid_horizontal_start = (math.floor(
            (center_grid - 1) / width_scale) * width_scale) + 1
        valid_horizontal_end = math.ceil(
            center_grid / width_scale) * width_scale
        if(center_grid - scale) < valid_horizontal_start:
            horizontal_start = valid_horizontal_start
        else:
            horizontal_start = center_grid - scale
        if(center_grid + scale + 1) > valid_horizontal_end:
            horizontal_end = valid_horizontal_end + 1
        else:
            horizontal_end = center_grid + scale + 1
        horizontal_grid = list(range(horizontal_start, horizontal_end, 1))
        return horizontal_grid
    # Find the adjacent grid based on radius
    def get_neighborhood(self, point, radius):
        """Get the neighboring grids.
        Returns the ids of all cells overlapped by a square of
        half-width ``radius`` centered on the cell containing ``point``.
        """
        all_grid = []
        center_grid_key, center_grid = self.find_grid(point)
        if self.grid_size >= radius:
            # The radius fits inside a single cell.
            return [center_grid]
        else:
            # Expand ``scale`` cells in every direction, then keep only
            # ids inside the valid range [1, grid_len].
            scale = int(radius / self.grid_size)
            width_scale = int(self.width / self.grid_size)
            horizontal_grid = self.get_horizontal_neighbours(
                center_grid, scale, width_scale)
            vertical_grid = list(range(
                center_grid - scale * width_scale, center_grid +
                1 + scale * width_scale, width_scale))
            h_v_grid = []
            for grid in vertical_grid:
                h_v_grid += self.get_horizontal_neighbours(
                    grid, scale, width_scale)
            all_grid = h_v_grid + horizontal_grid
            all_grid = [grid for grid in all_grid if grid > 0 and
                        grid <= self.grid_len]
        return list(set(all_grid))
    def add_object_to_grid(self, point, objects):
        """Add object to a given grid.
        The object is registered in every cell overlapped by its radius.
        """
        grid_values = self.get_neighborhood(point, objects.radius)
        # print('add object to grid',grid_values, objects)
        for grid in grid_values:
            # gridobjects = self.get_objects(None, grid)
            # for gobject in gridobjects:
            #     if not re.match('.*Agent.*' , type(gobject).__name__):
            #         if gobject.deathable and re.match('.*Agent.*' , type(objects).__name__):
            #             objects.dead = True
            # print(grid, objects)
            self.grid_objects[grid].append(objects)
    # Remove object to the given grid
    def remove_object_from_grid(self, point, objects):
        """Remove object from the given grid."""
        grid_values = self.get_neighborhood(point, objects.radius)
        for grid in grid_values:
            self.grid_objects[grid].remove(objects)
    def move_object(self, point, objects, newpoint):
        """Move object from the give grid to new grid.
        Returns True when the move happened (or no cell change was
        required) and False when the destination is blocked or the
        agent is already dead.
        """
        grid_key, grid_value = self.find_grid(point)
        new_grid_key, new_grid_value = self.find_grid(newpoint)
        # print('move object', point, newpoint, grid_value, new_grid_value)
        if grid_value != new_grid_value:
            # Dead agents never move.
            if re.match('.*Agent.*' , type(objects).__name__) and objects.dead:
                return False
            elif re.match('.*Agent.*' , type(objects).__name__) and not objects.dead:
                # print(point, newpoint, grid_value, new_grid_value)
                # Entering a cell holding a deathable object kills the
                # agent, but the move itself still takes place.
                if self.check_grid_deathable_constraints(new_grid_value):
                    objects.dead = True
                    objects.moveable = False
                    self.remove_object_from_grid(point, objects)
                    self.add_object_to_grid(newpoint, objects)
                    return True
                else:
                    # Move only if everything in the target cell is
                    # passable.
                    if self.check_grid_objects_constraints(new_grid_value):
                        self.remove_object_from_grid(point, objects)
                        self.add_object_to_grid(newpoint, objects)
                        return True
                    else:
                        return False
            else:
                # Non-agent objects only need the passability check.
                if self.check_grid_objects_constraints(new_grid_value):
                    self.remove_object_from_grid(point, objects)
                    self.add_object_to_grid(newpoint, objects)
                    return True
                else:
                    return False
        else:
            # Same cell as before: nothing to update.
            return True
    # Check limits for the environment boundary
    def check_limits(self, i, d):
        """Check the location is valid.
        Pulls a point that crossed the boundary back just inside it and
        reverses the heading ``d`` by pi; the returned angle is reduced
        modulo 2*pi.
        """
        x, y = i
        if x > (self.width / 2):
            x = x - (x - self.x_limit) - 2
            d = np.pi + d
        elif x < (self.width / 2) * -1:
            x = x - (x + self.x_limit) + 2
            d = np.pi + d
        if y > (self.height / 2):
            y = y - (y - self.y_limit) - 2
            d = np.pi + d
        elif y < (self.height / 2) * -1:
            y = y - (y + self.y_limit) + 2
            d = np.pi + d
        return ((int(x), int(y)), d % (2*np.pi))
    def check_grid_objects_constraints(self, new_grid_value):
        """Check the constraints on the next location.
        True when every object already in the target cell is passable.
        """
        # grid_key, grid_value = self.find_grid(source_obj.location)
        # new_grid_key, new_grid_value = self.find_grid(next_loc)
        passable = True
        objects_in_next_grid = self.get_objects(None, new_grid_value)
        for obj in objects_in_next_grid:
            if not obj.passable:
                passable = False
                break
        return passable
    def check_grid_deathable_constraints(self, new_grid_value):
        """Check the constraints on the next location.
        True when any object in the target cell is marked deathable.
        """
        # grid_key, grid_value = self.find_grid(source_obj.location)
        # new_grid_key, new_grid_value = self.find_grid(next_loc)
        dead = False
        objects_in_next_grid = self.get_objects(None, new_grid_value)
        # print('dead const', objects_in_next_grid)
        for obj in objects_in_next_grid:
            # NOTE(review): bare except silently skips objects without a
            # ``deathable`` attribute; catching AttributeError explicitly
            # would be safer.
            try:
                if obj.deathable:
                    dead = True
                    break
            except:
                pass
        return dead
    # Using fancy search to find the object in the particular grid
    def get_objects(self, object_name, grid_value):
        """Use fancy search to find objects in a grid.
        With ``object_name`` set, filters by exact class name; otherwise
        returns every non-list entry of the cell.
        """
        if object_name is not None:
            return list(filter(
                lambda x: type(x).__name__ == object_name,
                self.grid_objects[grid_value]))
        else:
            return list(filter(
                lambda x: type(x).__name__ != 'list',
                self.grid_objects[grid_value]))
    def get_objects_from_grid(self, object_name, point):
        """Get objects from grid given a location."""
        grid_key, grid_value = self.find_grid(point)
        return self.get_objects(object_name, grid_value)
    def get_objects_from_list_of_grid(self, object_name, grid_list):
        """Get list of objects from grid list."""
        object_list = []
        for grid in grid_list:
            object_list += self.get_objects(object_name, grid)
        return object_list
| [
"math.ceil",
"numpy.arange",
"math.floor"
] | [((3956, 3992), 'math.ceil', 'math.ceil', (['(center_grid / width_scale)'], {}), '(center_grid / width_scale)\n', (3965, 3992), False, 'import math\n'), ((1730, 1788), 'numpy.arange', 'np.arange', (['(-self.width / 2)', '(self.width / 2)', 'self.grid_size'], {}), '(-self.width / 2, self.width / 2, self.grid_size)\n', (1739, 1788), True, 'import numpy as np\n'), ((1833, 1893), 'numpy.arange', 'np.arange', (['(-self.height / 2)', '(self.height / 2)', 'self.grid_size'], {}), '(-self.height / 2, self.height / 2, self.grid_size)\n', (1842, 1893), True, 'import numpy as np\n'), ((3849, 3892), 'math.floor', 'math.floor', (['((center_grid - 1) / width_scale)'], {}), '((center_grid - 1) / width_scale)\n', (3859, 3892), False, 'import math\n')] |
"""
Correlation module calculating connectivity values from data
"""
import logging
import numpy as np
import os
from itertools import islice
from pylsl import local_clock
from scipy.signal import hilbert
from scipy.signal import lfilter
from scipy.stats import zscore
from astropy.stats import circmean
from itertools import product
from osc4py3.as_allthreads import *
from osc4py3 import oscbuildparse
from osc4py3 import oscchannel as osch
import warnings
warnings.filterwarnings("ignore")
current = os.path.dirname(__file__)
LAST_CALCULATION = local_clock()
ORDER = 5
class Correlation:
    def __init__(self, sample_rate, channel_count, mode, chn_type, corr_params, OSC_params, compute_pow, norm_params,
                 window_length, COEFFICIENTS, HANN, CONNECTIONS, OUTLET, OUTLET_POWER):
        """
        Class computing connectivity values
        :param sample_rate: sampling rate
        :param channel_count: channel count
        :param mode: connectivity mode. See notes for options.
        :param chn_type: compute all electrode pairs if 'all-to-all';
        alternatively, compute only corresponding electrode pairs if 'one-to-one'
        :param corr_params: a list of three lists: frequency parameters, channel parameters, weight parameters
        :param OSC_params: OSC parameters for OSC transmission
        :param compute_pow: boolean variable determining whether to compute and transmit power values
        :param norm_params: a list of two numbers. min and max values for MinMax normalization
        :param COEFFICIENTS: band-pass filtering coefficients
        :param HANN: Hanning window coefficients
        :param CONNECTIONS: number of connections
        :param OUTLET: StreamOutlet object for connectivity value output
        :param OUTLET_POWER: StreamOutlet object for power value output
        Note:
        **supported connectivity measures**
            - 'envelope correlation': envelope correlation
            - 'power correlation': power correlation
            - 'plv': phase locking value
            - 'ccorr': circular correlation coefficient
            - 'coherence': coherence
            - 'imaginary coherence': imaginary coherence
        """
        self.logger = logging.getLogger(__name__)
        self.sample_rate = sample_rate
        self.window_length = window_length # number of samples in the analysis window
        self.channel_count = channel_count
        self.freqParams, self.chnParams, self.weightParams = corr_params
        self.OSC_params = OSC_params
        self.compute_pow = compute_pow
        self.norm_min, self.norm_max = norm_params
        self.mode = mode
        self.chn_type = chn_type
        self.timestamp = None
        self.SAMPLE_RATE = self.sample_rate
        self.CHANNEL_COUNT = self.channel_count
        # read setup tools
        self.COEFFICIENTS = COEFFICIENTS
        self.HANN = HANN
        self.CONNECTIONS = CONNECTIONS
        self.OUTLET = OUTLET
        if self.compute_pow:
            self.OUTLET_POWER = OUTLET_POWER
        if OSC_params[0] is not None:
            self._setup_OSC()
    def run(self, buffers):
        """
        running the analysis
        :return: connectivity values
        """
        # NOTE(review): LAST_CALCULATION is module-level shared state, so
        # multiple Correlation instances would interfere with each other.
        global LAST_CALCULATION
        trailing_timestamp = self._find_trailing_timestamp(buffers)
        # Only analyse when new data arrived since the last run.
        if trailing_timestamp != LAST_CALCULATION:
            LAST_CALCULATION = trailing_timestamp
            # select data for analysis based on the last timestamp
            analysis_window = self._select_analysis_window(trailing_timestamp, buffers)
            # apply Hanning window
            # analysis_window = self._apply_window_weights(analysis_window)
            # band-pass filter and compute analytic signal
            analytic_matrix = self._calculate_all(analysis_window)
            # compute connectivity values
            rvalues = self._calculate_rvalues(analytic_matrix, self.mode)
            if self.compute_pow:
                power_values = self._calculate_power(analytic_matrix)
                self.OUTLET_POWER.push_sample(power_values, timestamp=trailing_timestamp)
            # sending LSL packets
            if self.OUTLET:
                self.logger.warning("Sending {} R values with timestamp {}".format(len(rvalues), trailing_timestamp))
                self.OUTLET.push_sample(rvalues, timestamp=trailing_timestamp)
            # sending OSC packets
            if self.OSC_params[0] is not None:  # if sending OSC
                sample_size = self.CONNECTIONS * len(self.freqParams)
                msg = oscbuildparse.OSCMessage("/Rvalues/me", ","+'f'*sample_size, rvalues)
                osc_send(msg, 'Rvalues')
                osc_process()
            return rvalues
        else:
            self.logger.debug("Still waiting for new data to arrive, skipping analysis")
            return
    def _clamp(self, n):
        """
        helper function to clamp a float variable between 0 and 1
        """
        return max(min(1, n), 0)
    def _apply_window_weights(self, analysis_window):
        """
        applying hanning window to data
        :param analysis_window: dictionary with EEG data streams
        :return: dictionary of the same shape after applying hanning window
        """
        for uid in analysis_window.keys():
            # Broadcast the per-sample Hann weights over all channels.
            analysis_window[uid] = np.multiply(analysis_window[uid], self.HANN[:, None])
        self.logger.debug("Applying window weights with %s samples and %s channels." % analysis_window[uid].shape)
        return analysis_window
    def _setup_OSC(self):
        """
        setting up OSC outlet
        """
        # reading params
        IP = self.OSC_params[0]
        port = int(self.OSC_params[1])
        # Start the system.
        osc_startup()
        # Make client channels to send packets.
        # NOTE(review): bare except — if the channel already exists, tear
        # everything down and recreate it; any other failure is hidden too.
        try:
            osc_udp_client(IP, int(port), "Rvalues")
        except:
            osch.terminate_all_channels()
            osc_udp_client(IP, int(port), "Rvalues")
        # first message is empty (removed this bc it's causing OSC msg to be all zeros)
        # msg = oscbuildparse.OSCMessage("/Rvalues/me", ","+'f'*sample_size, [0]*sample_size)
        # osc_send(msg, 'Rvalues')
    def _calculate_power(self, analytic_matrix):
        """
        compute power values from analytic signals
        :param analytic_matrix: shape is (n_freq_bands, n_subjects, n_channel_count, n_sample_size). filtered analytic signal
        :return: a vector that can be reshaped into (n_freq_bands, n_subjects, n_channel_count). Power values
        """
        # Mean squared magnitude over time, flattened for LSL transmission.
        return np.nanmean(np.abs(analytic_matrix)**2, axis=3).reshape(-1)
    def _find_trailing_timestamp(self, buffers):
        # Earliest "latest sample" timestamp across all buffers, i.e. the
        # most recent point in time for which every stream has data.
        trailing_timestamp = local_clock()
        for buffer in buffers.values():#self.buffers.values():
            timestamp, _ = buffer[-1]
            if trailing_timestamp > timestamp:
                trailing_timestamp = timestamp
        return trailing_timestamp
    def _select_analysis_window(self, trailing_timestamp, buffers):
        """
        construct the analysis window based on the timestamp from last window
        :param trailing_timestamp: timestamp from the last window
        :return: a dictionary containing data. each value is a matrix of size (n_sample_size, n_channel_count)
        """
        analysis_window = {}
        for uid, buffer in buffers.items():#self.buffers.items():
            # compute the sample start
            latest_sample_at, _ = buffer[-1]
            sample_offset = int(round((latest_sample_at - trailing_timestamp) * self.sample_rate))
            sample_start = len(buffer) - self.window_length - sample_offset
            if sample_start < 0:
                # Not enough history yet: substitute an all-zero window.
                self.logger.info("Not enough data to process in buffer {}, using dummy data".format(uid))
                analysis_window[uid] = np.zeros((self.window_length, self.channel_count))
            else:
                # take data from buffer
                timestamped_window = list(islice(buffer, sample_start, sample_start + self.window_length))
                analysis_window[uid] = np.array([sample[1] for sample in timestamped_window])
        return analysis_window
    def _calculate_all(self, analysis_window):
        """
        compute analytic signal from the analysis window
        :param analysis_window: a dictionary containing data
        :return: a matrix of shape (n_freq_bands, n_subjects, n_channel_count, n_sample_size)
        """
        # z-score each channel over time, then band-pass filter per band
        # and take the Hilbert analytic signal.
        all_analytic = zscore(np.swapaxes(np.array(list(analysis_window.values())),1,2), axis=-1) # shape = (n_sub, n_chn, n_times)
        all_analytic = np.array([hilbert(lfilter(coeff[0], coeff[1], all_analytic)) for c, coeff in enumerate(self.COEFFICIENTS)])
        return all_analytic
    # helper function
    def _multiply_conjugate(self, real: np.ndarray, imag: np.ndarray, transpose_axes: tuple) -> np.ndarray:
        """
        Helper function to compute the product of a complex array and its conjugate.
        It is designed specifically to collapse the last dimension of a four-dimensional array.
        Arguments:
            real: the real part of the array.
            imag: the imaginary part of the array.
            transpose_axes: axes to transpose for matrix multiplication.
        Returns:
            product: the product of the array and its complex conjugate.
        """
        formula = 'ilm,imk->ilk'
        product = np.einsum(formula, real, real.transpose(transpose_axes)) + \
                  np.einsum(formula, imag, imag.transpose(transpose_axes)) - 1j * \
                  (np.einsum(formula, real, imag.transpose(transpose_axes)) - \
                   np.einsum(formula, imag, real.transpose(transpose_axes)))
        return product
    def compute_sync(self, complex_signal: np.ndarray, mode: str) -> np.ndarray:
        """
        helper function for computing connectivity value.
        The result is a connectivity matrix of all possible electrode pairs between the dyad, including inter- and intra-brain connectivities.
        :param complex_signal: complex signal of shape (n_freq, 2, n_channel_count, n_sample_size). data for one dyad.
        :param mode: connectivity mode. see notes for details.
        :return: connectivity matrix of shape (n_freq, 2*n_channel_count, 2*channel_count)
        """
        n_ch, n_freq, n_samp = complex_signal.shape[2], complex_signal.shape[0], \
                               complex_signal.shape[3]
        # Stack both subjects' channels so one matrix covers the dyad.
        complex_signal = complex_signal.reshape(n_freq, 2 * n_ch, n_samp)
        transpose_axes = (0, 2, 1)
        if mode.lower() == 'plv':
            # Phase locking value: magnitude of the mean phase difference.
            phase = complex_signal / np.abs(complex_signal)
            c = np.real(phase)
            s = np.imag(phase)
            dphi = self._multiply_conjugate(c, s, transpose_axes=transpose_axes)
            con = abs(dphi) / n_samp
        elif mode.lower() == 'envelope correlation':
            # Pearson correlation of the amplitude envelopes.
            env = np.abs(complex_signal)
            mu_env = np.mean(env, axis=2).reshape(n_freq, 2 * n_ch, 1)
            env = env - mu_env
            con = np.einsum('ilm,imk->ilk', env, env.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(env ** 2, axis=2), np.sum(env ** 2, axis=2)))
        elif mode.lower() == 'power correlation':
            # Pearson correlation of the squared envelopes (power).
            env = np.abs(complex_signal) ** 2
            mu_env = np.mean(env, axis=2).reshape(n_freq, 2 * n_ch, 1)
            env = env - mu_env
            con = np.einsum('ilm,imk->ilk', env, env.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(env ** 2, axis=2), np.sum(env ** 2, axis=2)))
        elif mode.lower() == 'coherence':
            c = np.real(complex_signal)
            s = np.imag(complex_signal)
            amp = np.abs(complex_signal) ** 2
            dphi = self._multiply_conjugate(c, s, transpose_axes=transpose_axes)
            con = np.abs(dphi) / np.sqrt(np.einsum('il,ik->ilk', np.nansum(amp, axis=2),
                                                       np.nansum(amp, axis=2)))
            # self.logger.warning('con '+str(con[2,18:,0:18]))
        elif mode.lower() == 'imaginary coherence':
            # Like coherence but keeps only the imaginary part, which is
            # insensitive to zero-lag (volume-conduction) coupling.
            c = np.real(complex_signal)
            s = np.imag(complex_signal)
            amp = np.abs(complex_signal) ** 2
            dphi = self._multiply_conjugate(c, s, transpose_axes=transpose_axes)
            con = np.abs(np.imag(dphi)) / np.sqrt(np.einsum('il,ik->ilk', np.nansum(amp, axis=2),
                                                                np.nansum(amp, axis=2)))
        elif mode.lower() == 'ccorr':
            # Circular correlation of instantaneous phase angles.
            angle = np.angle(complex_signal)
            mu_angle = circmean(angle, axis=2).reshape(n_freq, 2 * n_ch, 1)
            angle = np.sin(angle - mu_angle)
            formula = 'ilm,imk->ilk'
            con = np.einsum(formula, angle, angle.transpose(transpose_axes)) / \
                  np.sqrt(np.einsum('il,ik->ilk', np.sum(angle ** 2, axis=2), np.sum(angle ** 2, axis=2)))
        else:
            # NOTE(review): the ValueError is constructed but never raised,
            # so an unsupported mode falls through and the following return
            # fails with NameError because ``con`` is unbound.
            ValueError('Metric type not supported.')
        return con
    def _calculate_rvalues(self, analytic_matrix, mode):
        """
        computes connectivity value from the analytic signal
        :param analytic_matrix: analytic signal of shape (n_freq_bands, n_subjects, n_channel_count, n_sample_size)
        :param mode: connectivity mode. see notes for details.
        :return: a list of length = n_connections * n_freq. connectivity values
        """
        # compute all possible pair combinations
        pair_index = [a for a in
                      list(product(np.arange(0, analytic_matrix.shape[1]), np.arange(0, analytic_matrix.shape[1])))
                      if a[0] < a[1]]
        rvals = []
        # iterate for each combination
        for pair in pair_index:
            con = np.abs(self.compute_sync(analytic_matrix[:, pair, :, :], mode))
            # the connectivity matrix for the current pair. shape is (n_freq, n_ch, n_ch)
            # Keep only the inter-brain quadrant (subject A x subject B).
            con = con[:, 0:self.channel_count, self.channel_count:]
            if 'all-to-all' in self.chn_type: # all to all correlation
                result = [np.nanmean(con[i, self.chnParams[freq]][:, self.chnParams[freq]], axis=(0, 1))
                          for i, freq in enumerate(self.freqParams.keys())]
            else: # channel to channel correlation
                result = [np.nanmean(np.diagonal(con[i], axis1=0, axis2=1)[self.chnParams[freq]])
                          for i, freq in enumerate(self.freqParams.keys())]
            # adjust result according to weight parameters
            weights = list(self.weightParams.values())
            result = [r*weight for r, weight in zip(result, weights)]
            # MinMax-normalize each band and clamp into [0, 1].
            result = [self._clamp((r-minn)/(maxx-minn)) for r, minn, maxx in zip(result, self.norm_min, self.norm_max)]
            rvals.extend(result)
        return rvals # a list of length n_connections * n_freq
| [
"logging.getLogger",
"osc4py3.oscbuildparse.OSCMessage",
"osc4py3.oscchannel.terminate_all_channels",
"numpy.array",
"numpy.nanmean",
"numpy.sin",
"numpy.imag",
"numpy.arange",
"numpy.mean",
"numpy.multiply",
"numpy.real",
"numpy.abs",
"numpy.diagonal",
"os.path.dirname",
"numpy.nansum",... | [((459, 492), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (482, 492), False, 'import warnings\n'), ((503, 528), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (518, 528), False, 'import os\n'), ((549, 562), 'pylsl.local_clock', 'local_clock', ([], {}), '()\n', (560, 562), False, 'from pylsl import local_clock\n'), ((2235, 2262), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2252, 2262), False, 'import logging\n'), ((6748, 6761), 'pylsl.local_clock', 'local_clock', ([], {}), '()\n', (6759, 6761), False, 'from pylsl import local_clock\n'), ((5365, 5418), 'numpy.multiply', 'np.multiply', (['analysis_window[uid]', 'self.HANN[:, None]'], {}), '(analysis_window[uid], self.HANN[:, None])\n', (5376, 5418), True, 'import numpy as np\n'), ((10730, 10744), 'numpy.real', 'np.real', (['phase'], {}), '(phase)\n', (10737, 10744), True, 'import numpy as np\n'), ((10761, 10775), 'numpy.imag', 'np.imag', (['phase'], {}), '(phase)\n', (10768, 10775), True, 'import numpy as np\n'), ((4588, 4661), 'osc4py3.oscbuildparse.OSCMessage', 'oscbuildparse.OSCMessage', (['"""/Rvalues/me"""', "(',' + 'f' * sample_size)", 'rvalues'], {}), "('/Rvalues/me', ',' + 'f' * sample_size, rvalues)\n", (4612, 4661), False, 'from osc4py3 import oscbuildparse\n'), ((5934, 5963), 'osc4py3.oscchannel.terminate_all_channels', 'osch.terminate_all_channels', ([], {}), '()\n', (5961, 5963), True, 'from osc4py3 import oscchannel as osch\n'), ((7883, 7933), 'numpy.zeros', 'np.zeros', (['(self.window_length, self.channel_count)'], {}), '((self.window_length, self.channel_count))\n', (7891, 7933), True, 'import numpy as np\n'), ((8138, 8192), 'numpy.array', 'np.array', (['[sample[1] for sample in timestamped_window]'], {}), '([sample[1] for sample in timestamped_window])\n', (8146, 8192), True, 'import numpy as np\n'), ((10691, 10713), 'numpy.abs', 'np.abs', (['complex_signal'], {}), 
'(complex_signal)\n', (10697, 10713), True, 'import numpy as np\n'), ((10966, 10988), 'numpy.abs', 'np.abs', (['complex_signal'], {}), '(complex_signal)\n', (10972, 10988), True, 'import numpy as np\n'), ((8034, 8097), 'itertools.islice', 'islice', (['buffer', 'sample_start', '(sample_start + self.window_length)'], {}), '(buffer, sample_start, sample_start + self.window_length)\n', (8040, 8097), False, 'from itertools import islice\n'), ((8682, 8723), 'scipy.signal.lfilter', 'lfilter', (['coeff[0]', 'coeff[1]', 'all_analytic'], {}), '(coeff[0], coeff[1], all_analytic)\n', (8689, 8723), False, 'from scipy.signal import lfilter\n'), ((14178, 14256), 'numpy.nanmean', 'np.nanmean', (['con[i, self.chnParams[freq]][:, self.chnParams[freq]]'], {'axis': '(0, 1)'}), '(con[i, self.chnParams[freq]][:, self.chnParams[freq]], axis=(0, 1))\n', (14188, 14256), True, 'import numpy as np\n'), ((6621, 6644), 'numpy.abs', 'np.abs', (['analytic_matrix'], {}), '(analytic_matrix)\n', (6627, 6644), True, 'import numpy as np\n'), ((11010, 11030), 'numpy.mean', 'np.mean', (['env'], {'axis': '(2)'}), '(env, axis=2)\n', (11017, 11030), True, 'import numpy as np\n'), ((11347, 11369), 'numpy.abs', 'np.abs', (['complex_signal'], {}), '(complex_signal)\n', (11353, 11369), True, 'import numpy as np\n'), ((11723, 11746), 'numpy.real', 'np.real', (['complex_signal'], {}), '(complex_signal)\n', (11730, 11746), True, 'import numpy as np\n'), ((11763, 11786), 'numpy.imag', 'np.imag', (['complex_signal'], {}), '(complex_signal)\n', (11770, 11786), True, 'import numpy as np\n'), ((13631, 13669), 'numpy.arange', 'np.arange', (['(0)', 'analytic_matrix.shape[1]'], {}), '(0, analytic_matrix.shape[1])\n', (13640, 13669), True, 'import numpy as np\n'), ((13671, 13709), 'numpy.arange', 'np.arange', (['(0)', 'analytic_matrix.shape[1]'], {}), '(0, analytic_matrix.shape[1])\n', (13680, 13709), True, 'import numpy as np\n'), ((11225, 11249), 'numpy.sum', 'np.sum', (['(env ** 2)'], {'axis': '(2)'}), '(env ** 2, 
axis=2)\n', (11231, 11249), True, 'import numpy as np\n'), ((11251, 11275), 'numpy.sum', 'np.sum', (['(env ** 2)'], {'axis': '(2)'}), '(env ** 2, axis=2)\n', (11257, 11275), True, 'import numpy as np\n'), ((11396, 11416), 'numpy.mean', 'np.mean', (['env'], {'axis': '(2)'}), '(env, axis=2)\n', (11403, 11416), True, 'import numpy as np\n'), ((11805, 11827), 'numpy.abs', 'np.abs', (['complex_signal'], {}), '(complex_signal)\n', (11811, 11827), True, 'import numpy as np\n'), ((11932, 11944), 'numpy.abs', 'np.abs', (['dphi'], {}), '(dphi)\n', (11938, 11944), True, 'import numpy as np\n'), ((12219, 12242), 'numpy.real', 'np.real', (['complex_signal'], {}), '(complex_signal)\n', (12226, 12242), True, 'import numpy as np\n'), ((12259, 12282), 'numpy.imag', 'np.imag', (['complex_signal'], {}), '(complex_signal)\n', (12266, 12282), True, 'import numpy as np\n'), ((14422, 14459), 'numpy.diagonal', 'np.diagonal', (['con[i]'], {'axis1': '(0)', 'axis2': '(1)'}), '(con[i], axis1=0, axis2=1)\n', (14433, 14459), True, 'import numpy as np\n'), ((11611, 11635), 'numpy.sum', 'np.sum', (['(env ** 2)'], {'axis': '(2)'}), '(env ** 2, axis=2)\n', (11617, 11635), True, 'import numpy as np\n'), ((11637, 11661), 'numpy.sum', 'np.sum', (['(env ** 2)'], {'axis': '(2)'}), '(env ** 2, axis=2)\n', (11643, 11661), True, 'import numpy as np\n'), ((12301, 12323), 'numpy.abs', 'np.abs', (['complex_signal'], {}), '(complex_signal)\n', (12307, 12323), True, 'import numpy as np\n'), ((12652, 12676), 'numpy.angle', 'np.angle', (['complex_signal'], {}), '(complex_signal)\n', (12660, 12676), True, 'import numpy as np\n'), ((12773, 12797), 'numpy.sin', 'np.sin', (['(angle - mu_angle)'], {}), '(angle - mu_angle)\n', (12779, 12797), True, 'import numpy as np\n'), ((11979, 12001), 'numpy.nansum', 'np.nansum', (['amp'], {'axis': '(2)'}), '(amp, axis=2)\n', (11988, 12001), True, 'import numpy as np\n'), ((12063, 12085), 'numpy.nansum', 'np.nansum', (['amp'], {'axis': '(2)'}), '(amp, axis=2)\n', (12072, 12085), 
True, 'import numpy as np\n'), ((12435, 12448), 'numpy.imag', 'np.imag', (['dphi'], {}), '(dphi)\n', (12442, 12448), True, 'import numpy as np\n'), ((12484, 12506), 'numpy.nansum', 'np.nansum', (['amp'], {'axis': '(2)'}), '(amp, axis=2)\n', (12493, 12506), True, 'import numpy as np\n'), ((12568, 12590), 'numpy.nansum', 'np.nansum', (['amp'], {'axis': '(2)'}), '(amp, axis=2)\n', (12577, 12590), True, 'import numpy as np\n'), ((12700, 12723), 'astropy.stats.circmean', 'circmean', (['angle'], {'axis': '(2)'}), '(angle, axis=2)\n', (12708, 12723), False, 'from astropy.stats import circmean\n'), ((12967, 12993), 'numpy.sum', 'np.sum', (['(angle ** 2)'], {'axis': '(2)'}), '(angle ** 2, axis=2)\n', (12973, 12993), True, 'import numpy as np\n'), ((12995, 13021), 'numpy.sum', 'np.sum', (['(angle ** 2)'], {'axis': '(2)'}), '(angle ** 2, axis=2)\n', (13001, 13021), True, 'import numpy as np\n')] |
# Synthetic classification dataset demo: scatter-plot all samples, then
# each class separately.
from numpy import where
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt

# Generate a 2-feature, 2-class dataset (fixed seed for reproducibility).
X, y = make_classification(n_samples=1000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, random_state=4)

# First figure: every sample, regardless of class.
plt.figure()
plt.scatter(X[:, 0], X[:, 1])

# Second figure: one scatter call per class label.
plt.figure()
for label in (0, 1):
    # Row indices of the samples carrying this label.
    members = where(y == label)
    plt.scatter(X[members, 0], X[members, 1])

# Render both figures.
plt.show()
"numpy.where",
"matplotlib.pyplot.figure",
"sklearn.datasets.make_classification",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((130, 255), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)', 'n_features': '(2)', 'n_informative': '(2)', 'n_redundant': '(0)', 'n_clusters_per_class': '(1)', 'random_state': '(4)'}), '(n_samples=1000, n_features=2, n_informative=2,\n n_redundant=0, n_clusters_per_class=1, random_state=4)\n', (149, 255), False, 'from sklearn.datasets import make_classification\n'), ((267, 279), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (277, 279), True, 'import matplotlib.pyplot as plt\n'), ((280, 309), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {}), '(X[:, 0], X[:, 1])\n', (291, 309), True, 'import matplotlib.pyplot as plt\n'), ((307, 319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (317, 319), True, 'import matplotlib.pyplot as plt\n'), ((468, 478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (476, 478), True, 'import matplotlib.pyplot as plt\n'), ((376, 399), 'numpy.where', 'where', (['(y == class_value)'], {}), '(y == class_value)\n', (381, 399), False, 'from numpy import where\n'), ((420, 459), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[row_ix, 0]', 'X[row_ix, 1]'], {}), '(X[row_ix, 0], X[row_ix, 1])\n', (431, 459), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
def remove_outliers(X: np.ndarray, Y: np.ndarray, eps_x: float = 0.02, eps_y: float = 0.04):
    """Return the sorted indices of the inlier points of the curve (X, Y).

    Points are clustered by chaining: two points fall into the same group
    when some recent point links them, i.e. their x values differ by at
    most ``eps_x`` and their y values by at most ``eps_y``.  The largest
    two groups are kept as inliers; the third-largest is also kept when
    it holds more than 20% of the second group's points.

    Args:
        X: 1-D array of x coordinates, assumed sorted in increasing order.
        Y: 1-D array of y coordinates, same length as X.
        eps_x: maximum x gap for two points to be chained (default 0.02).
        eps_y: maximum y gap for two points to be chained (default 0.04).

    Returns:
        np.ndarray of the inlier indices, sorted ascending, without
        duplicates.
    """
    assert len(X.shape) == len(Y.shape) == 1
    assert X.size == Y.size
    # group id -> set of member point indices
    groups: dict = {0: {0}}
    # Sliding window of recent points as (index, x, y, group id) tuples;
    # the window only keeps points within eps_x of the current x.
    block = [(0, X[0], Y[0], 0)]
    for i, (x, y) in enumerate(zip(X[1:], Y[1:]), 1):
        # Drop window entries that fell too far behind on the x axis.
        while block and abs(block[0][1] - x) > eps_x:
            block.pop(0)
        # Groups of window points that are close to (x, y) on the y axis.
        seen_groups = set()
        for _, _, y_, g_ in block:
            if abs(y_ - y) <= eps_y:
                seen_groups.add(g_)
        if len(seen_groups) > 1:
            # The new point bridges several groups: merge them all into
            # the group with the smallest id.
            merged = set()
            for group in seen_groups:
                merged |= groups[group]
                del groups[group]
            merged.add(i)
            tfgroup = min(seen_groups)
            groups[tfgroup] = merged
            # Relabel the window entries belonging to the merged groups.
            block = [(ind, x_, y_, tfgroup if ind in merged else g_)
                     for ind, x_, y_, g_ in block]
        elif len(seen_groups) == 1:
            # Exactly one nearby group: join it.
            tfgroup = min(seen_groups)
            groups[tfgroup].add(i)
        else:
            # No nearby group: start a new one.
            tfgroup = max(groups.keys()) + 1
            groups[tfgroup] = {i}
        # BUG FIX: the original appended (i, x, x, tfgroup), storing x in
        # the y slot and breaking all later y-distance comparisons.
        block.append((i, x, y, tfgroup))
    # Track the three largest groups as [group id, size] pairs.
    top = [[0, 0], [0, 0], [0, 0]]
    for group, ind in groups.items():
        if len(ind) > top[0][1]:
            top = [[group, len(ind)], top[0], top[1]]
        elif len(ind) > top[1][1]:
            top = [top[0], [group, len(ind)], top[1]]
        elif len(ind) > top[2][1]:
            top[2] = [group, len(ind)]
    if top[2][1] > 0.2 * top[1][1]:
        # Third group is substantial (>20% of the second): keep top three.
        candidates = [g for g, size in top if size > 0]
    else:
        candidates = [g for g, size in top[:2] if size > 0]
    # BUG FIX: the original indexed groups via [0, 0] placeholder entries
    # when fewer than two/three groups existed, duplicating indices and
    # spuriously pulling in group 0.  Filter empty slots and de-duplicate.
    inlier_set = set()
    for g in dict.fromkeys(candidates):
        inlier_set |= groups[g]
    return np.array(sorted(inlier_set))
"numpy.array"
] | [((1865, 1882), 'numpy.array', 'np.array', (['inliers'], {}), '(inliers)\n', (1873, 1882), True, 'import numpy as np\n')] |
import typing
import sys
import numpy as np
import numba as nb
@nb.njit((nb.b1[:], ), cache=True)
def solve(c: np.ndarray) -> typing.NoReturn:
    """Scan all split points of the boolean array ``c`` and print the
    minimum over them of max(True-count in the unscanned suffix,
    False-count in the scanned prefix)."""
    right_true = np.sum(c == 1)   # True entries not yet passed
    left_false = 0                  # False entries already passed
    best = max(right_true, left_false)
    for flag in c:
        if flag:
            right_true -= 1
        else:
            left_false += 1
        cur = max(right_true, left_false)
        if cur < best:
            best = cur
    print(best)
def main() -> typing.NoReturn:
n = int(sys.stdin.buffer.readline().rstrip())
c = np.frombuffer(
sys.stdin.buffer.readline().rstrip(),
dtype='S1',
) == b'R'
solve(c)
main() | [
"sys.stdin.buffer.readline",
"numpy.sum",
"numba.njit"
] | [((69, 101), 'numba.njit', 'nb.njit', (['(nb.b1[:],)'], {'cache': '(True)'}), '((nb.b1[:],), cache=True)\n', (76, 101), True, 'import numba as nb\n'), ((171, 185), 'numpy.sum', 'np.sum', (['(c == 1)'], {}), '(c == 1)\n', (177, 185), True, 'import numpy as np\n'), ((368, 395), 'sys.stdin.buffer.readline', 'sys.stdin.buffer.readline', ([], {}), '()\n', (393, 395), False, 'import sys\n'), ((431, 458), 'sys.stdin.buffer.readline', 'sys.stdin.buffer.readline', ([], {}), '()\n', (456, 458), False, 'import sys\n')] |
import numpy as np
class Uniform(object):
"""
Make only a fraction of weights nonzero.
"""
def __init__(self, scale=.2):
self.scale = scale
def __str__(self):
return f"Uniform distribution on [{-self.scale}, {self.scale}]"
def initialize(self, n_rows, n_cols):
return np.random.uniform(
low=-self.scale,
high=self.scale,
size=(n_rows, n_cols),
)
| [
"numpy.random.uniform"
] | [((320, 394), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.scale)', 'high': 'self.scale', 'size': '(n_rows, n_cols)'}), '(low=-self.scale, high=self.scale, size=(n_rows, n_cols))\n', (337, 394), True, 'import numpy as np\n')] |
import numpy as np
from rvs import Dolly
from math_utils import *
from plot_utils import *
def Pr_Xnk_leq_x(X, n, k, x):
# log(INFO, "x= {}".format(x) )
cdf = 0
for i in range(k, n+1):
cdf += binom_(n, i) * X.cdf(x)**i * X.tail(x)**(n-i)
return cdf
def EXnk(X, n, k, m=1):
if k == 0:
return 0
if m == 1:
# EXnk, abserr = scipy.integrate.quad(lambda x: 1 - Pr_Xnk_leq_x(X, n, k, x), 0.0001, np.Inf) # 2*X.u_l
EXnk = float(mpmath.quad(lambda x: 1 - Pr_Xnk_leq_x(X, n, k, x), [0.0001, 10*X.u_l] ) )
else:
# EXnk, abserr = scipy.integrate.quad(lambda x: m*x**(m-1) * (1 - Pr_Xnk_leq_x(X, n, k, x)), 0.0001, np.Inf)
EXnk = float(mpmath.quad(lambda x: m*x**(m-1) * (1 - Pr_Xnk_leq_x(X, n, k, x) ), [0.0001, 10*X.u_l] ) )
return EXnk
def ECnk(X, n, k):
if k == 0:
return 0
EC = 0
for i in range(1, k):
EC += EXnk(X, n, i)
EC += (n-k+1)*EXnk(X, n, k)
return EC
def plot_cdf_X(X):
x_l, Pr_X_leq_x_l = [], []
for x in np.linspace(0, 30, 100):
x_l.append(x)
Pr_X_leq_x_l.append(X.cdf(x) )
plot.plot(x_l, Pr_X_leq_x_l, c='blue', marker='x', ls=':', mew=0.1, ms=8)
fontsize = 20
plot.legend(loc='best', framealpha=0.5, fontsize=14, numpoints=1)
plot.xlabel(r'$x$', fontsize=fontsize)
plot.ylabel(r'$\Pr\{X \leq x\}$', fontsize=fontsize)
plot.title(r'$X \sim {}$'.format(X.to_latex() ), fontsize=fontsize)
fig = plot.gcf()
fig.set_size_inches(4, 4)
plot.savefig('plot_cdf_X.png', bbox_inches='tight')
fig.clear()
log(INFO, "done.")
def redsmall_ES_wSl(k, r, D, Sl, d=None, red='coding'):
if d is None:
return D.mean()*sum([EXnk(Sl, i, i)*k.pdf(i) for i in k.v_l] )
ED_given_D_leq_doverk = lambda k: D.mean_given_leq_x(d/k)
return redsmall_ES_wSl(k, r, D, Sl, d=None, red=red) \
+ sum([(EXnk(Sl, i*r, i) - EXnk(Sl, i, i) )*ED_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
# + sum([(ES_k_n_pareto(i, i*r, a, alpha) - ES_k_n_pareto(i, i, a, alpha) )*ED_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
def redsmall_ES2_wSl(k, r, D, Sl, d=None, red='coding'):
if d is None:
return D.moment(2)*sum([EXnk(Sl, i, i, m=2)*k.pdf(i) for i in k.v_l] )
ED2_given_D_leq_doverk = lambda k: moment(D, 2, given_X_leq_x=True, x=d/k)
return redsmall_ES2_wSl(k, r, D, Sl, d=None, red=red) \
+ sum([(EXnk(Sl, i*r, i, m=2) - EXnk(Sl, i, i, m=2) )*ED2_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
def redsmall_EC_wSl(k, r, D, Sl, d=None, red='coding'):
if d is None:
return k.mean()*D.mean()*Sl.mean()
ED_given_D_leq_doverk = lambda k: D.mean_given_leq_x(d/k)
return redsmall_EC_wSl(k, r, D, Sl, d=None, red=red) \
+ sum([(ECnk(Sl, i*r, i) - i*Sl.mean())*ED_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
def ar_for_ro0(ro0, N, Cap, k, r, D, Sl):
return ro0*N*Cap/k.mean()/D.mean()/Sl.mean()
def redsmall_ET_EW_Prqing_wMGc_wSl(ro0, N, Cap, k, r, D, Sl, d, red='coding'):
'''Using the result for M/M/c to approximate E[T] in M/G/c.
[https://en.wikipedia.org/wiki/M/G/k_queue]
'''
ar = ar_for_ro0(ro0, N, Cap, k, r, D, Sl)
ES = redsmall_ES_wSl(k, r, D, Sl, d, red)
ES2 = redsmall_ES2_wSl(k, r, D, Sl, d, red)
EC = redsmall_EC_wSl(k, r, D, Sl, d, red)
log(INFO, "d= {}".format(d), ES=ES, ES2=ES2, EC=EC)
EW, Prqing = MGc_EW_Prqing(ar, N*Cap*ES/EC, ES, ES2)
if EW < 0:
# log(ERROR, "!!!", EW=EW, Prqing=Prqing, ES=ES, ES2=ES2, EC=EC)
# return None, None, None
# return (ES + abs(EW))**2, None, None
return 10**6, None, None
ET = ES + EW
# log(INFO, "d= {}, ro= {}, ES= {}, EW= {}, ET= {}".format(d, ro, ES, EW, ET) )
# log(INFO, "d= {}, ro= {}".format(d, ro) )
# return round(ET, 2), round(EW, 2), round(Prqing, 2)
return ET, EW, Prqing
def redsmall_approx_ET_EW_Prqing_wMGc_wSl(ro0, N, Cap, k, r, D, Sl, d, red='coding'):
ar = ar_for_ro0(ro0, N, Cap, k, r, D, Sl)
ro = ro0
ES = redsmall_ES_wSl(k, r, D, Sl, d, red)
# ES2 = redsmall_ES2_wSl(k, r, D, Sl, d, red)
# EC = redsmall_EC_wSl(k, r, D, Sl, d, red)
log(INFO, "d= {}".format(d), ar=ar, ES=ES) # , ES2=ES2, EC=EC
EW = 1/ar * ro**2/(1 - ro)
ET = ES + EW
return ET, EW, ro
def plot_ET(N, Cap, k, r, D, Sl, red='coding'):
def plot_(ro0):
log(INFO, "ro0= {}".format(ro0) )
d_l, ET_l = [], []
for d in np.linspace(D.l_l, D.mean()*15, 7):
ET, EW, Prqing = redsmall_ET_EW_Prqing_wMGc_wSl(ro0, N, Cap, k, r, D, Sl, d, red='coding') # redsmall_ES_wSl(k, r, D, Sl, d, red)
log(INFO, "d= {}, ET= {}, EW= {}, Prqing= {}".format(d, ET, EW, Prqing) )
if ET > 150:
break
d_l.append(d)
ET_l.append(ET)
plot.plot(d_l, ET_l, label=r'$\rho_0= {}$'.format(ro0), c=next(darkcolor_c), marker=next(marker_c), ls=':', mew=0.1, ms=8)
plot_(ro0=0.8)
# plot_(ro0=0.9)
fontsize = 20
plot.legend(loc='best', framealpha=0.5, fontsize=14, numpoints=1)
plot.xlabel(r'$d$', fontsize=fontsize)
plot.ylabel(r'$E[T]$', fontsize=fontsize)
plot.title(r'$r= {}$, $k \sim {}$'.format(r, k.to_latex() ) + "\n" \
+ r'$D \sim {}$, $Sl \sim {}$'.format(D.to_latex(), Sl.to_latex() ), fontsize=fontsize)
fig = plot.gcf()
fig.set_size_inches(4, 4)
plot.savefig('plot_ET.png', bbox_inches='tight')
fig.clear()
log(INFO, "done.")
if __name__ == "__main__":
X = Dolly()
print("EX= {}".format(X.mean() ) )
def EXnk_(n, k):
EX_ = EXnk(X, n, k)
print("n= {}, k= {}, EXnk= {}".format(n, k, EX_) )
# EXnk_(n=10, k=10)
# EXnk_(n=10, k=8)
# EXnk_(n=10, k=5)
N, Cap = 20, 10
k = BZipf(1, 10)
r = 2
D = Pareto(10, 3)
Sl = Dolly()
plot_ET(N, Cap, k, r, D, Sl)
| [
"numpy.linspace",
"rvs.Dolly"
] | [((981, 1004), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', '(100)'], {}), '(0, 30, 100)\n', (992, 1004), True, 'import numpy as np\n'), ((5374, 5381), 'rvs.Dolly', 'Dolly', ([], {}), '()\n', (5379, 5381), False, 'from rvs import Dolly\n'), ((5662, 5669), 'rvs.Dolly', 'Dolly', ([], {}), '()\n', (5667, 5669), False, 'from rvs import Dolly\n')] |
#
##############################################
# #
# Ferdinand 0.40, <NAME>, LLNL #
# #
# gnd,endf,fresco,azure,hyrma #
# #
##############################################
# Brune Transformation for ferdinand.py
import numpy
from scipy.linalg import eigh
from numpy.matlib import zeros #,matmul
def BruneTransformation ( gamB, EB, Bc, Shift, debug,J,pi,lab2cm):
# NCH = number of partial waves
# NLEV = number of levels
# Arrays input gamB(NLEV,NCH) gamma-tilde (Brune)
# Bc (NLEV) boundary conditions to fix output gamS (independent of energy L)
# Shift (NLEV,NCH) S_m(E_l)
# EB(NLEV) R-matrix pole energy-tilde
#
# returns gamS(NLEV,NCH) gamma (standard)
# ES(NLEV) R-matrix pole energy (standard)
#
# All energies and widths are on the scale of the projectile lab frame
#
# From <NAME>, Phys. Rev. C66, 044611 (2002), equations (20, 21, 24, 27).
#
NLEV,NCH = gamB.shape
if debug: print("BT: NLEV,NCH =",NLEV,NCH)
if NLEV*NCH==0: return gamB,EB
M = zeros([NLEV,NLEV])
if debug: print('Bc:',Bc)
if debug: print('Shift:\n',Shift)
N = M.copy()
for l in range(NLEV):
M[l,l] = 1
N[l,l] = EB[l]
for c in range(NCH):
N[l,l] += gamB[l,c]**2 * (Shift[l,c] - Bc[c])
for k in range(l+1,NLEV):
for c in range(NCH):
M[k,l] -= gamB[l,c]*gamB[k,c] * (Shift[l,c]-Shift[k,c])/(EB[l]-EB[k])
N[k,l] += gamB[l,c]*gamB[k,c] * ( (EB[l]*Shift[k,c] - EB[k]*Shift[l,c]) /(EB[l]-EB[k]) - Bc[c])
M[l,k] = M[k,l]
N[l,k] = N[k,l]
if debug: print('N matrix:\n',N)
if debug: print('M norm matrix:\n',M)
try:
ES,vec = eigh(N, M, lower=True)
except:
print("\nERROR in eigh for J/pi=",J,'+' if int(pi)>0 else '-')
if debug: print('N matrix:\n',N)
if debug: print('M norm matrix:\n',M)
ES,vec = eigh(M, lower=True)
print("\n Norm eigenvalues:\n",' ',ES)
if debug: print("Norm eigenvectors:\n",vec)
print("\n FAILED in BruneTransform for",J,'+' if int(pi)>0 else '-',"\n")
ES = [9.1111 for i in range(NLEV)]
# Transform gamB to gamS
gamS = zeros([NLEV,NCH])
for c in range(NCH):
for l in range(NLEV):
sum = 0
for k in range(NLEV): sum += vec[k,l] * gamB[k,c]
gamS[l,c] = sum
if debug:
print("EB:\n",EB)
print("ES:\n",ES)
print("gamB:\n",gamB)
print("gamS:\n",gamS)
print('\n')
for l in range(NLEV):
print("Brune transformation in J,pi=%.1f%c from %9.3f to %9.3f (cm: %9.3f to %9.3f)" % (J,'+' if int(pi)>0 else '-',EB[l],ES[l],EB[l]*lab2cm,ES[l]*lab2cm))
return gamS,ES
| [
"scipy.linalg.eigh",
"numpy.matlib.zeros"
] | [((1235, 1254), 'numpy.matlib.zeros', 'zeros', (['[NLEV, NLEV]'], {}), '([NLEV, NLEV])\n', (1240, 1254), False, 'from numpy.matlib import zeros\n'), ((2427, 2445), 'numpy.matlib.zeros', 'zeros', (['[NLEV, NCH]'], {}), '([NLEV, NCH])\n', (2432, 2445), False, 'from numpy.matlib import zeros\n'), ((1917, 1939), 'scipy.linalg.eigh', 'eigh', (['N', 'M'], {'lower': '(True)'}), '(N, M, lower=True)\n', (1921, 1939), False, 'from scipy.linalg import eigh\n'), ((2127, 2146), 'scipy.linalg.eigh', 'eigh', (['M'], {'lower': '(True)'}), '(M, lower=True)\n', (2131, 2146), False, 'from scipy.linalg import eigh\n')] |
import numpy as np
### from https://github.com/rflamary/POT/blob/master/ot/bregman.py ###
def sinkhorn_knopp(a, b, M, reg, numItermax=1000,
stopThr=1e-9, verbose=False, log=False, **kwargs):
r"""
Solve the entropic regularization optimal transport problem and return the OT matrix
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (dim_a, dim_b) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (histograms, both sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [2]_
Parameters
----------
a : ndarray, shape (dim_a,)
samples weights in the source domain
b : ndarray, shape (dim_b,) or ndarray, shape (dim_b, n_hists)
samples in the target domain, compute sinkhorn with multiple targets
and fixed M if b is a matrix (return OT loss + dual variables in log)
M : ndarray, shape (dim_a, dim_b)
loss matrix
reg : float
Regularization term >0
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : ndarray, shape (dim_a, dim_b)
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> a=[.5, .5]
>>> b=[.5, .5]
>>> M=[[0., 1.], [1., 0.]]
>>> ot.sinkhorn(a, b, M, 1)
array([[0.36552929, 0.13447071],
[0.13447071, 0.36552929]])
References
----------
.. [2] <NAME>, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
ot.lp.emd : Unregularized OT
ot.optim.cg : General regularized OT
"""
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
if len(a) == 0:
a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
if len(b) == 0:
b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]
# init data
dim_a = len(a)
dim_b = len(b)
if len(b.shape) > 1:
n_hists = b.shape[1]
else:
n_hists = 0
if log:
log = {'err': []}
# we assume that no distances are null except those of the diagonal of
# distances
if n_hists:
u = np.ones((dim_a, n_hists)) / dim_a
v = np.ones((dim_b, n_hists)) / dim_b
else:
u = np.ones(dim_a) / dim_a
v = np.ones(dim_b) / dim_b
# print(reg)
# Next 3 lines equivalent to K= np.exp(-M/reg), but faster to compute
K = np.empty(M.shape, dtype=M.dtype)
np.divide(M, -reg, out=K)
np.exp(K, out=K)
# print(np.min(K))
tmp2 = np.empty(b.shape, dtype=M.dtype)
Kp = (1 / (a+ 1e-299)).reshape(-1, 1) * K
cpt = 0
err = 1
while (err > stopThr and cpt < numItermax):
uprev = u
vprev = v
KtransposeU = np.dot(K.T, u)
v = np.divide(b, (KtransposeU+ 1e-299))
u = 1. / (np.dot(Kp, v)+ 1e-299)
if (np.any(KtransposeU == 0)
or np.any(np.isnan(u)) or np.any(np.isnan(v))
or np.any(np.isinf(u)) or np.any(np.isinf(v))):
# we have reached the machine precision
# come back to previous solution and quit loop
print('Warning: numerical errors at iteration', cpt)
u = uprev
v = vprev
break
if cpt % 10 == 0:
# we can speed up the process by checking for the error only all
# the 10th iterations
if n_hists:
np.einsum('ik,ij,jk->jk', u, K, v, out=tmp2)
else:
# compute right marginal tmp2= (diag(u)Kdiag(v))^T1
np.einsum('i,ij,j->j', u, K, v, out=tmp2)
err = np.linalg.norm(tmp2 - b) # violation of marginal
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print(
'{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt = cpt + 1
if log:
log['u'] = u
log['v'] = v
if n_hists: # return only loss
res = np.einsum('ik,ij,jk,ij->k', u, K, v, M)
if log:
return res, cpt, log
else:
return res, cpt
else: # return OT matrix
if log:
return u.reshape((-1, 1)) * K * v.reshape((1, -1)), cpt, log
else:
return u.reshape((-1, 1)) * K * v.reshape((1, -1)), cpt
| [
"numpy.ones",
"numpy.asarray",
"numpy.any",
"numpy.exp",
"numpy.dot",
"numpy.empty",
"numpy.einsum",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.isinf",
"numpy.divide"
] | [((2313, 2344), 'numpy.asarray', 'np.asarray', (['a'], {'dtype': 'np.float64'}), '(a, dtype=np.float64)\n', (2323, 2344), True, 'import numpy as np\n'), ((2353, 2384), 'numpy.asarray', 'np.asarray', (['b'], {'dtype': 'np.float64'}), '(b, dtype=np.float64)\n', (2363, 2384), True, 'import numpy as np\n'), ((2393, 2424), 'numpy.asarray', 'np.asarray', (['M'], {'dtype': 'np.float64'}), '(M, dtype=np.float64)\n', (2403, 2424), True, 'import numpy as np\n'), ((3158, 3190), 'numpy.empty', 'np.empty', (['M.shape'], {'dtype': 'M.dtype'}), '(M.shape, dtype=M.dtype)\n', (3166, 3190), True, 'import numpy as np\n'), ((3195, 3220), 'numpy.divide', 'np.divide', (['M', '(-reg)'], {'out': 'K'}), '(M, -reg, out=K)\n', (3204, 3220), True, 'import numpy as np\n'), ((3225, 3241), 'numpy.exp', 'np.exp', (['K'], {'out': 'K'}), '(K, out=K)\n', (3231, 3241), True, 'import numpy as np\n'), ((3277, 3309), 'numpy.empty', 'np.empty', (['b.shape'], {'dtype': 'M.dtype'}), '(b.shape, dtype=M.dtype)\n', (3285, 3309), True, 'import numpy as np\n'), ((3488, 3502), 'numpy.dot', 'np.dot', (['K.T', 'u'], {}), '(K.T, u)\n', (3494, 3502), True, 'import numpy as np\n'), ((3515, 3549), 'numpy.divide', 'np.divide', (['b', '(KtransposeU + 1e-299)'], {}), '(b, KtransposeU + 1e-299)\n', (3524, 3549), True, 'import numpy as np\n'), ((4835, 4874), 'numpy.einsum', 'np.einsum', (['"""ik,ij,jk,ij->k"""', 'u', 'K', 'v', 'M'], {}), "('ik,ij,jk,ij->k', u, K, v, M)\n", (4844, 4874), True, 'import numpy as np\n'), ((2458, 2498), 'numpy.ones', 'np.ones', (['(M.shape[0],)'], {'dtype': 'np.float64'}), '((M.shape[0],), dtype=np.float64)\n', (2465, 2498), True, 'import numpy as np\n'), ((2544, 2584), 'numpy.ones', 'np.ones', (['(M.shape[1],)'], {'dtype': 'np.float64'}), '((M.shape[1],), dtype=np.float64)\n', (2551, 2584), True, 'import numpy as np\n'), ((2897, 2922), 'numpy.ones', 'np.ones', (['(dim_a, n_hists)'], {}), '((dim_a, n_hists))\n', (2904, 2922), True, 'import numpy as np\n'), ((2943, 2968), 'numpy.ones', 
'np.ones', (['(dim_b, n_hists)'], {}), '((dim_b, n_hists))\n', (2950, 2968), True, 'import numpy as np\n'), ((2999, 3013), 'numpy.ones', 'np.ones', (['dim_a'], {}), '(dim_a)\n', (3006, 3013), True, 'import numpy as np\n'), ((3034, 3048), 'numpy.ones', 'np.ones', (['dim_b'], {}), '(dim_b)\n', (3041, 3048), True, 'import numpy as np\n'), ((3605, 3629), 'numpy.any', 'np.any', (['(KtransposeU == 0)'], {}), '(KtransposeU == 0)\n', (3611, 3629), True, 'import numpy as np\n'), ((4378, 4402), 'numpy.linalg.norm', 'np.linalg.norm', (['(tmp2 - b)'], {}), '(tmp2 - b)\n', (4392, 4402), True, 'import numpy as np\n'), ((3569, 3582), 'numpy.dot', 'np.dot', (['Kp', 'v'], {}), '(Kp, v)\n', (3575, 3582), True, 'import numpy as np\n'), ((3656, 3667), 'numpy.isnan', 'np.isnan', (['u'], {}), '(u)\n', (3664, 3667), True, 'import numpy as np\n'), ((3679, 3690), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (3687, 3690), True, 'import numpy as np\n'), ((3718, 3729), 'numpy.isinf', 'np.isinf', (['u'], {}), '(u)\n', (3726, 3729), True, 'import numpy as np\n'), ((3741, 3752), 'numpy.isinf', 'np.isinf', (['v'], {}), '(v)\n', (3749, 3752), True, 'import numpy as np\n'), ((4171, 4215), 'numpy.einsum', 'np.einsum', (['"""ik,ij,jk->jk"""', 'u', 'K', 'v'], {'out': 'tmp2'}), "('ik,ij,jk->jk', u, K, v, out=tmp2)\n", (4180, 4215), True, 'import numpy as np\n'), ((4318, 4359), 'numpy.einsum', 'np.einsum', (['"""i,ij,j->j"""', 'u', 'K', 'v'], {'out': 'tmp2'}), "('i,ij,j->j', u, K, v, out=tmp2)\n", (4327, 4359), True, 'import numpy as np\n')] |
import numpy as np
def load_glove(gloveFile):
'''
Requires packages: numpy
gloveFile: string
file path to txt file containing words and glove vectors
returns a dictionary of words as keys and their corresponding vectors as values
'''
f = open(gloveFile,'r', encoding='utf8')
word_vector = {}
for line in f:
splitLine = line.split(' ')
word = splitLine[0]
embedding = np.asarray(splitLine[1:], dtype='float32')
word_vector[word] = embedding
return word_vector | [
"numpy.asarray"
] | [((449, 491), 'numpy.asarray', 'np.asarray', (['splitLine[1:]'], {'dtype': '"""float32"""'}), "(splitLine[1:], dtype='float32')\n", (459, 491), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Tests of the U.S. 1976 Standard Atmosphere implementation. All of them are
validated against the `standard`_.
.. _`standard`: http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19770009539_1977009539.pdf
"""
from __future__ import division, absolute_import
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal)
import scipy as sp
import scipy.constants
from skaero.atmosphere import coesa, util
def _C2K(val):
"""Convert Celsius to Kelvins."""
return scipy.constants.convert_temperature([val], 'C', 'K')
def test_sea_level():
"""Tests sea level values.
"""
h = 0.0
expected_h = 0.0
expected_T = _C2K(15)
expected_p = sp.constants.atm
expected_rho = 1.2250
h, T, p, rho = coesa.table(h)
assert_equal(h, expected_h)
assert_equal(T, expected_T)
assert_equal(p, expected_p)
assert_almost_equal(rho, expected_rho, decimal=4)
def test_sea_level_0d_array():
"""Tests sea level values using zero dimension array.
"""
h = np.array(0.0)
expected_h = np.array(0.0)
expected_T = np.array(_C2K(15))
expected_p = np.array(sp.constants.atm)
expected_rho = np.array(1.2250)
h, T, p, rho = coesa.table(h)
assert_array_equal(h, expected_h)
assert_array_almost_equal(T, expected_T)
assert_array_almost_equal(p, expected_p)
assert_array_almost_equal(rho, expected_rho)
def test_sea_level_nd_array():
"""Tests sea level values using n dimension array.
"""
h = np.array([0.0, 0.0, 0.0])
expected_h = np.array([0.0, 0.0, 0.0])
expected_T = np.array([288.15] * 3)
expected_p = np.array([101325.0] * 3)
expected_rho = np.array([1.2250] * 3)
h, T, p, rho = coesa.table(h)
assert_array_equal(h, expected_h)
assert_array_almost_equal(T, expected_T)
assert_array_almost_equal(p, expected_p)
assert_array_almost_equal(rho, expected_rho)
def test_geometric_to_geopotential():
z = np.array([50.0, 5550.0, 10450.0])
h = util.geometric_to_geopotential(z)
expected_h = np.array([50.0, 5545.0, 10433.0])
assert_array_almost_equal(h, expected_h, decimal=0)
def test_under_1000m():
"""Tests for altitude values under 1000.0 m
"""
z = np.array([50.0, 550.0, 850.0])
h = util.geometric_to_geopotential(z)
expected_h = np.array([50.0, 550.0, 850.0])
expected_T = np.array([287.825, 284.575, 282.626])
expected_p = np.array([100720.0, 94890.0, 91523.0])
expected_rho = np.array([1.2191, 1.1616, 1.1281])
h, T, p, rho = coesa.table(h)
assert_array_almost_equal(h, expected_h, decimal=0)
assert_array_almost_equal(T, expected_T, decimal=3)
assert_array_almost_equal(p, expected_p, decimal=-1)
assert_array_almost_equal(rho, expected_rho, decimal=4)
def test_under_11km():
"""Tests for altitude values between 1 and 11 km
"""
z = np.array([500.0, 2500.0, 6500.0, 9000.0, 11000.0])
h = util.geometric_to_geopotential(z)
expected_h = np.array([500.0, 2499.0, 6493.0, 8987.0, 10981.0])
expected_T = np.array([284.900, 271.906, 245.943, 229.733, 216.774])
expected_p = np.array([95461.0, 74691.0, 44075.0, 30800.0, 22699.0])
expected_rho = np.array([1.1673, 0.95695, 0.62431, 0.46706, 0.36480])
h, T, p, rho = coesa.table(h)
assert_array_almost_equal(h, expected_h, decimal=0)
assert_array_almost_equal(T, expected_T, decimal=3)
assert_array_almost_equal(p, expected_p, decimal=0)
assert_array_almost_equal(rho, expected_rho, decimal=4)
def test_under_35km():
"""Tests for altitude values between 11 and 35 km
"""
z = np.array([15000.0, 25000.0, 35000.0])
h = util.geometric_to_geopotential(z)
expected_h = np.array([14965.0, 24902., 34808.0])
expected_T = np.array([216.65, 221.552, 236.513])
expected_p = np.array([12111.0, 2549.2, 574.59])
expected_rho = np.array([0.19476, 0.040084, 0.0084634])
h, T, p, rho = coesa.table(h)
assert_array_almost_equal(h, expected_h, decimal=0)
assert_array_almost_equal(T, expected_T, decimal=3)
assert_array_almost_equal(p, expected_p, decimal=0)
assert_array_almost_equal(rho, expected_rho, decimal=5)
def test_under_86km():
"""Tests for altitude values between 35 and 86 km
"""
z = np.array([50000.0, 70000.0, 86000.0])
h = util.geometric_to_geopotential(z)
expected_h = np.array([49610.0, 69238., 84852.0])
expected_T = np.array([270.65, 219.585, 186.87])
expected_p = np.array([79.779, 5.2209, 0.37338])
expected_rho = np.array([0.0010269, 0.000082829, 0.000006958])
h, T, p, rho = coesa.table(h)
assert_array_almost_equal(h, expected_h, decimal=0)
assert_array_almost_equal(T, expected_T, decimal=2)
assert_array_almost_equal(p, expected_p, decimal=3)
assert_array_almost_equal(rho, expected_rho, decimal=7)
| [
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_equal",
"skaero.atmosphere.util.geometric_to_geopotential",
"numpy.testing.assert_almost_equal",
"numpy.array",
"skaero.atmosphere.coesa.table",
"numpy.testing.assert_array_equal"
] | [((846, 860), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (857, 860), False, 'from skaero.atmosphere import coesa, util\n'), ((866, 893), 'numpy.testing.assert_equal', 'assert_equal', (['h', 'expected_h'], {}), '(h, expected_h)\n', (878, 893), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((898, 925), 'numpy.testing.assert_equal', 'assert_equal', (['T', 'expected_T'], {}), '(T, expected_T)\n', (910, 925), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((930, 957), 'numpy.testing.assert_equal', 'assert_equal', (['p', 'expected_p'], {}), '(p, expected_p)\n', (942, 957), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((962, 1011), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['rho', 'expected_rho'], {'decimal': '(4)'}), '(rho, expected_rho, decimal=4)\n', (981, 1011), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1120, 1133), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (1128, 1133), True, 'import numpy as np\n'), ((1151, 1164), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (1159, 1164), True, 'import numpy as np\n'), ((1218, 1244), 'numpy.array', 'np.array', (['sp.constants.atm'], {}), '(sp.constants.atm)\n', (1226, 1244), True, 'import numpy as np\n'), ((1264, 1279), 'numpy.array', 'np.array', (['(1.225)'], {}), '(1.225)\n', (1272, 1279), True, 'import numpy as np\n'), ((1301, 1315), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (1312, 1315), False, 'from skaero.atmosphere import coesa, util\n'), ((1321, 1354), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['h', 'expected_h'], {}), '(h, expected_h)\n', (1339, 1354), False, 'from numpy.testing import assert_equal, 
assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1359, 1399), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['T', 'expected_T'], {}), '(T, expected_T)\n', (1384, 1399), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1404, 1444), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p', 'expected_p'], {}), '(p, expected_p)\n', (1429, 1444), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1449, 1493), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rho', 'expected_rho'], {}), '(rho, expected_rho)\n', (1474, 1493), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1599, 1624), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1607, 1624), True, 'import numpy as np\n'), ((1642, 1667), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1650, 1667), True, 'import numpy as np\n'), ((1685, 1707), 'numpy.array', 'np.array', (['([288.15] * 3)'], {}), '([288.15] * 3)\n', (1693, 1707), True, 'import numpy as np\n'), ((1725, 1749), 'numpy.array', 'np.array', (['([101325.0] * 3)'], {}), '([101325.0] * 3)\n', (1733, 1749), True, 'import numpy as np\n'), ((1769, 1790), 'numpy.array', 'np.array', (['([1.225] * 3)'], {}), '([1.225] * 3)\n', (1777, 1790), True, 'import numpy as np\n'), ((1812, 1826), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (1823, 1826), False, 'from skaero.atmosphere import coesa, util\n'), ((1832, 1865), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['h', 'expected_h'], {}), '(h, expected_h)\n', (1850, 1865), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1870, 1910), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['T', 'expected_T'], {}), '(T, expected_T)\n', (1895, 1910), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1915, 1955), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p', 'expected_p'], {}), '(p, expected_p)\n', (1940, 1955), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1960, 2004), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rho', 'expected_rho'], {}), '(rho, expected_rho)\n', (1985, 2004), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((2053, 2086), 'numpy.array', 'np.array', (['[50.0, 5550.0, 10450.0]'], {}), '([50.0, 5550.0, 10450.0])\n', (2061, 2086), True, 'import numpy as np\n'), ((2095, 2128), 'skaero.atmosphere.util.geometric_to_geopotential', 'util.geometric_to_geopotential', (['z'], {}), '(z)\n', (2125, 2128), False, 'from skaero.atmosphere import coesa, util\n'), ((2146, 2179), 'numpy.array', 'np.array', (['[50.0, 5545.0, 10433.0]'], {}), '([50.0, 5545.0, 10433.0])\n', (2154, 2179), True, 'import numpy as np\n'), ((2184, 2235), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['h', 'expected_h'], {'decimal': '(0)'}), '(h, expected_h, decimal=0)\n', (2209, 2235), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((2327, 2357), 'numpy.array', 'np.array', (['[50.0, 550.0, 850.0]'], {}), '([50.0, 550.0, 850.0])\n', (2335, 2357), True, 'import numpy as np\n'), ((2366, 2399), 'skaero.atmosphere.util.geometric_to_geopotential', 'util.geometric_to_geopotential', (['z'], {}), '(z)\n', (2396, 2399), False, 'from skaero.atmosphere import coesa, util\n'), ((2417, 2447), 'numpy.array', 'np.array', (['[50.0, 550.0, 
850.0]'], {}), '([50.0, 550.0, 850.0])\n', (2425, 2447), True, 'import numpy as np\n'), ((2465, 2502), 'numpy.array', 'np.array', (['[287.825, 284.575, 282.626]'], {}), '([287.825, 284.575, 282.626])\n', (2473, 2502), True, 'import numpy as np\n'), ((2520, 2558), 'numpy.array', 'np.array', (['[100720.0, 94890.0, 91523.0]'], {}), '([100720.0, 94890.0, 91523.0])\n', (2528, 2558), True, 'import numpy as np\n'), ((2578, 2612), 'numpy.array', 'np.array', (['[1.2191, 1.1616, 1.1281]'], {}), '([1.2191, 1.1616, 1.1281])\n', (2586, 2612), True, 'import numpy as np\n'), ((2633, 2647), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (2644, 2647), False, 'from skaero.atmosphere import coesa, util\n'), ((2653, 2704), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['h', 'expected_h'], {'decimal': '(0)'}), '(h, expected_h, decimal=0)\n', (2678, 2704), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((2709, 2760), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['T', 'expected_T'], {'decimal': '(3)'}), '(T, expected_T, decimal=3)\n', (2734, 2760), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((2765, 2817), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p', 'expected_p'], {'decimal': '(-1)'}), '(p, expected_p, decimal=-1)\n', (2790, 2817), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((2822, 2877), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rho', 'expected_rho'], {'decimal': '(4)'}), '(rho, expected_rho, decimal=4)\n', (2847, 2877), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((2973, 3023), 'numpy.array', 'np.array', (['[500.0, 2500.0, 6500.0, 9000.0, 
11000.0]'], {}), '([500.0, 2500.0, 6500.0, 9000.0, 11000.0])\n', (2981, 3023), True, 'import numpy as np\n'), ((3032, 3065), 'skaero.atmosphere.util.geometric_to_geopotential', 'util.geometric_to_geopotential', (['z'], {}), '(z)\n', (3062, 3065), False, 'from skaero.atmosphere import coesa, util\n'), ((3083, 3133), 'numpy.array', 'np.array', (['[500.0, 2499.0, 6493.0, 8987.0, 10981.0]'], {}), '([500.0, 2499.0, 6493.0, 8987.0, 10981.0])\n', (3091, 3133), True, 'import numpy as np\n'), ((3151, 3204), 'numpy.array', 'np.array', (['[284.9, 271.906, 245.943, 229.733, 216.774]'], {}), '([284.9, 271.906, 245.943, 229.733, 216.774])\n', (3159, 3204), True, 'import numpy as np\n'), ((3224, 3279), 'numpy.array', 'np.array', (['[95461.0, 74691.0, 44075.0, 30800.0, 22699.0]'], {}), '([95461.0, 74691.0, 44075.0, 30800.0, 22699.0])\n', (3232, 3279), True, 'import numpy as np\n'), ((3299, 3352), 'numpy.array', 'np.array', (['[1.1673, 0.95695, 0.62431, 0.46706, 0.3648]'], {}), '([1.1673, 0.95695, 0.62431, 0.46706, 0.3648])\n', (3307, 3352), True, 'import numpy as np\n'), ((3374, 3388), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (3385, 3388), False, 'from skaero.atmosphere import coesa, util\n'), ((3398, 3449), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['h', 'expected_h'], {'decimal': '(0)'}), '(h, expected_h, decimal=0)\n', (3423, 3449), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((3454, 3505), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['T', 'expected_T'], {'decimal': '(3)'}), '(T, expected_T, decimal=3)\n', (3479, 3505), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((3510, 3561), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p', 'expected_p'], {'decimal': '(0)'}), '(p, expected_p, decimal=0)\n', (3535, 3561), 
False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((3566, 3621), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rho', 'expected_rho'], {'decimal': '(4)'}), '(rho, expected_rho, decimal=4)\n', (3591, 3621), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((3718, 3755), 'numpy.array', 'np.array', (['[15000.0, 25000.0, 35000.0]'], {}), '([15000.0, 25000.0, 35000.0])\n', (3726, 3755), True, 'import numpy as np\n'), ((3764, 3797), 'skaero.atmosphere.util.geometric_to_geopotential', 'util.geometric_to_geopotential', (['z'], {}), '(z)\n', (3794, 3797), False, 'from skaero.atmosphere import coesa, util\n'), ((3815, 3852), 'numpy.array', 'np.array', (['[14965.0, 24902.0, 34808.0]'], {}), '([14965.0, 24902.0, 34808.0])\n', (3823, 3852), True, 'import numpy as np\n'), ((3869, 3905), 'numpy.array', 'np.array', (['[216.65, 221.552, 236.513]'], {}), '([216.65, 221.552, 236.513])\n', (3877, 3905), True, 'import numpy as np\n'), ((3923, 3958), 'numpy.array', 'np.array', (['[12111.0, 2549.2, 574.59]'], {}), '([12111.0, 2549.2, 574.59])\n', (3931, 3958), True, 'import numpy as np\n'), ((3978, 4018), 'numpy.array', 'np.array', (['[0.19476, 0.040084, 0.0084634]'], {}), '([0.19476, 0.040084, 0.0084634])\n', (3986, 4018), True, 'import numpy as np\n'), ((4039, 4053), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (4050, 4053), False, 'from skaero.atmosphere import coesa, util\n'), ((4063, 4114), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['h', 'expected_h'], {'decimal': '(0)'}), '(h, expected_h, decimal=0)\n', (4088, 4114), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4119, 4170), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['T', 'expected_T'], {'decimal': 
'(3)'}), '(T, expected_T, decimal=3)\n', (4144, 4170), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4175, 4226), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p', 'expected_p'], {'decimal': '(0)'}), '(p, expected_p, decimal=0)\n', (4200, 4226), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4231, 4286), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rho', 'expected_rho'], {'decimal': '(5)'}), '(rho, expected_rho, decimal=5)\n', (4256, 4286), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4383, 4420), 'numpy.array', 'np.array', (['[50000.0, 70000.0, 86000.0]'], {}), '([50000.0, 70000.0, 86000.0])\n', (4391, 4420), True, 'import numpy as np\n'), ((4429, 4462), 'skaero.atmosphere.util.geometric_to_geopotential', 'util.geometric_to_geopotential', (['z'], {}), '(z)\n', (4459, 4462), False, 'from skaero.atmosphere import coesa, util\n'), ((4480, 4517), 'numpy.array', 'np.array', (['[49610.0, 69238.0, 84852.0]'], {}), '([49610.0, 69238.0, 84852.0])\n', (4488, 4517), True, 'import numpy as np\n'), ((4534, 4569), 'numpy.array', 'np.array', (['[270.65, 219.585, 186.87]'], {}), '([270.65, 219.585, 186.87])\n', (4542, 4569), True, 'import numpy as np\n'), ((4587, 4622), 'numpy.array', 'np.array', (['[79.779, 5.2209, 0.37338]'], {}), '([79.779, 5.2209, 0.37338])\n', (4595, 4622), True, 'import numpy as np\n'), ((4642, 4686), 'numpy.array', 'np.array', (['[0.0010269, 8.2829e-05, 6.958e-06]'], {}), '([0.0010269, 8.2829e-05, 6.958e-06])\n', (4650, 4686), True, 'import numpy as np\n'), ((4710, 4724), 'skaero.atmosphere.coesa.table', 'coesa.table', (['h'], {}), '(h)\n', (4721, 4724), False, 'from skaero.atmosphere import coesa, util\n'), ((4734, 4785), 'numpy.testing.assert_array_almost_equal', 
'assert_array_almost_equal', (['h', 'expected_h'], {'decimal': '(0)'}), '(h, expected_h, decimal=0)\n', (4759, 4785), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4790, 4841), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['T', 'expected_T'], {'decimal': '(2)'}), '(T, expected_T, decimal=2)\n', (4815, 4841), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4846, 4897), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p', 'expected_p'], {'decimal': '(3)'}), '(p, expected_p, decimal=3)\n', (4871, 4897), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((4902, 4957), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rho', 'expected_rho'], {'decimal': '(7)'}), '(rho, expected_rho, decimal=7)\n', (4927, 4957), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n')] |
import os
import numpy as np
from collections import deque
from ccontrol.utils import save_scores, save_AC_models, save_configuration
class Runner:
    """Runs agent/environment training episodes and persists the results.

    ``run`` drives a multi-agent (vectorised) environment where each step
    returns one observation/reward/done flag per agent; ``run_single_agent``
    drives a single-agent environment (index 0 only).  Output locations are
    resolved relative to this source file.
    """
    def __init__(self) -> None:
        # Anchor all output paths on this module's directory so artefacts
        # land in the same place regardless of the working directory.
        file_location = os.path.dirname(__file__)
        self.path_score = os.path.join(file_location, r'./../../output/score')
        self.path_model = os.path.join(file_location, r'./../../output/model')
        self.path_config = os.path.join(file_location, r'./../../output/configuration')
    def run(self, agent, env, brain_name, nb_episodes, key,
            average_on=10, target_score=None, target_over=100,
            save_score=True, save_config=True, save_weights=False, save_interaction=False):
        """Train ``agent`` for ``nb_episodes`` episodes in a multi-agent env.

        agent        -- learner exposing ``act(states, noise)`` and
                        ``step(state, action, reward, next_state, done)``.
        env          -- Unity-style environment whose ``reset``/``step``
                        return brain infos keyed by ``brain_name``.
        key          -- identifier used to name the saved artefacts.
        average_on   -- window size of the rolling mean printed per episode.
        target_score -- if set, rolling mean over ``target_over`` episodes
                        above which the run is flagged as solved (training
                        continues; only the printed banner changes).
        save_score / save_config / save_weights -- persistence toggles.
        save_interaction -- unsupported; raises if True.
        """
        scores = deque()
        scores_target = deque(maxlen=target_over)
        scores_window = deque(maxlen=average_on)
        is_solved = ''
        for episode in range(1, nb_episodes+1):
            env_info = env.reset(train_mode=True)[brain_name]
            states = env_info.vector_observations
            score = 0
            while True:
                actions = agent.act(states, noise=True)
                env_info = env.step(actions)[brain_name]
                next_states = env_info.vector_observations
                rewards = env_info.rewards
                dones = env_info.local_done
                # Feed each agent's transition into the shared learner.
                for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
                    agent.step(state, action, reward, next_state, done)
                states = next_states
                # Episode score accumulates the reward averaged over agents.
                score += np.mean(rewards)
                if np.any(dones):
                    scores.append(score)
                    scores_target.append(score)
                    scores_window.append(score)
                    score_averaged = np.mean(list(scores_window))
                    print(f"\rEpisode {episode} Score: {score_averaged}{is_solved}",
                          end='\r')
                    if target_score:
                        # Record (once) the first episode whose rolling mean
                        # over the target window beats the target score.
                        if (len(is_solved) == 0) & (np.mean(list(scores_target)) > target_score):
                            is_solved = f' -> Solved in {episode} episodes'
                    break
            print(f"\nLast score: {round(score_averaged,5)} {is_solved}")
        if save_score:
            save_scores(scores, key, self.path_score)
        if save_config:
            save_configuration(agent, key, self.path_config)
        if save_weights:
            save_AC_models(agent, key, self.path_model)
        if save_interaction:
            raise Exception('not implemented yet')
    def run_single_agent(self, agent, env, brain_name, nb_episodes, key,
                         average_on=10, target_score=None, target_over=100,
                         save_score=True, save_config=True, save_weights=False, save_interaction=False):
        """Single-agent variant of :meth:`run`.

        Identical contract, but observations/rewards/dones are taken from
        index 0 of the environment info instead of being vectorised over
        multiple agents.
        """
        scores = deque()
        scores_target = deque(maxlen=target_over)
        scores_window = deque(maxlen=average_on)
        is_solved = ''
        for episode in range(1, nb_episodes+1):
            env_info = env.reset(train_mode=True)[brain_name]
            state = env_info.vector_observations[0]
            score = 0
            while True:
                action = agent.act(state, noise=True)
                env_info = env.step(action)[brain_name]
                next_state = env_info.vector_observations[0]
                reward = env_info.rewards[0]
                done = env_info.local_done[0]
                agent.step(state, action, reward, next_state, done)
                state = next_state
                score += reward
                if done:
                    scores.append(score)
                    scores_target.append(score)
                    scores_window.append(score)
                    score_averaged = np.mean(list(scores_window))
                    print(f"\rEpisode {episode} Score: {score_averaged}{is_solved}",
                          end='\r')
                    if target_score:
                        if (len(is_solved) == 0) & (np.mean(list(scores_target)) > target_score):
                            is_solved = f' -> Solved in {episode} episodes'
                    break
            print(f"\rLast score: {round(score_averaged,5)} {is_solved}")
        if save_score:
            save_scores(scores, key, self.path_score)
        if save_config:
            save_configuration(agent, key, self.path_config)
        if save_weights:
            save_AC_models(agent, key, self.path_model)
        if save_interaction:
raise Exception('not implemented yet') | [
"numpy.mean",
"collections.deque",
"ccontrol.utils.save_AC_models",
"os.path.join",
"numpy.any",
"os.path.dirname",
"ccontrol.utils.save_configuration",
"ccontrol.utils.save_scores"
] | [((209, 234), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (224, 234), False, 'import os\n'), ((261, 312), 'os.path.join', 'os.path.join', (['file_location', '"""./../../output/score"""'], {}), "(file_location, './../../output/score')\n", (273, 312), False, 'import os\n'), ((340, 391), 'os.path.join', 'os.path.join', (['file_location', '"""./../../output/model"""'], {}), "(file_location, './../../output/model')\n", (352, 391), False, 'import os\n'), ((420, 479), 'os.path.join', 'os.path.join', (['file_location', '"""./../../output/configuration"""'], {}), "(file_location, './../../output/configuration')\n", (432, 479), False, 'import os\n'), ((731, 738), 'collections.deque', 'deque', ([], {}), '()\n', (736, 738), False, 'from collections import deque\n'), ((767, 792), 'collections.deque', 'deque', ([], {'maxlen': 'target_over'}), '(maxlen=target_over)\n', (772, 792), False, 'from collections import deque\n'), ((821, 845), 'collections.deque', 'deque', ([], {'maxlen': 'average_on'}), '(maxlen=average_on)\n', (826, 845), False, 'from collections import deque\n'), ((3232, 3239), 'collections.deque', 'deque', ([], {}), '()\n', (3237, 3239), False, 'from collections import deque\n'), ((3264, 3289), 'collections.deque', 'deque', ([], {'maxlen': 'target_over'}), '(maxlen=target_over)\n', (3269, 3289), False, 'from collections import deque\n'), ((3314, 3338), 'collections.deque', 'deque', ([], {'maxlen': 'average_on'}), '(maxlen=average_on)\n', (3319, 3338), False, 'from collections import deque\n'), ((2626, 2667), 'ccontrol.utils.save_scores', 'save_scores', (['scores', 'key', 'self.path_score'], {}), '(scores, key, self.path_score)\n', (2637, 2667), False, 'from ccontrol.utils import save_scores, save_AC_models, save_configuration\n'), ((2717, 2765), 'ccontrol.utils.save_configuration', 'save_configuration', (['agent', 'key', 'self.path_config'], {}), '(agent, key, self.path_config)\n', (2735, 2765), False, 'from ccontrol.utils import 
save_scores, save_AC_models, save_configuration\n'), ((2816, 2859), 'ccontrol.utils.save_AC_models', 'save_AC_models', (['agent', 'key', 'self.path_model'], {}), '(agent, key, self.path_model)\n', (2830, 2859), False, 'from ccontrol.utils import save_scores, save_AC_models, save_configuration\n'), ((4793, 4834), 'ccontrol.utils.save_scores', 'save_scores', (['scores', 'key', 'self.path_score'], {}), '(scores, key, self.path_score)\n', (4804, 4834), False, 'from ccontrol.utils import save_scores, save_AC_models, save_configuration\n'), ((4872, 4920), 'ccontrol.utils.save_configuration', 'save_configuration', (['agent', 'key', 'self.path_config'], {}), '(agent, key, self.path_config)\n', (4890, 4920), False, 'from ccontrol.utils import save_scores, save_AC_models, save_configuration\n'), ((4959, 5002), 'ccontrol.utils.save_AC_models', 'save_AC_models', (['agent', 'key', 'self.path_model'], {}), '(agent, key, self.path_model)\n', (4973, 5002), False, 'from ccontrol.utils import save_scores, save_AC_models, save_configuration\n'), ((1786, 1802), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (1793, 1802), True, 'import numpy as np\n'), ((1847, 1860), 'numpy.any', 'np.any', (['dones'], {}), '(dones)\n', (1853, 1860), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import time

# Detect straight line segments in an image: isolate a single target colour,
# thin the resulting mask to a skeleton, then run a Hough transform and draw
# every detected line on the (flipped, rotated) input image.

# Load the test image, mirror it, and rotate it 40 degrees about its centre.
image = cv2.imread('test.png')
image = cv2.flip(image, 2)
rows, cols, _ = image.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 40, 1)
image = cv2.warpAffine(image, M, (cols, rows))

# Keep only pixels of the exact target colour (BGR = 0, 180, 255).
mask = cv2.inRange(image, np.array([0, 180, 255], dtype='uint8'),
                   np.array([0, 180, 255], dtype='uint8'))
# NOTE: 3-value unpacking matches OpenCV 3.x; OpenCV 4.x returns only
# (contours, hierarchy).
_, cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Morphological skeletonisation: repeatedly erode the mask, collecting the
# pixels removed at each step, until nothing is left.
t0 = time.time()
skel = np.zeros(mask.shape, np.uint8)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (20, 20))
done = False
while not done:
    eroded = cv2.erode(mask, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(mask, temp)
    skel = cv2.bitwise_or(skel, temp)
    # BUG FIX: carry the eroded image into the next iteration.  The original
    # assigned it to an unused variable and set done=True unconditionally,
    # so the loop always stopped after a single (incomplete) pass.
    mask = eroded.copy()
    done = cv2.countNonZero(mask) == 0
t1 = time.time()
print(t1 - t0)

# Find straight lines in the skeleton.
lines = cv2.HoughLines(skel, 1, np.pi / 90, 52)
print(lines)
# BUG FIX: HoughLines returns None when no line is found; guard before
# indexing.  Also draw lines[i] instead of always re-drawing lines[1].
if lines is not None:
    for i in range(len(lines)):
        for rho, theta in lines[i]:
            # Convert (rho, theta) polar form into two far-apart endpoints.
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * a)
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * a)
            print(x1, y1, x2, y2)
            cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imshow("skel", image)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"cv2.imshow",
"numpy.array",
"cv2.HoughLines",
"cv2.destroyAllWindows",
"cv2.bitwise_or",
"numpy.sin",
"cv2.erode",
"cv2.line",
"cv2.waitKey",
"cv2.warpAffine",
"numpy.size",
"numpy.cos",
"cv2.getRotationMatrix2D",
"cv2.subtract",
"time.time",
"cv2.imread",
"cv2.countNonZero",
"cv2... | [((52, 74), 'cv2.imread', 'cv2.imread', (['"""test.png"""'], {}), "('test.png')\n", (62, 74), False, 'import cv2\n'), ((83, 101), 'cv2.flip', 'cv2.flip', (['image', '(2)'], {}), '(image, 2)\n', (91, 101), False, 'import cv2\n'), ((132, 184), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(40)', '(1)'], {}), '((cols / 2, rows / 2), 40, 1)\n', (155, 184), False, 'import cv2\n'), ((186, 224), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(cols, rows)'], {}), '(image, M, (cols, rows))\n', (200, 224), False, 'import cv2\n'), ((413, 424), 'time.time', 'time.time', ([], {}), '()\n', (422, 424), False, 'import time\n'), ((432, 445), 'numpy.size', 'np.size', (['mask'], {}), '(mask)\n', (439, 445), True, 'import numpy as np\n'), ((453, 483), 'numpy.zeros', 'np.zeros', (['mask.shape', 'np.uint8'], {}), '(mask.shape, np.uint8)\n', (461, 483), True, 'import numpy as np\n'), ((493, 545), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS', '(20, 20)'], {}), '(cv2.MORPH_CROSS, (20, 20))\n', (518, 545), False, 'import cv2\n'), ((844, 855), 'time.time', 'time.time', ([], {}), '()\n', (853, 855), False, 'import time\n'), ((877, 916), 'cv2.HoughLines', 'cv2.HoughLines', (['skel', '(1)', '(np.pi / 90)', '(52)'], {}), '(skel, 1, np.pi / 90, 52)\n', (891, 916), False, 'import cv2\n'), ((1337, 1362), 'cv2.imshow', 'cv2.imshow', (['"""skel"""', 'image'], {}), "('skel', image)\n", (1347, 1362), False, 'import cv2\n'), ((1362, 1376), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1373, 1376), False, 'import cv2\n'), ((1377, 1400), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1398, 1400), False, 'import cv2\n'), ((246, 284), 'numpy.array', 'np.array', (['[0, 180, 255]'], {'dtype': '"""uint8"""'}), "([0, 180, 255], dtype='uint8')\n", (254, 284), True, 'import numpy as np\n'), ((284, 322), 'numpy.array', 'np.array', (['[0, 180, 255]'], {'dtype': '"""uint8"""'}), "([0, 180, 255], 
dtype='uint8')\n", (292, 322), True, 'import numpy as np\n'), ((589, 613), 'cv2.erode', 'cv2.erode', (['mask', 'element'], {}), '(mask, element)\n', (598, 613), False, 'import cv2\n'), ((624, 651), 'cv2.dilate', 'cv2.dilate', (['eroded', 'element'], {}), '(eroded, element)\n', (634, 651), False, 'import cv2\n'), ((662, 686), 'cv2.subtract', 'cv2.subtract', (['mask', 'temp'], {}), '(mask, temp)\n', (674, 686), False, 'import cv2\n'), ((697, 723), 'cv2.bitwise_or', 'cv2.bitwise_or', (['skel', 'temp'], {}), '(skel, temp)\n', (711, 723), False, 'import cv2\n'), ((783, 805), 'cv2.countNonZero', 'cv2.countNonZero', (['mask'], {}), '(mask)\n', (799, 805), False, 'import cv2\n'), ((1075, 1088), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1081, 1088), True, 'import numpy as np\n'), ((1098, 1111), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1104, 1111), True, 'import numpy as np\n'), ((1291, 1342), 'cv2.line', 'cv2.line', (['image', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(image, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (1299, 1342), False, 'import cv2\n')] |
#! python3
"""
Pass this program a filename as the first argument
"""
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
from robot.base import State
import config
from logger import augment
def add_arrow(line, direction='right', size=15, color=None, n=1):
    """Decorate *line* with ``n`` arrow heads spaced evenly along its length.

    line: matplotlib Line2D to annotate.
    direction: kept for API compatibility; currently unused.
    size: arrow size in font-size points.
    color: arrow colour; defaults to the line's own colour.
    n: number of arrows to place.
    """
    arrow_color = line.get_color() if color is None else color
    xs = line.get_xdata()
    ys = line.get_ydata()
    # Arc length travelled up to each vertex of the polyline.
    seg_lengths = np.hypot(np.diff(xs), np.diff(ys))
    dists = np.concatenate(([0], np.cumsum(seg_lengths)))
    total_dist = dists[-1]
    for k in range(1, n + 1):
        # Place arrow k at fraction k/(n+1) of the total arc length.
        target_dist = total_dist * k / (n + 1)
        # First vertex at or beyond the target arc length, and its
        # predecessor, bracket the arrow tip.
        end_ind = np.argmax(dists >= target_dist)
        start_ind = end_ind - 1
        # Linear interpolation between the bracketing vertices.
        frac = (target_dist - dists[start_ind]) / (dists[end_ind] - dists[start_ind])
        tip_x = xs[start_ind] * (1 - frac) + xs[end_ind] * frac
        tip_y = ys[start_ind] * (1 - frac) + ys[end_ind] * frac
        line.axes.annotate(
            '',
            xytext=(xs[start_ind], ys[start_ind]),
            xy=(tip_x, tip_y),
            arrowprops=dict(arrowstyle="->", color=arrow_color),
            size=size,
        )
# Select the data file: first CLI argument, or a default recording.
if len(sys.argv) > 1:
    fname = sys.argv[1]
else:
    fname = 'walking.pickle'

with open(fname, 'rb') as f:
    data = pickle.load(f)

# add all the extra calculated fields
aug = augment(data)
error_bounds = config.error_active_lim

# --- Figure 1: per-joint time/angle traces with error bounds. ---
fig, axs = plt.subplots(3, sharex=True)
fig.patch.set(alpha=0)
fig.suptitle('Time-angle plots: {}'.format(fname))
for i, ax in enumerate(axs):
    ax.plot(aug.t, np.degrees(aug.target[:,i]), label='target')
    l, = ax.plot(aug.t, np.degrees(aug.actual[:,i]), label='actual')
    # Out-of-bounds portions are drawn fainter in the matching colour.
    ax.plot(aug.t, np.degrees(aug.actual_oob[:,i]), color=l.get_color(), alpha=0.5)
    l, = ax.plot(aug.t, np.degrees(aug.servo[:,i]), label='servo')
    ax.plot(aug.t, np.degrees(aug.servo_oob[:,i]), color=l.get_color(), alpha=0.5)
    l, = ax.plot(aug.t, np.degrees(aug.error[:,i]), label='displacement')
    ax.axhline(y=np.degrees(error_bounds[i,0]), color=l.get_color(), alpha=0.5)
    ax.axhline(y=np.degrees(error_bounds[i,1]), color=l.get_color(), alpha=0.5)
    ax.grid()
    ax.set(xlabel='time / s', ylabel='$q_{}$ / degrees'.format(i+1))
    ax.legend()

# --- Figure 2: joint positions traced in the plane. ---
target_states = np.stack([State(target).joint_positions for target in aug.target], axis=1)
actual_states = np.stack([State(actual).joint_positions for actual in aug.actual], axis=1)
fig, ax = plt.subplots()
fig.patch.set(alpha=0)
fig.suptitle('Space-plots: {}'.format(fname))
for targets, actuals in zip(target_states, actual_states):
    l, = ax.plot(targets[:,0],targets[:,1], alpha=0.5, linewidth=3, label='target')
    ax.plot(actuals[:,0],actuals[:,1], color=l.get_color(), label='actual')
ax.legend()
ax.axis('equal')
ax.grid()

# --- Figure 3: spring displacement vs joint angle. ---
# this data is super noisy, so filter it with a short moving average
N = 11
filt = np.ones(N) / N
fig, axs = plt.subplots(3)
fig.patch.set(alpha=0)
fig.suptitle('Angle-force plots: {}'.format(fname))
for i, ax in enumerate(axs):
    # extract the data, and smooth it; trim the convolution tails so the
    # smoothed series lines up with the raw one
    actual_i = aug.actual[:,i]
    error_i = aug.error[:,i]
    sm_actual_i = np.convolve(actual_i, filt)[N//2:][:len(actual_i)]
    sm_error_i = np.convolve(error_i, filt)[N//2:][:len(error_i)]
    # plot both: raw (faint) and smoothed, with direction arrows
    l, = ax.plot(np.degrees(actual_i), np.degrees(error_i), alpha=0.25)
    l, = ax.plot(np.degrees(sm_actual_i), np.degrees(sm_error_i), color=l.get_color())
    add_arrow(l, n=5)
    ax.grid()
    # BUG FIX: .format(i+1) was applied to the ylabel string (which has no
    # placeholder) instead of the xlabel template, leaving a literal '{}'
    # in every x label.
    ax.set(xlabel=r'$\phi_{}$ / degrees'.format(i+1), ylabel='spring angle / degrees')

plt.show()
| [
"numpy.convolve",
"numpy.ones",
"pickle.load",
"numpy.argmax",
"numpy.diff",
"logger.augment",
"robot.base.State",
"numpy.concatenate",
"numpy.degrees",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1595, 1608), 'logger.augment', 'augment', (['data'], {}), '(data)\n', (1602, 1608), False, 'from logger import augment\n'), ((1661, 1689), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)'}), '(3, sharex=True)\n', (1673, 1689), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2668), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2666, 2668), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3089), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {}), '(3)\n', (3086, 3089), True, 'import matplotlib.pyplot as plt\n'), ((3690, 3700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3698, 3700), True, 'import matplotlib.pyplot as plt\n'), ((782, 810), 'numpy.concatenate', 'np.concatenate', (['([0], dists)'], {}), '(([0], dists))\n', (796, 810), True, 'import numpy as np\n'), ((1535, 1549), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1546, 1549), False, 'import pickle\n'), ((3047, 3057), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (3054, 3057), True, 'import numpy as np\n'), ((931, 962), 'numpy.argmax', 'np.argmax', (['(dists >= target_dist)'], {}), '(dists >= target_dist)\n', (940, 962), True, 'import numpy as np\n'), ((1809, 1837), 'numpy.degrees', 'np.degrees', (['aug.target[:, i]'], {}), '(aug.target[:, i])\n', (1819, 1837), True, 'import numpy as np\n'), ((1875, 1903), 'numpy.degrees', 'np.degrees', (['aug.actual[:, i]'], {}), '(aug.actual[:, i])\n', (1885, 1903), True, 'import numpy as np\n'), ((1936, 1968), 'numpy.degrees', 'np.degrees', (['aug.actual_oob[:, i]'], {}), '(aug.actual_oob[:, i])\n', (1946, 1968), True, 'import numpy as np\n'), ((2022, 2049), 'numpy.degrees', 'np.degrees', (['aug.servo[:, i]'], {}), '(aug.servo[:, i])\n', (2032, 2049), True, 'import numpy as np\n'), ((2081, 2112), 'numpy.degrees', 'np.degrees', (['aug.servo_oob[:, i]'], {}), '(aug.servo_oob[:, i])\n', (2091, 2112), True, 'import numpy as np\n'), ((2166, 2193), 'numpy.degrees', 'np.degrees', 
(['aug.error[:, i]'], {}), '(aug.error[:, i])\n', (2176, 2193), True, 'import numpy as np\n'), ((3435, 3455), 'numpy.degrees', 'np.degrees', (['actual_i'], {}), '(actual_i)\n', (3445, 3455), True, 'import numpy as np\n'), ((3457, 3476), 'numpy.degrees', 'np.degrees', (['error_i'], {}), '(error_i)\n', (3467, 3476), True, 'import numpy as np\n'), ((3504, 3527), 'numpy.degrees', 'np.degrees', (['sm_actual_i'], {}), '(sm_actual_i)\n', (3514, 3527), True, 'import numpy as np\n'), ((3529, 3551), 'numpy.degrees', 'np.degrees', (['sm_error_i'], {}), '(sm_error_i)\n', (3539, 3551), True, 'import numpy as np\n'), ((737, 751), 'numpy.diff', 'np.diff', (['xdata'], {}), '(xdata)\n', (744, 751), True, 'import numpy as np\n'), ((753, 767), 'numpy.diff', 'np.diff', (['ydata'], {}), '(ydata)\n', (760, 767), True, 'import numpy as np\n'), ((2230, 2260), 'numpy.degrees', 'np.degrees', (['error_bounds[i, 0]'], {}), '(error_bounds[i, 0])\n', (2240, 2260), True, 'import numpy as np\n'), ((2307, 2337), 'numpy.degrees', 'np.degrees', (['error_bounds[i, 1]'], {}), '(error_bounds[i, 1])\n', (2317, 2337), True, 'import numpy as np\n'), ((2487, 2500), 'robot.base.State', 'State', (['target'], {}), '(target)\n', (2492, 2500), False, 'from robot.base import State\n'), ((2578, 2591), 'robot.base.State', 'State', (['actual'], {}), '(actual)\n', (2583, 2591), False, 'from robot.base import State\n'), ((3293, 3320), 'numpy.convolve', 'np.convolve', (['actual_i', 'filt'], {}), '(actual_i, filt)\n', (3304, 3320), True, 'import numpy as np\n'), ((3358, 3384), 'numpy.convolve', 'np.convolve', (['error_i', 'filt'], {}), '(error_i, filt)\n', (3369, 3384), True, 'import numpy as np\n')] |
from object_detection.utils import label_map_util, visualization_utils as vis_util
import tensorflow as tf
import pandas as pd
import numpy as np
import cv2 as cv
import sys
import os
import collections
from pathlib import Path
BASE_DIR = Path(__file__).parent
class CaptchaSolver(object):
    """Reads a text captcha with a frozen TF1 object-detection model.

    Each character is detected as a separate box; the boxes above the
    score threshold are ordered left-to-right and their class labels are
    concatenated into the answer string.
    """
    def __init__(self):
        # 36 classes — presumably digits 0-9 plus letters; confirm against
        # model/labelmap.pbtxt.
        self.num_classes = 36
        self.labels_path = str((BASE_DIR / 'model/labelmap.pbtxt').resolve())
        self.modelckpt_path = str((BASE_DIR / 'model/frozen_inference_graph.pb').resolve())
        # Minimum detection score for a box to be kept.
        self.tolerance = 0.6
        self.model = None            # tf.Session, created lazily in __load_tfmodel
        self.detection_graph = None  # tf.Graph holding the frozen model
    def __load_label_map(self):
        # Build the mapping from numeric class ids to label dicts.
        label_map = label_map_util.load_labelmap(self.labels_path)
        categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.num_classes, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        return category_index
    def __load_tfmodel(self):
        # Deserialise the frozen inference graph and open a session on it.
        # NOTE(review): called from every predict_captcha() invocation, so
        # the graph is re-loaded on each prediction.
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.modelckpt_path , 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            self.model = tf.Session(graph=self.detection_graph)
    def get_boxes_coordinates(self, image, boxes, classes, scores, category_index, instance_masks=None, instance_boundaries=None, keypoints=None, use_normalized_coordinates=False, max_boxes_to_draw=6, min_score_thresh=.5, agnostic_mode=False, line_thickness=4, groundtruth_box_visualization_color='black', skip_scores=False, skip_labels=False):
        """Filter detections by score and convert their normalised box
        corners into pixel-space coordinate dicts.

        Returns a list of dicts with ``xmin``/``xmax``/``ymin``/``ymax`` in
        pixels, one per kept box, in insertion order.  The many keyword
        arguments mirror the object_detection visualisation API; here they
        only influence which boxes are collected — nothing is drawn.
        """
        box_to_display_str_map = collections.defaultdict(list)
        box_to_color_map = collections.defaultdict(str)
        box_to_instance_masks_map = {}
        box_to_instance_boundaries_map = {}
        box_to_score_map = {}
        box_to_keypoints_map = collections.defaultdict(list)
        if not max_boxes_to_draw:
            max_boxes_to_draw = boxes.shape[0]
        for i in range(min(max_boxes_to_draw, boxes.shape[0])):
            if scores is None or scores[i] > min_score_thresh:
                box = tuple(boxes[i].tolist())
                if instance_masks is not None:
                    box_to_instance_masks_map[box] = instance_masks[i]
                if instance_boundaries is not None:
                    box_to_instance_boundaries_map[box] = instance_boundaries[i]
                if keypoints is not None:
                    box_to_keypoints_map[box].extend(keypoints[i])
                if scores is None:
                    box_to_color_map[box] = groundtruth_box_visualization_color
                else:
                    # Build the "label: score%" display string (unused by the
                    # caller here, but kept from the visualisation code).
                    display_str = ''
                    if not skip_labels:
                        if not agnostic_mode:
                            if classes[i] in category_index.keys():
                                class_name = category_index[classes[i]]['name']
                            else:
                                class_name = 'N/A'
                            display_str = str(class_name)
                    if not skip_scores:
                        if not display_str:
                            display_str = '{}%'.format(int(100*scores[i]))
                        else:
                            display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
                    box_to_display_str_map[box].append(display_str)
                    box_to_score_map[box] = scores[i]
                    if agnostic_mode:
                        box_to_color_map[box] = 'DarkOrange'
                    else:
                        box_to_color_map[box] = vis_util.STANDARD_COLORS[classes[i] % len(vis_util.STANDARD_COLORS)]
        # Convert each kept box from normalised [0, 1] corners to pixels.
        coordinates_list = []
        for box, color in box_to_color_map.items():
            ymin, xmin, ymax, xmax = box
            height, width, channels = image.shape
            coordinate = dict(xmin=int(),xmax=int(),ymin=int(),ymax=int())
            coordinate['ymin'] = int(ymin*height)
            coordinate['ymax'] = int(ymax*height)
            coordinate['xmin'] = int(xmin*width)
            coordinate['xmax'] = int(xmax*width)
            coordinates_list.append(coordinate)
        return coordinates_list
    def predict_captcha(self, image_path):
        """Run detection on the image at ``image_path`` and return the
        solved captcha text (detected characters, left to right)."""
        self.__load_tfmodel()
        # Input/output tensors of the frozen detection graph.
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        image = cv.imread(image_path)
        image_rgb = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        # The model expects a batch dimension on the input image.
        image_expanded = np.expand_dims(image_rgb, axis=0)
        (boxes, scores, classes, num) = self.model.run([detection_boxes, detection_scores, detection_classes, num_detections],
                                                feed_dict={image_tensor: image_expanded})
        category_index = self.__load_label_map()
        coordinates = self.get_boxes_coordinates(image, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores), category_index, min_score_thresh=self.tolerance)
        digits = self.__get_digits_prediction(category_index, (boxes, scores, classes, num), coordinates)
        solved_captcha = self.__get_solved_captcha(digits)
        return solved_captcha
    def __get_digits_prediction(self, category_index, model_output, coordinates, threshold=0.6):
        """Pair each above-threshold detection with its pixel box and sort
        the results left-to-right by the box's x origin.

        NOTE(review): ``coordinates[x]`` lines up with detection ``x`` only
        because get_boxes_coordinates filtered with the same threshold
        (self.tolerance == 0.6) — verify if either threshold changes.
        """
        digits = []
        for x in range(len(model_output[1][0])):
            if model_output[1][0][x] > threshold:
                digits.append(dict(label=category_index[model_output[2][0][x]]['name'], score=float(model_output[1][0][x]),
                        coordenadas=coordinates[x], xmin=coordinates[x]['xmin']))
        return sorted(digits, key=lambda digit:digit['xmin'])
    def __get_solved_captcha(self, digits):
        # Concatenate the (already left-to-right sorted) labels.
        solved_captcha = ''
        for digit in digits:
            solved_captcha = solved_captcha + digit['label']
        return solved_captcha
| [
"tensorflow.Graph",
"tensorflow.gfile.GFile",
"pathlib.Path",
"tensorflow.Session",
"tensorflow.GraphDef",
"numpy.squeeze",
"collections.defaultdict",
"object_detection.utils.label_map_util.convert_label_map_to_categories",
"cv2.cvtColor",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"o... | [((469, 483), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (473, 483), False, 'from pathlib import Path\n'), ((929, 975), 'object_detection.utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['self.labels_path'], {}), '(self.labels_path)\n', (957, 975), False, 'from object_detection.utils import label_map_util, visualization_utils as vis_util\n'), ((1001, 1120), 'object_detection.utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'self.num_classes', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n self.num_classes, use_display_name=True)\n', (1047, 1120), False, 'from object_detection.utils import label_map_util, visualization_utils as vis_util\n'), ((1141, 1189), 'object_detection.utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1177, 1189), False, 'from object_detection.utils import label_map_util, visualization_utils as vis_util\n'), ((1311, 1321), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1319, 1321), True, 'import tensorflow as tf\n'), ((2133, 2162), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2156, 2162), False, 'import collections\n'), ((2190, 2218), 'collections.defaultdict', 'collections.defaultdict', (['str'], {}), '(str)\n', (2213, 2218), False, 'import collections\n'), ((2363, 2392), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2386, 2392), False, 'import collections\n'), ((5297, 5318), 'cv2.imread', 'cv.imread', (['image_path'], {}), '(image_path)\n', (5306, 5318), True, 'import cv2 as cv\n'), ((5344, 5380), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2RGB'], {}), '(image, cv.COLOR_BGR2RGB)\n', (5355, 5380), True, 'import cv2 as cv\n'), ((5406, 5439), 'numpy.expand_dims', 'np.expand_dims', (['image_rgb'], {'axis': '(0)'}), '(image_rgb, 
axis=0)\n', (5420, 5439), True, 'import numpy as np\n'), ((1398, 1411), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1409, 1411), True, 'import tensorflow as tf\n'), ((1703, 1741), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (1713, 1741), True, 'import tensorflow as tf\n'), ((5773, 5790), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (5783, 5790), True, 'import numpy as np\n'), ((5830, 5848), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (5840, 5848), True, 'import numpy as np\n'), ((1442, 1483), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.modelckpt_path', '"""rb"""'], {}), "(self.modelckpt_path, 'rb')\n", (1456, 1483), True, 'import tensorflow as tf\n'), ((1618, 1660), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1637, 1660), True, 'import tensorflow as tf\n'), ((5792, 5811), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (5802, 5811), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from frovedis.exrpc.server import FrovedisServer
from frovedis.mllib.tree import DecisionTreeClassifier
from frovedis.mllib.tree import DecisionTreeRegressor
import sys
import numpy as np
import pandas as pd
#Objective: Run without error
# initializing the Frovedis server
# Command-line handling: the first argument must be the frovedis_server
# launch command, which is forwarded to FrovedisServer.initialize below.
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
    print ('Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 -x /opt/nec/nosupport/frovedis/ve/bin/frovedis_server")')
    quit()
FrovedisServer.initialize(argvs[1])
# Small toy dataset: 4 samples x 7 features, with binary labels.
mat = pd.DataFrame([[10, 0, 1, 0, 0, 1, 0],
                    [0, 1, 0, 1, 0, 1, 0],
                    [0, 1, 0, 0, 1, 0, 1],
                    [1, 0, 0, 1, 0, 1, 0]],dtype=np.float64)
lbl = np.array([0, 1, 1.0, 0],dtype=np.float64)
# fitting input matrix and label on DecisionTree Classifier object
# (note: max_leaf_nodes=1 is intentionally tiny — this is a smoke test,
# objective is "run without error", not model quality)
dtc1 = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=5,
        min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,
        max_features=None, random_state=None, max_leaf_nodes=1,
        min_impurity_decrease=0.0,
        class_weight=None, presort=False, verbose = 0)
dtc = dtc1.fit(mat,lbl)
dtc.debug_print()
# predicting on train model
print("predicting on DecisionTree classifier model: ")
dtcm = dtc.predict(mat)
print (dtcm)
print (dtc.predict_proba(mat))
print("Accuracy score for predicted DecisionTree Classifier model")
print (dtc.score(mat,lbl))
# fitting input matrix and label on DecisionTree Regressor object
dtr1 = DecisionTreeRegressor(criterion='mse', splitter='best',
        max_depth=5, min_samples_split=2, min_samples_leaf=1,
        min_weight_fraction_leaf=0.0, max_features=None, random_state=None,
        max_leaf_nodes=1, min_impurity_decrease=0.0, min_impurity_split=None,
        class_weight=None, presort=False, verbose = 0)
lbl1 = np.array([1.2,0.3,1.1,1.9])
dtr = dtr1.fit(mat,lbl1)
dtr.debug_print()
# predicting on train model
print("predicting on DecisionTree Regressor model: ")
dtrm = dtr.predict(mat)
print (dtrm)
print("Root mean square for predicted DecisionTree Regressor model")
print (dtr.score(mat,lbl1))
#clean-up: release server-side model handles, then stop the server
dtc.release()
dtr.release()
FrovedisServer.shut_down()
| [
"frovedis.exrpc.server.FrovedisServer.shut_down",
"frovedis.mllib.tree.DecisionTreeClassifier",
"frovedis.mllib.tree.DecisionTreeRegressor",
"numpy.array",
"frovedis.exrpc.server.FrovedisServer.initialize",
"pandas.DataFrame"
] | [((517, 552), 'frovedis.exrpc.server.FrovedisServer.initialize', 'FrovedisServer.initialize', (['argvs[1]'], {}), '(argvs[1])\n', (542, 552), False, 'from frovedis.exrpc.server import FrovedisServer\n'), ((560, 689), 'pandas.DataFrame', 'pd.DataFrame', (['[[10, 0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 0, 1, 0, 1], [1, \n 0, 0, 1, 0, 1, 0]]'], {'dtype': 'np.float64'}), '([[10, 0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 0, 1,\n 0, 1], [1, 0, 0, 1, 0, 1, 0]], dtype=np.float64)\n', (572, 689), True, 'import pandas as pd\n'), ((751, 793), 'numpy.array', 'np.array', (['[0, 1, 1.0, 0]'], {'dtype': 'np.float64'}), '([0, 1, 1.0, 0], dtype=np.float64)\n', (759, 793), True, 'import numpy as np\n'), ((868, 1149), 'frovedis.mllib.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""gini"""', 'splitter': '"""best"""', 'max_depth': '(5)', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)', 'min_weight_fraction_leaf': '(0.0)', 'max_features': 'None', 'random_state': 'None', 'max_leaf_nodes': '(1)', 'min_impurity_decrease': '(0.0)', 'class_weight': 'None', 'presort': '(False)', 'verbose': '(0)'}), "(criterion='gini', splitter='best', max_depth=5,\n min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,\n max_features=None, random_state=None, max_leaf_nodes=1,\n min_impurity_decrease=0.0, class_weight=None, presort=False, verbose=0)\n", (890, 1149), False, 'from frovedis.mllib.tree import DecisionTreeClassifier\n'), ((1560, 1868), 'frovedis.mllib.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'criterion': '"""mse"""', 'splitter': '"""best"""', 'max_depth': '(5)', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)', 'min_weight_fraction_leaf': '(0.0)', 'max_features': 'None', 'random_state': 'None', 'max_leaf_nodes': '(1)', 'min_impurity_decrease': '(0.0)', 'min_impurity_split': 'None', 'class_weight': 'None', 'presort': '(False)', 'verbose': '(0)'}), "(criterion='mse', splitter='best', max_depth=5,\n 
min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,\n max_features=None, random_state=None, max_leaf_nodes=1,\n min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None,\n presort=False, verbose=0)\n", (1581, 1868), False, 'from frovedis.mllib.tree import DecisionTreeRegressor\n'), ((1930, 1960), 'numpy.array', 'np.array', (['[1.2, 0.3, 1.1, 1.9]'], {}), '([1.2, 0.3, 1.1, 1.9])\n', (1938, 1960), True, 'import numpy as np\n'), ((2260, 2286), 'frovedis.exrpc.server.FrovedisServer.shut_down', 'FrovedisServer.shut_down', ([], {}), '()\n', (2284, 2286), False, 'from frovedis.exrpc.server import FrovedisServer\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from six.moves import xrange
from wtte import transforms as tr
def timeline_plot(padded, title='', cmap="jet", plot=True, fig=None, ax=None):
    """Render a padded sequence matrix side by side in two alignments.

    Left pane: sequences as stored (right-padded, aligned at their start).
    Right pane: the same data left-padded so sequences align at their end,
    read as absolute time (assuming sequences end today).

    Returns (None, None) after showing the figure when ``plot`` is True,
    otherwise returns ``(fig, ax)`` for further drawing.
    """
    # Build a fresh pair of panes unless the caller supplied both.
    if fig is None or ax is None:
        fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(12, 4))
    shared_opts = dict(interpolation='none', aspect='auto',
                       cmap=cmap, origin='lower')
    # As-stored alignment: x-axis is time relative to each sequence start.
    ax[0].imshow(padded, **shared_opts)
    ax[0].set_ylabel('sequence')
    ax[0].set_xlabel('sequence time')
    # End-aligned view: x-axis becomes absolute time.
    ax[1].imshow(tr.right_pad_to_left_pad(padded), **shared_opts)
    ax[1].set_ylabel('sequence')
    ax[1].set_xlabel('absolute time')  # (Assuming sequences end today)
    fig.suptitle(title, fontsize=14)
    if not plot:
        return fig, ax
    fig.show()
    return None, None
def timeline_aggregate_plot(padded, title='', cmap="jet", plot=True):
    """Plot the padded sequence matrix (via timeline_plot) on the top row
    and the per-timestep mean on the bottom row, for both the as-stored
    (right-padded) and end-aligned (left-padded) views.

    Returns (None, None) after showing when ``plot`` is True, otherwise
    ``(fig, ax)`` with ax a 2x2 array of axes.
    """
    fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True,
                           sharey=False, figsize=(12, 8))
    # Top row: delegate to timeline_plot, drawing into ax[0] (a row of 2 axes).
    fig, ax[0] = timeline_plot(
        padded, title, cmap=cmap, plot=False, fig=fig, ax=ax[0])
    # Bottom-left: mean over sequences at each relative timestep
    # (nanmean ignores padding encoded as NaN).
    ax[1, 0].plot(np.nanmean(padded, axis=0), lw=0.5,
                  c='black', drawstyle='steps-post')
    ax[1, 0].set_title('mean/timestep')
    # Re-align so the second mean is taken over absolute time instead.
    padded = tr.right_pad_to_left_pad(padded)
    ax[1, 1].plot(np.nanmean(padded, axis=0), lw=0.5,
                  c='black', drawstyle='steps-post')
    ax[1, 1].set_title('mean/timestep')
    fig.suptitle(title, fontsize=14)
    if plot:
        fig.show()
        return None, None
    else:
        return fig, ax
| [
"wtte.transforms.right_pad_to_left_pad",
"numpy.nanmean",
"matplotlib.pyplot.subplots"
] | [((1092, 1166), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'nrows': '(2)', 'sharex': '(True)', 'sharey': '(False)', 'figsize': '(12, 8)'}), '(ncols=2, nrows=2, sharex=True, sharey=False, figsize=(12, 8))\n', (1104, 1166), True, 'import matplotlib.pyplot as plt\n'), ((1453, 1485), 'wtte.transforms.right_pad_to_left_pad', 'tr.right_pad_to_left_pad', (['padded'], {}), '(padded)\n', (1477, 1485), True, 'from wtte import transforms as tr\n'), ((358, 409), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'sharey': '(True)', 'figsize': '(12, 4)'}), '(ncols=2, sharey=True, figsize=(12, 4))\n', (370, 409), True, 'import matplotlib.pyplot as plt\n'), ((606, 638), 'wtte.transforms.right_pad_to_left_pad', 'tr.right_pad_to_left_pad', (['padded'], {}), '(padded)\n', (630, 638), True, 'from wtte import transforms as tr\n'), ((1311, 1337), 'numpy.nanmean', 'np.nanmean', (['padded'], {'axis': '(0)'}), '(padded, axis=0)\n', (1321, 1337), True, 'import numpy as np\n'), ((1504, 1530), 'numpy.nanmean', 'np.nanmean', (['padded'], {'axis': '(0)'}), '(padded, axis=0)\n', (1514, 1530), True, 'import numpy as np\n')] |
import numpy as np
import torch
from src.derivatives import jacobian, trace
def test_jacobian():
    """Check jacobian() against analytic Jacobians for linear, nonlinear,
    batched, and list-input cases."""
    batchsize = int(np.random.randint(1, 10))
    # vector * matrix --
    # Unbatched: d(F @ x)/dx == F exactly for a linear map.
    rand_lengths = np.random.randint(1, 10, 2)
    ins = torch.rand(tuple(list(rand_lengths[-1:])), requires_grad=True)
    factor = torch.rand(tuple(list(rand_lengths)))
    out = factor @ ins
    jac = jacobian(out, ins)
    assert torch.allclose(jac, factor)
    # Batched: the same linear map applied per batch element.
    ins = ins.unsqueeze(0).expand(batchsize, *ins.size())
    out = torch.einsum("ij,kj->ki", factor, ins)
    assert torch.allclose(torch.squeeze(out), out)
    bat_jac = jacobian(out, ins, batched=True)
    for i in range(batchsize):
        assert torch.allclose(bat_jac[i], factor)
    # test nonlinear case
    # d/dx sin(a*x + b) = a*cos(a*x + b), elementwise -> diagonal Jacobian.
    rand_lengths = np.random.randint(1, 10, 2)
    ins = torch.rand(
        batchsize, *tuple(list(rand_lengths[-1:])), requires_grad=True
    )
    out = torch.sin(3.15 * ins + 2.91)
    bat_jac = jacobian(out, ins, batched=True)
    expected = torch.diag_embed(3.15 * torch.cos(3.15 * ins + 2.91))
    assert torch.allclose(bat_jac, expected)
    # matrix * matrix --
    # Unbatched: the Jacobian of F @ X couples only matching column indices.
    rand_lengths = np.random.randint(1, 10, 3)
    ins = torch.rand(tuple(list(rand_lengths[-2:])), requires_grad=True)
    factor = torch.rand(tuple(list(rand_lengths[:-1])))
    out = factor @ ins
    jac = jacobian(out, ins)
    ans = jac.new_zeros(jac.size())
    for i in range(jac.size()[-1]):
        ans[:, i, :, i] = factor
    assert torch.allclose(jac, ans)
    # Batched
    ins = ins.unsqueeze(0).expand(batchsize, *ins.size())
    out = torch.einsum("ij,kjl->kil", factor, ins)
    bat_jac = jacobian(out, ins, batched=True)
    ans = jac.new_zeros(bat_jac.size())
    for b in range(batchsize):
        for i in range(bat_jac.size()[-1]):
            ans[b, :, i, :, i] = factor
    assert torch.allclose(bat_jac, ans)
    # Confirm agreement in complex case --
    # Unbatched: a high-rank random input pushed through relu.
    rand_lengths = np.random.randint(1, 7, 5)
    ins = torch.rand(tuple(list(rand_lengths)), requires_grad=True)
    out = torch.relu(ins)
    jac = jacobian(out, ins)
    # Check that lists work correctly
    out = torch.relu(ins)
    list_jac = jacobian(out, [ins, ins])
    assert all(torch.allclose(jac, list_jac[i]) for i in range(len(list_jac)))
    # Batched: a singleton batch must reproduce the unbatched Jacobian.
    ins = ins.view(-1, *ins.size())
    out = torch.relu(ins)
    bat_jac = jacobian(out, ins, batched=True)
    assert torch.allclose(jac, bat_jac[0])
def test_trace():
    """Exercise trace() on single, listed, and batched square inputs."""
    # Unbatched: must agree with torch.trace on a random square matrix.
    side = int(np.random.randint(1, 10, 1))
    matrix = torch.rand((side, side))
    expected = trace(matrix)
    assert torch.allclose(expected, torch.trace(matrix))
    # A list of tensors should yield one trace per entry.
    per_entry = trace([matrix, matrix])
    for entry in per_entry:
        assert torch.allclose(entry, expected)
    # Batched: every batch member is the same matrix, so every trace matches.
    n_batch = int(np.random.randint(1, 10))
    stacked = matrix.unsqueeze(0).expand(n_batch, *matrix.size())
    batched = trace(stacked)
    for b in range(n_batch):
        assert torch.allclose(batched[b], expected)
| [
"torch.sin",
"torch.relu",
"src.derivatives.trace",
"src.derivatives.jacobian",
"numpy.random.randint",
"torch.trace",
"torch.einsum",
"torch.cos",
"torch.squeeze",
"torch.allclose",
"torch.rand"
] | [((210, 237), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', '(2)'], {}), '(1, 10, 2)\n', (227, 237), True, 'import numpy as np\n'), ((395, 413), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {}), '(out, ins)\n', (403, 413), False, 'from src.derivatives import jacobian, trace\n'), ((425, 452), 'torch.allclose', 'torch.allclose', (['jac', 'factor'], {}), '(jac, factor)\n', (439, 452), False, 'import torch\n'), ((536, 574), 'torch.einsum', 'torch.einsum', (['"""ij,kj->ki"""', 'factor', 'ins'], {}), "('ij,kj->ki', factor, ins)\n", (548, 574), False, 'import torch\n'), ((640, 672), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {'batched': '(True)'}), '(out, ins, batched=True)\n', (648, 672), False, 'from src.derivatives import jacobian, trace\n'), ((800, 827), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', '(2)'], {}), '(1, 10, 2)\n', (817, 827), True, 'import numpy as np\n'), ((937, 965), 'torch.sin', 'torch.sin', (['(3.15 * ins + 2.91)'], {}), '(3.15 * ins + 2.91)\n', (946, 965), False, 'import torch\n'), ((980, 1012), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {'batched': '(True)'}), '(out, ins, batched=True)\n', (988, 1012), False, 'from src.derivatives import jacobian, trace\n'), ((1093, 1126), 'torch.allclose', 'torch.allclose', (['bat_jac', 'expected'], {}), '(bat_jac, expected)\n', (1107, 1126), False, 'import torch\n'), ((1190, 1217), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', '(3)'], {}), '(1, 10, 3)\n', (1207, 1217), True, 'import numpy as np\n'), ((1381, 1399), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {}), '(out, ins)\n', (1389, 1399), False, 'from src.derivatives import jacobian, trace\n'), ((1516, 1540), 'torch.allclose', 'torch.allclose', (['jac', 'ans'], {}), '(jac, ans)\n', (1530, 1540), False, 'import torch\n'), ((1624, 1664), 'torch.einsum', 'torch.einsum', (['"""ij,kjl->kil"""', 'factor', 'ins'], {}), "('ij,kjl->kil', factor, ins)\n", (1636, 
1664), False, 'import torch\n'), ((1679, 1711), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {'batched': '(True)'}), '(out, ins, batched=True)\n', (1687, 1711), False, 'from src.derivatives import jacobian, trace\n'), ((1878, 1906), 'torch.allclose', 'torch.allclose', (['bat_jac', 'ans'], {}), '(bat_jac, ans)\n', (1892, 1906), False, 'import torch\n'), ((1988, 2014), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)', '(5)'], {}), '(1, 7, 5)\n', (2005, 2014), True, 'import numpy as np\n'), ((2094, 2109), 'torch.relu', 'torch.relu', (['ins'], {}), '(ins)\n', (2104, 2109), False, 'import torch\n'), ((2120, 2138), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {}), '(out, ins)\n', (2128, 2138), False, 'from src.derivatives import jacobian, trace\n'), ((2188, 2203), 'torch.relu', 'torch.relu', (['ins'], {}), '(ins)\n', (2198, 2203), False, 'import torch\n'), ((2219, 2244), 'src.derivatives.jacobian', 'jacobian', (['out', '[ins, ins]'], {}), '(out, [ins, ins])\n', (2227, 2244), False, 'from src.derivatives import jacobian, trace\n'), ((2386, 2401), 'torch.relu', 'torch.relu', (['ins'], {}), '(ins)\n', (2396, 2401), False, 'import torch\n'), ((2416, 2448), 'src.derivatives.jacobian', 'jacobian', (['out', 'ins'], {'batched': '(True)'}), '(out, ins, batched=True)\n', (2424, 2448), False, 'from src.derivatives import jacobian, trace\n'), ((2461, 2492), 'torch.allclose', 'torch.allclose', (['jac', 'bat_jac[0]'], {}), '(jac, bat_jac[0])\n', (2475, 2492), False, 'import torch\n'), ((2591, 2629), 'torch.rand', 'torch.rand', (['(rand_length, rand_length)'], {}), '((rand_length, rand_length))\n', (2601, 2629), False, 'import torch\n'), ((2640, 2650), 'src.derivatives.trace', 'trace', (['ins'], {}), '(ins)\n', (2645, 2650), False, 'from src.derivatives import jacobian, trace\n'), ((2754, 2771), 'src.derivatives.trace', 'trace', (['[ins, ins]'], {}), '([ins, ins])\n', (2759, 2771), False, 'from src.derivatives import jacobian, trace\n'), ((2981, 
2991), 'src.derivatives.trace', 'trace', (['ins'], {}), '(ins)\n', (2986, 2991), False, 'from src.derivatives import jacobian, trace\n'), ((121, 145), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (138, 145), True, 'import numpy as np\n'), ((601, 619), 'torch.squeeze', 'torch.squeeze', (['out'], {}), '(out)\n', (614, 619), False, 'import torch\n'), ((719, 753), 'torch.allclose', 'torch.allclose', (['bat_jac[i]', 'factor'], {}), '(bat_jac[i], factor)\n', (733, 753), False, 'import torch\n'), ((2552, 2579), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', '(1)'], {}), '(1, 10, 1)\n', (2569, 2579), True, 'import numpy as np\n'), ((2682, 2698), 'torch.trace', 'torch.trace', (['ins'], {}), '(ins)\n', (2693, 2698), False, 'import torch\n'), ((2886, 2910), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2903, 2910), True, 'import numpy as np\n'), ((3038, 3065), 'torch.allclose', 'torch.allclose', (['ans[b]', 'trc'], {}), '(ans[b], trc)\n', (3052, 3065), False, 'import torch\n'), ((1052, 1080), 'torch.cos', 'torch.cos', (['(3.15 * ins + 2.91)'], {}), '(3.15 * ins + 2.91)\n', (1061, 1080), False, 'import torch\n'), ((2260, 2292), 'torch.allclose', 'torch.allclose', (['jac', 'list_jac[i]'], {}), '(jac, list_jac[i])\n', (2274, 2292), False, 'import torch\n'), ((2787, 2819), 'torch.allclose', 'torch.allclose', (['list_trc[i]', 'trc'], {}), '(list_trc[i], trc)\n', (2801, 2819), False, 'import torch\n')] |
import math
from matplotlib import pyplot as plt
import numpy as np
listx=[]
listy=[]
listz=[]
listw=[]
# Golden ratio, phi = (1 + sqrt(5)) / 2.
phi = (1 + 5 ** (1 / 2)) / 2

def function(x):
    """Return phi**x - phi (zero at x == 1)."""
    return phi ** x - phi

def function2(x):
    """Combine two Binet terms: F(x-1)*phi**x - F(x-2)*phi**(x-2)."""
    return (function4(x - 1) * phi ** x) - (function4(x - 2) * phi ** (x - 2))

def function4(x):
    """Binet's closed form for the x-th Fibonacci number.

    Bug fix: the original body used the module-level loop variable ``i``
    instead of the argument ``x``, so every call ignored its input
    (silently breaking function2, which passes x-1 and x-2).
    """
    return (phi ** x - (-1 * (phi - 1)) ** x) / 5 ** (1 / 2)
# Sample the three functions on [0, x) with step 0.1 and plot them.
x=float(input("last n"))
i=0
while i<x:
    listx.append(i)
    listy.append(function(i))
    listz.append(function2(i))
    listw.append(function4(i))
    i=i+0.1
listx=np.array(listx)
listy=np.array(listy)
listz=np.array(listz)
listw=np.array(listw)
# NOTE(review): this overwrites the function2 samples collected above —
# listz is replaced by (listy + phi) * ln(phi) before plotting; confirm intended.
listz=(listy+phi)*math.log(phi)
plt.plot( listx,listy, color='skyblue')
plt.plot( listx,listz, color='black')
plt.plot( listx,listw, color='red')
#plt.plot(listx,,color="pink")
plt.legend()
plt.show() | [
"matplotlib.pyplot.plot",
"math.log",
"numpy.array",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((522, 537), 'numpy.array', 'np.array', (['listx'], {}), '(listx)\n', (530, 537), True, 'import numpy as np\n'), ((545, 560), 'numpy.array', 'np.array', (['listy'], {}), '(listy)\n', (553, 560), True, 'import numpy as np\n'), ((568, 583), 'numpy.array', 'np.array', (['listz'], {}), '(listz)\n', (576, 583), True, 'import numpy as np\n'), ((591, 606), 'numpy.array', 'np.array', (['listw'], {}), '(listw)\n', (599, 606), True, 'import numpy as np\n'), ((641, 680), 'matplotlib.pyplot.plot', 'plt.plot', (['listx', 'listy'], {'color': '"""skyblue"""'}), "(listx, listy, color='skyblue')\n", (649, 680), True, 'from matplotlib import pyplot as plt\n'), ((682, 719), 'matplotlib.pyplot.plot', 'plt.plot', (['listx', 'listz'], {'color': '"""black"""'}), "(listx, listz, color='black')\n", (690, 719), True, 'from matplotlib import pyplot as plt\n'), ((721, 756), 'matplotlib.pyplot.plot', 'plt.plot', (['listx', 'listw'], {'color': '"""red"""'}), "(listx, listw, color='red')\n", (729, 756), True, 'from matplotlib import pyplot as plt\n'), ((790, 802), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (800, 802), True, 'from matplotlib import pyplot as plt\n'), ((806, 816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (814, 816), True, 'from matplotlib import pyplot as plt\n'), ((626, 639), 'math.log', 'math.log', (['phi'], {}), '(phi)\n', (634, 639), False, 'import math\n')] |
from moviepy.editor import *
from moviepy.audio.AudioClip import AudioArrayClip
import numpy as np
import math
# 15fps
# Build a 3-second white/black/white test clip: 45 frames of 84x84 RGB.
frames = np.concatenate([np.ones([15, 84, 84, 3]), np.zeros([15, 84, 84, 3]), np.ones([15, 84, 84, 3])], axis = 0)
# Stereo test tone: 440 Hz (left) and 660 Hz (right) sine waves at 44.1 kHz.
audioL = np.array([math.sin(2*math.pi*440*i/44100) for i in range(44100 * 3)])
audioR = np.array([math.sin(2*math.pi*660*i/44100) for i in range(44100 * 3)])
audios = np.stack([audioL, audioR], axis = 0)
print(frames.shape, audios.shape)
# Images
# Scale frames from [0, 1] to [0, 255] as a list (one array per frame).
frames = [255 * frames[i] for i in range(frames.shape[0])]
#frames = 255 * frames
image_clip = ImageSequenceClip(frames, fps=15)
# Audios
#audios = (audios * 32768).astype(np.int16)
# AudioArrayClip expects shape (n_samples, n_channels), hence the transpose.
audios = np.transpose(audios)
print(audios.shape, audios.min(), audios.max())
audioclip = AudioArrayClip(audios, fps=44100)
#audioclip.write_audiofile('audio.mp3')
# Make video
video_clip = image_clip.set_audio(audioclip)
video_clip.write_videofile("result.mp4", fps=15, temp_audiofile="temp-audio.m4a", remove_temp=True, codec="libx264", audio_codec="aac")
| [
"numpy.ones",
"moviepy.audio.AudioClip.AudioArrayClip",
"math.sin",
"numpy.stack",
"numpy.zeros",
"numpy.transpose"
] | [((402, 436), 'numpy.stack', 'np.stack', (['[audioL, audioR]'], {'axis': '(0)'}), '([audioL, audioR], axis=0)\n', (410, 436), True, 'import numpy as np\n'), ((675, 695), 'numpy.transpose', 'np.transpose', (['audios'], {}), '(audios)\n', (687, 695), True, 'import numpy as np\n'), ((756, 789), 'moviepy.audio.AudioClip.AudioArrayClip', 'AudioArrayClip', (['audios'], {'fps': '(44100)'}), '(audios, fps=44100)\n', (770, 789), False, 'from moviepy.audio.AudioClip import AudioArrayClip\n'), ((145, 169), 'numpy.ones', 'np.ones', (['[15, 84, 84, 3]'], {}), '([15, 84, 84, 3])\n', (152, 169), True, 'import numpy as np\n'), ((171, 196), 'numpy.zeros', 'np.zeros', (['[15, 84, 84, 3]'], {}), '([15, 84, 84, 3])\n', (179, 196), True, 'import numpy as np\n'), ((198, 222), 'numpy.ones', 'np.ones', (['[15, 84, 84, 3]'], {}), '([15, 84, 84, 3])\n', (205, 222), True, 'import numpy as np\n'), ((254, 293), 'math.sin', 'math.sin', (['(2 * math.pi * 440 * i / 44100)'], {}), '(2 * math.pi * 440 * i / 44100)\n', (262, 293), False, 'import math\n'), ((333, 372), 'math.sin', 'math.sin', (['(2 * math.pi * 660 * i / 44100)'], {}), '(2 * math.pi * 660 * i / 44100)\n', (341, 372), False, 'import math\n')] |
#!/usr/bin/env python3
from os import mkdir, remove
from os.path import isdir
from datetime import datetime, timezone, timedelta
#import struct
import numpy as np
from LoLIM.utilities import processed_data_dir, v_air
from LoLIM.IO.raw_tbb_IO import filePaths_by_stationName, MultiFile_Dal1, read_station_delays, read_antenna_pol_flips, read_bad_antennas, read_antenna_delays
from LoLIM.IO.metadata import ITRF_to_geoditic
from LoLIM.findRFI import window_and_filter
from LoLIM.interferometry import read_interferometric_PSE as R_IPSE
from LoLIM.getTrace_fromLoc import getTrace_fromLoc
from LoLIM.signal_processing import remove_saturation, upsample_and_correlate, parabolic_fit, num_double_zeros, data_cut_inspan, locate_data_loss
from LoLIM.signal_processing import parabolic_fitter
from LMA_window_data import writer_manager, antenna_symbols
def window_data(TBB_data, filter, edge_length, start_sample_number, end_sample_number, amp_tresh, num_dataLoss_zeros, max_num_antennas, data_writer_manager,
                inject_T_noise=None):
    """Scan even-indexed antennas of one station for LMA-style pulses.

    Blocks of TBB data are filtered, their envelope magnitude taken, and
    scanned for local maxima above ``amp_tresh``. Each accepted peak time
    is handed to a per-antenna writer obtained from ``data_writer_manager``.

    Parameters
    ----------
    TBB_data : station file reader (MultiFile_Dal1-like).
    filter : window_and_filter instance; its ``blocksize`` sets the read size.
    edge_length : samples discarded at both block edges (filter edge effects).
    start_sample_number, end_sample_number : sample range to scan.
    amp_tresh : amplitude threshold for pulse detection.
    num_dataLoss_zeros : run-length of zeros that flags data loss.
    max_num_antennas : cap on the number of (even) antennas processed.
    data_writer_manager : yields one writer per processed antenna.
    inject_T_noise : optional std-dev of a Gaussian clock offset added to
        all antenna start times (one draw per station).

    Returns
    -------
    float : fraction of processed blocks that were free of data loss.
    """
    ## note, the clock noise is one value per all pulses on station
    if inject_T_noise is not None:
        clock_noise = np.random.normal(scale=inject_T_noise)
    else:
        clock_noise = 0.0
    ITRFantenna_locations = TBB_data.get_ITRF_antenna_positions()
    antenna_names = TBB_data.get_antenna_names()
    sname = TBB_data.get_station_name()
    posix_timestamp = TBB_data.get_timestamp()
    num_station_antennas = len(ITRFantenna_locations)
    # use only even antenna indices (every other antenna), capped by caller
    num_ants = min(int(num_station_antennas/2), max_num_antennas)
    antennas_to_use = np.arange(num_ants)*2
    writers = []
    for antI in antennas_to_use:
        name = antenna_names[antI]
        lat_lon_alt = ITRF_to_geoditic(ITRFantenna_locations[antI])
        writers.append(data_writer_manager.get_next_writer(name, lat_lon_alt, posix_timestamp))
    antenna_start_times = TBB_data.get_time_from_second()
    antenna_start_times += clock_noise
    #### allocate memory ####
    blocksize = filter.blocksize
    # Per-antenna bookkeeping over ALL station antennas (not only loaded ones);
    # the shared initial [] entries are harmless — they are replaced wholesale.
    data_loss_segments = [[]]*num_station_antennas
    # bug fix: np.complex / np.int were removed from numpy (1.20+ deprecation);
    # the builtin types are the documented replacements.
    workspace = np.empty(blocksize, dtype=complex)
    data_blocks = np.empty((num_station_antennas, blocksize), dtype=np.double)
    current_samples = np.empty(num_station_antennas, dtype=int)
    #### initialize data
    def load_data_block(ant_i, sample_number):
        # Read one block starting edge_length before the requested sample,
        # filter it, and store the envelope magnitude for antenna ant_i.
        sample_number -= edge_length
        TMP = TBB_data.get_data(sample_number, blocksize, antenna_index=ant_i)
        data_loss_spans, DL = locate_data_loss(TMP, num_dataLoss_zeros)
        workspace[:] = TMP
        workspace[:] = filter.filter(workspace)
        np.abs(workspace, out=data_blocks[ant_i, :])
        data_loss_segments[ant_i] = data_loss_spans
        current_samples[ant_i] = sample_number
        # Return the usable interior of the block and a data-loss flag.
        return data_blocks[ant_i, edge_length:-edge_length], len(data_loss_spans) > 0
    for ant_i in antennas_to_use:
        load_data_block(ant_i, start_sample_number)
    print_width = 10  ## progress print period, in blocks
    bin_width = int((80e-6)/(5e-9))  # 80 us hold-off window, in 5 ns samples
    half_bin_width = int(bin_width/2)
    nominal_blocksize = blocksize - 2*edge_length - bin_width
    number_blocks = ((end_sample_number-start_sample_number)/nominal_blocksize) + 1
    blocks_total = 0
    blocks_found = 0
    last_pulse_10us_index = None
    current_sample = start_sample_number
    last_peak_time = None
    while current_sample < end_sample_number:
        blocks_total += 1
        if not blocks_total % print_width:
            print(sname, blocks_total, blocks_total/number_blocks)
        for ant_i, writer in zip(antennas_to_use, writers):
            data, has_dataloss = load_data_block(ant_i, current_sample)
            if has_dataloss:
                continue
            local_start_time = current_sample*5.0E-9 + antenna_start_times[ant_i]
            blocks_found += 1
            # Start scanning after the last accepted pulse, if it falls in this block.
            start_i = half_bin_width
            if (last_peak_time is not None) and ((local_start_time + start_i*5e-9) < last_peak_time):
                start_i = int((last_peak_time-local_start_time)/5e-9) + 1
            # bug fix: the original loop started at half_bin_width, leaving
            # start_i dead; samples before last_peak_time would be rejected
            # by the dead-time checks below anyway, so skipping them here
            # only avoids redundant work.
            for i in range(start_i, len(data)-half_bin_width):
                if data[i] > amp_tresh:
                    window = data[i-half_bin_width:i+half_bin_width]
                    AM = np.argmax(window)
                    if AM == half_bin_width:
                        # sample i is the maximum of its 80 us window
                        peak_time = local_start_time + i*5.0E-9
                        pulse_10us_index = int((peak_time-int(peak_time))/(10e-6))
                        # Accept at most one pulse per 10 us slot of the second,
                        # and enforce a 40 us dead time between accepted pulses.
                        if (last_pulse_10us_index is None or pulse_10us_index != last_pulse_10us_index) and (last_peak_time is None or (peak_time-last_peak_time) > (40e-6)):
                            if (last_peak_time is not None) and peak_time < last_peak_time:
                                # sanity check; original message was an
                                # anonymization placeholder ("<NAME>!")
                                print("pulse times are out of order!")
                                quit()
                            writer.write_pulse(peak_time)
                            last_peak_time = peak_time
                            last_pulse_10us_index = pulse_10us_index
                            break
        current_sample += nominal_blocksize
    for w in writers:
        w.finalize()
    return blocks_found/blocks_total
| [
"numpy.random.normal",
"numpy.abs",
"LoLIM.IO.metadata.ITRF_to_geoditic",
"numpy.argmax",
"LoLIM.signal_processing.locate_data_loss",
"numpy.empty",
"numpy.arange"
] | [((2264, 2301), 'numpy.empty', 'np.empty', (['blocksize'], {'dtype': 'np.complex'}), '(blocksize, dtype=np.complex)\n', (2272, 2301), True, 'import numpy as np\n'), ((2322, 2382), 'numpy.empty', 'np.empty', (['(num_station_antennas, blocksize)'], {'dtype': 'np.double'}), '((num_station_antennas, blocksize), dtype=np.double)\n', (2330, 2382), True, 'import numpy as np\n'), ((2475, 2519), 'numpy.empty', 'np.empty', (['num_station_antennas'], {'dtype': 'np.int'}), '(num_station_antennas, dtype=np.int)\n', (2483, 2519), True, 'import numpy as np\n'), ((1177, 1215), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'inject_T_noise'}), '(scale=inject_T_noise)\n', (1193, 1215), True, 'import numpy as np\n'), ((1652, 1671), 'numpy.arange', 'np.arange', (['num_ants'], {}), '(num_ants)\n', (1661, 1671), True, 'import numpy as np\n'), ((1790, 1835), 'LoLIM.IO.metadata.ITRF_to_geoditic', 'ITRF_to_geoditic', (['ITRFantenna_locations[antI]'], {}), '(ITRFantenna_locations[antI])\n', (1806, 1835), False, 'from LoLIM.IO.metadata import ITRF_to_geoditic\n'), ((2825, 2866), 'LoLIM.signal_processing.locate_data_loss', 'locate_data_loss', (['TMP', 'num_dataLoss_zeros'], {}), '(TMP, num_dataLoss_zeros)\n', (2841, 2866), False, 'from LoLIM.signal_processing import remove_saturation, upsample_and_correlate, parabolic_fit, num_double_zeros, data_cut_inspan, locate_data_loss\n'), ((2952, 2996), 'numpy.abs', 'np.abs', (['workspace'], {'out': 'data_blocks[ant_i, :]'}), '(workspace, out=data_blocks[ant_i, :])\n', (2958, 2996), True, 'import numpy as np\n'), ((4732, 4749), 'numpy.argmax', 'np.argmax', (['window'], {}), '(window)\n', (4741, 4749), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# run in py3 !!
import os
# Pin the process to GPU 1 before TensorFlow is imported.
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
import tensorflow as tf
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
# Grow GPU memory on demand instead of grabbing it all up front.
config.gpu_options.allow_growth = True
tf.Session(config=config)
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
from keras import backend as K
import keras.layers.convolutional as conv
from keras.layers import merge
from keras.wrappers.scikit_learn import KerasRegressor
from keras import utils
from keras.layers.pooling import MaxPooling1D, MaxPooling2D
from keras.layers import pooling
from keras.models import Sequential, Model
from keras.regularizers import l1, l2
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras.callbacks import EarlyStopping
from keras import callbacks
from keras import backend as K
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.models import Model
from keras import initializers, layers
from keras.optimizers import SGD, Adadelta, Adam
from keras.regularizers import l1, l2
from keras import regularizers
import sys
# Make sibling modules importable when run as a script.
sys.path.append('.')
from hist_figure import his_figures
# Output-file prefix: first CLI argument if given, otherwise a timestamp.
if len(sys.argv) > 1:
    prefix = sys.argv[1]
else:
    prefix = time.time()
DATAPATH = '5fold/'       # directory holding the 5-fold CSV splits
RESULT_PATH = './results/'
feature_num = 25
batch_num = 2
# batch_size = 32
batch_size = 512
SEQ_LENGTH = 20
STATEFUL = False
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
def get_data(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
    """Load one fold CSV, one-hot-expand the week/month columns, MaxAbs-scale
    features and the 'error' target, and return train/val/test arrays.

    Bug fix: ``.ix`` and ``.as_matrix()`` were removed from pandas
    (pandas 1.0 / 0.25); replaced with ``.iloc`` and ``.values``, which
    behave identically here (purely positional selection).

    Returns either split_CV(...) output (issplit=True) or the tuple
    (X_train, Y_train, X_test, Y_test, X_val, Y_val) built from the
    UNscaled features/target (issplit=False), matching the original.
    """
    fold_index = 1  # kept for the commented-out test-set load below
    ###
    dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
              'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
    parse_dates = ['date']
    print(path_to_dataset)
    df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
    print(df.columns)
    df = df[df['error'] >= 0]
    # df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")

    def helper(x):
        # "[0,1,0,...]" string -> {"<len>-<pos>": value} dict, so that
        # .apply(pd.Series) expands it into one column per position.
        split = list(map(int, x.strip('[').strip(']').split(',')))
        d = {}
        for counter, value in enumerate(split):
            k = str(len(split)) + "-" + str(counter)
            d[k] = value
        return d

    # df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
    df_week = df['week'].apply(helper).apply(pd.Series).values  # 7
    df_month = df['month'].apply(helper).apply(pd.Series).values  # 12
    df_year = df['year'].apply(helper).apply(pd.Series).values  # 3
    df_empty = df[['super', 'com_date', 'error', 'numbers']].copy()
    # print(df_empty)
    df_super = df_empty.iloc[:, [0]]
    df_com_date = df_empty.iloc[:, [1]]
    df_error = df_empty.iloc[:, [2]]
    df_numbers = df_empty.iloc[:, [3]]
    # Unscaled copies, used only by the issplit=False branch below.
    X_train_ = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
    Y_train_ = df_error.values
    ss_x = preprocessing.MaxAbsScaler()
    ss_y = preprocessing.MaxAbsScaler()
    global scaler
    scaler = ss_y  # exposed so callers can inverse_transform predictions
    # ss_x = preprocessing.StandardScaler()
    array_new = ss_x.fit_transform(df_empty.iloc[:, [0]])
    df_super = pd.DataFrame(array_new)
    array_new = ss_x.fit_transform(df_empty.iloc[:, [1]])
    df_com_date = pd.DataFrame(array_new)
    array_new = ss_x.fit_transform(df_empty.iloc[:, [3]])
    df_numbers = pd.DataFrame(array_new)
    array_new = ss_y.fit_transform(df_empty.iloc[:, [2]])
    df_error = pd.DataFrame(array_new)
    df_week = ss_x.fit_transform(df_week)
    df_week = pd.DataFrame(df_week)
    df_month = ss_x.fit_transform(df_month)
    df_month = pd.DataFrame(df_month)
    X_train = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
    Y_train = df_error.values
    print('Xshape:' + str(X_train.shape))
    print('Yshape:' + str(Y_train.shape))
    y_arr = Y_train.T.tolist()
    # print(y_arr)
    try:
        y_arr = ss_y.inverse_transform(y_arr)
        #draw_error_line(y_arr[0], df)
        #draw_error_bar(y_arr[0])
    except Exception as e:
        print(e)
    if not issplit:
        print('Xshape:' + str(X_train.shape))
        print('Yshape:' + str(Y_train.shape))
        X_train, X_test, Y_train, Y_test = train_test_split(X_train_, Y_train_, test_size=0.1, shuffle=False)
        X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
        return X_train, Y_train, X_test, Y_test, X_val, Y_val
    else:
        return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
import datetime
def get_data_single_user(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
    """Load a single user's usage CSV, one-hot-expand the calendar fields,
    MaxAbs-scale the features/target, and return training data.

    Parameters
    ----------
    path_to_dataset : str
        CSV file name, resolved relative to the global DATAPATH.
    sequence_length : int
        Window length forwarded to split_CV when ``issplit`` is True.
    stateful : bool
        Kept for signature compatibility; split_CV is always called with
        stateful=False (matching the original behaviour).
    issplit : bool
        When False return the raw (X_train, Y_train); otherwise return the
        train/test/val windows produced by split_CV.

    Side effects: sets the module-level ``scaler`` to the fitted target
    scaler and draws the error line/bar plots for the rescaled target series.
    """
    dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
              'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
    parse_dates = ['date']
    print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' + path_to_dataset)
    df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
    print(df.columns)
    # Drop rows with negative usage readings.
    df = df[df['usage'] >= 0]

    def helper(x):
        """Turn a stringified int list like '[0, 1, 0]' into a dict keyed '<len>-<pos>'."""
        split = list(map(int, x.strip('[').strip(']').split(',')))
        d = {}
        for counter, value in enumerate(split):
            k = str(len(split)) + "-" + str(counter)
            d[k] = value
        return d

    # Expand the encoded calendar columns into one-hot style matrices.
    # .values replaces the pandas .as_matrix() API that was removed in 1.0.
    df_week = df['week'].apply(helper).apply(pd.Series).values  # 7 columns
    df_month = df['month'].apply(helper).apply(pd.Series).values  # 12 columns
    df_empty = df[['com_date', 'usage']].copy()
    ss_x = preprocessing.MaxAbsScaler()
    ss_y = preprocessing.MaxAbsScaler()
    global scaler
    # Expose the fitted target scaler so predictions can be inverse-transformed later.
    scaler = ss_y
    # .iloc replaces the removed .ix indexer; the frame has a default
    # integer index, so the positional selection is identical.
    array_new = ss_x.fit_transform(df_empty.iloc[:, [0]])
    df_com_date = pd.DataFrame(array_new)
    array_new = ss_y.fit_transform(df_empty.iloc[:, [1]])
    df_usage = pd.DataFrame(array_new)
    df_week = pd.DataFrame(ss_x.fit_transform(df_week))
    df_month = pd.DataFrame(ss_x.fit_transform(df_month))
    # NOTE(review): only the calendar one-hots feed X_train; the scaled
    # com_date column is computed but never stacked — kept as in the original.
    X_train = np.column_stack((df_week, df_month))
    Y_train = df_usage.values
    print(X_train)
    print(Y_train.shape)
    y_arr = Y_train.T.tolist()
    print(df)
    y_arr = ss_y.inverse_transform(y_arr)
    draw_error_line(y_arr[0], df)
    draw_error_bar(y_arr[0])
    if not issplit:
        return X_train, Y_train
    else:
        return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
def inverse_xy_transform(scaler, *para):
    """Undo the scaler transform on each given array.

    Every array in *para* is reshaped to a single column (n, 1), echoed to
    stdout for debugging, and passed through ``scaler.inverse_transform``.
    Returns the list of inverse-transformed column arrays, in input order.
    """
    restored = []
    for arr in para:
        column = arr.reshape(-1, 1)
        print(column)
        restored.append(scaler.inverse_transform(column))
    return restored
def split_CV(X_train, Y_train, sequence_length=20, stateful=False):
    """return ndarray

    Slide a window of ``sequence_length`` rows over the features: sample i
    is X_train[i : i + sequence_length] and its label is the single value
    Y_train[i + sequence_length] (one-step-ahead forecasting). The samples
    are then split chronologically (shuffle=False) into train / test /
    validation subsets and returned as
    [X_train, train_y_disorder, X_test, test_y_disorder, X_val, Y_val].
    """
    print(X_train)
    print(Y_train.shape[0])
    result_x = []
    result_y = []
    for index in range(len(Y_train) - sequence_length):
        result_x.append(X_train[index: index + sequence_length])
        # result_y.append(Y_train[index: index + sequence_length])
        # Label is the value immediately after the window (one-step-ahead).
        result_y.append(Y_train[index + sequence_length])
    X_train = np.array(result_x)
    Y_train = np.array(result_y)
    print(X_train.shape)  # (705, 20, 24)
    print(Y_train.shape)  # (705, 1)
    print('##################################################################')
    if stateful == True:
        # X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1,shuffle=False)
        # Fixed chronological cut at 640 samples — presumably chosen to be
        # divisible by the stateful batch size; TODO confirm against batch_size.
        cp_X_train = X_train.copy()
        cp_Y_train = Y_train.copy()
        X_train = cp_X_train[:640, ...]
        X_test = cp_X_train[640:, ...]
        Y_train = cp_Y_train[:640, ...]
        Y_test = cp_Y_train[640:, ...]
        print(X_test.shape[0])  #
        print(Y_test.shape[0])  #
        X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
        print('##################################################################')
    if stateful == False:
        # Chronological 10% test split, then a further 10% of the remaining
        # training data carved out as the validation set.
        X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
        X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
    # print(X_train.shape)#(705, 20, 24)
    # print(Y_train.shape)#(705, 1)
    # train_x_disorder = X_train.reshape((X_train.shape[0],X_train.shape[1] , feature_num))
    # test_x_disorder = X_test.reshape((X_test.shape[0],X_test.shape[1], feature_num ))
    # X_val = X_val.reshape((X_val.shape[0], X_val.shape[1] , feature_num))
    # print(train_x_disorder.dtype)
    # Labels as column vectors (n, 1) for the regression head.
    train_y_disorder = Y_train.reshape(-1, 1)
    test_y_disorder = Y_test.reshape(-1, 1)
    Y_val = Y_val.reshape(-1, 1)
    print(X_train.shape[0])  # (705, 20, 24)
    print(Y_train.shape[0])  # (705, 1)
    print('@' * 40)
    # print(X_test)
    print(train_y_disorder.shape)
    print('@' * 40)
    return [X_train, train_y_disorder, X_test, test_y_disorder, X_val, Y_val]  # ndarray
def LSTM2(X_train):
    """Build and compile the two-layer LSTM regressor.

    Layer plan is [input=1, 30, 30, output=1]. When the global STATEFUL
    flag is set, the first LSTM is built with a fixed ``batch_input_shape``
    (required by Keras stateful RNNs); otherwise a plain ``input_shape`` is
    used. The output head is a single regularized Dense unit followed by
    BatchNormalization and a linear activation. Compiled with MSE loss and
    the Adam optimizer (lr=1e-3).

    Parameters
    ----------
    X_train : ndarray of shape (samples, timesteps, features)
        Only its shape is inspected, to size the input layer.

    Returns
    -------
    The compiled Keras Sequential model.
    """
    model = Sequential()
    # layers = [1, 50, 100, 1]
    layers = [1, 30, 30, 1]
    if STATEFUL == False:
        model.add(LSTM(
            layers[1],
            input_shape=(X_train.shape[1], X_train.shape[2]),
            stateful=STATEFUL,
            return_sequences=True,
            kernel_initializer='he_normal'
            # , kernel_regularizer=l2(0.01)
        ))
    else:
        # Stateful RNNs need the batch size baked into the input shape.
        model.add(LSTM(
            layers[1],
            batch_input_shape=(batch_size, X_train.shape[1], X_train.shape[2]),
            stateful=STATEFUL,
            return_sequences=True,
            kernel_initializer='he_normal'
            # , kernel_regularizer=l2(0.01)
        ))
    # model.add(Dropout(0.2))
    model.add(LSTM(
        layers[2],
        stateful=STATEFUL,
        return_sequences=False,
        kernel_initializer='he_normal'
        # ,kernel_regularizer=l2(0.01)
    ))
    model.add(Dropout(0.2))
    # L2 weight + L1 activity regularization on the linear output head.
    model.add(Dense(
        layers[3]
        , kernel_initializer='he_normal'
        , kernel_regularizer=l2(0.01)
        , activity_regularizer=l1(0.01)
    ))
    model.add(BatchNormalization())
    model.add(Activation("linear"))
    start = time.time()
    # The unused SGD/Adadelta/RMSprop instances from earlier experiments
    # were removed; only Adam is actually compiled in.
    adam = Adam(lr=1e-3)
    model.compile(loss="mse", optimizer=adam)
    print("Compilation Time : ", time.time() - start)
    return model
def draw_error_bar(y_array):
    """Render *y_array* as a bar chart and save it.

    The figure is written to <RESULT_PATH><batch_size>bar_error.png at
    300 dpi; nothing is shown interactively.
    """
    figure = plt.figure()
    axis = figure.add_subplot(1, 1, 1)
    positions = list(range(len(y_array)))
    plt.bar(positions, y_array, label='error')
    plt.legend()
    plt.title('error bar')
    axis.grid()
    figure.tight_layout()
    output_name = RESULT_PATH + str(batch_size) + 'bar_error.png'
    figure.savefig(output_name, dpi=300)
def draw_error_line(y_array, df):
    """Plot *y_array* against the raw df['error'] column and save the figure.

    Both series are drawn on the same axes (both labelled 'error', matching
    the original) and written to <RESULT_PATH><batch_size>line_error.png at
    300 dpi.
    """
    figure = plt.figure()
    axis = figure.add_subplot(1, 1, 1)
    plt.plot(list(range(len(y_array))), y_array, label='error')
    plt.plot(list(range(len(df['error']))), df['error'], label='error')
    plt.legend()
    plt.title('error plot')
    axis.grid()
    figure.tight_layout()
    output_name = RESULT_PATH + str(batch_size) + 'line_error.png'
    figure.savefig(output_name, dpi=300)
def draw_scatter(predicted, y_test, X_test, x_train, y_train, data_file):
    """Save a grouped truth-vs-predict bar chart and a truth/predict
    scatter plot for the test split.

    Output files (both at 300 dpi):
      <RESULT_PATH><batch_size><data_file><prefix>bar_lstm.png
      <RESULT_PATH><batch_size><data_file><prefix>_scatter_lstm.png

    X_test, x_train and y_train are accepted for signature compatibility
    but not used in this body.
    """
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    x = list(range(len(predicted)))
    # Two bar groups per sample: a 0.8 total width shared between truth and predict.
    total_width, n = 0.8, 2
    width = total_width / n
    plt.bar(x, y_test.T[0], width=width, label='truth', fc='y')
    # Shift positions by one bar width so the predict bars sit beside the truth bars.
    for i in range(len(x)):
        x[i] = x[i] + width
    plt.bar(x, predicted, width=width, label='predict', fc='r')
    # plt.legend(handles=[line1, line2,line3])
    plt.legend()
    plt.title('lstm')
    # plt.show()
    axes.grid()
    fig.tight_layout()
    fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'bar_lstm.png', dpi=300)
    fig = plt.figure()
    plt.scatter(y_test.T[0], predicted)
    # plt.plot(y_test.T[0], predicted, linewidth =0.3, color='red')
    # Axes fixed to [0, 1] — presumably because the target is MaxAbs-scaled; confirm.
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('truth')
    plt.ylabel('predict')
    # plt.show()
    fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + '_scatter_lstm.png',
                dpi=300)
def draw_line(predicted, y_test, X_test, x_train, y_train, data_file):
    """Save three test-split diagnostics at 300 dpi: a grouped bar chart,
    a truth/predict scatter plot, and a line plot, under file names keyed
    by batch_size, data_file and prefix.

    X_test, x_train and y_train are accepted for signature compatibility
    but not used in this body.
    """
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    x = list(range(len(predicted)))
    total_width, n = 0.8, 2
    width = total_width / n
    plt.bar(x, y_test.T[0], width=width, label='True', fc='y')
    # Shift positions by one bar width so the predicted bars sit beside the true bars.
    for i in range(len(x)):
        x[i] = x[i] + width
    plt.bar(x, predicted, width=width, label='Predicted', fc='r')
    # plt.legend(handles=[line1, line2,line3])
    plt.legend()
    plt.title('lstm')
    # plt.show()
    axes.grid()
    # NOTE(review): duplicate of the add_subplot call above — harmless but redundant.
    axes = fig.add_subplot(1, 1, 1)
    fig.tight_layout()
    fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'bar_lstm.png', dpi=300)
    fig = plt.figure()
    plt.scatter(y_test.T[0], predicted)
    # plt.plot(y_test.T[0], predicted, linewidth =0.3, color='red')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('True')
    plt.ylabel('Predicted')
    # plt.show()
    fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + '_scatter_lstm.png',
                dpi=300)
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    plt.plot(x, y_test.T[0], label='True')
    # NOTE(review): x was already shifted by `width` for the bar chart and is
    # shifted again here, so the Predicted line ends up offset by 2*width
    # from True — looks unintentional; confirm before relying on this plot.
    for i in range(len(x)):
        x[i] = x[i] + width
    plt.plot(x, predicted, label='Predicted')
    plt.legend()
    axes.grid()
    fig.tight_layout()
    fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'line_lstm.png', dpi=300)
def stat_metrics(X_test, y_test, predicted):
    """Print and return absolute-error statistics between predictions and targets.

    Bug fix: the original computed ``np.abs(y_test - predicted)`` with
    y_test of shape (n, 1) and predicted reshaped to (n,), which NumPy
    broadcasts to an (n, n) matrix of all pairwise differences — so every
    statistic was computed over the wrong values. Both arrays are now
    flattened to (n,) before subtracting, making the error element-wise.

    Parameters
    ----------
    X_test : ndarray
        Test features; only printed for inspection (must have >= 2 columns).
    y_test : ndarray of shape (n, 1)
        True target values.
    predicted : array-like with n elements
        Model predictions; reshaped to (n,).

    Returns
    -------
    dict with keys 'mean_error', 'std_error', 'max_error', 'min_error'.
    (The original returned None, so callers ignoring the result are unaffected.)
    """
    predicted = np.reshape(predicted, y_test.shape[0])
    # Flatten the (n, 1) target so the difference stays element-wise (n,).
    train_error = np.abs(np.ravel(y_test) - predicted)
    mean_error = np.mean(train_error)
    min_error = np.min(train_error)
    max_error = np.max(train_error)
    std_error = np.std(train_error)
    print(predicted)
    print(y_test.T[0])
    print(np.mean(X_test))
    print("#" * 20)
    print(mean_error)
    print(std_error)
    print(max_error)
    print(min_error)
    print("#" * 20)
    print(X_test[:, 1])
    return {
        'mean_error': mean_error,
        'std_error': std_error,
        'max_error': max_error,
        'min_error': min_error,
    }
def run_regressor(model=LSTM2, sequence_length = SEQ_LENGTH, data=None, data_file='df_dh.csv', isload_model=True, testonly=False):
    """Train the LSTM regressor on one dataset, score it on the held-out
    test split, persist predictions/targets as CSVs and draw diagnostics.

    Parameters
    ----------
    model : callable
        Model builder taking X_train and returning a compiled Keras model.
    sequence_length : int
        Sliding-window length used when loading data.
    data : list or None
        Pre-built [X_train, y_train, X_test, y_test, X_val, Y_val]; when
        None the data is loaded via get_data().
    data_file : str
        CSV name; also used to tag the output files.
    isload_model, testonly : bool
        Accepted but unused in this body (the test-only branch is
        commented out below).

    Returns
    -------
    (predicted_df, y_test_df) — DataFrames of inverse-scaled predictions
    and targets. Side effects: appends the test MSE to the global ``mses``
    list and writes CSVs under DATAPATH.
    """
    epochs = 1000
    path_to_dataset = data_file
    global mses
    if data is None:
        X_train, y_train, X_test, y_test, X_val, Y_val = get_data(sequence_length=sequence_length, stateful=STATEFUL,
                                                                 path_to_dataset=data_file)
    else:
        X_train, y_train, X_test, y_test, X_val, Y_val = data
    if STATEFUL:
        # Stateful Keras models require sample counts divisible by batch_size.
        X_test = X_test[:int(X_test.shape[0] / batch_size) * batch_size]
        y_test = y_test[:int(y_test.shape[0] / batch_size) * batch_size]
    # The default-argument trick binds the current X_train into the builder.
    estimator = KerasRegressor(build_fn=lambda x=X_train: model(x))
    # if testonly == True:
    #     # predicted = model.predict(X_test, verbose=1,batch_size=batch_size)
    #     prediction = estimator.predict(X_test)
    #     stat_metrics(X_test, y_test, prediction)
    #     draw_scatter(predicted_arr[0], y_test, X_test, X_train, y_train, data_file)
    #     return
    early_stopping = EarlyStopping(monitor='val_loss', verbose=1, patience=40)
    # Best-val-loss weights are checkpointed to ./lstm.h5 (weights only).
    checkpoint = ModelCheckpoint("./lstm.h5", monitor='val_loss', verbose=1, save_best_only=True,
                                 save_weights_only=True)
    ################
    hist = estimator.fit(X_train, y_train, validation_data=(X_val, Y_val), callbacks=[checkpoint, early_stopping],
                         epochs=epochs, batch_size=batch_size, verbose=1)
    # prediction = estimator.predict(X_test)
    score = mean_squared_error(y_test, estimator.predict(X_test))
    # NOTE(review): estimator_score is computed but never used.
    estimator_score = estimator.score(X_test, y_test)
    print(score)
    mses.append(score)
    prediction = estimator.predict(X_test)
    print(prediction)
    print(X_test)
    print("##############################################")
    # predicted_arr = prediction.T.tolist()
    # print(predicted_arr)
    global scaler
    # Map predictions and targets back to the original scale for reporting.
    prediction_, y_test_, y_train_ = inverse_xy_transform(scaler, prediction, y_test, y_train)
    predicted_df = pd.DataFrame(prediction_)
    y_test_df = pd.DataFrame(y_test_)
    # X_test_df = pd.DataFrame(X_test) #columns
    predicted_df.to_csv(DATAPATH + str(prefix) + data_file + str(batch_size) + str(sequence_length) + "predicted_df.csv")
    y_test_df.to_csv(DATAPATH + str(prefix) + data_file + str(batch_size) + str(sequence_length) + "y_test_df.csv")
    # X_test_df.to_csv(DATAPATH+data_file+"X_test_df.csv")
    draw_scatter(prediction, y_test, X_test, X_train, y_train, data_file)
    his_figures(hist)
    draw_line(prediction, y_test, X_test, X_train, y_train, data_file)
    return predicted_df, y_test_df
if __name__ == '__main__':
    # get_data_single_user()
    # Sweep the sliding-window length from 5 to 120 in steps of 5, and
    # repeat the whole sweep 10 times; run_regressor appends each test MSE
    # to the global `mses` list, which is collected per repetition.
    x = range(5, 121, 5)
    total_mses =[]
    for i in range(1,11):
        mses = []
        for length in x:
            X_train, y_train, X_test, y_test, X_val, Y_val = get_data(sequence_length=length, stateful=STATEFUL)
            run_regressor(sequence_length = length,data=[X_train, y_train, X_test, y_test, X_val, Y_val],
                          data_file='df_dh.csv', isload_model=True)
        total_mses.append(mses)
        print(total_mses)
    # Persist the full sweep as a (repetitions x window-lengths) array.
    np.save(RESULT_PATH + str(prefix) + 'mses.npy', np.asarray(total_mses))
'''
# stock_predict tf
# https://github.com/LouisScorpio/datamining/blob/master/tensorflow-program/rnn/stock_predict/stock_predict_2.py
# boston tf
# https://blog.csdn.net/baixiaozhe/article/details/54410313
########### consume predict keras
# http://www.cnblogs.com/arkenstone/p/5794063.html
# bike number predict keras
# http://resuly.me/2017/08/16/keras-rnn-tutorial/#%E4%BB%BB%E5%8A%A1%E6%8F%8F%E8%BF%B0
# Multivariate Time Series Forecasting with LSTMs in Keras
# https://zhuanlan.zhihu.com/p/28746221
'''
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"hist_figure.his_figures",
"numpy.column_stack",
"numpy.array",
"keras.optimizers.SGD",
"keras.layers.Activation",
"sys.path.append",
"keras.optimizers.Adadelta",
"numpy.mean",
"numpy.reshape",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"... | [((128, 144), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (142, 144), True, 'import tensorflow as tf\n'), ((241, 266), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (251, 266), True, 'import tensorflow as tf\n'), ((383, 397), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (390, 397), True, 'import matplotlib as mpl\n'), ((1620, 1640), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (1635, 1640), False, 'import sys\n'), ((1744, 1755), 'time.time', 'time.time', ([], {}), '()\n', (1753, 1755), False, 'import time\n'), ((2423, 2534), 'pandas.read_csv', 'pd.read_csv', (['(DATAPATH + path_to_dataset)'], {'header': '(0)', 'dtype': 'dtypes', 'parse_dates': 'parse_dates', 'encoding': '"""utf-8"""'}), "(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates\n =parse_dates, encoding='utf-8')\n", (2434, 2534), True, 'import pandas as pd\n'), ((3538, 3609), 'numpy.column_stack', 'np.column_stack', (['(df_super, df_com_date, df_numbers, df_week, df_month)'], {}), '((df_super, df_com_date, df_numbers, df_week, df_month))\n', (3553, 3609), True, 'import numpy as np\n'), ((3658, 3686), 'sklearn.preprocessing.MaxAbsScaler', 'preprocessing.MaxAbsScaler', ([], {}), '()\n', (3684, 3686), False, 'from sklearn import preprocessing\n'), ((3698, 3726), 'sklearn.preprocessing.MaxAbsScaler', 'preprocessing.MaxAbsScaler', ([], {}), '()\n', (3724, 3726), False, 'from sklearn import preprocessing\n'), ((3878, 3901), 'pandas.DataFrame', 'pd.DataFrame', (['array_new'], {}), '(array_new)\n', (3890, 3901), True, 'import pandas as pd\n'), ((3977, 4000), 'pandas.DataFrame', 'pd.DataFrame', (['array_new'], {}), '(array_new)\n', (3989, 4000), True, 'import pandas as pd\n'), ((4075, 4098), 'pandas.DataFrame', 'pd.DataFrame', (['array_new'], {}), '(array_new)\n', (4087, 4098), True, 'import pandas as pd\n'), ((4171, 4194), 'pandas.DataFrame', 'pd.DataFrame', (['array_new'], {}), '(array_new)\n', 
(4183, 4194), True, 'import pandas as pd\n'), ((4252, 4273), 'pandas.DataFrame', 'pd.DataFrame', (['df_week'], {}), '(df_week)\n', (4264, 4273), True, 'import pandas as pd\n'), ((4334, 4356), 'pandas.DataFrame', 'pd.DataFrame', (['df_month'], {}), '(df_month)\n', (4346, 4356), True, 'import pandas as pd\n'), ((4372, 4443), 'numpy.column_stack', 'np.column_stack', (['(df_super, df_com_date, df_numbers, df_week, df_month)'], {}), '((df_super, df_com_date, df_numbers, df_week, df_month))\n', (4387, 4443), True, 'import numpy as np\n'), ((5763, 5874), 'pandas.read_csv', 'pd.read_csv', (['(DATAPATH + path_to_dataset)'], {'header': '(0)', 'dtype': 'dtypes', 'parse_dates': 'parse_dates', 'encoding': '"""utf-8"""'}), "(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates\n =parse_dates, encoding='utf-8')\n", (5774, 5874), True, 'import pandas as pd\n'), ((6783, 6811), 'sklearn.preprocessing.MaxAbsScaler', 'preprocessing.MaxAbsScaler', ([], {}), '()\n', (6809, 6811), False, 'from sklearn import preprocessing\n'), ((6823, 6851), 'sklearn.preprocessing.MaxAbsScaler', 'preprocessing.MaxAbsScaler', ([], {}), '()\n', (6849, 6851), False, 'from sklearn import preprocessing\n'), ((7006, 7029), 'pandas.DataFrame', 'pd.DataFrame', (['array_new'], {}), '(array_new)\n', (7018, 7029), True, 'import pandas as pd\n'), ((7102, 7125), 'pandas.DataFrame', 'pd.DataFrame', (['array_new'], {}), '(array_new)\n', (7114, 7125), True, 'import pandas as pd\n'), ((7183, 7204), 'pandas.DataFrame', 'pd.DataFrame', (['df_week'], {}), '(df_week)\n', (7195, 7204), True, 'import pandas as pd\n'), ((7265, 7287), 'pandas.DataFrame', 'pd.DataFrame', (['df_month'], {}), '(df_month)\n', (7277, 7287), True, 'import pandas as pd\n'), ((7303, 7339), 'numpy.column_stack', 'np.column_stack', (['(df_week, df_month)'], {}), '((df_week, df_month))\n', (7318, 7339), True, 'import numpy as np\n'), ((8437, 8455), 'numpy.array', 'np.array', (['result_x'], {}), '(result_x)\n', (8445, 8455), True, 'import numpy 
as np\n'), ((8470, 8488), 'numpy.array', 'np.array', (['result_y'], {}), '(result_y)\n', (8478, 8488), True, 'import numpy as np\n'), ((10307, 10319), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10317, 10319), False, 'from keras.models import Sequential, Model\n'), ((11562, 11573), 'time.time', 'time.time', ([], {}), '()\n', (11571, 11573), False, 'import time\n'), ((11584, 11639), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'decay': '(1e-08)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.001, decay=1e-08, momentum=0.9, nesterov=True)\n', (11587, 11639), False, 'from keras.optimizers import SGD, Adadelta, Adam\n'), ((11648, 11692), 'keras.optimizers.Adadelta', 'Adadelta', ([], {'lr': '(0.0001)', 'rho': '(0.95)', 'epsilon': '(1e-06)'}), '(lr=0.0001, rho=0.95, epsilon=1e-06)\n', (11656, 11692), False, 'from keras.optimizers import SGD, Adadelta, Adam\n'), ((11700, 11754), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.001)', 'rho': '(0.9)', 'epsilon': '(1e-06)', 'decay': '(1e-08)'}), '(lr=0.001, rho=0.9, epsilon=1e-06, decay=1e-08)\n', (11707, 11754), False, 'from keras.optimizers import SGD, RMSprop\n'), ((11764, 11778), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (11768, 11778), False, 'from keras.optimizers import SGD, Adadelta, Adam\n'), ((12092, 12104), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12102, 12104), True, 'import matplotlib.pyplot as plt\n'), ((12179, 12213), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_array'], {'label': '"""error"""'}), "(x, y_array, label='error')\n", (12186, 12213), True, 'import matplotlib.pyplot as plt\n'), ((12265, 12277), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12275, 12277), True, 'import matplotlib.pyplot as plt\n'), ((12282, 12304), 'matplotlib.pyplot.title', 'plt.title', (['"""error bar"""'], {}), "('error bar')\n", (12291, 12304), True, 'import matplotlib.pyplot as plt\n'), ((12483, 12495), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12493, 12495), True, 'import matplotlib.pyplot as plt\n'), ((12570, 12605), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_array'], {'label': '"""error"""'}), "(x, y_array, label='error')\n", (12578, 12605), True, 'import matplotlib.pyplot as plt\n'), ((12648, 12687), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "df['error']"], {'label': '"""error"""'}), "(x, df['error'], label='error')\n", (12656, 12687), True, 'import matplotlib.pyplot as plt\n'), ((12739, 12751), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12749, 12751), True, 'import matplotlib.pyplot as plt\n'), ((12756, 12779), 'matplotlib.pyplot.title', 'plt.title', (['"""error plot"""'], {}), "('error plot')\n", (12765, 12779), True, 'import matplotlib.pyplot as plt\n'), ((12999, 13011), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13009, 13011), True, 'import matplotlib.pyplot as plt\n'), ((13145, 13204), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_test.T[0]'], {'width': 'width', 'label': '"""truth"""', 'fc': '"""y"""'}), "(x, y_test.T[0], width=width, label='truth', fc='y')\n", (13152, 13204), True, 'import matplotlib.pyplot as plt\n'), ((13265, 13324), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'predicted'], {'width': 'width', 'label': '"""predict"""', 'fc': '"""r"""'}), "(x, predicted, width=width, label='predict', fc='r')\n", (13272, 13324), True, 'import matplotlib.pyplot as plt\n'), ((13377, 13389), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13387, 13389), True, 'import matplotlib.pyplot as plt\n'), ((13394, 13411), 'matplotlib.pyplot.title', 'plt.title', (['"""lstm"""'], {}), "('lstm')\n", (13403, 13411), True, 'import matplotlib.pyplot as plt\n'), ((13580, 13592), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13590, 13592), True, 'import matplotlib.pyplot as plt\n'), ((13597, 13632), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test.T[0]', 'predicted'], {}), 
'(y_test.T[0], predicted)\n', (13608, 13632), True, 'import matplotlib.pyplot as plt\n'), ((13705, 13719), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (13713, 13719), True, 'import matplotlib.pyplot as plt\n'), ((13724, 13738), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (13732, 13738), True, 'import matplotlib.pyplot as plt\n'), ((13743, 13762), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""truth"""'], {}), "('truth')\n", (13753, 13762), True, 'import matplotlib.pyplot as plt\n'), ((13767, 13788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""predict"""'], {}), "('predict')\n", (13777, 13788), True, 'import matplotlib.pyplot as plt\n'), ((14010, 14022), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14020, 14022), True, 'import matplotlib.pyplot as plt\n'), ((14156, 14214), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_test.T[0]'], {'width': 'width', 'label': '"""True"""', 'fc': '"""y"""'}), "(x, y_test.T[0], width=width, label='True', fc='y')\n", (14163, 14214), True, 'import matplotlib.pyplot as plt\n'), ((14275, 14336), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'predicted'], {'width': 'width', 'label': '"""Predicted"""', 'fc': '"""r"""'}), "(x, predicted, width=width, label='Predicted', fc='r')\n", (14282, 14336), True, 'import matplotlib.pyplot as plt\n'), ((14389, 14401), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14399, 14401), True, 'import matplotlib.pyplot as plt\n'), ((14406, 14423), 'matplotlib.pyplot.title', 'plt.title', (['"""lstm"""'], {}), "('lstm')\n", (14415, 14423), True, 'import matplotlib.pyplot as plt\n'), ((14628, 14640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14638, 14640), True, 'import matplotlib.pyplot as plt\n'), ((14645, 14680), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test.T[0]', 'predicted'], {}), '(y_test.T[0], predicted)\n', (14656, 14680), True, 'import matplotlib.pyplot as plt\n'), ((14753, 14767), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (14761, 14767), True, 'import matplotlib.pyplot as plt\n'), ((14772, 14786), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (14780, 14786), True, 'import matplotlib.pyplot as plt\n'), ((14791, 14809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""True"""'], {}), "('True')\n", (14801, 14809), True, 'import matplotlib.pyplot as plt\n'), ((14814, 14837), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predicted"""'], {}), "('Predicted')\n", (14824, 14837), True, 'import matplotlib.pyplot as plt\n'), ((14987, 14999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14997, 14999), True, 'import matplotlib.pyplot as plt\n'), ((15040, 15078), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_test.T[0]'], {'label': '"""True"""'}), "(x, y_test.T[0], label='True')\n", (15048, 15078), True, 'import matplotlib.pyplot as plt\n'), ((15139, 15180), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'predicted'], {'label': '"""Predicted"""'}), "(x, predicted, label='Predicted')\n", (15147, 15180), True, 'import matplotlib.pyplot as plt\n'), ((15185, 15197), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15195, 15197), True, 'import matplotlib.pyplot as plt\n'), ((15402, 15440), 'numpy.reshape', 'np.reshape', (['predicted', 'y_test.shape[0]'], {}), '(predicted, y_test.shape[0])\n', (15412, 15440), True, 'import numpy as np\n'), ((15459, 15485), 'numpy.abs', 'np.abs', (['(y_test - predicted)'], {}), '(y_test - predicted)\n', (15465, 15485), True, 'import numpy as np\n'), ((15503, 15523), 'numpy.mean', 'np.mean', (['train_error'], {}), '(train_error)\n', (15510, 15523), True, 'import numpy as np\n'), ((15540, 15559), 'numpy.min', 'np.min', (['train_error'], {}), '(train_error)\n', (15546, 15559), True, 'import numpy as np\n'), ((15576, 15595), 'numpy.max', 'np.max', (['train_error'], {}), '(train_error)\n', (15582, 15595), True, 'import numpy as np\n'), ((15612, 
15631), 'numpy.std', 'np.std', (['train_error'], {}), '(train_error)\n', (15618, 15631), True, 'import numpy as np\n'), ((17070, 17127), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'patience': '(40)'}), "(monitor='val_loss', verbose=1, patience=40)\n", (17083, 17127), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback\n'), ((17145, 17254), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""./lstm.h5"""'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), "('./lstm.h5', monitor='val_loss', verbose=1, save_best_only=\n True, save_weights_only=True)\n", (17160, 17254), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback\n'), ((18047, 18072), 'pandas.DataFrame', 'pd.DataFrame', (['prediction_'], {}), '(prediction_)\n', (18059, 18072), True, 'import pandas as pd\n'), ((18089, 18110), 'pandas.DataFrame', 'pd.DataFrame', (['y_test_'], {}), '(y_test_)\n', (18101, 18110), True, 'import pandas as pd\n'), ((18534, 18551), 'hist_figure.his_figures', 'his_figures', (['hist'], {}), '(hist)\n', (18545, 18551), False, 'from hist_figure import his_figures\n'), ((4941, 5007), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train_', 'Y_train_'], {'test_size': '(0.1)', 'shuffle': '(False)'}), '(X_train_, Y_train_, test_size=0.1, shuffle=False)\n', (4957, 5007), False, 'from sklearn.model_selection import train_test_split\n'), ((5049, 5113), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(0.1)', 'shuffle': '(False)'}), '(X_train, Y_train, test_size=0.1, shuffle=False)\n', (5065, 5113), False, 'from sklearn.model_selection import train_test_split\n'), ((9123, 9187), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(0.1)', 'shuffle': '(False)'}), '(X_train, Y_train, test_size=0.1, 
shuffle=False)\n', (9139, 9187), False, 'from sklearn.model_selection import train_test_split\n'), ((9341, 9405), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(0.1)', 'shuffle': '(False)'}), '(X_train, Y_train, test_size=0.1, shuffle=False)\n', (9357, 9405), False, 'from sklearn.model_selection import train_test_split\n'), ((9447, 9511), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(0.1)', 'shuffle': '(False)'}), '(X_train, Y_train, test_size=0.1, shuffle=False)\n', (9463, 9511), False, 'from sklearn.model_selection import train_test_split\n'), ((11088, 11182), 'keras.layers.LSTM', 'LSTM', (['layers[2]'], {'stateful': 'STATEFUL', 'return_sequences': '(False)', 'kernel_initializer': '"""he_normal"""'}), "(layers[2], stateful=STATEFUL, return_sequences=False,\n kernel_initializer='he_normal')\n", (11092, 11182), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM\n'), ((11271, 11283), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (11278, 11283), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM\n'), ((11491, 11511), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11509, 11511), False, 'from keras.layers.normalization import BatchNormalization\n'), ((11527, 11547), 'keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (11537, 11547), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM\n'), ((15686, 15701), 'numpy.mean', 'np.mean', (['X_test'], {}), '(X_test)\n', (15693, 15701), True, 'import numpy as np\n'), ((19229, 19251), 'numpy.asarray', 'np.asarray', (['total_mses'], {}), '(total_mses)\n', (19239, 19251), True, 'import numpy as np\n'), ((10423, 10567), 'keras.layers.LSTM', 'LSTM', 
(['layers[1]'], {'input_shape': '(X_train.shape[1], X_train.shape[2])', 'stateful': 'STATEFUL', 'return_sequences': '(True)', 'kernel_initializer': '"""he_normal"""'}), "(layers[1], input_shape=(X_train.shape[1], X_train.shape[2]), stateful=\n STATEFUL, return_sequences=True, kernel_initializer='he_normal')\n", (10427, 10567), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM\n'), ((10706, 10873), 'keras.layers.LSTM', 'LSTM', (['layers[1]'], {'batch_input_shape': '(batch_size, X_train.shape[1], X_train.shape[2])', 'stateful': 'STATEFUL', 'return_sequences': '(True)', 'kernel_initializer': '"""he_normal"""'}), "(layers[1], batch_input_shape=(batch_size, X_train.shape[1], X_train.\n shape[2]), stateful=STATEFUL, return_sequences=True, kernel_initializer\n ='he_normal')\n", (10710, 10873), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM\n'), ((12013, 12024), 'time.time', 'time.time', ([], {}), '()\n', (12022, 12024), False, 'import time\n'), ((11421, 11429), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (11423, 11429), False, 'from keras.regularizers import l1, l2\n'), ((11461, 11469), 'keras.regularizers.l1', 'l1', (['(0.01)'], {}), '(0.01)\n', (11463, 11469), False, 'from keras.regularizers import l1, l2\n')] |
# -*- coding: utf-8 -*-
import os
# Pin the math libraries to a single thread each: the parallelism below
# comes from the multiprocessing Pool, and nested threading would
# oversubscribe the CPUs.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, makedirs
import numpy as np
np.random.seed(1)  # reproducibility
import random
random.seed(1)
import timeit
import cv2
import pandas as pd
from multiprocessing import Pool
# Same reasoning for OpenCV: disable its internal threading and OpenCL.
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)

# Fold assignments per image id.
df = pd.read_csv('train_folds.csv')
# Per-model out-of-fold prediction folders under /wdata/, blended below.
pred_folders = [
    'pred_50_9ch_oof_0',
    'pred_92_9ch_oof_0',
    'pred_154_9ch_oof_0',
    'pred_101_9ch_oof_0'
]
# Blending weights, aligned index-wise with pred_folders.
coefs = [2, 2, 2, 1]
def process_image(fid):
    """Blend the per-model OOF masks for one image id into a weighted
    average and write the result to /wdata/merged_oof/<fid>.png.

    The weights come from the module-level ``coefs`` list, aligned with
    ``pred_folders``; the blend is normalized by the weight sum and saved
    as uint8 with maximum PNG compression.
    """
    out_name = fid + '.png'
    stem = out_name.split('.')[0]
    masks = [
        cv2.imread(path.join('/wdata/', folder, '{0}.png'.format(stem)), cv2.IMREAD_UNCHANGED)
        for folder in pred_folders
    ]
    blended = np.zeros_like(masks[0], dtype='float')
    for weight, mask in zip(coefs, masks):
        blended += weight * mask.astype('float')
    blended /= np.sum(coefs)
    cv2.imwrite(path.join('/wdata/merged_oof', out_name),
                blended.astype('uint8'), [cv2.IMWRITE_PNG_COMPRESSION, 9])
if __name__ == '__main__':
    t0 = timeit.default_timer()
    makedirs('/wdata/merged_oof', exist_ok=True)
    # Image ids with fold < 8 — NOTE(review): presumably the training/OOF
    # folds, with the remainder held out; confirm against the fold scheme.
    val_files = df[df['fold'] < 8]['id'].values
    # Fan the per-image blending out across all CPU cores.
    with Pool() as pool:
        results = pool.map(process_image, val_files)
    elapsed = timeit.default_timer() - t0
    print('Time: {:.3f} min'.format(elapsed / 60))
print('Time: {:.3f} min'.format(elapsed / 60)) | [
"cv2.ocl.setUseOpenCL",
"cv2.setNumThreads",
"pandas.read_csv",
"os.makedirs",
"timeit.default_timer",
"os.path.join",
"random.seed",
"numpy.sum",
"numpy.random.seed",
"multiprocessing.Pool",
"numpy.zeros_like"
] | [((207, 224), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (221, 224), True, 'import numpy as np\n'), ((241, 255), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (252, 255), False, 'import random\n'), ((341, 361), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (358, 361), False, 'import cv2\n'), ((363, 390), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (383, 390), False, 'import cv2\n'), ((399, 429), 'pandas.read_csv', 'pd.read_csv', (['"""train_folds.csv"""'], {}), "('train_folds.csv')\n", (410, 429), True, 'import pandas as pd\n'), ((847, 889), 'numpy.zeros_like', 'np.zeros_like', (['used_msks[0]'], {'dtype': '"""float"""'}), "(used_msks[0], dtype='float')\n", (860, 889), True, 'import numpy as np\n'), ((1017, 1030), 'numpy.sum', 'np.sum', (['coefs'], {}), '(coefs)\n', (1023, 1030), True, 'import numpy as np\n'), ((1182, 1204), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1202, 1204), False, 'import timeit\n'), ((1212, 1256), 'os.makedirs', 'makedirs', (['"""/wdata/merged_oof"""'], {'exist_ok': '(True)'}), "('/wdata/merged_oof', exist_ok=True)\n", (1220, 1256), False, 'from os import path, makedirs\n'), ((1050, 1085), 'os.path.join', 'path.join', (['"""/wdata/merged_oof"""', 'fid'], {}), "('/wdata/merged_oof', fid)\n", (1059, 1085), False, 'from os import path, makedirs\n'), ((1324, 1330), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (1328, 1330), False, 'from multiprocessing import Pool\n'), ((1415, 1437), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1435, 1437), False, 'import timeit\n')] |
#!/usr/bin/env python
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, based on code from <NAME>
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.config import cfg
from model.test import im_detect
from model.nms_wrapper import nms
from utils.timer import Timer
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
import pyrealsense2 as rs
CLASSES = ('__background__',
'box', 'sucker')
NETS = {'vgg16': ('vgg16_faster_rcnn_iter_2000.ckpt',),'res101': ('res101_faster_rcnn_iter_110000.ckpt',)}
DATASETS= {'pascal_voc': ('voc_2007_trainval',),'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}
def vis_detections(color_image, depth_colormap, class_col, dets_col, thresh=0.5):
    """Overlay detection boxes and class/score labels on both the color image
    and the depth colormap, then display them side by side in one window."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    for cls_ind, class_name in enumerate(class_col):
        dets = dets_col[cls_ind]
        keep = np.where(dets[:, -1] >= thresh)[0]
        if len(keep) == 0:
            continue
        for det_idx in keep:
            x1, y1, x2, y2 = (int(v) for v in dets[det_idx, :4])
            score = dets[det_idx, -1]
            cv2.rectangle(color_image, (x1, y1), (x2, y2), (0, 0, 255), 3)
            cv2.rectangle(depth_colormap, (x1, y1), (x2, y2), (0, 0, 255), 3)
            label = '{:s} {:.3f}'.format(class_name, score)
            # Keep the label inside the frame even when the box touches the top edge.
            text_org = (x1, max(y1 - 2, 1))
            color_image = cv2.putText(color_image, label, text_org,
                                      font, 0.5, (255, 255, 255), 2)
            depth_colormap = cv2.putText(depth_colormap, label, text_org,
                                         font, 0.5, (255, 255, 255), 2)
    # Stack the two annotated views horizontally and show them.
    cv2.imshow('RealSense', np.hstack((color_image, depth_colormap)))
def calc_histogram(depth_image, class_col, dets_col, thresh=0.5):
# return value
depth_col = np.zeros((len(class_col), 2), dtype=float)
bbox_col = np.zeros((len(class_col), 4), dtype=float)
# per class
for cls_ind in range(len(class_col)):
dets = dets_col[cls_ind]
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
continue
ind = np.argmax(dets[:, -1])
bbox = [int(e) for e in dets[ind, :4]]
depth_select = depth_image[bbox[1]:bbox[3], bbox[0]:bbox[2]]
# plt.imshow(depth_select)
# plt.colorbar()
# plt.show()
depth_select = np.reshape(depth_select, (-1))
depth_index = np.array([i for i, elem in enumerate(depth_select) if elem > 1500],
dtype=np.int32)
depth_select = depth_select[depth_index]
depth_hist, bin_edge = np.histogram(depth_select, bins="fd")
# plt.hist(depth_select, bins="fd")
# plt.show()
# plt.close("all")
depth_mean = np.mean([elem for elem in depth_hist])
front = bin_edge[0]
end = bin_edge[-1]
in_middle = False
for i, elem in enumerate(depth_hist):
if elem >= depth_mean:
front = bin_edge[i]
in_middle = True
if in_middle and elem <= depth_mean:
end = bin_edge[i]
in_middle = False
break
depth_col[cls_ind, :] = np.array((front, end))
bbox_col[cls_ind, :] = np.array((dets[ind, :4]))
return depth_col, bbox_col
def demo(sess, net, color_image, depth_colormap):
    """Detect object classes in an image using pre-computed object proposals."""
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, color_image)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))
    # Visualize detections for each class
    CONF_THRESH = 0.7
    NMS_THRESH = 0.3
    dets_col = []
    cls_col = []
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        # Boxes are packed 4-wide per class; append scores as a 5th column.
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        dets_col.append(dets)
        cls_col.append(cls)
    vis_detections(color_image, depth_colormap, cls_col, dets_col, thresh=CONF_THRESH)
    # NOTE(review): `depth_image` is not a parameter — it is read from global
    # scope, set inside the capture loop under __main__. Calling demo() outside
    # that loop raises NameError; consider passing depth_image explicitly.
    depth_col, bbox_col = calc_histogram(depth_image, cls_col, dets_col, thresh=CONF_THRESH)
    print("box depth:", depth_col[0], "sucker depth:", depth_col[1])
    print("box bbox:", bbox_col[0], "sucker bbox", bbox_col[1])
def parse_args():
    """Build the CLI for the demo and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')
    parser.add_argument('--net', dest='demo_net',
                        help='Network to use [vgg16 res101]',
                        choices=NETS.keys(), default='vgg16')
    parser.add_argument('--dataset', dest='dataset',
                        help='Trained dataset [pascal_voc pascal_voc_0712]',
                        choices=DATASETS.keys(), default='pascal_voc')
    return parser.parse_args()
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    args = parse_args()
    # model path
    demonet = args.demo_net
    dataset = args.dataset
    tfmodel = os.path.join('output', demonet, DATASETS[dataset][0], 'default',
                              NETS[demonet][0])
    if not os.path.isfile(tfmodel + '.meta'):
        raise IOError(('{:s} not found.\nDid you download the proper networks from '
                       'our server and place them properly?').format(tfmodel + '.meta'))
    # set config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth=True
    # init session
    sess = tf.Session(config=tfconfig)
    # load network
    if demonet == 'vgg16':
        net = vgg16()
    elif demonet == 'res101':
        net = resnetv1(num_layers=101)
    else:
        raise NotImplementedError
    # 3 output classes: background + 'box' + 'sucker' (see CLASSES above).
    net.create_architecture("TEST", 3,
                          tag='default', anchor_scales=[8, 16, 32])
    saver = tf.train.Saver()
    saver.restore(sess, tfmodel)
    print('Loaded network {:s}'.format(tfmodel))
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    pipeline.start(config)
    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # Convert images to numpy arrays
        # (demo() reads this `depth_image` via global scope — see note there.)
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        demo(sess, net, color_image, depth_colormap)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("I'm done")
            break
| [
"cv2.rectangle",
"cv2.convertScaleAbs",
"numpy.hstack",
"cv2.imshow",
"numpy.array",
"nets.vgg16.vgg16",
"numpy.mean",
"numpy.histogram",
"numpy.reshape",
"argparse.ArgumentParser",
"numpy.where",
"utils.timer.Timer",
"tensorflow.Session",
"nets.resnet_v1.resnetv1",
"pyrealsense2.config"... | [((2282, 2322), 'numpy.hstack', 'np.hstack', (['(color_image, depth_colormap)'], {}), '((color_image, depth_colormap))\n', (2291, 2322), True, 'import numpy as np\n'), ((2346, 2377), 'cv2.imshow', 'cv2.imshow', (['"""RealSense"""', 'images'], {}), "('RealSense', images)\n", (2356, 2377), False, 'import os, cv2\n'), ((4199, 4206), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (4204, 4206), False, 'from utils.timer import Timer\n'), ((4243, 4276), 'model.test.im_detect', 'im_detect', (['sess', 'net', 'color_image'], {}), '(sess, net, color_image)\n', (4252, 4276), False, 'from model.test import im_detect\n'), ((5328, 5395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tensorflow Faster R-CNN demo"""'}), "(description='Tensorflow Faster R-CNN demo')\n", (5351, 5395), False, 'import argparse\n'), ((5963, 6050), 'os.path.join', 'os.path.join', (['"""output"""', 'demonet', 'DATASETS[dataset][0]', '"""default"""', 'NETS[demonet][0]'], {}), "('output', demonet, DATASETS[dataset][0], 'default', NETS[\n demonet][0])\n", (5975, 6050), False, 'import os, cv2\n'), ((6331, 6372), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (6345, 6372), True, 'import tensorflow as tf\n'), ((6447, 6474), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tfconfig'}), '(config=tfconfig)\n', (6457, 6474), True, 'import tensorflow as tf\n'), ((6775, 6791), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6789, 6791), True, 'import tensorflow as tf\n'), ((6931, 6944), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (6942, 6944), True, 'import pyrealsense2 as rs\n'), ((6958, 6969), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (6967, 6969), True, 'import pyrealsense2 as rs\n'), ((2795, 2817), 'numpy.argmax', 'np.argmax', (['dets[:, -1]'], {}), '(dets[:, -1])\n', (2804, 2817), True, 'import numpy as np\n'), ((3041, 3069), 
'numpy.reshape', 'np.reshape', (['depth_select', '(-1)'], {}), '(depth_select, -1)\n', (3051, 3069), True, 'import numpy as np\n'), ((3289, 3326), 'numpy.histogram', 'np.histogram', (['depth_select'], {'bins': '"""fd"""'}), "(depth_select, bins='fd')\n", (3301, 3326), True, 'import numpy as np\n'), ((3442, 3480), 'numpy.mean', 'np.mean', (['[elem for elem in depth_hist]'], {}), '([elem for elem in depth_hist])\n', (3449, 3480), True, 'import numpy as np\n'), ((3884, 3906), 'numpy.array', 'np.array', (['(front, end)'], {}), '((front, end))\n', (3892, 3906), True, 'import numpy as np\n'), ((3938, 3961), 'numpy.array', 'np.array', (['dets[ind, :4]'], {}), '(dets[ind, :4])\n', (3946, 3961), True, 'import numpy as np\n'), ((4839, 4860), 'model.nms_wrapper.nms', 'nms', (['dets', 'NMS_THRESH'], {}), '(dets, NMS_THRESH)\n', (4842, 4860), False, 'from model.nms_wrapper import nms\n'), ((6089, 6122), 'os.path.isfile', 'os.path.isfile', (["(tfmodel + '.meta')"], {}), "(tfmodel + '.meta')\n", (6103, 6122), False, 'import os, cv2\n'), ((6535, 6542), 'nets.vgg16.vgg16', 'vgg16', ([], {}), '()\n', (6540, 6542), False, 'from nets.vgg16 import vgg16\n'), ((7819, 7868), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RealSense"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('RealSense', cv2.WINDOW_AUTOSIZE)\n", (7834, 7868), False, 'import os, cv2\n'), ((1348, 1379), 'numpy.where', 'np.where', (['(dets[:, -1] >= thresh)'], {}), '(dets[:, -1] >= thresh)\n', (1356, 1379), True, 'import numpy as np\n'), ((1561, 1648), 'cv2.rectangle', 'cv2.rectangle', (['color_image', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', '(0, 0, 255)', '(3)'], {}), '(color_image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, \n 255), 3)\n', (1574, 1648), False, 'import os, cv2\n'), ((1680, 1769), 'cv2.rectangle', 'cv2.rectangle', (['depth_colormap', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', '(0, 0, 255)', '(3)'], {}), '(depth_colormap, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0,\n 255), 3)\n', (1693, 1769), False, 'import 
os, cv2\n'), ((2689, 2720), 'numpy.where', 'np.where', (['(dets[:, -1] >= thresh)'], {}), '(dets[:, -1] >= thresh)\n', (2697, 2720), True, 'import numpy as np\n'), ((6587, 6611), 'nets.resnet_v1.resnetv1', 'resnetv1', ([], {'num_layers': '(101)'}), '(num_layers=101)\n', (6595, 6611), False, 'from nets.resnet_v1 import resnetv1\n'), ((7746, 7790), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['depth_image'], {'alpha': '(0.03)'}), '(depth_image, alpha=0.03)\n', (7765, 7790), False, 'import os, cv2\n'), ((4729, 4778), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis])'], {}), '((cls_boxes, cls_scores[:, np.newaxis]))\n', (4738, 4778), True, 'import numpy as np\n'), ((7935, 7949), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7946, 7949), False, 'import os, cv2\n')] |
#! /usr/bin/env python
#! coding:utf-8
from pathlib import Path
import matplotlib.pyplot as plt
from torch import log
from tqdm import tqdm
import torch
import torch.nn as nn
import argparse
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import confusion_matrix
from dataloader.jhmdb_loader import load_jhmdb_data, Jdata_generator, JConfig
from dataloader.shrec_loader import load_shrec_data, Sdata_generator, SConfig
from models.DDNet_Original import DDNet_Original as DDNet
from utils import makedir
import sys
import time
import numpy as np
import logging
# Use the bundled pytorch-summary checkout rather than an installed package.
sys.path.insert(0, './pytorch-summary/torchsummary/')
from torchsummary import summary  # noqa
# Per-run experiment directory keyed by the launch timestamp.
savedir = Path('experiments') / Path(str(int(time.time())))
makedir(savedir)
logging.basicConfig(filename=savedir/'train.log', level=logging.INFO)
# Curves accumulated across epochs by train()/test() for the final plots.
history = {
    "train_loss": [],
    "test_loss": [],
    "test_acc": []
}
def train(args, model, device, train_loader, optimizer, epoch, criterion):
    """Run one training epoch; return the summed per-batch loss.

    Also appends the epoch loss to the module-level ``history`` dict and logs
    progress every ``args.log_interval`` batches. With ``--dry-run`` the epoch
    stops after the first logged batch (smoke test).
    """
    model.train()
    train_loss = 0
    for batch_idx, (data1, data2, target) in enumerate(tqdm(train_loader)):
        # M, P: the two DDNet input streams — presumably motion features and
        # poses (TODO confirm against the data generators).
        M, P, target = data1.to(device), data2.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(M, P)
        loss = criterion(output, target)
        train_loss += loss.detach().item()
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            msg = ('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data1), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            print(msg)
            logging.info(msg)
            if args.dry_run:
                break
    history['train_loss'].append(train_loss)
    return train_loss
def test(model, device, test_loader):
    """Evaluate on the test set; record mean loss and accuracy in ``history``
    and log them. Uses sum-reduction so the final division yields a true
    per-sample average."""
    model.eval()
    test_loss = 0
    correct = 0
    criterion = nn.CrossEntropyLoss(reduction='sum')
    with torch.no_grad():
        for _, (data1, data2, target) in enumerate(tqdm(test_loader)):
            M, P, target = data1.to(device), data2.to(device), target.to(device)
            output = model(M, P)
            # sum up batch loss
            test_loss += criterion(output, target).item()
            # get the index of the max log-probability
            pred = output.argmax(dim=1, keepdim=True)
            # output shape (B,Class)
            # target_shape (B)
            # pred shape (B,1)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    history['test_loss'].append(test_loss)
    history['test_acc'].append(correct / len(test_loader.dataset))
    msg = ('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    print(msg)
    logging.info(msg)
def main():
    """Entry point: parse CLI args, load the chosen dataset (JHMDB/SHREC),
    train and evaluate DDNet, save plots (loss, accuracy, confusion matrix)
    to ``savedir``, and optionally save weights / benchmark inference time."""
    # Training settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=199, metavar='N',
                        help='number of epochs to train (default: 199)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--gamma', type=float, default=0.5, metavar='M',
                        help='Learning rate step gamma (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--log-interval', type=int, default=2, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--dataset', type=int, required=True, metavar='N',
                        help='0 for JHMDB, 1 for SHREC coarse, 2 for SHREC fine, others is undefined')
    parser.add_argument('--model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--calc_time', action='store_true', default=False,
                        help='calc calc time per sample')
    args = parser.parse_args()
    logging.info(args)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'batch_size': args.batch_size}
    if use_cuda:
        kwargs.update({'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True},)
    # alias
    # Select the dataset-specific config, loader, and generator.
    Config = None
    data_generator = None
    load_data = None
    clc_num = 0
    if args.dataset == 0:
        Config = JConfig()
        data_generator = Jdata_generator
        load_data = load_jhmdb_data
        clc_num = Config.clc_num
    elif args.dataset == 1:
        Config = SConfig()
        load_data = load_shrec_data
        clc_num = Config.class_coarse_num
        data_generator = Sdata_generator('coarse_label')
    elif args.dataset == 2:
        Config = SConfig()
        clc_num = Config.class_fine_num
        load_data = load_shrec_data
        data_generator = Sdata_generator('fine_label')
    else:
        print("Unsupported dataset!")
        sys.exit(1)
    C = Config
    # Materialize the full train/test splits as tensors (datasets are small
    # enough to hold in memory).
    Train, Test, le = load_data()
    X_0, X_1, Y = data_generator(Train, C, le)
    X_0 = torch.from_numpy(X_0).type('torch.FloatTensor')
    X_1 = torch.from_numpy(X_1).type('torch.FloatTensor')
    Y = torch.from_numpy(Y).type('torch.LongTensor')
    X_0_t, X_1_t, Y_t = data_generator(Test, C, le)
    X_0_t = torch.from_numpy(X_0_t).type('torch.FloatTensor')
    X_1_t = torch.from_numpy(X_1_t).type('torch.FloatTensor')
    Y_t = torch.from_numpy(Y_t).type('torch.LongTensor')
    trainset = torch.utils.data.TensorDataset(X_0, X_1, Y)
    train_loader = torch.utils.data.DataLoader(trainset, **kwargs)
    testset = torch.utils.data.TensorDataset(X_0_t, X_1_t, Y_t)
    test_loader = torch.utils.data.DataLoader(
        testset, batch_size=args.test_batch_size)
    Net = DDNet(C.frame_l, C.joint_n, C.joint_d,
                C.feat_d, C.filters, clc_num)
    model = Net.to(device)
    summary(model, [(C.frame_l, C.feat_d), (C.frame_l, C.joint_n, C.joint_d)])
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    criterion = nn.CrossEntropyLoss()
    # LR is halved (gamma=0.5) when the training loss plateaus for 5 epochs.
    scheduler = ReduceLROnPlateau(
        optimizer, factor=args.gamma, patience=5, cooldown=0.5, min_lr=5e-6, verbose=True)
    for epoch in range(1, args.epochs + 1):
        train_loss = train(args, model, device, train_loader,
                           optimizer, epoch, criterion)
        test(model, device, test_loader)
        scheduler.step(train_loss)
    # Plot loss curves, accuracy (annotated with its max), and the confusion
    # matrix of the final model on the test set.
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1)
    ax1.plot(history['train_loss'])
    ax1.plot(history['test_loss'])
    ax1.legend(['Train', 'Test'], loc='upper left')
    ax1.set_xlabel('Epoch')
    ax1.set_title('Loss')
    ax2.set_title('Model accuracy')
    ax2.set_ylabel('Accuracy')
    ax2.set_xlabel('Epoch')
    ax2.plot(history['test_acc'])
    xmax = np.argmax(history['test_acc'])
    ymax = np.max(history['test_acc'])
    text = "x={}, y={:.3f}".format(xmax, ymax)
    ax2.annotate(text, xy=(xmax, ymax))
    ax3.set_title('Confusion matrix')
    model.eval()
    with torch.no_grad():
        Y_pred = model(X_0_t.to(device), X_1_t.to(
            device)).cpu().numpy()
        Y_test = Y_t.numpy()
        cnf_matrix = confusion_matrix(
            Y_test, np.argmax(Y_pred, axis=1))
        ax3.imshow(cnf_matrix)
    fig.tight_layout()
    fig.savefig(str(savedir / "perf.png"))
    if args.save_model:
        torch.save(model.state_dict(), str(savedir/"model.pt"))
    if args.calc_time:
        device = ['cpu', 'cuda']
        # calc time
        # Per-sample inference latency on CPU then GPU (batch dim of 1 each).
        for d in device:
            tmp_X_0_t = X_0_t.to(d)
            tmp_X_1_t = X_1_t.to(d)
            model = model.to(d)
            # warm up
            _ = model(tmp_X_0_t, tmp_X_1_t)
            tmp_X_0_t = tmp_X_0_t.unsqueeze(1)
            tmp_X_1_t = tmp_X_1_t.unsqueeze(1)
            start = time.perf_counter_ns()
            for i in range(tmp_X_0_t.shape[0]):
                _ = model(tmp_X_0_t[i, :, :, :], tmp_X_1_t[i, :, :, :])
            end = time.perf_counter_ns()
            msg = ("total {}ns, {:.2f}ns per one on {}".format((end - start),
                                                               ((end - start) / (X_0_t.shape[0])), d))
            print(msg)
            logging.info(msg)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"sys.path.insert",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"utils.makedir",
"torch.cuda.is_available",
"sys.exit",
"time.perf_counter_ns",
"logging.info",
"argparse.ArgumentParser",
"pathlib.Path",
"dataloader.shrec_loader.SConfig",
"numpy.max",
"torchsummary.summary",
"torch.opti... | [((617, 670), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./pytorch-summary/torchsummary/"""'], {}), "(0, './pytorch-summary/torchsummary/')\n", (632, 670), False, 'import sys\n'), ((773, 789), 'utils.makedir', 'makedir', (['savedir'], {}), '(savedir)\n', (780, 789), False, 'from utils import makedir\n'), ((790, 861), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(savedir / 'train.log')", 'level': 'logging.INFO'}), "(filename=savedir / 'train.log', level=logging.INFO)\n", (809, 861), False, 'import logging\n'), ((723, 742), 'pathlib.Path', 'Path', (['"""experiments"""'], {}), "('experiments')\n", (727, 742), False, 'from pathlib import Path\n'), ((1939, 1975), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1958, 1975), True, 'import torch.nn as nn\n'), ((2910, 2927), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (2922, 2927), False, 'import logging\n'), ((2979, 3004), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3002, 3004), False, 'import argparse\n'), ((4779, 4797), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (4791, 4797), False, 'import logging\n'), ((4874, 4917), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (4886, 4917), False, 'import torch\n'), ((6324, 6367), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['X_0', 'X_1', 'Y'], {}), '(X_0, X_1, Y)\n', (6354, 6367), False, 'import torch\n'), ((6387, 6434), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {}), '(trainset, **kwargs)\n', (6414, 6434), False, 'import torch\n'), ((6450, 6499), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['X_0_t', 'X_1_t', 'Y_t'], {}), '(X_0_t, X_1_t, Y_t)\n', (6480, 6499), False, 'import torch\n'), ((6518, 6587), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', 
(['testset'], {'batch_size': 'args.test_batch_size'}), '(testset, batch_size=args.test_batch_size)\n', (6545, 6587), False, 'import torch\n'), ((6608, 6676), 'models.DDNet_Original.DDNet_Original', 'DDNet', (['C.frame_l', 'C.joint_n', 'C.joint_d', 'C.feat_d', 'C.filters', 'clc_num'], {}), '(C.frame_l, C.joint_n, C.joint_d, C.feat_d, C.filters, clc_num)\n', (6613, 6676), True, 'from models.DDNet_Original import DDNet_Original as DDNet\n'), ((6725, 6799), 'torchsummary.summary', 'summary', (['model', '[(C.frame_l, C.feat_d), (C.frame_l, C.joint_n, C.joint_d)]'], {}), '(model, [(C.frame_l, C.feat_d), (C.frame_l, C.joint_n, C.joint_d)])\n', (6732, 6799), False, 'from torchsummary import summary\n'), ((6896, 6917), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6915, 6917), True, 'import torch.nn as nn\n'), ((6934, 7039), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'factor': 'args.gamma', 'patience': '(5)', 'cooldown': '(0.5)', 'min_lr': '(5e-06)', 'verbose': '(True)'}), '(optimizer, factor=args.gamma, patience=5, cooldown=0.5,\n min_lr=5e-06, verbose=True)\n', (6951, 7039), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((7310, 7340), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(1)'}), '(nrows=3, ncols=1)\n', (7322, 7340), True, 'import matplotlib.pyplot as plt\n'), ((7659, 7689), 'numpy.argmax', 'np.argmax', (["history['test_acc']"], {}), "(history['test_acc'])\n", (7668, 7689), True, 'import numpy as np\n'), ((7701, 7728), 'numpy.max', 'np.max', (["history['test_acc']"], {}), "(history['test_acc'])\n", (7707, 7728), True, 'import numpy as np\n'), ((1105, 1123), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (1109, 1123), False, 'from tqdm import tqdm\n'), ((1985, 2000), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1998, 2000), False, 'import torch\n'), ((4834, 4859), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], 
{}), '()\n', (4857, 4859), False, 'import torch\n'), ((5244, 5253), 'dataloader.jhmdb_loader.JConfig', 'JConfig', ([], {}), '()\n', (5251, 5253), False, 'from dataloader.jhmdb_loader import load_jhmdb_data, Jdata_generator, JConfig\n'), ((7881, 7896), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7894, 7896), False, 'import torch\n'), ((8060, 8085), 'numpy.argmax', 'np.argmax', (['Y_pred'], {'axis': '(1)'}), '(Y_pred, axis=1)\n', (8069, 8085), True, 'import numpy as np\n'), ((1696, 1713), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (1708, 1713), False, 'import logging\n'), ((2053, 2070), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (2057, 2070), False, 'from tqdm import tqdm\n'), ((5409, 5418), 'dataloader.shrec_loader.SConfig', 'SConfig', ([], {}), '()\n', (5416, 5418), False, 'from dataloader.shrec_loader import load_shrec_data, Sdata_generator, SConfig\n'), ((5522, 5553), 'dataloader.shrec_loader.Sdata_generator', 'Sdata_generator', (['"""coarse_label"""'], {}), "('coarse_label')\n", (5537, 5553), False, 'from dataloader.shrec_loader import load_shrec_data, Sdata_generator, SConfig\n'), ((5915, 5936), 'torch.from_numpy', 'torch.from_numpy', (['X_0'], {}), '(X_0)\n', (5931, 5936), False, 'import torch\n'), ((5973, 5994), 'torch.from_numpy', 'torch.from_numpy', (['X_1'], {}), '(X_1)\n', (5989, 5994), False, 'import torch\n'), ((6029, 6048), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (6045, 6048), False, 'import torch\n'), ((6139, 6162), 'torch.from_numpy', 'torch.from_numpy', (['X_0_t'], {}), '(X_0_t)\n', (6155, 6162), False, 'import torch\n'), ((6201, 6224), 'torch.from_numpy', 'torch.from_numpy', (['X_1_t'], {}), '(X_1_t)\n', (6217, 6224), False, 'import torch\n'), ((6261, 6282), 'torch.from_numpy', 'torch.from_numpy', (['Y_t'], {}), '(Y_t)\n', (6277, 6282), False, 'import torch\n'), ((8654, 8676), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (8674, 8676), False, 'import time\n'), 
((8815, 8837), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (8835, 8837), False, 'import time\n'), ((9054, 9071), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (9066, 9071), False, 'import logging\n'), ((758, 769), 'time.time', 'time.time', ([], {}), '()\n', (767, 769), False, 'import time\n'), ((5599, 5608), 'dataloader.shrec_loader.SConfig', 'SConfig', ([], {}), '()\n', (5606, 5608), False, 'from dataloader.shrec_loader import load_shrec_data, Sdata_generator, SConfig\n'), ((5710, 5739), 'dataloader.shrec_loader.Sdata_generator', 'Sdata_generator', (['"""fine_label"""'], {}), "('fine_label')\n", (5725, 5739), False, 'from dataloader.shrec_loader import load_shrec_data, Sdata_generator, SConfig\n'), ((5796, 5807), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5804, 5807), False, 'import sys\n')] |
from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from scipy.optimize import fmin_slsqp
from toolz import partial
from sklearn.model_selection import KFold, TimeSeriesSplit, RepeatedKFold
from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV
from bayes_opt import BayesianOptimization
class Optimize(object):
####
# Synthetic Difference in Differences (SDID)
####
def est_zeta(self, Y_pre_c) -> float:
"""
# SDID
Parameter to adjust the L2 penalty term
"""
return (self.n_treat * self.n_post_term) ** (1 / 4) * np.std(
Y_pre_c.diff().dropna().values
)
def est_omega(self, Y_pre_c, Y_pre_t, zeta):
"""
# SDID
estimating omega
"""
Y_pre_t = Y_pre_t.copy()
n_features = Y_pre_c.shape[1]
nrow = Y_pre_c.shape[0]
_w = np.repeat(1 / n_features, n_features)
_w0 = 1
start_w = np.append(_w, _w0)
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
# Required to have non negative values
max_bnd = abs(Y_pre_t.mean()) * 2
w_bnds = tuple(
(0, 1) if i < n_features else (max_bnd * -1, max_bnd)
for i in range(n_features + 1)
)
caled_w = fmin_slsqp(
partial(self.l2_loss, X=Y_pre_c, y=Y_pre_t, zeta=zeta, nrow=nrow),
start_w,
f_eqcons=lambda x: np.sum(x[:n_features]) - 1,
bounds=w_bnds,
disp=False,
)
return caled_w
def est_lambda(self, Y_pre_c, Y_post_c):
"""
# SDID
estimating lambda
"""
Y_pre_c_T = Y_pre_c.T
Y_post_c_T = Y_post_c.T
n_pre_term = Y_pre_c_T.shape[1]
_lambda = np.repeat(1 / n_pre_term, n_pre_term)
_lambda0 = 1
start_lambda = np.append(_lambda, _lambda0)
if type(Y_post_c_T) == pd.core.frame.DataFrame:
Y_post_c_T = Y_post_c_T.mean(axis=1)
max_bnd = abs(Y_post_c_T.mean()) * 2
lambda_bnds = tuple(
(0, 1) if i < n_pre_term else (max_bnd * -1, max_bnd)
for i in range(n_pre_term + 1)
)
caled_lambda = fmin_slsqp(
partial(self.l2_loss, X=Y_pre_c_T, y=Y_post_c_T, zeta=0, nrow=0),
start_lambda,
f_eqcons=lambda x: np.sum(x[:n_pre_term]) - 1,
bounds=lambda_bnds,
disp=False,
)
return caled_lambda[:n_pre_term]
def l2_loss(self, W, X, y, zeta, nrow) -> float:
"""
Loss function with L2 penalty
"""
if type(y) == pd.core.frame.DataFrame:
y = y.mean(axis=1)
_X = X.copy()
_X["intersept"] = 1
return np.sum((y - _X.dot(W)) ** 2) + nrow * zeta ** 2 * np.sum(W[:-1] ** 2)
####
# Synthetic Control Method (SC)
####
def rmse_loss(self, W, X, y, intersept=True) -> float:
if type(y) == pd.core.frame.DataFrame:
y = y.mean(axis=1)
_X = X.copy()
if intersept:
_X["intersept"] = 1
return np.mean(np.sqrt((y - _X.dot(W)) ** 2))
def rmse_loss_with_V(self, W, V, X, y) -> float:
if type(y) == pd.core.frame.DataFrame:
y = y.mean(axis=1)
_rss = (y - X.dot(W)) ** 2
_n = len(y)
_importance = np.zeros((_n, _n))
np.fill_diagonal(_importance, V)
return np.sum(_importance @ _rss)
    def _v_loss(self, V, X, y, return_loss=True):
        """Inner objective for the feature-importance (V) search.

        Given importances ``V``, solve for the simplex-constrained weights
        minimizing ``rmse_loss_with_V`` on ``(X, y)``. If ``return_loss``,
        evaluate those weights on the pre-period fit (``self.Y_pre_c`` vs.
        ``self.Y_pre_t``); otherwise return the weight vector itself.
        """
        Y_pre_t = self.Y_pre_t.copy()
        n_features = self.Y_pre_c.shape[1]
        _w = np.repeat(1 / n_features, n_features)
        if type(Y_pre_t) == pd.core.frame.DataFrame:
            Y_pre_t = Y_pre_t.mean(axis=1)
        w_bnds = tuple((0, 1) for i in range(n_features))
        _caled_w = fmin_slsqp(
            partial(self.rmse_loss_with_V, V=V, X=X, y=y),
            _w,
            f_eqcons=lambda x: np.sum(x) - 1,
            bounds=w_bnds,
            disp=False,
        )
        if return_loss:
            return self.rmse_loss(_caled_w, self.Y_pre_c, Y_pre_t, intersept=False)
        else:
            return _caled_w
    def estimate_v(self, additional_X, additional_y):
        """Estimate simplex-constrained feature importances V by minimizing
        the nested ``_v_loss`` objective over ``(additional_X, additional_y)``."""
        _len = len(additional_X)
        _v = np.repeat(1 / _len, _len)
        caled_v = fmin_slsqp(
            partial(self._v_loss, X=additional_X, y=additional_y),
            _v,
            f_eqcons=lambda x: np.sum(x) - 1,
            bounds=tuple((0, 1) for i in range(_len)),
            disp=False,
        )
        return caled_v
def est_omega_ADH(
self, Y_pre_c, Y_pre_t, additional_X=pd.DataFrame(), additional_y=pd.DataFrame()
):
"""
# SC
estimating omega for synthetic control method (not for synthetic diff.-in-diff.)
"""
Y_pre_t = Y_pre_t.copy()
n_features = Y_pre_c.shape[1]
nrow = Y_pre_c.shape[0]
_w = np.repeat(1 / n_features, n_features)
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
# Required to have non negative values
w_bnds = tuple((0, 1) for i in range(n_features))
if len(additional_X) == 0:
caled_w = fmin_slsqp(
partial(self.rmse_loss, X=Y_pre_c, y=Y_pre_t, intersept=False),
_w,
f_eqcons=lambda x: np.sum(x) - 1,
bounds=w_bnds,
disp=False,
)
return caled_w
else:
assert additional_X.shape[1] == Y_pre_c.shape[1]
if type(additional_y) == pd.core.frame.DataFrame:
additional_y = additional_y.mean(axis=1)
# normalized
temp_df = pd.concat([additional_X, additional_y], axis=1)
ss = StandardScaler()
ss_df = pd.DataFrame(
ss.fit_transform(temp_df), columns=temp_df.columns, index=temp_df.index
)
ss_X = ss_df.iloc[:, :-1]
ss_y = ss_df.iloc[:, -1]
add_X = pd.concat([Y_pre_c, ss_X])
add_y = pd.concat([Y_pre_t, ss_y])
self.caled_v = self.estimate_v(additional_X=add_X, additional_y=add_y)
return self._v_loss(self.caled_v, X=add_X, y=add_y, return_loss=False)
#####
# cv search for zeta
####
    def _zeta_given_cv_loss_inverse(self, zeta, cv=5, split_type="KFold"):
        # Negated CV loss, so a maximizer (e.g. BayesianOptimization) can be
        # used to minimize the cross-validated loss over zeta.
        return -1 * self._zeta_given_cv_loss(zeta, cv, split_type)[0]
def _zeta_given_cv_loss(self, zeta, cv=5, split_type="KFold"):
nrow = self.Y_pre_c.shape[0]
if split_type == "KFold":
kf = KFold(n_splits=cv, random_state=self.random_seed)
elif split_type == "TimeSeriesSplit":
kf = TimeSeriesSplit(n_splits=cv)
elif split_type == "RepeatedKFold":
_cv = max(2, int(cv / 2))
kf = RepeatedKFold(
n_splits=_cv, n_repeats=_cv, random_state=self.random_seed
)
loss_result = []
nf_result = []
for train_index, test_index in kf.split(self.Y_pre_c, self.Y_pre_t):
train_w = self.est_omega(
self.Y_pre_c.iloc[train_index], self.Y_pre_t.iloc[train_index], zeta
)
nf_result.append(np.sum(np.round(np.abs(train_w), 3) > 0) - 1)
loss_result.append(
self.rmse_loss(
train_w,
self.Y_pre_c.iloc[test_index],
self.Y_pre_t.iloc[test_index],
)
)
return np.mean(loss_result), np.mean(nf_result)
def grid_search_zeta(
self, cv=5, n_candidate=20, candidate_zata=[], split_type="KFold"
):
"""
Search for zeta using grid search instead of theoretical values
"""
if len(candidate_zata) == 0:
for _z in np.linspace(0.1, self.base_zeta * 2, n_candidate):
candidate_zata.append(_z)
candidate_zata.append(self.base_zeta)
candidate_zata.append(0)
candidate_zata = sorted(candidate_zata)
result_loss_dict = {}
result_nf_dict = {}
print("cv: zeta")
for _zeta in tqdm(candidate_zata):
result_loss_dict[_zeta], result_nf_dict[_zeta] = self._zeta_given_cv_loss(
_zeta, cv=cv, split_type=split_type
)
loss_sorted = sorted(result_loss_dict.items(), key=lambda x: x[1])
return loss_sorted[0]
def bayes_opt_zeta(
self,
cv=5,
init_points=5,
n_iter=5,
zeta_max=None,
zeta_min=None,
split_type="KFold",
):
"""
Search for zeta using Bayesian Optimization instead of theoretical values
"""
if zeta_max == None:
zeta_max = self.base_zeta * 1.02
zeta_max2 = self.base_zeta * 2
if zeta_min == None:
zeta_min = self.base_zeta * 0.98
zeta_min2 = 0.01
pbounds = {"zeta": (zeta_min, zeta_max)}
optimizer = BayesianOptimization(
f=partial(self._zeta_given_cv_loss_inverse, cv=cv, split_type=split_type),
pbounds=pbounds,
random_state=self.random_seed,
)
optimizer.maximize(
init_points=2,
n_iter=2,
)
optimizer.set_bounds(new_bounds={"zeta": (zeta_min2, zeta_max2)})
optimizer.maximize(
init_points=init_points,
n_iter=n_iter,
)
optimizer.max["params"]["zeta"]
return (optimizer.max["params"]["zeta"], optimizer.max["target"] * -1)
#####
# The following is for sparse estimation
####
def est_omega_ElasticNet(self, Y_pre_c, Y_pre_t):
Y_pre_t = Y_pre_t.copy()
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
# Y_pre_t.columns = "treatment_group"
regr = ElasticNetCV(cv=5, random_state=0)
regr.fit(Y_pre_c, Y_pre_t)
self.elastic_net_alpha = regr.alpha_
caled_w = regr.coef_
return np.append(caled_w, regr.intercept_)
def est_omega_Lasso(self, Y_pre_c, Y_pre_t):
Y_pre_t = Y_pre_t.copy()
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
regr = LassoCV(cv=5, random_state=0)
regr.fit(Y_pre_c, Y_pre_t)
self.lasso_alpha = regr.alpha_
caled_w = regr.coef_
return np.append(caled_w, regr.intercept_)
def est_omega_Ridge(self, Y_pre_c, Y_pre_t):
Y_pre_t = Y_pre_t.copy()
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
regr = RidgeCV(cv=5)
regr.fit(Y_pre_c, Y_pre_t)
self.ridge_alpha = regr.alpha_
caled_w = regr.coef_
return np.append(caled_w, regr.intercept_)
| [
"sklearn.linear_model.RidgeCV",
"sklearn.model_selection.KFold",
"numpy.mean",
"sklearn.linear_model.ElasticNetCV",
"numpy.repeat",
"sklearn.model_selection.TimeSeriesSplit",
"numpy.linspace",
"toolz.partial",
"pandas.DataFrame",
"sklearn.model_selection.RepeatedKFold",
"numpy.abs",
"sklearn.l... | [((931, 968), 'numpy.repeat', 'np.repeat', (['(1 / n_features)', 'n_features'], {}), '(1 / n_features, n_features)\n', (940, 968), True, 'import numpy as np\n'), ((1004, 1022), 'numpy.append', 'np.append', (['_w', '_w0'], {}), '(_w, _w0)\n', (1013, 1022), True, 'import numpy as np\n'), ((1861, 1898), 'numpy.repeat', 'np.repeat', (['(1 / n_pre_term)', 'n_pre_term'], {}), '(1 / n_pre_term, n_pre_term)\n', (1870, 1898), True, 'import numpy as np\n'), ((1944, 1972), 'numpy.append', 'np.append', (['_lambda', '_lambda0'], {}), '(_lambda, _lambda0)\n', (1953, 1972), True, 'import numpy as np\n'), ((3441, 3459), 'numpy.zeros', 'np.zeros', (['(_n, _n)'], {}), '((_n, _n))\n', (3449, 3459), True, 'import numpy as np\n'), ((3469, 3501), 'numpy.fill_diagonal', 'np.fill_diagonal', (['_importance', 'V'], {}), '(_importance, V)\n', (3485, 3501), True, 'import numpy as np\n'), ((3518, 3544), 'numpy.sum', 'np.sum', (['(_importance @ _rss)'], {}), '(_importance @ _rss)\n', (3524, 3544), True, 'import numpy as np\n'), ((3691, 3728), 'numpy.repeat', 'np.repeat', (['(1 / n_features)', 'n_features'], {}), '(1 / n_features, n_features)\n', (3700, 3728), True, 'import numpy as np\n'), ((4349, 4374), 'numpy.repeat', 'np.repeat', (['(1 / _len)', '_len'], {}), '(1 / _len, _len)\n', (4358, 4374), True, 'import numpy as np\n'), ((4716, 4730), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4728, 4730), True, 'import pandas as pd\n'), ((4745, 4759), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4757, 4759), True, 'import pandas as pd\n'), ((5011, 5048), 'numpy.repeat', 'np.repeat', (['(1 / n_features)', 'n_features'], {}), '(1 / n_features, n_features)\n', (5020, 5048), True, 'import numpy as np\n'), ((8301, 8321), 'tqdm.tqdm', 'tqdm', (['candidate_zata'], {}), '(candidate_zata)\n', (8305, 8321), False, 'from tqdm import tqdm\n'), ((10049, 10083), 'sklearn.linear_model.ElasticNetCV', 'ElasticNetCV', ([], {'cv': '(5)', 'random_state': '(0)'}), '(cv=5, 
random_state=0)\n', (10061, 10083), False, 'from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV\n'), ((10211, 10246), 'numpy.append', 'np.append', (['caled_w', 'regr.intercept_'], {}), '(caled_w, regr.intercept_)\n', (10220, 10246), True, 'import numpy as np\n'), ((10443, 10472), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {'cv': '(5)', 'random_state': '(0)'}), '(cv=5, random_state=0)\n', (10450, 10472), False, 'from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV\n'), ((10594, 10629), 'numpy.append', 'np.append', (['caled_w', 'regr.intercept_'], {}), '(caled_w, regr.intercept_)\n', (10603, 10629), True, 'import numpy as np\n'), ((10826, 10839), 'sklearn.linear_model.RidgeCV', 'RidgeCV', ([], {'cv': '(5)'}), '(cv=5)\n', (10833, 10839), False, 'from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV\n'), ((10961, 10996), 'numpy.append', 'np.append', (['caled_w', 'regr.intercept_'], {}), '(caled_w, regr.intercept_)\n', (10970, 10996), True, 'import numpy as np\n'), ((1396, 1461), 'toolz.partial', 'partial', (['self.l2_loss'], {'X': 'Y_pre_c', 'y': 'Y_pre_t', 'zeta': 'zeta', 'nrow': 'nrow'}), '(self.l2_loss, X=Y_pre_c, y=Y_pre_t, zeta=zeta, nrow=nrow)\n', (1403, 1461), False, 'from toolz import partial\n'), ((2321, 2385), 'toolz.partial', 'partial', (['self.l2_loss'], {'X': 'Y_pre_c_T', 'y': 'Y_post_c_T', 'zeta': '(0)', 'nrow': '(0)'}), '(self.l2_loss, X=Y_pre_c_T, y=Y_post_c_T, zeta=0, nrow=0)\n', (2328, 2385), False, 'from toolz import partial\n'), ((3928, 3973), 'toolz.partial', 'partial', (['self.rmse_loss_with_V'], {'V': 'V', 'X': 'X', 'y': 'y'}), '(self.rmse_loss_with_V, V=V, X=X, y=y)\n', (3935, 3973), False, 'from toolz import partial\n'), ((4418, 4471), 'toolz.partial', 'partial', (['self._v_loss'], {'X': 'additional_X', 'y': 'additional_y'}), '(self._v_loss, X=additional_X, y=additional_y)\n', (4425, 4471), False, 'from toolz import partial\n'), ((5815, 5862), 'pandas.concat', 'pd.concat', (['[additional_X, additional_y]'], 
{'axis': '(1)'}), '([additional_X, additional_y], axis=1)\n', (5824, 5862), True, 'import pandas as pd\n'), ((5880, 5896), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5894, 5896), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6130, 6156), 'pandas.concat', 'pd.concat', (['[Y_pre_c, ss_X]'], {}), '([Y_pre_c, ss_X])\n', (6139, 6156), True, 'import pandas as pd\n'), ((6177, 6203), 'pandas.concat', 'pd.concat', (['[Y_pre_t, ss_y]'], {}), '([Y_pre_t, ss_y])\n', (6186, 6203), True, 'import pandas as pd\n'), ((6719, 6768), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'cv', 'random_state': 'self.random_seed'}), '(n_splits=cv, random_state=self.random_seed)\n', (6724, 6768), False, 'from sklearn.model_selection import KFold, TimeSeriesSplit, RepeatedKFold\n'), ((7646, 7666), 'numpy.mean', 'np.mean', (['loss_result'], {}), '(loss_result)\n', (7653, 7666), True, 'import numpy as np\n'), ((7668, 7686), 'numpy.mean', 'np.mean', (['nf_result'], {}), '(nf_result)\n', (7675, 7686), True, 'import numpy as np\n'), ((7952, 8001), 'numpy.linspace', 'np.linspace', (['(0.1)', '(self.base_zeta * 2)', 'n_candidate'], {}), '(0.1, self.base_zeta * 2, n_candidate)\n', (7963, 8001), True, 'import numpy as np\n'), ((2889, 2908), 'numpy.sum', 'np.sum', (['(W[:-1] ** 2)'], {}), '(W[:-1] ** 2)\n', (2895, 2908), True, 'import numpy as np\n'), ((5338, 5400), 'toolz.partial', 'partial', (['self.rmse_loss'], {'X': 'Y_pre_c', 'y': 'Y_pre_t', 'intersept': '(False)'}), '(self.rmse_loss, X=Y_pre_c, y=Y_pre_t, intersept=False)\n', (5345, 5400), False, 'from toolz import partial\n'), ((6832, 6860), 'sklearn.model_selection.TimeSeriesSplit', 'TimeSeriesSplit', ([], {'n_splits': 'cv'}), '(n_splits=cv)\n', (6847, 6860), False, 'from sklearn.model_selection import KFold, TimeSeriesSplit, RepeatedKFold\n'), ((9192, 9263), 'toolz.partial', 'partial', (['self._zeta_given_cv_loss_inverse'], {'cv': 'cv', 'split_type': 'split_type'}), 
'(self._zeta_given_cv_loss_inverse, cv=cv, split_type=split_type)\n', (9199, 9263), False, 'from toolz import partial\n'), ((6960, 7033), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': '_cv', 'n_repeats': '_cv', 'random_state': 'self.random_seed'}), '(n_splits=_cv, n_repeats=_cv, random_state=self.random_seed)\n', (6973, 7033), False, 'from sklearn.model_selection import KFold, TimeSeriesSplit, RepeatedKFold\n'), ((1515, 1537), 'numpy.sum', 'np.sum', (['x[:n_features]'], {}), '(x[:n_features])\n', (1521, 1537), True, 'import numpy as np\n'), ((2444, 2466), 'numpy.sum', 'np.sum', (['x[:n_pre_term]'], {}), '(x[:n_pre_term])\n', (2450, 2466), True, 'import numpy as np\n'), ((4022, 4031), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4028, 4031), True, 'import numpy as np\n'), ((4520, 4529), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4526, 4529), True, 'import numpy as np\n'), ((5457, 5466), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (5463, 5466), True, 'import numpy as np\n'), ((7373, 7388), 'numpy.abs', 'np.abs', (['train_w'], {}), '(train_w)\n', (7379, 7388), True, 'import numpy as np\n')] |
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ImagingReso import _utilities
import plotly.tools as tls
class Resonance(object):
    """Neutron resonance calculator for a layered sample.

    Builds per-isotope/element/layer cross-sections over a requested energy
    range and derives the corresponding transmission/attenuation signals.
    """
    # hard limits (eV) accepted for the requested energy range
    e_min = 1e-5
    e_max = 1e8
    # NOTE(review): mutable class-level defaults; __init__ and the private
    # updaters rebind them per instance, but until then all instances share
    # these dicts — confirm no caller relies on the class-level objects.
    stack = {}  # compound, thickness, atomic_ratio of each layer with isotopes information
    stack_sigma = {}  # all the energy and sigma of the isotopes and compounds
    stack_signal = {}  # transmission and attenuation signal for every isotope and compound
    total_signal = {}  # transmission and attenuation of the entire sample
    density_lock = {}  # True per layer whose density was user-provided (locked)
    energy_max = np.NaN  # populated by __init__ (eV)
    energy_min = np.NaN  # populated by __init__ (eV)
    energy_step = np.NaN  # populated by __init__ (eV)
def __init__(self, stack={}, energy_max=1, energy_min=0.001, energy_step=0.001,
database='ENDF_VII', temperature='294K'):
"""initialize resonance object
:param stack: dictionary to store sample info
example: {'layer1': {'elements':['Ag','Si],
'atomic_ratio': [1, 2],
'thickness': {'value': 0.025,
'units': 'mm',
},
'density': {'units': 'g/cm3',
'value': 0.5,
},
}
:type stack: dictionary
:param energy_max: (default 300) max energy in eV to use in calculation
:type energy_max: float
:param energy_min: (default 0) min energy in eV to use in calculation
:type energy_min: float
:param energy_step: (default 0.1) energy step to use in extrapolation of sigma data
:type energy_step: float
:param database: database to extract cross-section info. ['ENDF_VII', 'ENDF_VIII'], both are database at 294K
:type database: str
"""
if database not in ['ENDF_VII', 'ENDF_VIII', '_data_for_unittest']:
raise ValueError(
"Database {} entered not existed. \nCurrent support: ['ENDF_VII', 'ENDF_VIII'] ".format(database))
# ENDF_VII only has nuclide 'C-0', replaced with 'C-12' and 'C-13' from ENDF_VIII.
# ENDF_VIII data base has problematic 'B-10' ace file, replaced with 'B-10' from ENFF_VII.
if database == 'ENDF_VIII':
pass
self.database = database
self.__element_metadata = {}
if energy_min < self.e_min:
raise ValueError("Energy min (eV) must be >= {}".format(self.e_min))
self.energy_min = energy_min
if energy_max > self.e_max:
raise ValueError("Energy max (eV) must be <= {}".format(self.e_max))
self.energy_max = energy_max
if energy_min == energy_max:
raise ValueError("Energy min and max should not have the same value!")
if (energy_max - energy_min) < energy_step:
raise ValueError("Energy step is bigger than range of energy specified!")
self.energy_step = energy_step
if not stack == {}:
# checking that every element of each stack is defined
_utilities.checking_stack(stack=stack, database=self.database)
new_stack = self.__update_stack_with_isotopes_infos(stack=stack)
self.stack = new_stack
# if layer density has been defined, lock it
self.__lock_density_if_defined(stack=self.stack)
# calculate stack_sigma, layer density, atoms_per_cm3 ...
self.__math_on_stack()
def __str__(self):
"""what to display if user does
>>> o_reso = Resolution()
>>> print(o_reso)
"""
return json.dumps(self.stack, indent=4)
def __repr__(self):
"""what to display if user does
>>> o_reso = Resolution()
>>> o_reso
"""
return json.dumps(self.stack, indent=4)
def add_layer(self, formula='', thickness=np.NaN, density=np.NaN):
"""provide another way to define the layers (stack)
Parameters:
===========
formula: string
ex: 'CoAg2'
ex: 'Al'
thickness: float (in mm)
density: float (g/cm3)
"""
if formula == '':
return
if formula in self.stack.keys():
raise ValueError("Layer '{}' is already in the sample stack.".format(formula))
_new_stack = _utilities.formula_to_dictionary(formula=formula,
thickness=thickness,
density=density,
database=self.database)
# check if density has been defined
self.__lock_density_if_defined(stack=_new_stack)
new_stack = self.__update_stack_with_isotopes_infos(stack=_new_stack)
self.stack = {**self.stack, **new_stack}
# calculate stack_sigma, layer density, atoms_per_cm3 ...
self.__math_on_stack()
def get_isotopic_ratio(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
compound = str(compound)
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_compound = str(_compound)
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_isotopic_ratio(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
list_istopes = _stack[compound][element]['isotopes']['list']
list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
iso_ratio = zip(list_istopes, list_ratio)
_stoichiometric_ratio = {}
for _iso, _ratio in iso_ratio:
_stoichiometric_ratio[_iso] = _ratio
return _stoichiometric_ratio
    def set_isotopic_ratio(self, compound='', element='', list_ratio=[]):
        """defines the new set of ratio of the compound/element and trigger the calculation to update the density

        Parameters:
        ===========
        compound: string (default is ''). Name of compound
        element: string (default is ''). Name of element
        list_ratio: list (default is []). list of new stoichiometric_ratio

        Raises:
        =======
        ValueError if compound does not exist
        ValueError if element does not exist
        ValueError if list_ratio does not have the right format
        """
        _stack = self.stack
        list_compounds = _stack.keys()
        if compound not in _stack.keys():
            list_compounds_joined = ', '.join(list_compounds)
            raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
        if element == '':
            # we assume that the element and compounds names matched
            element = compound
        list_element = _stack[compound].keys()
        if element not in list_element:
            list_element_joined = ', '.join(list_element)
            raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
        # the new ratio list must be the same length as the isotope list
        old_list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
        if not (len(old_list_ratio) == len(list_ratio)):
            raise ValueError("New list of ratio ({} elements) does not match old list size ({} elements!".format(len(
                list_ratio), len(old_list_ratio)))
        # ratios must sum to 1 within tolerance; raises otherwise
        _utilities.check_iso_ratios(ratios=list_ratio, tol=0.005)
        # NOTE(review): the caller's list is stored by reference (not copied);
        # the mutable default [] is never mutated here, but later external
        # mutation of the passed list would leak into the stack — confirm.
        self.stack[compound][element]['isotopes']['isotopic_ratio'] = list_ratio
        # propagate the new ratios: element molar mass and density first ...
        self.__update_molar_mass(compound=compound, element=element)
        self.__update_density(compound=compound, element=element)
        # update entire stack
        self.__math_on_stack()
def get_density(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their density
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_density(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compile, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
return _stack[compound][element]['density']['value']
    def __math_on_stack(self, used_lock=False):
        """Run the full recomputation pipeline over the current stack.

        Order matters: cross-sections first, then layer densities, then
        atoms/cm3, and finally the transmission/attenuation signals.

        :param used_lock: when True, layers with a locked (user-provided)
                          density keep their atoms_per_cm3 untouched
        """
        # populate stack_sigma (Sigma vs Energy for every element)
        self.__get_sigmas()
        # populate compound density (if none provided)
        self.__update_layer_density()
        # populate compound molar mass
        # self.__update_layer_molar_mass() ### included in __calculate_atoms_per_cm3
        # populate atoms_per_cm3
        self.__calculate_atoms_per_cm3(used_lock=used_lock)
        # calculate transmission and attenuation
        self.__calculate_transmission_attenuation()
def __lock_density_if_defined(self, stack: dict):
"""lock (True) the density lock if the density has been been defined during initialization
Store the resulting dictionary into density_lock
Parameters:
===========
stack: dictionary (optional)
if not provided, the entire stack will be used
"""
if self.stack == {}:
density_lock = {}
else:
density_lock = self.density_lock
for _compound in stack.keys():
_density = stack[_compound]['density']['value']
if np.isnan(_density):
density_lock[_compound] = False
else:
density_lock[_compound] = True
self.density_lock = density_lock
    def __calculate_transmission_attenuation(self):
        """Compute transmission/attenuation vs energy at every level.

        Populates ``self.stack_signal`` (per isotope, per element, per
        compound) and ``self.total_signal`` (whole sample) from the
        cross-sections stored in ``self.stack_sigma``.
        """
        stack = self.stack
        stack_sigma = self.stack_sigma
        stack_signal = {}
        total_signal = {}
        # total sample transmission is the product over all layers
        total_transmisison = 1.
        # compound level
        for _name_of_compound in stack.keys():
            stack_signal[_name_of_compound] = {}
            mu_per_cm_compound = 0
            transmission_compound = 1.
            energy_compound = []
            _list_element = stack[_name_of_compound]['elements']
            # layer thickness converted to cm for the transmission formula
            _thickness_cm = _utilities.set_distance_units(value=stack[_name_of_compound]['thickness']['value'],
                                                          from_units=stack[_name_of_compound]['thickness']['units'],
                                                          to_units='cm')
            # element level
            for _element in _list_element:
                stack_signal[_name_of_compound][_element] = {}
                _atoms_per_cm3 = stack[_name_of_compound][_element]['atoms_per_cm3']
                # isotope level
                for _iso in stack[_name_of_compound][_element]['isotopes']['list']:
                    stack_signal[_name_of_compound][_element][_iso] = {}
                    _sigma_iso = stack_sigma[_name_of_compound][_element][_iso]['sigma_b']
                    _mu_per_cm_iso, _transmission_iso = _utilities.calculate_transmission(
                        thickness_cm=_thickness_cm,
                        atoms_per_cm3=_atoms_per_cm3,
                        sigma_b=_sigma_iso)
                    stack_signal[_name_of_compound][_element][_iso]['mu_per_cm'] = _mu_per_cm_iso
                    stack_signal[_name_of_compound][_element][_iso]['transmission'] = _transmission_iso
                    # attenuation is the complement of transmission
                    stack_signal[_name_of_compound][_element][_iso]['attenuation'] = 1. - _transmission_iso
                    stack_signal[_name_of_compound][_element][_iso]['energy_eV'] = \
                        stack_sigma[_name_of_compound][_element][_iso]['energy_eV']
                # element signal from the isotopic-ratio-weighted sigma
                _sigma_ele = stack_sigma[_name_of_compound][_element]['sigma_b']
                _mu_per_cm_ele, _transmission_ele = _utilities.calculate_transmission(
                    thickness_cm=_thickness_cm,
                    atoms_per_cm3=_atoms_per_cm3,
                    sigma_b=_sigma_ele)
                stack_signal[_name_of_compound][_element]['mu_per_cm'] = _mu_per_cm_ele
                stack_signal[_name_of_compound][_element]['transmission'] = _transmission_ele
                stack_signal[_name_of_compound][_element]['attenuation'] = 1. - _transmission_ele
                stack_signal[_name_of_compound][_element]['energy_eV'] = \
                    stack_sigma[_name_of_compound][_element]['energy_eV']
                # attenuation coefficients add; transmissions multiply
                mu_per_cm_compound += _mu_per_cm_ele  # plus
                transmission_compound *= _transmission_ele  # multiply
                # the compound energy axis is taken from its first element
                if len(energy_compound) == 0:
                    energy_compound = stack_sigma[_name_of_compound][_element]['energy_eV']
            stack_signal[_name_of_compound]['mu_per_cm'] = mu_per_cm_compound
            stack_signal[_name_of_compound]['transmission'] = transmission_compound
            stack_signal[_name_of_compound]['attenuation'] = 1. - transmission_compound
            stack_signal[_name_of_compound]['energy_eV'] = energy_compound
            total_transmisison *= transmission_compound
        total_attenuation = 1. - total_transmisison
        self.stack_signal = stack_signal
        total_signal['transmission'] = total_transmisison
        total_signal['attenuation'] = total_attenuation
        # NOTE(review): total energy axis is the last compound's axis —
        # assumes all layers share the same energy grid; confirm upstream.
        total_signal['energy_eV'] = energy_compound
        self.total_signal = total_signal
    def __calculate_atoms_per_cm3(self, used_lock=False):
        """calculate for each element, the atoms per cm3

        Also refreshes each layer's molar mass. Layers whose density is
        locked (user-provided) are skipped when ``used_lock`` is True.
        """
        stack = self.stack
        _density_lock = self.density_lock
        for _name_of_compound in stack.keys():
            # skip layers with a locked density when requested
            if used_lock and _density_lock[_name_of_compound]:
                continue
            molar_mass_layer, atoms_per_cm3_layer = _utilities.get_atoms_per_cm3_of_layer(
                compound_dict=stack[_name_of_compound])
            # Update layer molar mass
            stack[_name_of_compound]['molar_mass'] = {'value': molar_mass_layer,
                                                      'units': 'g/mol'}
            # Update atoms per cm3
            stack[_name_of_compound]['atoms_per_cm3'] = atoms_per_cm3_layer
            # per-element atoms/cm3 scaled by the stoichiometric ratio
            for _index, _name_of_ele in enumerate(stack[_name_of_compound]['elements']):
                stack[_name_of_compound][_name_of_ele]['atoms_per_cm3'] = atoms_per_cm3_layer * \
                                                                          stack[_name_of_compound][
                                                                              'stoichiometric_ratio'][_index]
        self.stack = stack
def __fill_missing_keys(self, stack: dict):
_list_key_to_check = ['density']
_list_key_value = [{'value': np.NaN,
'units': 'g/cm3'}]
list_compound = stack.keys()
for _key in list_compound:
_inside_keys = stack[_key].keys()
_key_value_to_search = zip(_list_key_to_check, _list_key_value)
for _key_to_find, _value_to_add in _key_value_to_search:
if not (_key_to_find in _inside_keys):
stack[_key][_key_to_find] = _value_to_add.copy()
return stack
def __update_stack_with_isotopes_infos(self, stack: dict):
"""retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack"""
for _key in stack:
_elements = stack[_key]['elements']
for _element in _elements:
_dict = _utilities.get_isotope_dicts(element=_element, database=self.database)
stack[_key][_element] = _dict
stack = self.__fill_missing_keys(stack=stack)
return stack
def __update_layer_density(self):
"""calculate or update the layer density"""
_stack = self.stack
_density_lock = self.density_lock
list_compound = _stack.keys()
for _key in list_compound:
if _density_lock[_key]:
continue
_list_ratio = _stack[_key]['stoichiometric_ratio']
_list_density = []
for _element in _stack[_key]['elements']:
_list_density.append(_stack[_key][_element]['density']['value'])
_compound_density = _utilities.get_compound_density(list_density=_list_density,
list_ratio=_list_ratio)
_stack[_key]['density']['value'] = _compound_density
self.stack = _stack
def __update_density(self, compound='', element=''):
"""Re-calculate the density of the element given due to stoichiometric changes as
well as the compound density (if density is not locked)
Parameters:
===========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_density_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_density = self.stack[compound][element]['isotopes']['density']['value']
ratio_density = zip(list_ratio, list_density)
for _ratio, _density in ratio_density:
_density_element += np.float(_ratio) * np.float(_density)
self.stack[compound][element]['density']['value'] = _density_element
_density_lock = self.density_lock
if not _density_lock[compound]:
self.__update_layer_density()
# def __update_layer_molar_mass(self):
# """calculate or update the layer molar mass"""
# _stack = self.stack
# _molar_mass = np.nan
# for _key in _stack.keys():
# _list_ratio = _stack[_key]['stoichiometric_ratio']
# _list_molar_mass = []
# for _element in _stack[_key]['elements']:
# _list_molar_mass.append(_stack[_key][_element]['molar_mass']['value'])
# _molar_mass = _utilities.get_compound_molar_mass(list_molar_mass=_list_molar_mass,
# list_ratio=_list_ratio)
# _stack[_key]['molar_mass'] = {'value': _molar_mass,
# 'units': 'g/mol'}
# self.stack = _stack
def __update_molar_mass(self, compound='', element=''):
"""Re-calculate the molar mass of the element given due to stoichiometric changes
Parameters:
==========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_molar_mass_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_mass = self.stack[compound][element]['isotopes']['mass']['value']
ratio_mass = zip(list_ratio, list_mass)
for _ratio, _mass in ratio_mass:
_molar_mass_element += np.float(_ratio) * np.float(_mass)
self.stack[compound][element]['molar_mass']['value'] = _molar_mass_element
    def __get_sigmas(self):
        """will populate the stack_sigma dictionary with the energy and sigma array
        for all the compound/element and isotopes"""
        stack_sigma = {}
        _stack = self.stack
        # cross-section files live next to this module, under reference_data/<database>
        _file_path = os.path.abspath(os.path.dirname(__file__))
        _database_folder = os.path.join(_file_path, 'reference_data', self.database)
        _list_compounds = _stack.keys()
        for _compound in _list_compounds:
            _list_element = _stack[_compound]['elements']
            stack_sigma[_compound] = {}
            for _element in _list_element:
                stack_sigma[_compound][_element] = {}
                _list_isotopes = _stack[_compound][_element]['isotopes']['list']
                _list_file_names = _stack[_compound][_element]['isotopes']['file_names']
                _list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio']
                _iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio)
                stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio
                # _dict_sigma_isotopes_sum = {}
                # running totals used to build the element-level sigma/energy
                _sigma_all_isotopes = 0
                _energy_all_isotopes = 0
                for _iso, _file, _ratio in _iso_file_ratio:
                    stack_sigma[_compound][_element][_iso] = {}
                    # print(_iso, _file, _ratio)
                    # compounds with bonded hydrogen use dedicated '1-H' data
                    if _compound in _utilities.h_bond_list:
                        if _iso == '1-H':
                            _utilities.is_element_in_database(element='H', database='Bonded_H')
                            _database_folder_h = os.path.join(_file_path, 'reference_data', 'Bonded_H')
                            sigma_file = os.path.join(_database_folder_h, 'H-{}.csv'.format(_compound))
                            if _compound == 'ZrH':
                                print("NOTICE:\n"
                                      "Your entry {} contains bonded H, and has experimental data available.\n"
                                      "Therefore, '1-H' cross-section has been replaced by the data "
                                      "reported at https://t2.lanl.gov/nis/data/endf/endfvii-thermal.html".format(_compound))
                            else:
                                print("NOTICE:\n"
                                      "Your entry {} contains bonded H, and has experimental data available.\n"
                                      "Therefore, '1-H' cross-section has been replaced by the data "
                                      "reported at https://doi.org/10.1103/PhysRev.76.1750".format(_compound))
                        else:
                            sigma_file = os.path.join(_database_folder, _file)
                    else:
                        sigma_file = os.path.join(_database_folder, _file)
                    # interpolate sigma onto the requested energy grid
                    _dict = _utilities.get_sigma(database_file_name=sigma_file,
                                                 e_min=self.energy_min,
                                                 e_max=self.energy_max,
                                                 e_step=self.energy_step)
                    stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV']
                    # 'sigma_b' is weighted by isotopic ratio; raw kept separately
                    stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio
                    stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b']
                    # sigma for all isotopes with their isotopic ratio
                    _sigma_all_isotopes += _dict['sigma_b'] * _ratio
                    _energy_all_isotopes += _dict['energy_eV']
                # energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes
                _mean_energy_all_isotopes = _energy_all_isotopes / len(_list_isotopes)
                stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes
                stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes
        self.stack_sigma = stack_sigma
    def plot(self, y_axis='attenuation', x_axis='energy',
             logx=False, logy=False,
             mixed=True, all_layers=False, all_elements=False,
             all_isotopes=False, items_to_plot=None,
             time_unit='us', offset_us=0., source_to_detector_m=16.,
             time_resolution_us=0.16, t_start_us=1,
             plotly=False, ax_mpl=None,
             fmt='-', ms='2', lw='1.5', alpha=1):
        # offset delay values is normal 2.99 us with NONE actual MCP delay settings
        """display the transmission or attenuation of compound, element and/or isotopes specified

        Parameters:
        ===========
        :param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number']
        :type x_axis: str
        :param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'mu_per_cm']
        :type y_axis: str
        :param logx: True -> display x in log scale
        :type logx: boolean.
        :param logy: True -> display y in log scale
        :type logy: boolean.
        :param mixed: boolean. True -> display the total of each layer
                               False -> not displayed
        :param all_layers: boolean. True -> display all layers
                                    False -> not displayed
        :param all_elements: boolean. True -> display all elements signal
                                      False -> not displayed
        :param all_isotopes: boolean. True -> display all isotopes signal
                                      False -> not displayed
        :param items_to_plot: array that describes what to plot
            ex:
                [['CoAg','Ag','107-Ag'], ['CoAg']]
            if the dictionary is empty, everything is exported
        :param time_unit: string. Must be either ['s'|'us'|'ns']
            Note: this will be used only when x_axis='time'
        :param offset_us: default: 0
            Note: only used when x_axis='number' or 'time'
        :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
        :param time_resolution_us: Note: this will be used only when x_axis='number'
        :param t_start_us: when is the first acquisition occurred. default: 1
            Note: this will be used only when x_axis='number'
        :param plotly: control to use plotly to display or not.
        :type plotly: bool
        :param ax_mpl: matplotlib.axes to plot against
        :type ax_mpl: matplotlib.axes
        :param fmt: matplotlib.axes.plot kwargs
        :type fmt: str
        :param ms: matplotlib.axes.plot kwargs
        :type ms: float
        :param lw: matplotlib.axes.plot kwargs
        :type lw: float
        :param alpha: matplotlib.axes.plot kwargs
        :type alpha: float
        """
        # Validate axis/unit choices up front so a typo fails loudly instead of
        # silently plotting nothing.
        if x_axis not in _utilities.x_type_list:
            raise ValueError("Please specify the x-axis type using one from '{}'.".format(_utilities.x_type_list))
        if time_unit not in _utilities.time_unit_list:
            raise ValueError("Please specify the time unit using one from '{}'.".format(_utilities.time_unit_list))
        if y_axis not in _utilities.y_type_list:
            raise ValueError("Please specify the y-axis type using one from '{}'.".format(_utilities.y_type_list))

        # figure size
        # plt.figure(figsize=(8, 8))

        # stack from self
        _stack_signal = self.stack_signal
        _stack = self.stack
        _stack_sigma = self.stack_sigma
        # x values start as the energy grid and are converted below if needed
        _x_axis = self.total_signal['energy_eV']
        x_axis_label = None

        # Creating the matplotlib graph..
        if ax_mpl is None:
            fig_mpl, ax_mpl = plt.subplots()

        """X-axis"""
        # determine values and labels for x-axis with options from
        # 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)'
        if x_axis == 'energy':
            x_axis_label = 'Energy (eV)'
        if x_axis == 'lambda':
            x_axis_label = u"Wavelength (\u212B)"
            _x_axis = _utilities.ev_to_angstroms(array=_x_axis)
        if x_axis == 'time':
            # time-of-flight conversion depends on flight path and trigger offset
            if time_unit == 's':
                x_axis_label = 'Time (s)'
                _x_axis = _utilities.ev_to_s(array=_x_axis,
                                             source_to_detector_m=source_to_detector_m,
                                             offset_us=offset_us)
            if time_unit == 'us':
                x_axis_label = 'Time (us)'
                _x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis,
                                                   source_to_detector_m=source_to_detector_m,
                                                   offset_us=offset_us)
            if time_unit == 'ns':
                x_axis_label = 'Time (ns)'
                _x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis,
                                                   source_to_detector_m=source_to_detector_m,
                                                   offset_us=offset_us)
            print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}"
                  .format(x_axis_label, source_to_detector_m, offset_us))
        if x_axis == 'number':
            x_axis_label = 'Image number (#)'
            _x_axis = _utilities.ev_to_image_number(array=_x_axis,
                                                    source_to_detector_m=source_to_detector_m,
                                                    offset_us=offset_us,
                                                    time_resolution_us=time_resolution_us,
                                                    t_start_us=t_start_us)
            print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}"
                  .format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us))
        if x_axis_label is None:
            raise ValueError("x_axis_label does NOT exist, please check.")

        """Y-axis"""
        # determine to plot transmission or attenuation
        # determine to put transmission or attenuation words for y-axis
        # y_axis_tag is the key used to look values up in the stack dicts
        y_axis_tag = y_axis
        if y_axis == 'transmission':
            y_axis_label = 'Neutron Transmission'
        elif y_axis == 'attenuation':
            y_axis_label = 'Neutron Attenuation'
        elif y_axis == 'sigma':
            y_axis_tag = 'sigma_b'
            y_axis_label = 'Cross-section (barns)'
        elif y_axis == 'sigma_raw':
            y_axis_tag = 'sigma_b_raw'
            y_axis_label = 'Cross-section (barns)'
        else:
            y_axis_tag = 'mu_per_cm'
            y_axis_label = "Attenuation coefficient (cm\u207B\u00B9)"
        # Cross-sections only exist at element/isotope level, so force the
        # plotting flags to match; the prints tell the user about the override.
        if y_axis_tag[:5] == 'sigma':
            mixed = False
            all_layers = False
            all_isotopes = True
            print("'y_axis='sigma'' is selected. Auto force 'mixed=False', 'all_layers=False', 'all_isotopes=True'")
        if y_axis_tag[-3:] == 'raw':
            all_elements = False
            print("'y_axis='sigma_raw'' is selected. Auto force 'all_elements=False'")
        if y_axis_tag == 'mu_per_cm':
            mixed = False
            print("'y_axis='mu_per_cm'' is selected. Auto force 'mixed=False'")

        # Plotting begins
        if mixed:
            _y_axis = self.total_signal[y_axis_tag]
            ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="Total")
        if all_layers:
            for _compound in _stack.keys():
                _y_axis = _stack_signal[_compound][y_axis_tag]
                ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_compound)
        if all_elements:
            for _compound in _stack.keys():
                for _element in _stack[_compound]['elements']:
                    # signals live in _stack_signal; cross-sections in _stack_sigma
                    if y_axis_tag[:5] != 'sigma':
                        _y_axis = _stack_signal[_compound][_element][y_axis_tag]
                        ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
                                    label="{}/{}".format(_compound, _element))
                    else:
                        _y_axis = _stack_sigma[_compound][_element]['sigma_b']
                        ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
                                    label="{}/{}".format(_compound, _element))
        if all_isotopes:
            for _compound in _stack.keys():
                for _element in _stack[_compound]['elements']:
                    for _isotope in _stack[_compound][_element]['isotopes']['list']:
                        if y_axis_tag[:5] != 'sigma':
                            _y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag]
                            ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
                                        label="{}/{}/{}".format(_compound, _element, _isotope))
                        else:
                            _y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag]
                            ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
                                        label="{}/{}/{}".format(_compound, _element, _isotope))

        """Y-axis for specified items_to_plot"""
        if items_to_plot is not None:
            for _path_to_plot in items_to_plot:
                _path_to_plot = list(_path_to_plot)
                if y_axis_tag[:5] != 'sigma':
                    _live_path = _stack_signal
                else:
                    # sigma requires at least element level (raw: isotope level)
                    _len_of_path = len(_path_to_plot)
                    if y_axis_tag[-3:] == 'raw':
                        if _len_of_path < 3:
                            raise ValueError("'y_axis={}' is not supported for layer or element levels '{}'.".format(
                                y_axis_tag, _path_to_plot[-1]))
                    else:
                        if _len_of_path < 2:
                            raise ValueError("'y_axis={}' is not supported for layer level '{}'.".format(
                                y_axis_tag, _path_to_plot[-1]))
                    _live_path = _stack_sigma
                _label = "/".join(_path_to_plot)
                # walk down the nested dict following the requested path
                while _path_to_plot:
                    _item = _path_to_plot.pop(0)
                    _live_path = _live_path[_item]
                _y_axis = _live_path[y_axis_tag]
                ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_label)

        # transmission/attenuation are fractions, so clamp the view to [0, 1]
        if y_axis_tag[:5] != 'sigma' and y_axis_tag != 'mu_per_cm':
            ax_mpl.set_ylim(-0.01, 1.01)
        if logy is True:
            ax_mpl.set_yscale('log')
        if logx is True:
            ax_mpl.set_xscale('log')
        ax_mpl.set_xlabel(x_axis_label)
        ax_mpl.set_ylabel(y_axis_label)
        if not plotly:
            ax_mpl.legend(loc='best')
            # plt.tight_layout()
            return ax_mpl
        else:
            # convert the finished matplotlib figure to a plotly figure
            fig_mpl = ax_mpl.get_figure()
            plotly_fig = tls.mpl_to_plotly(fig_mpl)
            plotly_fig.layout.showlegend = True
            return plotly_fig
    def export(self, output_type='df', filename=None, x_axis='energy', y_axis='attenuation', mixed=True,
               all_layers=False, all_elements=False, all_isotopes=False, items_to_export=None,
               offset_us=0., source_to_detector_m=16.,
               t_start_us=1, time_resolution_us=0.16, time_unit='us'):
        """
        output x and y values to clipboard or .csv file
        output the transmission or attenuation or sigma of compound, element and/or isotopes specified
        'sigma_b' exported for each isotope is the product resulted from (sigma * isotopic ratio)
        'atoms_per_cm3' of each element is also exported in 'sigma' mode based on molar mass within stack.

        :param output_type: export type : ['df', 'csv', 'clip']
        :type output_type: str
        :param mixed: True -> display the total of each layer
                      False -> not displayed
        :type mixed: boolean
        :param filename: string. filename (with .csv suffix) you would like to save as
                         None -> export to clipboard
        :type filename: string
        :param x_axis: string. x type for export. Must in ['energy', 'lambda', 'time', 'number']
        :param y_axis: string. y type for export. Must in ['transmission', 'attenuation', 'sigma', 'sigma_raw', 'mu_per_cm']
        :param all_layers: boolean. True -> export all layers
                                    False -> not export
        :param all_elements: boolean. True -> export all elements signal
                                      False -> not export
        :param all_isotopes: boolean. True -> export all isotopes signal
                                      False -> not export
        :param items_to_export: array that describes what to export
            ex:
                [['CoAg','Ag','107-Ag'], ['CoAg']]
            if the dictionary is empty, everything is exported
        :param time_unit: string. Must be either 's' or 'us' or 'ns'
            Note: this will be used only when x_axis='time'
        :param offset_us: default: 0
            Note: only used when x_axis='number' or 'time'
        :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
        :param time_resolution_us: Note: this will be used only when x_axis='number'
        :param t_start_us: when is the first acquisition occurred. default: 1
            Note: this will be used only when x_axis='number'

        :return: simulated resonance signals or sigma in the form of 'clipboard' or '.csv file' or 'pd.DataFrame'
        """
        # Validate requested axis types and export target before doing any work.
        if x_axis not in _utilities.x_type_list:
            raise ValueError("Please specify the x-axis type using one from '{}'.".format(_utilities.x_type_list))
        if time_unit not in _utilities.time_unit_list:
            raise ValueError("Please specify the time unit using one from '{}'.".format(_utilities.time_unit_list))
        if y_axis not in _utilities.y_type_list:
            raise ValueError("Please specify the y-axis type using one from '{}'.".format(_utilities.y_type_list))
        if output_type not in _utilities.export_type_list:
            raise ValueError("Please specify export type using one from '{}'.".format(_utilities.export_type_list))

        # stack from self
        _stack_signal = self.stack_signal
        _stack = self.stack
        # x values start as the energy grid and are converted below if needed
        _x_axis = self.total_signal['energy_eV']
        x_axis_label = None
        # all exported columns are accumulated into this DataFrame
        df = pd.DataFrame()

        """X-axis"""
        # determine values and labels for x-axis with options from
        # 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)'
        if x_axis == 'energy':
            x_axis_label = 'Energy (eV)'
        if x_axis == 'lambda':
            x_axis_label = u"Wavelength (\u212B)"
            _x_axis = _utilities.ev_to_angstroms(array=_x_axis)
        if x_axis == 'time':
            # time-of-flight conversion depends on flight path and trigger offset
            if time_unit == 's':
                x_axis_label = 'Time (s)'
                _x_axis = _utilities.ev_to_s(array=_x_axis,
                                             source_to_detector_m=source_to_detector_m,
                                             offset_us=offset_us)
            if time_unit == 'us':
                x_axis_label = 'Time (us)'
                _x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis,
                                                   source_to_detector_m=source_to_detector_m,
                                                   offset_us=offset_us)
            if time_unit == 'ns':
                x_axis_label = 'Time (ns)'
                _x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis,
                                                   source_to_detector_m=source_to_detector_m,
                                                   offset_us=offset_us)
            print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}"
                  .format(x_axis_label, source_to_detector_m, offset_us))
        if x_axis == 'number':
            x_axis_label = 'Image number (#)'
            _x_axis = _utilities.ev_to_image_number(array=_x_axis,
                                                    source_to_detector_m=source_to_detector_m,
                                                    offset_us=offset_us,
                                                    time_resolution_us=time_resolution_us,
                                                    t_start_us=t_start_us)
            print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}"
                  .format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us))
        if x_axis_label is None:
            raise ValueError("x_axis_label does NOT exist, please check.")
        df[x_axis_label] = _x_axis

        """Y-axis"""
        if y_axis[:5] != 'sigma':
            # export transmission or attenuation or mu_per_cm
            y_axis_tag = y_axis
            if y_axis_tag == 'mu_per_cm':
                # mu_per_cm has no meaningful "total", so the mixed column is dropped
                mixed = False
                print("'y_axis='mu_per_cm'' is selected. Auto force 'mixed=False'")
            if mixed:
                _y_axis = self.total_signal[y_axis_tag]
                df['Total_' + y_axis_tag] = _y_axis
            if items_to_export is None:
                # export based on specified level : layer|element|isotope
                if all_layers:
                    for _compound in _stack.keys():
                        _y_axis = _stack_signal[_compound][y_axis_tag]
                        df[_compound] = _y_axis
                if all_elements:
                    for _compound in _stack.keys():
                        for _element in _stack[_compound]['elements']:
                            _y_axis = _stack_signal[_compound][_element][y_axis_tag]
                            df[_compound + '/' + _element] = _y_axis
                if all_isotopes:
                    for _compound in _stack.keys():
                        for _element in _stack[_compound]['elements']:
                            for _isotope in _stack[_compound][_element]['isotopes']['list']:
                                _y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag]
                                df[_compound + '/' + _element + '/' + _isotope] = _y_axis
            else:
                # export specified transmission or attenuation
                for _path_to_export in items_to_export:
                    _path_to_export = list(_path_to_export)
                    _live_path = _stack_signal
                    _label = "/".join(_path_to_export)
                    # walk down the nested dict following the requested path
                    while _path_to_export:
                        _item = _path_to_export.pop(0)
                        _live_path = _live_path[_item]
                    _y_axis = _live_path[y_axis_tag]
                    df[_label] = _y_axis
        else:
            # export sigma
            if y_axis == 'sigma':
                y_axis_tag = 'sigma_b'
            else:
                y_axis_tag = 'sigma_b_raw'
            # y_axis_tag = 'sigma_b_raw'
            _stack_sigma = self.stack_sigma
            if items_to_export is None:
                for _compound in _stack.keys():
                    for _element in _stack[_compound]['elements']:
                        _y_axis = _stack_sigma[_compound][_element]['sigma_b']  # No 'sigma_b_raw' at this level
                        # atoms_per_cm3 accompanies sigma so users can compute mu
                        df[_compound + '/' + _element + '/atoms_per_cm3'] = _stack[_compound][_element]['atoms_per_cm3']
                        df[_compound + '/' + _element] = _y_axis
                        if all_isotopes:
                            for _isotope in _stack[_compound][_element]['isotopes']['list']:
                                _y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag]
                                df[_compound + '/' + _element + '/' + _isotope] = _y_axis
            else:
                # export specified sigma
                for _path_to_export in items_to_export:
                    # sigma requires at least element level (raw: isotope level)
                    if y_axis_tag[-3:] == 'raw':
                        if len(_path_to_export) < 3:
                            raise ValueError(
                                "Getting raw sigma of '{}' at layer or element level is not supported. "
                                "If it is a single element layer, please follow "
                                "['layer', 'element', 'isotope'] format.".format(_path_to_export[0]))
                    else:
                        if len(_path_to_export) < 2:
                            raise ValueError(
                                "Getting weighted sigma of '{}' at layer level is not supported. "
                                "If it is a single element layer, please follow "
                                "['layer', 'element'] format.".format(_path_to_export[0]))
                    _path_to_export = list(_path_to_export)
                    _live_path = _stack_sigma
                    _label = "/".join(_path_to_export)
                    while _path_to_export:
                        _item = _path_to_export.pop(0)
                        _live_path = _live_path[_item]
                    _y_axis = _live_path[y_axis_tag]
                    df[_label] = _y_axis
        # the x-axis column alone means nothing was selected for export
        if len(df.columns) <= 1:
            raise ValueError("No y values have been selected to export!")
        if output_type == 'csv':
            if filename is None:
                filename = 'data.csv'
            if '.csv' not in filename:
                filename += '.csv'
            df.to_csv(filename, index=False)
            print("Exporting to file ('./{}') completed.".format(filename))
        elif output_type == 'clip':
            df.to_clipboard(excel=True, index=False)
            print('Exporting to clipboard completed.')
        else:  # output_type == 'df'
            return df
| [
"ImagingReso._utilities.calculate_transmission",
"ImagingReso._utilities.get_compound_density",
"ImagingReso._utilities.formula_to_dictionary",
"ImagingReso._utilities.is_element_in_database",
"json.dumps",
"plotly.tools.mpl_to_plotly",
"ImagingReso._utilities.ev_to_s",
"ImagingReso._utilities.ev_to_i... | [((3806, 3838), 'json.dumps', 'json.dumps', (['self.stack'], {'indent': '(4)'}), '(self.stack, indent=4)\n', (3816, 3838), False, 'import json\n'), ((3985, 4017), 'json.dumps', 'json.dumps', (['self.stack'], {'indent': '(4)'}), '(self.stack, indent=4)\n', (3995, 4017), False, 'import json\n'), ((4532, 4647), 'ImagingReso._utilities.formula_to_dictionary', '_utilities.formula_to_dictionary', ([], {'formula': 'formula', 'thickness': 'thickness', 'density': 'density', 'database': 'self.database'}), '(formula=formula, thickness=thickness,\n density=density, database=self.database)\n', (4564, 4647), False, 'from ImagingReso import _utilities\n'), ((8896, 8953), 'ImagingReso._utilities.check_iso_ratios', '_utilities.check_iso_ratios', ([], {'ratios': 'list_ratio', 'tol': '(0.005)'}), '(ratios=list_ratio, tol=0.005)\n', (8923, 8953), False, 'from ImagingReso import _utilities\n'), ((22098, 22155), 'os.path.join', 'os.path.join', (['_file_path', '"""reference_data"""', 'self.database'], {}), "(_file_path, 'reference_data', self.database)\n", (22110, 22155), False, 'import os\n'), ((40431, 40445), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (40443, 40445), True, 'import pandas as pd\n'), ((3254, 3316), 'ImagingReso._utilities.checking_stack', '_utilities.checking_stack', ([], {'stack': 'stack', 'database': 'self.database'}), '(stack=stack, database=self.database)\n', (3279, 3316), False, 'from ImagingReso import _utilities\n'), ((12291, 12309), 'numpy.isnan', 'np.isnan', (['_density'], {}), '(_density)\n', (12299, 12309), True, 'import numpy as np\n'), ((13009, 13175), 'ImagingReso._utilities.set_distance_units', '_utilities.set_distance_units', ([], {'value': "stack[_name_of_compound]['thickness']['value']", 'from_units': "stack[_name_of_compound]['thickness']['units']", 'to_units': '"""cm"""'}), "(value=stack[_name_of_compound]['thickness'][\n 'value'], from_units=stack[_name_of_compound]['thickness']['units'],\n 
to_units='cm')\n", (13038, 13175), False, 'from ImagingReso import _utilities\n'), ((16572, 16649), 'ImagingReso._utilities.get_atoms_per_cm3_of_layer', '_utilities.get_atoms_per_cm3_of_layer', ([], {'compound_dict': 'stack[_name_of_compound]'}), '(compound_dict=stack[_name_of_compound])\n', (16609, 16649), False, 'from ImagingReso import _utilities\n'), ((22044, 22069), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (22059, 22069), False, 'import os\n'), ((29548, 29562), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (29560, 29562), True, 'import matplotlib.pyplot as plt\n'), ((29897, 29938), 'ImagingReso._utilities.ev_to_angstroms', '_utilities.ev_to_angstroms', ([], {'array': '_x_axis'}), '(array=_x_axis)\n', (29923, 29938), False, 'from ImagingReso import _utilities\n'), ((31146, 31326), 'ImagingReso._utilities.ev_to_image_number', '_utilities.ev_to_image_number', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us', 'time_resolution_us': 'time_resolution_us', 't_start_us': 't_start_us'}), '(array=_x_axis, source_to_detector_m=\n source_to_detector_m, offset_us=offset_us, time_resolution_us=\n time_resolution_us, t_start_us=t_start_us)\n', (31175, 31326), False, 'from ImagingReso import _utilities\n'), ((36860, 36886), 'plotly.tools.mpl_to_plotly', 'tls.mpl_to_plotly', (['fig_mpl'], {}), '(fig_mpl)\n', (36877, 36886), True, 'import plotly.tools as tls\n'), ((40780, 40821), 'ImagingReso._utilities.ev_to_angstroms', '_utilities.ev_to_angstroms', ([], {'array': '_x_axis'}), '(array=_x_axis)\n', (40806, 40821), False, 'from ImagingReso import _utilities\n'), ((42029, 42209), 'ImagingReso._utilities.ev_to_image_number', '_utilities.ev_to_image_number', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us', 'time_resolution_us': 'time_resolution_us', 't_start_us': 't_start_us'}), '(array=_x_axis, source_to_detector_m=\n 
source_to_detector_m, offset_us=offset_us, time_resolution_us=\n time_resolution_us, t_start_us=t_start_us)\n', (42058, 42209), False, 'from ImagingReso import _utilities\n'), ((14638, 14754), 'ImagingReso._utilities.calculate_transmission', '_utilities.calculate_transmission', ([], {'thickness_cm': '_thickness_cm', 'atoms_per_cm3': '_atoms_per_cm3', 'sigma_b': '_sigma_ele'}), '(thickness_cm=_thickness_cm, atoms_per_cm3\n =_atoms_per_cm3, sigma_b=_sigma_ele)\n', (14671, 14754), False, 'from ImagingReso import _utilities\n'), ((18294, 18364), 'ImagingReso._utilities.get_isotope_dicts', '_utilities.get_isotope_dicts', ([], {'element': '_element', 'database': 'self.database'}), '(element=_element, database=self.database)\n', (18322, 18364), False, 'from ImagingReso import _utilities\n'), ((19048, 19136), 'ImagingReso._utilities.get_compound_density', '_utilities.get_compound_density', ([], {'list_density': '_list_density', 'list_ratio': '_list_ratio'}), '(list_density=_list_density, list_ratio=\n _list_ratio)\n', (19079, 19136), False, 'from ImagingReso import _utilities\n'), ((20001, 20017), 'numpy.float', 'np.float', (['_ratio'], {}), '(_ratio)\n', (20009, 20017), True, 'import numpy as np\n'), ((20020, 20038), 'numpy.float', 'np.float', (['_density'], {}), '(_density)\n', (20028, 20038), True, 'import numpy as np\n'), ((21669, 21685), 'numpy.float', 'np.float', (['_ratio'], {}), '(_ratio)\n', (21677, 21685), True, 'import numpy as np\n'), ((21688, 21703), 'numpy.float', 'np.float', (['_mass'], {}), '(_mass)\n', (21696, 21703), True, 'import numpy as np\n'), ((30069, 30170), 'ImagingReso._utilities.ev_to_s', '_utilities.ev_to_s', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us'}), '(array=_x_axis, source_to_detector_m=source_to_detector_m,\n offset_us=offset_us)\n', (30087, 30170), False, 'from ImagingReso import _utilities\n'), ((40952, 41053), 'ImagingReso._utilities.ev_to_s', '_utilities.ev_to_s', ([], {'array': 
'_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us'}), '(array=_x_axis, source_to_detector_m=source_to_detector_m,\n offset_us=offset_us)\n', (40970, 41053), False, 'from ImagingReso import _utilities\n'), ((13840, 13956), 'ImagingReso._utilities.calculate_transmission', '_utilities.calculate_transmission', ([], {'thickness_cm': '_thickness_cm', 'atoms_per_cm3': '_atoms_per_cm3', 'sigma_b': '_sigma_iso'}), '(thickness_cm=_thickness_cm, atoms_per_cm3\n =_atoms_per_cm3, sigma_b=_sigma_iso)\n', (13873, 13956), False, 'from ImagingReso import _utilities\n'), ((24683, 24809), 'ImagingReso._utilities.get_sigma', '_utilities.get_sigma', ([], {'database_file_name': 'sigma_file', 'e_min': 'self.energy_min', 'e_max': 'self.energy_max', 'e_step': 'self.energy_step'}), '(database_file_name=sigma_file, e_min=self.energy_min,\n e_max=self.energy_max, e_step=self.energy_step)\n', (24703, 24809), False, 'from ImagingReso import _utilities\n'), ((30366, 30467), 'ImagingReso._utilities.ev_to_s', '_utilities.ev_to_s', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us'}), '(array=_x_axis, source_to_detector_m=source_to_detector_m,\n offset_us=offset_us)\n', (30384, 30467), False, 'from ImagingReso import _utilities\n'), ((30675, 30776), 'ImagingReso._utilities.ev_to_s', '_utilities.ev_to_s', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us'}), '(array=_x_axis, source_to_detector_m=source_to_detector_m,\n offset_us=offset_us)\n', (30693, 30776), False, 'from ImagingReso import _utilities\n'), ((41249, 41350), 'ImagingReso._utilities.ev_to_s', '_utilities.ev_to_s', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us'}), '(array=_x_axis, source_to_detector_m=source_to_detector_m,\n offset_us=offset_us)\n', (41267, 41350), False, 'from ImagingReso import _utilities\n'), ((41558, 41659), 
'ImagingReso._utilities.ev_to_s', '_utilities.ev_to_s', ([], {'array': '_x_axis', 'source_to_detector_m': 'source_to_detector_m', 'offset_us': 'offset_us'}), '(array=_x_axis, source_to_detector_m=source_to_detector_m,\n offset_us=offset_us)\n', (41576, 41659), False, 'from ImagingReso import _utilities\n'), ((24617, 24654), 'os.path.join', 'os.path.join', (['_database_folder', '_file'], {}), '(_database_folder, _file)\n', (24629, 24654), False, 'import os\n'), ((23319, 23386), 'ImagingReso._utilities.is_element_in_database', '_utilities.is_element_in_database', ([], {'element': '"""H"""', 'database': '"""Bonded_H"""'}), "(element='H', database='Bonded_H')\n", (23352, 23386), False, 'from ImagingReso import _utilities\n'), ((23436, 23490), 'os.path.join', 'os.path.join', (['_file_path', '"""reference_data"""', '"""Bonded_H"""'], {}), "(_file_path, 'reference_data', 'Bonded_H')\n", (23448, 23490), False, 'import os\n'), ((24516, 24553), 'os.path.join', 'os.path.join', (['_database_folder', '_file'], {}), '(_database_folder, _file)\n', (24528, 24553), False, 'import os\n')] |
import unittest
import numpy as np
import torch
from autoagent.models.rl.net import create_dense_net
from autoagent.models.rl.policy import CategoricalPolicy, GaussianPolicy, SquashedGaussianPolicy
class TestPolicy(unittest.TestCase):
    """Checks that, for each policy type, the log-probability returned while
    sampling an action matches the one recomputed afterwards via policy.log_p.

    Improvement over the original: the identical sample-and-compare loop that
    was copy-pasted into all three test methods is factored into a single
    private helper, so tolerance and trial count live in one place.
    """

    def _assert_log_p_consistent(self, policy, s_size, decimal=7):
        # Sample 5 random states; for each, draw an action together with its
        # log-probability and verify policy.log_p(s, a) reproduces that value.
        # decimal=7 is numpy's assert_almost_equal default (as the original used).
        for _ in range(5):
            s = torch.randn(size=(1, s_size))
            a, log_p, _ = policy(s, get_log_p=True)
            np.testing.assert_almost_equal(
                log_p.detach().numpy(),
                policy.log_p(s, a)[0].detach().numpy(),
                decimal=decimal
            )

    def test_categorical_policy(self):
        """Categorical policy over several random (state, action) sizes."""
        state_sizes = [np.random.randint(1, 20) for _ in range(5)]
        action_sizes = [np.random.randint(1, 10) for _ in range(5)]
        for s_size, a_size in zip(state_sizes, action_sizes):
            d = create_dense_net(
                input_size=s_size,
                output_size=a_size
            )
            self._assert_log_p_consistent(CategoricalPolicy(d), s_size)

    def test_gaussian_policy(self):
        """Diagonal Gaussian policy over several random sizes."""
        state_sizes = [np.random.randint(1, 20) for _ in range(5)]
        action_sizes = [np.random.randint(1, 10) for _ in range(5)]
        for s_size, a_size in zip(state_sizes, action_sizes):
            d = create_dense_net(
                input_size=s_size,
                output_size=a_size
            )
            self._assert_log_p_consistent(GaussianPolicy(d, a_size), s_size)

    def test_squashed_gaussian_policy(self):
        """Tanh-squashed Gaussian policy. The net emits mean and log-std,
        hence output_size=a_size*2; the squashing correction is less exact,
        so compare with the looser decimal=2 tolerance (as in the original)."""
        state_sizes = [np.random.randint(1, 20) for _ in range(5)]
        action_sizes = [np.random.randint(1, 10) for _ in range(5)]
        for s_size, a_size in zip(state_sizes, action_sizes):
            d = create_dense_net(
                input_size=s_size,
                output_size=a_size * 2
            )
            policy = SquashedGaussianPolicy(d, action_limit=np.random.randint(1, 10, a_size))
            self._assert_log_p_consistent(policy, s_size, decimal=2)
"autoagent.models.rl.net.create_dense_net",
"autoagent.models.rl.policy.GaussianPolicy",
"numpy.random.randint",
"autoagent.models.rl.policy.CategoricalPolicy",
"torch.randn"
] | [((301, 325), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (318, 325), True, 'import numpy as np\n'), ((369, 393), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (386, 393), True, 'import numpy as np\n'), ((492, 547), 'autoagent.models.rl.net.create_dense_net', 'create_dense_net', ([], {'input_size': 's_size', 'output_size': 'a_size'}), '(input_size=s_size, output_size=a_size)\n', (508, 547), False, 'from autoagent.models.rl.net import create_dense_net\n'), ((616, 636), 'autoagent.models.rl.policy.CategoricalPolicy', 'CategoricalPolicy', (['d'], {}), '(d)\n', (633, 636), False, 'from autoagent.models.rl.policy import CategoricalPolicy, GaussianPolicy, SquashedGaussianPolicy\n'), ((1002, 1026), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (1019, 1026), True, 'import numpy as np\n'), ((1070, 1094), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1087, 1094), True, 'import numpy as np\n'), ((1193, 1248), 'autoagent.models.rl.net.create_dense_net', 'create_dense_net', ([], {'input_size': 's_size', 'output_size': 'a_size'}), '(input_size=s_size, output_size=a_size)\n', (1209, 1248), False, 'from autoagent.models.rl.net import create_dense_net\n'), ((1317, 1342), 'autoagent.models.rl.policy.GaussianPolicy', 'GaussianPolicy', (['d', 'a_size'], {}), '(d, a_size)\n', (1331, 1342), False, 'from autoagent.models.rl.policy import CategoricalPolicy, GaussianPolicy, SquashedGaussianPolicy\n'), ((1718, 1742), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (1735, 1742), True, 'import numpy as np\n'), ((1786, 1810), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1803, 1810), True, 'import numpy as np\n'), ((1909, 1968), 'autoagent.models.rl.net.create_dense_net', 'create_dense_net', ([], {'input_size': 's_size', 'output_size': '(a_size * 2)'}), '(input_size=s_size, 
output_size=a_size * 2)\n', (1925, 1968), False, 'from autoagent.models.rl.net import create_dense_net\n'), ((688, 717), 'torch.randn', 'torch.randn', ([], {'size': '(1, s_size)'}), '(size=(1, s_size))\n', (699, 717), False, 'import torch\n'), ((1394, 1423), 'torch.randn', 'torch.randn', ([], {'size': '(1, s_size)'}), '(size=(1, s_size))\n', (1405, 1423), False, 'import torch\n'), ((2159, 2188), 'torch.randn', 'torch.randn', ([], {'size': '(1, s_size)'}), '(size=(1, s_size))\n', (2170, 2188), False, 'import torch\n'), ((2074, 2106), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', 'a_size'], {}), '(1, 10, a_size)\n', (2091, 2106), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
October 25, 2018
Author: <NAME>
TOF_Analyzer_DRS4.py
This program takes data from a DRS4 data file and computes:
Per Channel:
Pulse height distributions
Rise/Fall Times based on polarity
A Combination of possible Time of Flights to establish best two detectors.
Future work:
- Multiple peaks per events
- more configuration settings?
"""
import sys
import subprocess
import timeFinder as tf
# Interpreter/bootstrap guard: require Python >= 3.7, then make sure pip exists.
# BUG FIX: the original condition `sys.version_info[0] < 3 and
# sys.version_info[1] < 7` never fired on Python 3.0-3.6 (first clause is
# False for every Python 3); tuple comparison enforces >= 3.7 as the error
# message promises. The stray debug `print(sys.version_info[1])` is removed.
if sys.version_info < (3, 7):
    raise Exception("Must be using Python 3.7!!!!")
try:
    import pip
except ImportError:
    print("Pip not present on system! Installing Pip...")
    try:
        # BUG FIX: passing an argument list together with shell=True only
        # executes the first element on POSIX; run the vector directly.
        subprocess.call([sys.executable, '-m', 'ensurepip', '--default-pip'])
        subprocess.call([sys.executable, "easy_install", "python3-pip"])
    except Exception:
        print('Could not install pip automatically, please install pip manually by typing "easy_install pip" into your terminal application')
def install_and_import(package):
    """Import *package*, pip-installing it first if it is missing.

    The imported module is published in this module's globals under the
    package name, so later code can use it as if it had been imported at
    the top of the file.

    :param package: importable module/distribution name
    :type package: str
    :raises ImportError: if the package cannot be imported even after the
        pip install attempt (raised by the final import in ``finally``).
    """
    import importlib
    try:
        importlib.import_module(package)
    except ImportError:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and masked unrelated errors.
        subprocess.call([sys.executable, "-m", "pip", "install", package])
    finally:
        # Re-import (now expected to be installed) and expose module-wide.
        globals()[package] = importlib.import_module(package)
install_and_import('matplotlib')
install_and_import('numpy')
install_and_import('scipy')
install_and_import('uncertainties')
install_and_import('pandas')
install_and_import('lmfit')
install_and_import('tqdm')
from itertools import combinations
from tqdm import tqdm
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import struct
import array
import pandas as pd
from math import *
#from scipy.stats import poisson
try:
import Tkinter as tk # this is for python2
except:
import tkinter as tk # this is for python3
#from tkinter.filedialog import askopenfilename,askopenfilenames
from tkfilebrowser import askopenfilenames
import os
import scipy.signal as scisig
from drs4 import DRS4BinaryFile
from scipy import stats
from time import sleep
from matplotlib.ticker import EngFormatter
from scipy.optimize import curve_fit,least_squares
from scipy.misc import factorial
from scipy.optimize import minimize
from lmfit.models import GaussianModel,Model
# Print iterations progress
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def poisson(p, k):
    """Scaled Poisson pmf evaluated at count(s) ``k``.

    :param p: (lamb, amp) -- Poisson mean and overall amplitude
    :param k: count value(s), scalar or array
    :return: amp * lamb**k / k! * exp(-lamb)
    """
    lamb, amp = p
    return amp * np.exp(-lamb) * lamb ** k / factorial(k)
def PEModel(x, A, sigma, gain, mu, n):
    """Photo-electron peak model: Poisson-weighted Gaussian centered at x = n*gain.

    :param x: position(s) at which to evaluate (scalar or ndarray)
    :param A: overall amplitude
    :param sigma: Gaussian width of the n-PE peak
    :param gain: single photo-electron gain (peak spacing)
    :param mu: mean number of photo-electrons (Poisson parameter)
    :param n: peak index (number of photo-electrons)
    :return: model value(s), same shape as x

    BUG FIXES vs. original:
    - the Gaussian exponent was missing its minus sign, so the "peak" grew
      without bound away from the center;
    - the expression was wrapped in a one-element Python list, which raises
      TypeError when multiplied by the float ``A*prob``;
    - a leftover debug ``print`` of all parameters is removed.
    """
    prob = np.exp(-mu) * mu ** n / factorial(n)  # Poisson weight P(n; mu)
    # NOTE(review): the 1/sqrt(2*pi*sigma) prefactor is kept from the
    # original; a normalized Gaussian would use 1/(sigma*sqrt(2*pi)) --
    # confirm which normalization the fit expects.
    return A * prob / np.sqrt(2 * np.pi * sigma) * np.exp(-(x - n * gain) ** 2 / (2 * sigma ** 2))
def poissonMinimizer(p,k,Y):
    """Log-squared residual between a scaled Poisson pmf and observed data.

    p -- [lambda, amplitude] trial parameters
    k -- count value(s) at which the model is evaluated
    Y -- observed value(s) to fit against
    Intended as the cost function for scipy.optimize.least_squares.
    """
    mean, scale = p[0], p[1]
    residual = scale * mean ** k / factorial(k) * np.exp(-mean) - Y
    return np.log(residual ** 2)
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '*'):
    """Render a single-line terminal progress bar updated via carriage return.

    iteration -- current iteration (Int)
    total     -- total iterations (Int)
    prefix    -- text placed before the bar (Str)
    suffix    -- text placed after the percentage (Str)
    decimals  -- decimal places shown in the percent value (Int)
    length    -- bar width in characters (Int)
    fill      -- character used for the completed portion (Str)
    """
    fraction = iteration / float(total)
    percent = "{0:.{1}f}".format(100 * fraction, decimals)
    filled = int(length * iteration // total)
    bar = fill * filled + '-' * (length - filled)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
    # Emit a real newline once complete so subsequent output starts clean.
    if iteration == total:
        print()
def weighted_avg_and_std(values, weights):
    """Return [mean, std] of the finite entries of *values*, each rounded
    to three decimal places.

    NOTE(review): despite the name and signature, *weights* is never used —
    the statistics are unweighted.  Behaviour kept as-is because callers
    depend on the current numbers.
    """
    data = np.array(values)
    finite = data[np.isfinite(data)]
    mean = np.nanmean(finite)
    spread = np.nanstd(finite)
    return [np.round(mean, 3), np.round(spread, 3)]
def Interpolator(X, Y, TimeleftIndex, TimeRightIndex,YValue):
    """Linearly interpolate the X value at which Y crosses *YValue* between
    two sample indices.

    X, Y           -- abscissa and ordinate sample arrays
    TimeleftIndex  -- index of the left-hand sample
    TimeRightIndex -- index of the right-hand sample
    YValue         -- target ordinate to solve for

    Returns the interpolated X, or 0 when the segment is flat (zero slope),
    matching the original sentinel behaviour.
    """
    x_lo, x_hi = X[TimeleftIndex], X[TimeRightIndex]
    y_lo, y_hi = Y[TimeleftIndex], Y[TimeRightIndex]
    gradient = (y_hi - y_lo) / (x_hi - x_lo)
    if gradient == 0:
        return 0
    return (YValue - y_lo) / gradient + x_lo
def filterData(Y):
return scisig.savgol_filter(x=Y, window_length=51, polyorder=11)
def gauss(x, *p):
    """Unnormalised Gaussian A*exp(-(x-mu)^2 / (2*sigma^2)).

    p -- (A, mu, sigma): amplitude, centre, width.
    """
    amplitude, centre, width = p
    return amplitude * np.exp(-(x - centre) ** 2 / (2. * width ** 2))
def gaussMinimizer(p,x,Y):
    """Log of (squared residual + 1) between a Gaussian model and data Y.

    p -- [A, mu, sigma] trial parameters
    x -- abscissa value(s)
    Y -- observed value(s)
    The +1 keeps the log finite at a perfect fit; used with least_squares.
    """
    amplitude, centre, width = p[0], p[1], p[2]
    model = amplitude * np.exp(-(x - centre) ** 2 / (2. * width ** 2))
    return np.log((model - Y) ** 2 + 1)
def hifinderScipy(p):
    """Locate pulse hits in a single 1024-sample waveform with
    scipy.signal.find_peaks.

    p -- filtered waveform (array; presumably volts — see the ADC
         conversion in the main loop).  The first 50 samples are treated
         as a signal-free pedestal region.

    Returns [startIndices, peakAmplitudes, peakIndices, endIndices,
    baseline, noiserms]; the index lists contain plain ints.

    Side effects: every ``SubDivider``-th event the module-global ``Data``
    waveform is appended to ``PersistanceData``/``PersistanceTime``
    (``eventNumber``, ``SubDivider``, ``PersistanceData``,
    ``PersistanceTime`` and ``Data`` are all set by the main loop).
    """
    #p = filterData(Y)
    NoiseSigma = 7   # hit threshold in units of the pedestal RMS
    baseline = np.mean(p[:50])
    noiserms = np.std(p[:50])
    hitStartIndexList = []
    hitEndIndexList = []
    hitPeakAmplitude = []
    hitPeakIndexArray = []
    # Decide pulse polarity from the dominant excursion in the central window.
    Max = abs(max(p[100:900]))
    Min = abs(min(p[100:900]))
    #print(baseline - (baseline-NoiseSigma*noiserms))
    # NOTE: the bare name ``scipy`` is bound at module level by
    # install_and_import('scipy').  The height expression algebraically
    # simplifies to NoiseSigma*noiserms.
    if Max > Min and max(p[100:900]) > baseline + NoiseSigma*noiserms:
        peaks, properties = scipy.signal.find_peaks(p, prominence=.005, width=6,height = baseline - (baseline-NoiseSigma*noiserms))
    elif Min > Max and min(p[100:900]) < baseline - NoiseSigma*noiserms:
        # Negative-going pulses: search the inverted trace.
        peaks, properties = scipy.signal.find_peaks(-p, prominence=.005, width=6,height = baseline - (baseline-NoiseSigma*noiserms))
    else:
        peaks, properties = [],{'widths':[]}
    for (peak,width) in zip(peaks,properties['widths']):
        hitAmplitude = p[peak]
        #ThresholdADC = baseline - (.3 * (baseline - hitAmplitude))
        hitEndIndex = peak + int(width)
        hitStartIndex = peak - int(width)
        # Accept only well-contained hits away from the record edges.
        if abs(hitAmplitude) < 500 and hitStartIndex != 0 and hitEndIndex !=0 and peak !=0 and hitEndIndex < 1023 and peak < hitEndIndex and peak > hitStartIndex and peak - int(width) > 100 and peak + int(width) < 900:
            if eventNumber % SubDivider == 0:
                PersistanceData.append(Data)
                PersistanceTime.append(np.arange(0,1024)*.2)
            hitStartIndexList = np.append(hitStartIndexList, hitStartIndex)
            hitEndIndexList = np.append(hitEndIndexList,hitEndIndex)
            hitPeakAmplitude = np.append(hitPeakAmplitude, hitAmplitude)
            hitPeakIndexArray = np.append(hitPeakIndexArray, peak)
    return [[int(x) for x in hitStartIndexList], hitPeakAmplitude, [int(x) for x in hitPeakIndexArray],[int(x) for x in hitEndIndexList], baseline, noiserms]
def hitfinder(Y):
    """Threshold-based hit finder for a single waveform (legacy alternative
    to hifinderScipy; currently commented out at the call site).

    Y -- waveform samples; the first 50 samples define baseline and RMS.

    Pipeline: build a boolean over/under-threshold mask, suppress short
    hits (< durationTheshold samples), bridge short gaps between adjacent
    hits (< adjDurationThreshold samples), then walk each hit to find its
    peak, 30%-of-peak start index, and end index.

    Returns [startIndices, peakAmplitudes, peakIndices, endIndices,
    baseline, noiserms]; if multiple hits of both polarities survive, only
    the dominant polarity is kept.

    Side effects: may append the module-global ``Data`` to
    ``PersistanceData``/``PersistanceTime`` (note ``eventNumber`` is reset
    to 0 locally here, so the SubDivider gate always fires).
    """
    #noiserms = np.std(Y[:50])**2
    p = Y# scisig.savgol_filter(x=Y, window_length=25, polyorder=5)
    NoiseSigma = 3
    baseline = np.mean(Y[:50])
    noiserms = np.std(Y[:50])
    durationTheshold=5
    adjDurationThreshold=5
    #plt.plot(Y)
    #plt.show()
    #print(baseline,NoiseSigma,abs(baseline) + NoiseSigma * noiserms)
    p_diff = np.diff(p)
    #1mV per tick = .001
    #2mV per tick = .002
    #etc...
    # Polarity choice: compare against the appropriate side of the baseline.
    if abs(min(Y)) > abs(max(Y)):
        hitLogic = np.array([(True if pi < baseline - NoiseSigma * noiserms else False) for pi in Y])
    else:
        hitLogic = np.array([(True if pi > baseline + NoiseSigma * noiserms else False) for pi in Y])
    # Pass 1: clear hits shorter than durationTheshold samples.
    for i in range(1, np.size(hitLogic)):
        if ((not hitLogic[i - 1]) and hitLogic[i]) and hitLogic[i]:
            countDuration = 0
            for j in range(i, np.size(hitLogic) - 1):
                if hitLogic[j]:
                    countDuration = countDuration + 1
                if not hitLogic[j + 1]:
                    break
            if countDuration < durationTheshold:
                for j in range(i, i + countDuration):
                    hitLogic[j] = False
    # Pass 2: bridge gaps shorter than adjDurationThreshold samples.
    for i in range(1, np.size(hitLogic)):
        if (hitLogic[i - 1] and (not hitLogic[i])) and (not hitLogic[i]):
            countDuration = 0
            for j in range(i, np.size(hitLogic) - 1):
                if (not hitLogic[j]):
                    countDuration = countDuration + 1
                if hitLogic[j + 1]:
                    break
            if countDuration < adjDurationThreshold:
                for j in range(i, i + countDuration):
                    hitLogic[j] = True
    hitStartIndexList = []
    hitEndIndexList = []
    hitPeakAmplitude = []
    hitPeakIndexArray = []
    hitEndIndex = 0
    hitStartIndex = 0
    hitPeakIndex = 0
    eventNumber = 0
    global SubDivider
    global PersistanceData
    global PersistanceTime
    # Pass 3: characterise each surviving hit.
    for i in range(1, np.size(hitLogic)):
        if ((not hitLogic[i - 1]) and hitLogic[i]) and hitLogic[i]:
            hitAmplitude = 0
            hitPeakIndex = i
            for j in range(i, np.size(hitLogic) - 1):
                if abs(p[j]) > abs(hitAmplitude):
                    hitAmplitude = p[j]
                    hitPeakIndex = j
                if not hitLogic[j + 1]:
                    break
            # Start index: first sample crossing 30% of the peak, walking back.
            ThresholdADC = baseline - (.3 * (baseline - hitAmplitude))
            hitStartIndex = i
            for j in range(hitPeakIndex, 0, -1):
                if (abs(p[j]) >= abs(ThresholdADC) and abs(p[j - 1]) < abs(ThresholdADC)):
                    hitStartIndex = int(j)
                    break
            for j in range(hitPeakIndex, np.size(hitLogic) - 1, 1):
                if not hitLogic[j]:
                    hitEndIndex = int(j)
                    break
            #print(hitStartIndex,hitEndIndex,hitPeakIndex,hitAmplitude)
            #print(bool(hitStartIndex-hitEndIndex > 3))
            if abs(hitEndIndex-hitStartIndex) > 10 and abs(hitAmplitude) < .5 and hitStartIndex != 0 and hitEndIndex !=0 and hitPeakIndex !=0 and hitEndIndex < 1023 and hitPeakIndex < hitEndIndex and hitPeakIndex > hitStartIndex:
                if eventNumber % SubDivider == 0:
                    PersistanceData.append(Data)
                    PersistanceTime.append(np.arange(0,1024)*.2)
                hitStartIndexList = np.append(hitStartIndexList, hitStartIndex)
                hitEndIndexList = np.append(hitEndIndexList,hitEndIndex)
                hitPeakAmplitude = np.append(hitPeakAmplitude, hitAmplitude)
                hitPeakIndexArray = np.append(hitPeakIndexArray, hitPeakIndex)
                # NOTE(review): rebinding the for-loop variable does NOT skip
                # ahead in Python; the next iteration overwrites i.
                i = hitEndIndex
    #print([hitStartIndexList, hitPeakAmplitude, hitPeakIndexArray, baseline, NoiseSigma])
    # Keep only the dominant-polarity hits when both polarities are present.
    if len(hitPeakAmplitude) > 1:
        minpeak = min(hitPeakAmplitude)
        maxpeak = max(hitPeakAmplitude)
        if abs(maxpeak) > abs(minpeak):
            Indexes = np.nonzero(hitPeakAmplitude > 0)
        else:
            Indexes = np.nonzero(hitPeakAmplitude < 0)
        hitStartIndexList = hitStartIndexList[Indexes]
        hitEndIndexList = hitEndIndexList[Indexes]
        hitPeakAmplitude = hitPeakAmplitude[Indexes]
        hitPeakIndexArray = hitPeakIndexArray[Indexes]
    return [[int(x) for x in hitStartIndexList], hitPeakAmplitude, [int(x) for x in hitPeakIndexArray],[int(x) for x in hitEndIndexList], baseline, noiserms]
def RisetimeFinder(X, Y,startIndex,peakIndex,baseline):
    """Measure the 30%-to-70% rise time of one hit by interpolating the
    threshold crossings between startIndex and peakIndex.

    X, Y       -- time axis and waveform samples
    startIndex -- hit start sample (from the hit finder)
    peakIndex  -- hit peak sample
    baseline   -- pedestal level of the waveform

    Returns a dict with keys 'risetime' (ns difference between the 70% and
    30% crossing times, or False when any sanity check fails) and
    'starttime' (the 30% crossing time).

    NOTE(review): the validity checks read ``X[EndIndex]`` where
    ``EndIndex`` is a module-level global left over from the main loop's
    zip unpacking — this function only works when called from that loop;
    confirm before reusing it elsewhere.
    """
    # Channel1Data is from first TOF
    # Channel2Data is from second TOF
    hitAmplitude = Y[peakIndex]
    UpperThreshold = baseline - (.7 * (baseline - hitAmplitude))
    LowerThreshold = baseline - (.3 * (baseline - hitAmplitude))
    riseTimestart = 0
    riseTimeend = 0
    riseIndex = 0
    fallIndex = 0
    # Sample closest to the 70% level on the leading edge.
    diffs = Y[startIndex:peakIndex]-UpperThreshold
    value = np.min(abs(diffs))
    noiserms = np.std(Y[:50])*5
    YStart = Y[startIndex]
    YSign =np.sign(Y[startIndex])
    #print(value,diffs)
    #print(np.where(value == abs(diffs))[0][0])
    riseIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex
    # Sample closest to the 30% level on the leading edge.
    diffs = Y[startIndex:peakIndex]-LowerThreshold
    value = np.min(abs(diffs))
    fallIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex
    # Refine both crossings by linear interpolation around the found samples.
    riseTimestart = Interpolator(X, Y, riseIndex-1,riseIndex+1,UpperThreshold)
    riseTimeend = Interpolator(X, Y, fallIndex-1,fallIndex+1,LowerThreshold)
    #print(UpperThreshold,LowerThreshold)
    result = dict()
    result['risetime'] = riseTimestart-riseTimeend
    result['starttime'] = riseTimeend
    # Sanity checks: reject crossings outside the hit window, inverted or
    # zero rise times, failed searches, and starts already above the noise.
    if riseTimestart < X[startIndex] or riseTimestart > X[EndIndex] or riseTimeend < X[startIndex] or riseTimeend > X[EndIndex]:
        result['risetime']= False
    if riseTimestart - riseTimeend > (X[EndIndex] - X[startIndex]):
        result['risetime']= False
    if riseTimestart - riseTimeend <= 0:
        result['risetime']= False
    if riseIndex == 0 or fallIndex ==0:
        result['risetime']= False
    if YSign > 0:
        if(YStart > baseline + noiserms):
            result['risetime']= False
    if YSign < 0:
        if(YStart < baseline - noiserms):
            result['risetime']= False
    if len(np.unique(np.sign(np.diff(Y[fallIndex:startIndex])))) > 1:
        result['risetime']= False
    return result
def Lowpass(Y):
    """Subtract the pedestal contribution from a waveform in the frequency
    domain.

    Builds a pedestal trace from the first 50 samples (zero-padded to the
    waveform length), subtracts its spectrum from the waveform's spectrum,
    and inverse-transforms the difference.

    Y -- waveform samples (sequence of length >= 50).
    Returns the filtered waveform as a complex ndarray (as the original
    scipy.ifft did); take ``.real`` at the call site if needed.
    """
    # ``scipy.fft``/``scipy.ifft`` were legacy callables; in modern SciPy
    # scipy.fft is a module, so calling it raises TypeError.  Use the
    # module's fft/ifft explicitly.
    from scipy.fft import fft, ifft
    pedestle = list(Y[:50])
    pedestle.extend(np.zeros(len(Y) - 50))   # zero-pad to full record length
    fftpedestle = fft(pedestle)
    fftY = fft(Y)
    bp = fftY - fftpedestle
    ibp = ifft(bp)
    return ibp
def DCS(Y):
    """Plot a waveform next to its delayed-coordinate difference trace
    (sample[i+80] - sample[i]) for visual comparison; shows the figure.
    """
    step = 80
    trace = np.asarray(Y, dtype=float)
    # Element i of the difference trace is Y[i+step] - Y[i],
    # for i = 0 .. len(Y) - step - 2 (same span as the original loop).
    diffs = trace[step:-1] - trace[:-(step + 1)]
    plt.plot(Y, label='RAW')
    plt.plot(diffs, label='DCS')
    plt.legend(loc='best')
    plt.show()
def reject_outliers(TimeDeltas,TimeRes, m):
    """Perform m-sigma rejection on a paired dataset.

    TimeDeltas -- measurement array (ndarray)
    TimeRes    -- matching per-measurement array, filtered in lock-step
    m          -- rejection threshold in standard deviations

    Returns (TimeDeltas, TimeRes) with every entry more than m standard
    deviations from the mean removed from both arrays.
    """
    centre, spread = weighted_avg_and_std(TimeDeltas, TimeRes)
    outliers = np.where(abs(TimeDeltas - centre) > m * spread)[0]
    return np.delete(TimeDeltas, outliers), np.delete(TimeRes, outliers)
def ChargeCalculator(Y,startIndex,EndIndex):
    """Integrate a waveform slice and convert the area to a number of
    electrons.

    Y          -- waveform samples (presumably volts — see the ADC
                  conversion in the main loop)
    startIndex -- first sample of the integration window
    EndIndex   -- one past the last sample of the integration window

    Uses trapezoidal integration with the 0.2 ns sample spacing and
    divides by the electron charge.
    """
    C = 1            # coupling/scale factor (unity here)
    e = 1.602E-19    # electron charge in coulombs
    # np.trapz was renamed np.trapezoid in NumPy 2.0; support both.
    trapezoid = getattr(np, "trapezoid", None) or np.trapz
    return (trapezoid(Y[startIndex:EndIndex], dx=.2E-9) * C / e)
def get_hist(ax,nbins):
    """Recover (counts, bin_edges) from a matplotlib Axes that was drawn by
    a (possibly multi-series) histogram plot.

    ax    -- Axes whose ``patches`` are the histogram bar rectangles
    nbins -- number of bins per plotted series; the flat patch list is
             chunked into groups of this size and the groups are summed
             element-wise into one combined histogram

    Returns (counts, edges) as float32 ndarrays; ``edges`` has one more
    entry than ``counts``.
    """
    n,bins = [],[]
    finaln,finalbins = [],[]
    bin = 0        # NOTE(review): unused (and shadows a builtin name)
    iteration = 0  # NOTE(review): unused
    for rect in ax.patches:
        # Bar height is the bbox height; bar position is its left edge.
        ((x0, y0), (x1, y1)) = rect.get_bbox().get_points()
        n.append(y1-y0)
        bins.append(x0) # left edge of each bin
    # Chunk the flat patch list into one sub-list per plotted series.
    finaln = [n[i:i + nbins] for i in range(0, len(n), nbins)]
    finalbins = [bins[i:i + nbins] for i in range(0, len(bins), nbins)]
    #print(finaln)
    i = 0
    # Sum per-series counts; bin edges are taken from the first series.
    for (arrayn,arrabins) in zip(finaln,finalbins):
        if i ==0:
            n = arrayn
            bins = arrabins
        else:
            n = [n[i]+arrayn[i] for i in range(0,len(arrayn))]
        i+=1
    # NOTE(review): x1 leaks out of the patch loop — this is the right edge
    # of the LAST patch drawn, assumed to close the final bin.
    bins.append(x1) # also get right edge of last bin
    return np.asarray(n,dtype=np.float32),np.asarray(bins,dtype=np.float32)
def FindHistPeaks(Y):
    """Locate candidate peaks in a histogram trace.

    Thresholds (width >= 10 bins, height >= 5 counts, prominence >= 2,
    minimum separation 15 bins) match the original tuning.
    Returns the (indices, properties) pair from scipy.signal.find_peaks.
    """
    found, info = scipy.signal.find_peaks(
        Y, width=10, height=5, prominence=2, distance=15)
    return found, info
# Hidden Tk root window so the native file-picker dialog can be shown
# without a full GUI.
root = tk.Tk()
root.withdraw()
print('I owe a million dollars!')
FileNames = askopenfilenames(parent=root, initialfile='tmp',
                            filetypes=[("Binary Files", "*.dat")])
# First pass over the first selected file just counts its events
# (iterating the reader consumes it, hence the separate open).
with DRS4BinaryFile(FileNames[0]) as events:
    length = len(list(events))
    itertor = 1
    # Per-file gain fit results, filled by the main loop below.
    GainArray = []
    GainErrorArray = []
# --- Main analysis loop: one pass per selected DRS4 binary file ---
# NOTE(review): the loop variable ``i`` is reused at several nesting levels
# (files, events, channels) and by later fitting loops — beware when editing.
for i in tqdm(range(len(FileNames)),'Files',dynamic_ncols=True,unit = 'Files'):
    FileName = FileNames[i]
    directory = os.path.dirname(FileName)
    newDirectory = os.path.join(directory,FileName[:-4])
    path,name = os.path.split(FileName)
    if not os.path.exists(newDirectory):
        os.mkdir(newDirectory)
    # First open only counts the events in this file.
    with DRS4BinaryFile(FileName) as events:
        length = len(list(events))
    # Per-channel hit tables ('0'..'8' columns, renamed below).
    Data1 = pd.DataFrame()
    Data2 = pd.DataFrame()
    Data3 = pd.DataFrame()
    Data4 = pd.DataFrame()
    Divider = 1        # process every Divider-th event
    SubDivider = 1000  # persist/plot every SubDivider-th event
    PersistanceData = []
    PersistanceTime = []
    with DRS4BinaryFile(FileName) as f:
        BoardID = f.board_ids[0]
        NumberofChannels = f.channels[BoardID]
        # NOTE(review): ReferenceChannel is only assigned when more than one
        # channel exists, yet it is used unconditionally on the next line —
        # a single-channel file would raise NameError here.
        if len(NumberofChannels) > 1:
            ReferenceChannel = NumberofChannels[0]
        TimeWidths = f.time_widths[f.board_ids[0]][ReferenceChannel]
        # Time axis: 1024 samples, 0.2 per sample (ns, per the axis labels below).
        Time = np.arange(0,1024)*.2
        eventNumber = 0
        #printProgressBar(0, length, prefix = 'Progress:', suffix = 'Complete', length = 50)
        for i in tqdm(range(length),'Events',dynamic_ncols=True,unit = 'Events'):
            event = next(f)
            RC = event.range_center
            ADCData = event.adc_data
            triggerCell = event.trigger_cells[BoardID]
            for i in NumberofChannels:
                if (eventNumber % Divider == 0):
                    # Convert raw ADC counts to volts-like units around the range centre.
                    Data = (ADCData[BoardID][i]/65535 + (RC/1000 - .5))
                    #DCS(Data)
                    Data = filterData(Data)
                    [hitStartIndexList, hitPeakAmplitude, hitPeakIndexArray,hitEndIndexList, baseline, rmsnoise] = hifinderScipy(Data) #hitfinder(Data)
                    #print(hitStartIndexList)
                    if hitStartIndexList:
                        for (startIndex,EndIndex,hitAmplitude,hitAmplitudeIndex) in zip(hitStartIndexList,hitEndIndexList,hitPeakAmplitude,hitPeakIndexArray):
                            #print(startIndex,EndIndex,hitAmplitude,hitAmplitudeIndex)
                            resultrt = RisetimeFinder(Time,Data,startIndex,hitAmplitudeIndex,baseline)
                            RiseTime,StartTime = resultrt['risetime'],resultrt['starttime']
                            if RiseTime == False:
                                continue
                            PulseHeight = hitAmplitude
                            Charge = ChargeCalculator(Data,startIndex,EndIndex)
                            PeakTime = Time[hitAmplitudeIndex]
                            ChargePedestle = ChargeCalculator(Data,0,50)
                            TempData = pd.DataFrame(data = {'0':[RiseTime],'1':[PulseHeight],'2':[Charge],'3':[PeakTime],'4':[rmsnoise],'5':[baseline],'6':[baseline+rmsnoise],'7':[ChargePedestle],'8':[StartTime]})
                            #print(TempData)
                            # Persistence display: overlay every SubDivider-th waveform.
                            if eventNumber % SubDivider == 0:
                                plt.plot(Time,Data,'k')
                                plt.axvline(Time[startIndex],color = 'r',ymax = 1,linewidth=.2)
                                plt.axvline(Time[hitAmplitudeIndex],color = 'g',ymax = 1,linewidth=.2)
                                plt.axvline(Time[EndIndex],color = 'b',ymax = 1,linewidth=.2)
                            # NOTE(review): DataFrame.append was removed in pandas 2.0;
                            # this code requires pandas < 2 (pd.concat is the modern form).
                            if i == 1:
                                Data1 = Data1.append(TempData,ignore_index=True)
                            if i == 2:
                                Data2 = Data2.append(TempData,ignore_index=True)
                            if i == 3:
                                Data3 = Data3.append(TempData,ignore_index=True)
                            if i == 4:
                                Data4 = Data4.append(TempData,ignore_index=True)
            #sleep(0.001)
            #printProgressBar(eventNumber + 1, length, prefix = 'Progress:', suffix = 'Complete', length = 50)
            eventNumber = eventNumber + 1
        columnNames = []
        plt.savefig(os.path.join(newDirectory,'Persistance.png'))
        # Build one set of column names per recorded channel.
        for i in NumberofChannels:
            if i == NumberofChannels[0]:
                columnNames = ["Channel {} Rise Time".format(i),"Channel {} Pulse Height".format(i),"Channel {} Cummulative Charge".format(i),"Channel {} Pulse Time".format(i),"Channel {} RMS Noise".format(i),"Channel {} Baseline".format(i),"Channel {} Pedestle".format(i),"Channel {} Charge Pedestle".format(i),"Channel {} Peak Start Time".format(i)]
            else:
                columnNames.extend(["Channel {} Rise Time".format(i),"Channel {} Pulse Height".format(i),"Channel {} Cummulative Charge".format(i),"Channel {} Pulse Time".format(i),"Channel {} RMS Noise".format(i),"Channel {} Baseline".format(i),"Channel {} Pedestle".format(i),"Channel {} Charge Pedestle".format(i),"Channel {} Peak Start Time".format(i)])
        # Stitch the per-channel tables side by side, first channel first.
        if 1 == NumberofChannels[0]:
            Data = Data1
        if 2 == NumberofChannels[0]:
            Data = Data2
        if 3 == NumberofChannels[0]:
            Data = Data3
        if 4 == NumberofChannels[0]:
            Data = Data4
        if 1 in NumberofChannels and 1 != NumberofChannels[0]:
            Data = pd.concat([Data,Data1],axis=1,ignore_index=True)
        if 2 in NumberofChannels and 2 != NumberofChannels[0]:
            Data = pd.concat([Data,Data2],axis=1,ignore_index=True)
        if 3 in NumberofChannels and 3 != NumberofChannels[0]:
            Data = pd.concat([Data,Data3],axis=1,ignore_index=True)
        if 4 in NumberofChannels and 4 != NumberofChannels[0]:
            Data = pd.concat([Data,Data4],axis=1,ignore_index=True)
        # print(Data.head(30),[e for e in columnNames if 'Channel' in e])
        #Data= Data[(np.abs(stats.zscore(Data)) < 3).all(axis=1)]
        #Data= Data[(np.abs(stats.zscore(Data)) < 3).all(axis=1)]
        Data.columns = [e for e in columnNames if 'Channel' in e]
        PulseHeightColumns = []
        PulseHeightColumns = [column for column in columnNames if "Pulse Height" in column]
        PulseandNoiseColumns = [column for column in columnNames if "Pulse Height" in column]
        ChargeColumns = [column for column in columnNames if "Cummulative Charge" in column]
        StartTimes = [column for column in columnNames if "Start Time" in column]
        histPulseHieghts = Data.plot.hist(y = PulseandNoiseColumns,bins =1000,alpha = .3,subplots=False,title = 'Pulse Height Distributions',log=False,sharex = True)
        plt.xlabel('Pulse Height (mV)')
        plt.savefig(os.path.join(newDirectory,'Pulse_Height_Distribution.png'))
        histCharge = Data.plot.hist(y = ChargeColumns,bins =1000,alpha = .3,subplots=False,title = 'Pulse Area Distribution',log=False,sharex = True)
        plt.xlabel(r'Area ($\frac{pC}{e}$)')
        plt.legend(ChargeColumns)
        #print(n)
        # for column in ChargeColumns:
        #     print("{} Statistics: \nMean Charge: {}\nVariance of Charge: {}".format(column,Data[column].mean(),Data[column].std()**2))
        text = []
        mu = []
        variance = []
        lammaList = []
        amp = []
        # Pull the combined charge histogram back out of the matplotlib Axes.
        n, bins = get_hist(histCharge,1000)
        bincenters = np.asarray([(bins[i]+bins[i-1])/2 for i in range(1,len(bins))],np.float32)
        print([N for N in n])
        if any(isinstance(el, list) for el in n):
            print(n)
        peaks,properties = FindHistPeaks(n)
        #plt.plot(bincenters[peaks],n[peaks],'g+')
        widths = scipy.signal.peak_widths(n, peaks, rel_height=0.5)
        j = 0
        scale = .5
        # Seed Gaussian parameters for each detected photo-electron peak.
        for (peak,width) in zip(peaks,widths[0]):
            try:
                true_width = abs(bincenters[int(peak - width/2)]-bincenters[int(peak + width/2)])
                X = bincenters[int(peak - width):int(peak + width)]
                Y = n[int(peak - width):int(peak + width)]
                mean,std = weighted_avg_and_std(X,Y)
                p0 = [n[peak], mean, std]
                bounds = [(0,.5*mean,.5*std),(n[peak],1.5*mean,1.5*std)]
                res = least_squares(gaussMinimizer, p0, loss='linear', f_scale=scale,args=(X,Y),bounds = bounds,xtol = 1E-20,ftol = 1E-15,x_scale = 'jac',tr_solver = 'lsmr',max_nfev=1E4)
                mu.append(res.x[1])
                variance.append(res.x[2]**2)
                amp.append(res.x[0])
            except:
                pass
            print(mu,len(mu))
            j = j+1
        # for i in range(0,len(mu)+1):
        #     if i==0 and i < len(mu):
        #         mod = Model(PEModel,prefix = 'f{}_'.format(i))
        #         pars = mod.make_params(verbose = True)
        #         pars['f{}_n'.format(i)].set(value = i+1,vary = False)
        #         pars['f{}_A'.format(i)].set(value = amp[i],min = 0)
        #         pars['f{}_mu'.format(i)].set(value = 1,min = 0,max = 100)
        #         pars['f{}_sigma'.format(i)].set(min = 0,value = np.sqrt(variance[i]))
        #         pars['f{}_gain'.format(i)].set(brute_step= 1E5,min = 1E5,max = 1E10,value = 1E8)
        #     elif i == 0 and not len(mu):
        #         mod = Model(PEModel,prefix = 'f{}_'.format(i))
        #         pars = mod.make_params(verbose = True)
        #         pars['f{}_n'.format(i)].set(value = i+1,vary = False)
        #         pars['f{}_A'.format(i)].set(value = amp[i],min = 0)
        #         pars['f{}_mu'.format(i)].set(value = 1,min = 0,max = 100)
        #         pars['f{}_sigma'.format(i)].set(min = 0)
        #         pars['f{}_gain'.format(i)].set(brute_step= 1E5,min = 1E5,max = 1E10,value = 1E8)
        #     else:
        #         tempmod = Model(PEModel,prefix = 'f{}_'.format(i))
        #         temppars = tempmod.make_params(verbose = True)
        #         temppars['f{}_n'.format(i)].set(value = i+1,vary = False)
        #         mod+=tempmod
        #         pars+=temppars
        #         pars['f{}_gain'.format(i)].set(expr ='f{}_gain'.format(i-1) )
        #         pars['f{}_mu'.format(i)].set(expr = 'f{}_mu'.format(i-1))
        #         pars['f{}_sigma'.format(i)].set(min = 0)
        #         pars['f{}_A'.format(i)].set(value = 50,min = 0)
        #WORKING
        # Multi-Gaussian lmfit model: one component per PE peak, centres tied
        # by the shared parameter 'G' (the gain = spacing between peaks).
        for i in range(0,len(mu)+1):
            if i == 0 and i < len(mu):
                mod = GaussianModel(prefix = 'f{}_'.format(i))
                pars = mod.guess(n,x=bincenters, sigma=np.sqrt(variance[i]),height = amp[i],center = mu[i])
                if len(mu) >=2:
                    pars.add('G',value = mu[i+1]-mu[i],brute_step=.01*mu[i],min = .1*(mu[i+1]-mu[i]),max = 5*(mu[i+1]-mu[i]))
                else:
                    pars.add('G',value = 1E8,min=1E5,max=1E10,brute_step = .1E5)
            elif i >= len(mu) and i !=0:
                tempmod = GaussianModel(prefix = 'f{}_'.format(i))
                temppars = tempmod.guess(n,x=bincenters)
                pars += temppars
                pars['f{}_center'.format(i)].set(expr='G+f{}_center'.format(i-1))
                mod += tempmod
            elif i < len(mu):
                tempmod = GaussianModel(prefix = 'f{}_'.format(i))
                temppars = tempmod.guess(n,x=bincenters,center = mu[i], sigma=np.sqrt(variance[i]),height = amp[i])
                pars += temppars
                pars['f{}_center'.format(i)].set(expr='G+f{}_center'.format(i-1))
                #pars['f{}_center'.format(i-1)].set(expr='G-f{}_center'.format(i))
                mod += tempmod
            elif not len(mu) and i==0:
                mod = GaussianModel(prefix = 'f{}_'.format(i))
                pars = mod.guess(n,x=bincenters)
                pars.add('G',value = 1E8,min=1E5,max=1E10,brute_step = .1E5)
            else:
                pass
        result = mod.fit(n, pars, x=bincenters)
        print(result.fit_report())
        #plt.plot(bincenters,n,'y')
        #plt.plot(mu,amp,'k+')
        vals = result.params.valuesdict()
        mu = []
        amp = []
        # Collect fitted peak positions/heights for the overlay plot.
        for (key,value) in vals.items():
            if 'center' in key:
                mu.append(value)
            if 'height' in key:
                amp.append(value)
        mu = [0] +mu
        amp = [0]+amp
        #print(mu,amp)
        plt.plot(mu,amp,'g+')
        vals = pars.valuesdict()
        #print(result.params['G'].value)
        GainError = result.params['G'].stderr
        Gain = result.params['G'].value
        #print(result.params["*_center"])
        if GainError == None:
            GainError = 1
            print('Error approximation failed!')
        GainArray.append(Gain)
        GainErrorArray.append(GainError)
        lamd = 0
        # Fit a Poisson envelope to the PE-peak amplitudes to estimate <mu>.
        if len(mu) >= 2:
            newmu = np.arange(0,len(mu)) #print(result.params['G'].stderr)
            newamp = np.array(amp)#/np.linalg.norm(amp) #[x/np.linalg.norm(amp) for x in amp]
            p0= [1,newamp[1]*10]
            bounds = [(0,newamp[1]),(newmu[-1],20*newamp[1])]
            res = least_squares(poissonMinimizer, p0, loss='linear',args=(newmu, newamp),bounds=bounds,gtol = 1E-50,xtol = 1E-50,ftol = 1E-50,x_scale = 'jac',tr_solver = 'lsmr',max_nfev=1E4)
            fit = poisson(res.x,newmu)
            lamd = res.x[0]
            #plt.stem(newmu*Gain, fit, 'r--',label =r"<$\mu$> = {}".format(np.round(res.x[0],3)))
        plt.plot(bincenters,result.best_fit,'k',label = 'Gain = {:.2e} +/- {:.2e}, <$\mu$> = {}'.format(Gain,GainError,np.round(lamd,3)))
        plt.legend(loc = 'best')
        plt.savefig(os.path.join(newDirectory,'Pulse_Area_Distribution.png'))
        Text = []
        # Per-channel rise-time statistics (3-sigma trimmed).
        for i in NumberofChannels:
            values = np.array(Data['Channel {} Rise Time'.format(i)].values)
            values = values[np.isfinite(values)]
            values = [x for x in values if abs(x - np.nanmean(values)) < 3*np.std(values)]
            [ToFMean, TofStd] = weighted_avg_and_std(values,np.ones(len(Data.index)))
            Text.append(r'$\tau_{}: \mu = {}ns; \sigma = {}ns$'.format(i,ToFMean,TofStd))
        ristimeColumns = [column for column in columnNames if "Rise Time" in column]
        histRiseTimes = Data.plot.hist(y =ristimeColumns,bins = 1000,alpha = .3,subplots=False,title = 'Rise Time Distributions')
        plt.legend(Text)
        plt.xlabel('Rise Times (ns)')
        plt.savefig(os.path.join(newDirectory,'Rise_Time_Distribution.png'))
        itertor = itertor +1
        if len(FileNames) != 1:
            plt.close('all')
        #TOF Plotting
        plt.figure()
        # Time-of-flight between every pair of channels from pulse start times.
        if len(NumberofChannels) >1:
            comb = combinations(NumberofChannels, 2)
            for i in list(comb):
                #print(i,Data['Channel {} Peak Start Time'.format(i[0])])
                value1 = np.array(Data['Channel {} Peak Start Time'.format(i[0])].values)
                #value1 = value1[np.isfinite(value1)]
                value2 = np.array(Data['Channel {} Peak Start Time'.format(i[1])].values)
                #value2 = value2[np.isfinite(value2)]
                tofs = value1-value2
                tofs = [x for x in tofs if abs(x - np.nanmean(tofs)) < 3*np.nanstd(tofs)]
                tof = np.nanmean(tofs)
                tofres = np.nanstd(tofs)
                string = "TOF Between Inputs {} and {}: {}ns +/- {}ns".format(i[0],i[1],tof,tofres)
                plt.hist(tofs,bins = 1000,alpha = .3,label = string)
            plt.title('TOF Histogram')
            plt.legend(loc='best')
            plt.savefig(os.path.join(newDirectory,'TOF_Distributions.png'))
        if len(FileNames) == 1:
            plt.show()
            #sys.exit()
        else:
            # Multi-file run: plot gain vs. voltage (or a fixed intensity axis).
            GainPlotting = str(input("Voltage Based Gain Plotting? (y/n): "))
            plt.figure()
            if GainPlotting == 'y':
                LowVoltage = float(input("Lower Voltage?: "))
                HighVoltage = float(input("Higher Voltage?: "))
                Breakdown = float(input("Breakdown Voltage?: "))
                X = np.linspace(LowVoltage,HighVoltage,len(GainArray)) - Breakdown
                plt.xlabel('Breakdown Voltage (V)')
            else:
                X = [1,2,3,4,5.8,8]
            print(X,GainArray,GainErrorArray)
            #plt.errorbar(voltages, GainArray, yerr=GainErrorArray, fmt='o')
            plt.errorbar(X, GainArray, yerr=GainErrorArray, fmt='o')
            p = np.polyfit(X,GainArray,deg = 1)
            p = np.poly1d(p)
            plt.plot(X,p(X),label =r'Fit: m = {:.2E}, b = {:.2E}, $\mu$ = {:.2E}'.format(p[1],p[0],np.mean(GainArray)))
            plt.legend(loc = 'best')
            plt.ylabel('Gain')
            plt.xlabel('Light Intensity (A.U)')
            plt.ylim(1E6,2E8)
            plt.savefig('Gain_Plot.png')
    print("Analysis of Files Complete!")
    plt.show()
#sys.exit()
| [
"matplotlib.pyplot.hist",
"scipy.misc.factorial",
"numpy.sqrt",
"numpy.polyfit",
"matplotlib.pyplot.ylabel",
"numpy.log",
"scipy.signal.savgol_filter",
"numpy.array",
"numpy.nanmean",
"numpy.isfinite",
"matplotlib.pyplot.errorbar",
"numpy.poly1d",
"drs4.DRS4BinaryFile",
"numpy.arange",
"... | [((1556, 1579), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (1570, 1579), False, 'import matplotlib\n'), ((2316, 2378), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (2337, 2378), False, 'import warnings\n'), ((15489, 15496), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (15494, 15496), True, 'import tkinter as tk\n'), ((15560, 15651), 'tkfilebrowser.askopenfilenames', 'askopenfilenames', ([], {'parent': 'root', 'initialfile': '"""tmp"""', 'filetypes': "[('Binary Files', '*.dat')]"}), "(parent=root, initialfile='tmp', filetypes=[('Binary Files',\n '*.dat')])\n", (15576, 15651), False, 'from tkfilebrowser import askopenfilenames\n'), ((29366, 29378), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29376, 29378), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2811), 'numpy.log', 'np.log', (['(lnl ** 2)'], {}), '(lnl ** 2)\n', (2801, 2811), True, 'import numpy as np\n'), ((3974, 3990), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3982, 3990), True, 'import numpy as np\n'), ((4046, 4064), 'numpy.nanmean', 'np.nanmean', (['values'], {}), '(values)\n', (4056, 4064), True, 'import numpy as np\n'), ((4111, 4128), 'numpy.nanstd', 'np.nanstd', (['values'], {}), '(values)\n', (4120, 4128), True, 'import numpy as np\n'), ((4677, 4734), 'scipy.signal.savgol_filter', 'scisig.savgol_filter', ([], {'x': 'Y', 'window_length': '(51)', 'polyorder': '(11)'}), '(x=Y, window_length=51, polyorder=11)\n', (4697, 4734), True, 'import scipy.signal as scisig\n'), ((5024, 5039), 'numpy.mean', 'np.mean', (['p[:50]'], {}), '(p[:50])\n', (5031, 5039), True, 'import numpy as np\n'), ((5055, 5069), 'numpy.std', 'np.std', (['p[:50]'], {}), '(p[:50])\n', (5061, 5069), True, 'import numpy as np\n'), ((6964, 6979), 'numpy.mean', 'np.mean', (['Y[:50]'], {}), '(Y[:50])\n', (6971, 6979), True, 'import numpy as np\n'), ((6995, 
7009), 'numpy.std', 'np.std', (['Y[:50]'], {}), '(Y[:50])\n', (7001, 7009), True, 'import numpy as np\n'), ((7177, 7187), 'numpy.diff', 'np.diff', (['p'], {}), '(p)\n', (7184, 7187), True, 'import numpy as np\n'), ((11930, 11952), 'numpy.sign', 'np.sign', (['Y[startIndex]'], {}), '(Y[startIndex])\n', (11937, 11952), True, 'import numpy as np\n'), ((13791, 13815), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {'label': '"""RAW"""'}), "(Y, label='RAW')\n", (13799, 13815), True, 'import matplotlib.pyplot as plt\n'), ((13820, 13847), 'matplotlib.pyplot.plot', 'plt.plot', (['newY'], {'label': '"""DCS"""'}), "(newY, label='DCS')\n", (13828, 13847), True, 'import matplotlib.pyplot as plt\n'), ((13852, 13874), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (13862, 13874), True, 'import matplotlib.pyplot as plt\n'), ((13879, 13889), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13887, 13889), True, 'import matplotlib.pyplot as plt\n'), ((14309, 14339), 'numpy.delete', 'np.delete', (['TimeDeltas', 'Indexes'], {}), '(TimeDeltas, Indexes)\n', (14318, 14339), True, 'import numpy as np\n'), ((14353, 14380), 'numpy.delete', 'np.delete', (['TimeRes', 'Indexes'], {}), '(TimeRes, Indexes)\n', (14362, 14380), True, 'import numpy as np\n'), ((15680, 15708), 'drs4.DRS4BinaryFile', 'DRS4BinaryFile', (['FileNames[0]'], {}), '(FileNames[0])\n', (15694, 15708), False, 'from drs4 import DRS4BinaryFile\n'), ((15923, 15948), 'os.path.dirname', 'os.path.dirname', (['FileName'], {}), '(FileName)\n', (15938, 15948), False, 'import os\n'), ((15968, 16006), 'os.path.join', 'os.path.join', (['directory', 'FileName[:-4]'], {}), '(directory, FileName[:-4])\n', (15980, 16006), False, 'import os\n'), ((16022, 16045), 'os.path.split', 'os.path.split', (['FileName'], {}), '(FileName)\n', (16035, 16045), False, 'import os\n'), ((16210, 16224), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16222, 16224), True, 'import pandas as pd\n'), ((16237, 
16251), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16249, 16251), True, 'import pandas as pd\n'), ((16264, 16278), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16276, 16278), True, 'import pandas as pd\n'), ((16291, 16305), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16303, 16305), True, 'import pandas as pd\n'), ((22062, 22093), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pulse Height (mV)"""'], {}), "('Pulse Height (mV)')\n", (22072, 22093), True, 'import matplotlib.pyplot as plt\n'), ((22321, 22357), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Area ($\\\\frac{pC}{e}$)"""'], {}), "('Area ($\\\\frac{pC}{e}$)')\n", (22331, 22357), True, 'import matplotlib.pyplot as plt\n'), ((22362, 22387), 'matplotlib.pyplot.legend', 'plt.legend', (['ChargeColumns'], {}), '(ChargeColumns)\n', (22372, 22387), True, 'import matplotlib.pyplot as plt\n'), ((27299, 27322), 'matplotlib.pyplot.plot', 'plt.plot', (['mu', 'amp', '"""g+"""'], {}), "(mu, amp, 'g+')\n", (27307, 27322), True, 'import matplotlib.pyplot as plt\n'), ((28422, 28444), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (28432, 28444), True, 'import matplotlib.pyplot as plt\n'), ((29150, 29166), 'matplotlib.pyplot.legend', 'plt.legend', (['Text'], {}), '(Text)\n', (29160, 29166), True, 'import matplotlib.pyplot as plt\n'), ((29171, 29200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rise Times (ns)"""'], {}), "('Rise Times (ns)')\n", (29181, 29200), True, 'import matplotlib.pyplot as plt\n'), ((29423, 29456), 'itertools.combinations', 'combinations', (['NumberofChannels', '(2)'], {}), '(NumberofChannels, 2)\n', (29435, 29456), False, 'from itertools import combinations\n'), ((30188, 30214), 'matplotlib.pyplot.title', 'plt.title', (['"""TOF Histogram"""'], {}), "('TOF Histogram')\n", (30197, 30214), True, 'import matplotlib.pyplot as plt\n'), ((30223, 30245), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), 
"(loc='best')\n", (30233, 30245), True, 'import matplotlib.pyplot as plt\n'), ((30346, 30356), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30354, 30356), True, 'import matplotlib.pyplot as plt\n'), ((30453, 30465), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30463, 30465), True, 'import matplotlib.pyplot as plt\n'), ((31396, 31406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31404, 31406), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1108), 'importlib.import_module', 'importlib.import_module', (['package'], {}), '(package)\n', (1099, 1108), False, 'import importlib\n'), ((1238, 1270), 'importlib.import_module', 'importlib.import_module', (['package'], {}), '(package)\n', (1261, 1270), False, 'import importlib\n'), ((2461, 2474), 'numpy.exp', 'np.exp', (['(-lamb)'], {}), '(-lamb)\n', (2467, 2474), True, 'import numpy as np\n'), ((2553, 2565), 'scipy.misc.factorial', 'factorial', (['n'], {}), '(n)\n', (2562, 2565), False, 'from scipy.misc import factorial\n'), ((4011, 4030), 'numpy.isfinite', 'np.isfinite', (['values'], {}), '(values)\n', (4022, 4030), True, 'import numpy as np\n'), ((4141, 4161), 'numpy.round', 'np.round', (['average', '(3)'], {}), '(average, 3)\n', (4149, 4161), True, 'import numpy as np\n'), ((4162, 4178), 'numpy.round', 'np.round', (['std', '(3)'], {}), '(std, 3)\n', (4170, 4178), True, 'import numpy as np\n'), ((4787, 4830), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2.0 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2.0 * sigma ** 2))\n', (4793, 4830), True, 'import numpy as np\n'), ((7304, 7390), 'numpy.array', 'np.array', (['[(True if pi < baseline - NoiseSigma * noiserms else False) for pi in Y]'], {}), '([(True if pi < baseline - NoiseSigma * noiserms else False) for pi in\n Y])\n', (7312, 7390), True, 'import numpy as np\n'), ((7416, 7502), 'numpy.array', 'np.array', (['[(True if pi > baseline + NoiseSigma * noiserms else False) for pi in Y]'], {}), '([(True if pi > baseline + NoiseSigma * 
noiserms else False) for pi in\n Y])\n', (7424, 7502), True, 'import numpy as np\n'), ((7521, 7538), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (7528, 7538), True, 'import numpy as np\n'), ((8010, 8027), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (8017, 8027), True, 'import numpy as np\n'), ((8778, 8795), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (8785, 8795), True, 'import numpy as np\n'), ((11874, 11888), 'numpy.std', 'np.std', (['Y[:50]'], {}), '(Y[:50])\n', (11880, 11888), True, 'import numpy as np\n'), ((15265, 15296), 'numpy.asarray', 'np.asarray', (['n'], {'dtype': 'np.float32'}), '(n, dtype=np.float32)\n', (15275, 15296), True, 'import numpy as np\n'), ((15296, 15330), 'numpy.asarray', 'np.asarray', (['bins'], {'dtype': 'np.float32'}), '(bins, dtype=np.float32)\n', (15306, 15330), True, 'import numpy as np\n'), ((16057, 16085), 'os.path.exists', 'os.path.exists', (['newDirectory'], {}), '(newDirectory)\n', (16071, 16085), False, 'import os\n'), ((16095, 16117), 'os.mkdir', 'os.mkdir', (['newDirectory'], {}), '(newDirectory)\n', (16103, 16117), False, 'import os\n'), ((16127, 16151), 'drs4.DRS4BinaryFile', 'DRS4BinaryFile', (['FileName'], {}), '(FileName)\n', (16141, 16151), False, 'from drs4 import DRS4BinaryFile\n'), ((16404, 16428), 'drs4.DRS4BinaryFile', 'DRS4BinaryFile', (['FileName'], {}), '(FileName)\n', (16418, 16428), False, 'from drs4 import DRS4BinaryFile\n'), ((19725, 19770), 'os.path.join', 'os.path.join', (['newDirectory', '"""Persistance.png"""'], {}), "(newDirectory, 'Persistance.png')\n", (19737, 19770), False, 'import os\n'), ((20848, 20899), 'pandas.concat', 'pd.concat', (['[Data, Data1]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([Data, Data1], axis=1, ignore_index=True)\n', (20857, 20899), True, 'import pandas as pd\n'), ((20971, 21022), 'pandas.concat', 'pd.concat', (['[Data, Data2]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([Data, Data2], axis=1, ignore_index=True)\n', 
(20980, 21022), True, 'import pandas as pd\n'), ((21094, 21145), 'pandas.concat', 'pd.concat', (['[Data, Data3]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([Data, Data3], axis=1, ignore_index=True)\n', (21103, 21145), True, 'import pandas as pd\n'), ((21217, 21268), 'pandas.concat', 'pd.concat', (['[Data, Data4]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([Data, Data4], axis=1, ignore_index=True)\n', (21226, 21268), True, 'import pandas as pd\n'), ((22110, 22169), 'os.path.join', 'os.path.join', (['newDirectory', '"""Pulse_Height_Distribution.png"""'], {}), "(newDirectory, 'Pulse_Height_Distribution.png')\n", (22122, 22169), False, 'import os\n'), ((27783, 27796), 'numpy.array', 'np.array', (['amp'], {}), '(amp)\n', (27791, 27796), True, 'import numpy as np\n'), ((27957, 28139), 'scipy.optimize.least_squares', 'least_squares', (['poissonMinimizer', 'p0'], {'loss': '"""linear"""', 'args': '(newmu, newamp)', 'bounds': 'bounds', 'gtol': '(1e-50)', 'xtol': '(1e-50)', 'ftol': '(1e-50)', 'x_scale': '"""jac"""', 'tr_solver': '"""lsmr"""', 'max_nfev': '(10000.0)'}), "(poissonMinimizer, p0, loss='linear', args=(newmu, newamp),\n bounds=bounds, gtol=1e-50, xtol=1e-50, ftol=1e-50, x_scale='jac',\n tr_solver='lsmr', max_nfev=10000.0)\n", (27970, 28139), False, 'from scipy.optimize import curve_fit, least_squares\n'), ((28463, 28520), 'os.path.join', 'os.path.join', (['newDirectory', '"""Pulse_Area_Distribution.png"""'], {}), "(newDirectory, 'Pulse_Area_Distribution.png')\n", (28475, 28520), False, 'import os\n'), ((29217, 29273), 'os.path.join', 'os.path.join', (['newDirectory', '"""Rise_Time_Distribution.png"""'], {}), "(newDirectory, 'Rise_Time_Distribution.png')\n", (29229, 29273), False, 'import os\n'), ((29335, 29351), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (29344, 29351), True, 'import matplotlib.pyplot as plt\n'), ((29965, 29981), 'numpy.nanmean', 'np.nanmean', (['tofs'], {}), '(tofs)\n', (29975, 29981), True, 'import numpy 
as np\n'), ((30003, 30018), 'numpy.nanstd', 'np.nanstd', (['tofs'], {}), '(tofs)\n', (30012, 30018), True, 'import numpy as np\n'), ((30127, 30177), 'matplotlib.pyplot.hist', 'plt.hist', (['tofs'], {'bins': '(1000)', 'alpha': '(0.3)', 'label': 'string'}), '(tofs, bins=1000, alpha=0.3, label=string)\n', (30135, 30177), True, 'import matplotlib.pyplot as plt\n'), ((30266, 30317), 'os.path.join', 'os.path.join', (['newDirectory', '"""TOF_Distributions.png"""'], {}), "(newDirectory, 'TOF_Distributions.png')\n", (30278, 30317), False, 'import os\n'), ((30744, 30779), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Breakdown Voltage (V)"""'], {}), "('Breakdown Voltage (V)')\n", (30754, 30779), True, 'import matplotlib.pyplot as plt\n'), ((30941, 30997), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['X', 'GainArray'], {'yerr': 'GainErrorArray', 'fmt': '"""o"""'}), "(X, GainArray, yerr=GainErrorArray, fmt='o')\n", (30953, 30997), True, 'import matplotlib.pyplot as plt\n'), ((31010, 31041), 'numpy.polyfit', 'np.polyfit', (['X', 'GainArray'], {'deg': '(1)'}), '(X, GainArray, deg=1)\n', (31020, 31041), True, 'import numpy as np\n'), ((31054, 31066), 'numpy.poly1d', 'np.poly1d', (['p'], {}), '(p)\n', (31063, 31066), True, 'import numpy as np\n'), ((31192, 31214), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (31202, 31214), True, 'import matplotlib.pyplot as plt\n'), ((31225, 31243), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gain"""'], {}), "('Gain')\n", (31235, 31243), True, 'import matplotlib.pyplot as plt\n'), ((31252, 31287), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Light Intensity (A.U)"""'], {}), "('Light Intensity (A.U)')\n", (31262, 31287), True, 'import matplotlib.pyplot as plt\n'), ((31296, 31328), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1000000.0)', '(200000000.0)'], {}), '(1000000.0, 200000000.0)\n', (31304, 31328), True, 'import matplotlib.pyplot as plt\n'), ((31322, 31350), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""Gain_Plot.png"""'], {}), "('Gain_Plot.png')\n", (31333, 31350), True, 'import matplotlib.pyplot as plt\n'), ((687, 773), 'subprocess.call', 'subprocess.call', (["[sys.executable, '-m', 'ensurepip', '--default-pip']"], {'shell': '(True)'}), "([sys.executable, '-m', 'ensurepip', '--default-pip'], shell\n =True)\n", (702, 773), False, 'import subprocess\n'), ((774, 850), 'subprocess.call', 'subprocess.call', (["[sys.executable, 'easy_install', 'python3-pip']"], {'shell': '(True)'}), "([sys.executable, 'easy_install', 'python3-pip'], shell=True)\n", (789, 850), False, 'import subprocess\n'), ((1129, 1195), 'subprocess.call', 'subprocess.call', (["[sys.executable, '-m', 'pip', 'install', package]"], {}), "([sys.executable, '-m', 'pip', 'install', package])\n", (1144, 1195), False, 'import subprocess\n'), ((2446, 2458), 'scipy.misc.factorial', 'factorial', (['k'], {}), '(k)\n', (2455, 2458), False, 'from scipy.misc import factorial\n'), ((2535, 2546), 'numpy.exp', 'np.exp', (['(-mu)'], {}), '(-mu)\n', (2541, 2546), True, 'import numpy as np\n'), ((2768, 2781), 'numpy.exp', 'np.exp', (['(-lamb)'], {}), '(-lamb)\n', (2774, 2781), True, 'import numpy as np\n'), ((6398, 6441), 'numpy.append', 'np.append', (['hitStartIndexList', 'hitStartIndex'], {}), '(hitStartIndexList, hitStartIndex)\n', (6407, 6441), True, 'import numpy as np\n'), ((6472, 6511), 'numpy.append', 'np.append', (['hitEndIndexList', 'hitEndIndex'], {}), '(hitEndIndexList, hitEndIndex)\n', (6481, 6511), True, 'import numpy as np\n'), ((6542, 6583), 'numpy.append', 'np.append', (['hitPeakAmplitude', 'hitAmplitude'], {}), '(hitPeakAmplitude, hitAmplitude)\n', (6551, 6583), True, 'import numpy as np\n'), ((6616, 6650), 'numpy.append', 'np.append', (['hitPeakIndexArray', 'peak'], {}), '(hitPeakIndexArray, peak)\n', (6625, 6650), True, 'import numpy as np\n'), ((10799, 10831), 'numpy.nonzero', 'np.nonzero', (['(hitPeakAmplitude > 0)'], {}), '(hitPeakAmplitude > 0)\n', (10809, 10831), True, 
'import numpy as np\n'), ((10868, 10900), 'numpy.nonzero', 'np.nonzero', (['(hitPeakAmplitude < 0)'], {}), '(hitPeakAmplitude < 0)\n', (10878, 10900), True, 'import numpy as np\n'), ((14510, 14552), 'numpy.trapz', 'np.trapz', (['Y[startIndex:EndIndex]'], {'dx': '(2e-10)'}), '(Y[startIndex:EndIndex], dx=2e-10)\n', (14518, 14552), True, 'import numpy as np\n'), ((16702, 16720), 'numpy.arange', 'np.arange', (['(0)', '(1024)'], {}), '(0, 1024)\n', (16711, 16720), True, 'import numpy as np\n'), ((23495, 23669), 'scipy.optimize.least_squares', 'least_squares', (['gaussMinimizer', 'p0'], {'loss': '"""linear"""', 'f_scale': 'scale', 'args': '(X, Y)', 'bounds': 'bounds', 'xtol': '(1e-20)', 'ftol': '(1e-15)', 'x_scale': '"""jac"""', 'tr_solver': '"""lsmr"""', 'max_nfev': '(10000.0)'}), "(gaussMinimizer, p0, loss='linear', f_scale=scale, args=(X, Y),\n bounds=bounds, xtol=1e-20, ftol=1e-15, x_scale='jac', tr_solver='lsmr',\n max_nfev=10000.0)\n", (23508, 23669), False, 'from scipy.optimize import curve_fit, least_squares\n'), ((28663, 28682), 'numpy.isfinite', 'np.isfinite', (['values'], {}), '(values)\n', (28674, 28682), True, 'import numpy as np\n'), ((2639, 2685), 'numpy.exp', 'np.exp', (['((x - n * gain) ** 2 / (2 * sigma ** 2))'], {}), '((x - n * gain) ** 2 / (2 * sigma ** 2))\n', (2645, 2685), True, 'import numpy as np\n'), ((2753, 2765), 'scipy.misc.factorial', 'factorial', (['k'], {}), '(k)\n', (2762, 2765), False, 'from scipy.misc import factorial\n'), ((10231, 10274), 'numpy.append', 'np.append', (['hitStartIndexList', 'hitStartIndex'], {}), '(hitStartIndexList, hitStartIndex)\n', (10240, 10274), True, 'import numpy as np\n'), ((10309, 10348), 'numpy.append', 'np.append', (['hitEndIndexList', 'hitEndIndex'], {}), '(hitEndIndexList, hitEndIndex)\n', (10318, 10348), True, 'import numpy as np\n'), ((10383, 10424), 'numpy.append', 'np.append', (['hitPeakAmplitude', 'hitAmplitude'], {}), '(hitPeakAmplitude, hitAmplitude)\n', (10392, 10424), True, 'import numpy as np\n'), 
((10461, 10503), 'numpy.append', 'np.append', (['hitPeakIndexArray', 'hitPeakIndex'], {}), '(hitPeakIndexArray, hitPeakIndex)\n', (10470, 10503), True, 'import numpy as np\n'), ((28398, 28415), 'numpy.round', 'np.round', (['lamd', '(3)'], {}), '(lamd, 3)\n', (28406, 28415), True, 'import numpy as np\n'), ((2616, 2642), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma)'], {}), '(2 * np.pi * sigma)\n', (2623, 2642), True, 'import numpy as np\n'), ((7669, 7686), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (7676, 7686), True, 'import numpy as np\n'), ((8164, 8181), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (8171, 8181), True, 'import numpy as np\n'), ((8954, 8971), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (8961, 8971), True, 'import numpy as np\n'), ((9543, 9560), 'numpy.size', 'np.size', (['hitLogic'], {}), '(hitLogic)\n', (9550, 9560), True, 'import numpy as np\n'), ((13194, 13226), 'numpy.diff', 'np.diff', (['Y[fallIndex:startIndex]'], {}), '(Y[fallIndex:startIndex])\n', (13201, 13226), True, 'import numpy as np\n'), ((25647, 25667), 'numpy.sqrt', 'np.sqrt', (['variance[i]'], {}), '(variance[i])\n', (25654, 25667), True, 'import numpy as np\n'), ((31162, 31180), 'numpy.mean', 'np.mean', (['GainArray'], {}), '(GainArray)\n', (31169, 31180), True, 'import numpy as np\n'), ((4902, 4945), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2.0 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2.0 * sigma ** 2))\n', (4908, 4945), True, 'import numpy as np\n'), ((6344, 6362), 'numpy.arange', 'np.arange', (['(0)', '(1024)'], {}), '(0, 1024)\n', (6353, 6362), True, 'import numpy as np\n'), ((28755, 28769), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (28761, 28769), True, 'import numpy as np\n'), ((29930, 29945), 'numpy.nanstd', 'np.nanstd', (['tofs'], {}), '(tofs)\n', (29939, 29945), True, 'import numpy as np\n'), ((10173, 10191), 'numpy.arange', 'np.arange', (['(0)', '(1024)'], {}), '(0, 1024)\n', (10182, 10191), True, 
'import numpy as np\n'), ((18398, 18598), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'0': [RiseTime], '1': [PulseHeight], '2': [Charge], '3': [PeakTime], '4':\n [rmsnoise], '5': [baseline], '6': [baseline + rmsnoise], '7': [\n ChargePedestle], '8': [StartTime]}"}), "(data={'0': [RiseTime], '1': [PulseHeight], '2': [Charge], '3':\n [PeakTime], '4': [rmsnoise], '5': [baseline], '6': [baseline + rmsnoise\n ], '7': [ChargePedestle], '8': [StartTime]})\n", (18410, 18598), True, 'import pandas as pd\n'), ((26397, 26417), 'numpy.sqrt', 'np.sqrt', (['variance[i]'], {}), '(variance[i])\n', (26404, 26417), True, 'import numpy as np\n'), ((28731, 28749), 'numpy.nanmean', 'np.nanmean', (['values'], {}), '(values)\n', (28741, 28749), True, 'import numpy as np\n'), ((29908, 29924), 'numpy.nanmean', 'np.nanmean', (['tofs'], {}), '(tofs)\n', (29918, 29924), True, 'import numpy as np\n'), ((18712, 18737), 'matplotlib.pyplot.plot', 'plt.plot', (['Time', 'Data', '"""k"""'], {}), "(Time, Data, 'k')\n", (18720, 18737), True, 'import matplotlib.pyplot as plt\n'), ((18768, 18831), 'matplotlib.pyplot.axvline', 'plt.axvline', (['Time[startIndex]'], {'color': '"""r"""', 'ymax': '(1)', 'linewidth': '(0.2)'}), "(Time[startIndex], color='r', ymax=1, linewidth=0.2)\n", (18779, 18831), True, 'import matplotlib.pyplot as plt\n'), ((18864, 18934), 'matplotlib.pyplot.axvline', 'plt.axvline', (['Time[hitAmplitudeIndex]'], {'color': '"""g"""', 'ymax': '(1)', 'linewidth': '(0.2)'}), "(Time[hitAmplitudeIndex], color='g', ymax=1, linewidth=0.2)\n", (18875, 18934), True, 'import matplotlib.pyplot as plt\n'), ((18967, 19028), 'matplotlib.pyplot.axvline', 'plt.axvline', (['Time[EndIndex]'], {'color': '"""b"""', 'ymax': '(1)', 'linewidth': '(0.2)'}), "(Time[EndIndex], color='b', ymax=1, linewidth=0.2)\n", (18978, 19028), True, 'import matplotlib.pyplot as plt\n')] |
from dataclasses import dataclass
from io import BytesIO
from pathlib import Path
import matplotlib.pyplot as plt
import numpy
import numpy.typing as npt
import pandas as pd
import pytest
from PIL import Image
from scipy.cluster import hierarchy
from optmath.HCA import (
HCA,
Chebyshev,
Cluster,
CompleteLinkage,
Euclidean,
HCAStep,
Manhattan,
RecordBase,
SingleLinkage,
Ward,
)
from optmath.HCA.record import autoscale
@dataclass(frozen=True)
class Seed(RecordBase):
    """Immutable two-feature record type used as the fixture row format below."""
    # Fields correspond positionally to the two entries of each row in ``raw``.
    size: float
    quality: float
# Five 2D sample points (size, quality) shared by the small HCA tests below.
raw = [
    [2, 3],
    [3, 8],
    [4, 7],
    [1, 1],
    [0, 0],
]
# Initial singleton clusters built from the sample records.
clusters = Cluster.new(Seed.new(raw))
def compare_ndarrays(first: npt.NDArray, second: npt.NDArray) -> float:
    """Return the fraction of element-wise equal entries in the two arrays."""
    flat_a = first.reshape(-1)
    flat_b = second.reshape(-1)
    matching = numpy.count_nonzero(flat_a == flat_b)
    return matching / len(flat_a)
def dump_dendrogram(z_matrix: npt.NDArray) -> npt.NDArray:
    """Render the linkage matrix as a dendrogram and return its pixel array.

    Fix: the original drew onto whatever the current pyplot figure was and
    never closed it, so each successive call overlaid its dendrogram on top
    of the previous one and leaked the figure.  A fresh figure is now created
    per call and always closed.
    """
    io = BytesIO()
    fig = plt.figure()
    try:
        hierarchy.dendrogram(z_matrix, leaf_rotation=90.0, leaf_font_size=8.0)
        fig.savefig(io, format="png")
    finally:
        plt.close(fig)
    io.seek(0)
    image = Image.open(io)
    raw = numpy.asarray(image)
    return raw
def test_HCA():
    """Smoke test: a full reduction runs to completion without raising."""
    hca = HCA(clusters, CompleteLinkage(Euclidean()))
    hca.reduce()
def test_HCA_iteration():
    """Every element yielded while iterating the algorithm is an HCAStep."""
    hca = HCA(clusters, CompleteLinkage(Euclidean()))
    assert all(isinstance(step, HCAStep) for step in hca)
def test_HCA_result():
    """The final merged cluster renders with the expected repr."""
    hca = HCA(clusters, CompleteLinkage(Euclidean()))
    final = hca.result()
    assert str(final) == "Cluster(ID=8,s=5,h=8.544)"
def test_HCA_dendrogram_complete_euclidean():
    """Complete-linkage/Euclidean dendrogram matches the scipy reference."""
    hca = HCA(clusters, CompleteLinkage(Euclidean()))
    z_ours = hca.result().Z()
    z_reference = hierarchy.linkage(raw, method="complete", metric="euclidean")
    similarity = compare_ndarrays(
        dump_dendrogram(z_ours), dump_dendrogram(z_reference)
    )
    assert similarity > 0.99
def test_HCA_dendrogram_single_euclidean():
    """Single-linkage/Euclidean dendrogram matches the scipy reference."""
    hca = HCA(clusters, SingleLinkage(Euclidean()))
    z_ours = hca.result().Z()
    z_reference = hierarchy.linkage(raw, method="single", metric="euclidean")
    similarity = compare_ndarrays(
        dump_dendrogram(z_ours), dump_dendrogram(z_reference)
    )
    assert similarity > 0.99
def test_HCA_dendrogram_single_manhattan():
    """Single-linkage/Manhattan dendrogram matches scipy's cityblock metric."""
    hca = HCA(clusters, SingleLinkage(Manhattan()))
    z_ours = hca.result().Z()
    z_reference = hierarchy.linkage(raw, method="single", metric="cityblock")
    similarity = compare_ndarrays(
        dump_dendrogram(z_ours), dump_dendrogram(z_reference)
    )
    assert similarity > 0.99
def test_HCA_dendrogram_complete_chebyshev():
    # NOTE(review): despite "complete" in the name, this test exercises
    # SingleLinkage and scipy's method="single" — consider renaming it to
    # test_HCA_dendrogram_single_chebyshev.
    algorithm = HCA(clusters, SingleLinkage(Chebyshev()))
    cluster = algorithm.result()
    z_custom = cluster.Z()
    z_scipy = hierarchy.linkage(raw, method="single", metric="chebyshev")
    custom_version = dump_dendrogram(z_custom)
    scipy_version = dump_dendrogram(z_scipy)
    result = compare_ndarrays(custom_version, scipy_version)
    assert result > 0.99
@pytest.mark.skip(
    reason=(
        "Known issue with Ward distance selector - "
        "mismatch between scipy and this implementation"
    )
)
def test_HCA_dendrogram_ward_euclidean():
    """Ward/Euclidean dendrogram vs. scipy (currently skipped, known mismatch)."""
    hca = HCA(clusters, Ward(Euclidean()))
    z_ours = hca.result().Z()
    z_reference = hierarchy.linkage(raw, method="ward", metric="euclidean")
    similarity = compare_ndarrays(
        dump_dendrogram(z_ours), dump_dendrogram(z_reference)
    )
    assert similarity > 0.99
@dataclass(frozen=True)
class PumpkinSeed(RecordBase):
    """Immutable record of seed shape features — matches the columns of the
    bundled ``test_seeds.csv`` fixture (presumably; verify against the file)."""
    Area: float
    Perimeter: float
    Major_Axis_Length: float
    Minor_Axis_Length: float
    Solidity: float
    Roundness: float
TEST_HCA_DIR = Path(__file__).parent
def test_HCA_complex_pumpkin_data():
    """Single-linkage/Euclidean clustering of the pumpkin data matches the
    scipy reference dendrogram.

    Fix: the original plotted both dendrograms onto the same axes and called
    ``plt.show()``, which blocks automated test runs, and it asserted nothing.
    It now mirrors the other rendered-dendrogram comparison tests.
    """
    raw = pd.read_csv(TEST_HCA_DIR / "data" / "test_seeds.csv").to_numpy()
    raw = autoscale(raw)
    clusters = Cluster.new(PumpkinSeed.new(raw))
    algorithm = HCA(clusters, SingleLinkage(Euclidean()))
    cluster = algorithm.result()
    z_custom = cluster.Z()
    z_scipy = hierarchy.linkage(raw, method="single", metric="euclidean")
    custom_version = dump_dendrogram(z_custom)
    scipy_version = dump_dendrogram(z_scipy)
    result = compare_ndarrays(custom_version, scipy_version)
    assert result > 0.98
def test_HCA_complex_pumpkin_data_complete_euclidean():
    """Complete-linkage/Euclidean clustering of the pumpkin data matches the
    scipy reference dendrogram (rendered-image comparison).

    Fix: the original applied ``autoscale`` twice to the cluster input
    (``raw = autoscale(raw)`` followed by ``PumpkinSeed.new(autoscale(raw))``)
    while feeding the once-scaled ``raw`` to scipy — numerically a no-op on
    standardized data, but redundant and inconsistent; both paths now share
    the same once-scaled array.
    """
    raw = pd.read_csv(TEST_HCA_DIR / "data" / "test_seeds.csv").to_numpy()
    raw = autoscale(raw)
    clusters = Cluster.new(PumpkinSeed.new(raw))
    algorithm = HCA(clusters, CompleteLinkage(Euclidean()))
    cluster = algorithm.result()
    z_custom = cluster.Z()
    z_scipy = hierarchy.linkage(raw, method="complete", metric="euclidean")
    custom_version = dump_dendrogram(z_custom)
    scipy_version = dump_dendrogram(z_scipy)
    result = compare_ndarrays(custom_version, scipy_version)
    assert result > 0.98
| [
"optmath.HCA.Chebyshev",
"optmath.HCA.Manhattan",
"PIL.Image.open",
"scipy.cluster.hierarchy.dendrogram",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"pathlib.Path",
"pytest.mark.skip",
"optmath.HCA.Euclidean",
"dataclasses.dataclass",
"io.BytesIO",
"numpy.asarray",
"optmath.HCA.record.a... | [((493, 515), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (502, 515), False, 'from dataclasses import dataclass\n'), ((3358, 3483), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Known issue with Ward distance selector - mismatch between scipy and this implementation"""'}), "(reason=\n 'Known issue with Ward distance selector - mismatch between scipy and this implementation'\n )\n", (3374, 3483), False, 'import pytest\n'), ((3934, 3956), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3943, 3956), False, 'from dataclasses import dataclass\n'), ((977, 986), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (984, 986), False, 'from io import BytesIO\n'), ((992, 1062), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['z_matrix'], {'leaf_rotation': '(90.0)', 'leaf_font_size': '(8.0)'}), '(z_matrix, leaf_rotation=90.0, leaf_font_size=8.0)\n', (1012, 1062), False, 'from scipy.cluster import hierarchy\n'), ((1068, 1097), 'matplotlib.pyplot.savefig', 'plt.savefig', (['io'], {'format': '"""png"""'}), "(io, format='png')\n", (1079, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1141), 'PIL.Image.open', 'Image.open', (['io'], {}), '(io)\n', (1137, 1141), False, 'from PIL import Image\n'), ((1153, 1173), 'numpy.asarray', 'numpy.asarray', (['image'], {}), '(image)\n', (1166, 1173), False, 'import numpy\n'), ((1805, 1866), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""complete"""', 'metric': '"""euclidean"""'}), "(raw, method='complete', metric='euclidean')\n", (1822, 1866), False, 'from scipy.cluster import hierarchy\n'), ((2238, 2297), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""single"""', 'metric': '"""euclidean"""'}), "(raw, method='single', metric='euclidean')\n", (2255, 2297), False, 'from scipy.cluster import hierarchy\n'), ((2671, 2730), 
'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""single"""', 'metric': '"""cityblock"""'}), "(raw, method='single', metric='cityblock')\n", (2688, 2730), False, 'from scipy.cluster import hierarchy\n'), ((3106, 3165), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""single"""', 'metric': '"""chebyshev"""'}), "(raw, method='single', metric='chebyshev')\n", (3123, 3165), False, 'from scipy.cluster import hierarchy\n'), ((3684, 3741), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""ward"""', 'metric': '"""euclidean"""'}), "(raw, method='ward', metric='euclidean')\n", (3701, 3741), False, 'from scipy.cluster import hierarchy\n'), ((4151, 4165), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4155, 4165), False, 'from pathlib import Path\n'), ((4302, 4316), 'optmath.HCA.record.autoscale', 'autoscale', (['raw'], {}), '(raw)\n', (4311, 4316), False, 'from optmath.HCA.record import autoscale\n'), ((4471, 4530), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""single"""', 'metric': '"""euclidean"""'}), "(raw, method='single', metric='euclidean')\n", (4488, 4530), False, 'from scipy.cluster import hierarchy\n'), ((4536, 4599), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['z'], {'leaf_rotation': '(90.0)', 'leaf_font_size': '(8.0)'}), '(z, leaf_rotation=90.0, leaf_font_size=8.0)\n', (4556, 4599), False, 'from scipy.cluster import hierarchy\n'), ((4626, 4689), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['z'], {'leaf_rotation': '(90.0)', 'leaf_font_size': '(8.0)'}), '(z, leaf_rotation=90.0, leaf_font_size=8.0)\n', (4646, 4689), False, 'from scipy.cluster import hierarchy\n'), ((4695, 4705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4703, 4705), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4868), 'optmath.HCA.record.autoscale', 'autoscale', (['raw'], {}), '(raw)\n', (4863, 4868), 
False, 'from optmath.HCA.record import autoscale\n'), ((5070, 5131), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['raw'], {'method': '"""complete"""', 'metric': '"""euclidean"""'}), "(raw, method='complete', metric='euclidean')\n", (5087, 5131), False, 'from scipy.cluster import hierarchy\n'), ((853, 889), 'numpy.count_nonzero', 'numpy.count_nonzero', (['(first == second)'], {}), '(first == second)\n', (872, 889), False, 'import numpy\n'), ((1258, 1269), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (1267, 1269), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((1374, 1385), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (1383, 1385), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((1533, 1544), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (1542, 1544), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((1712, 1723), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (1721, 1723), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((2147, 2158), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (2156, 2158), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((2578, 2589), 'optmath.HCA.Manhattan', 'Manhattan', ([], {}), '()\n', (2587, 2589), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((3013, 3024), 'optmath.HCA.Chebyshev', 'Chebyshev', ([], {}), '()\n', (3022, 3024), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, 
Manhattan, RecordBase, SingleLinkage, Ward\n'), ((3591, 3602), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (3600, 3602), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((4226, 4279), 'pandas.read_csv', 'pd.read_csv', (["(TEST_HCA_DIR / 'data' / 'test_seeds.csv')"], {}), "(TEST_HCA_DIR / 'data' / 'test_seeds.csv')\n", (4237, 4279), True, 'import pandas as pd\n'), ((4412, 4423), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (4421, 4423), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n'), ((4778, 4831), 'pandas.read_csv', 'pd.read_csv', (["(TEST_HCA_DIR / 'data' / 'test_seeds.csv')"], {}), "(TEST_HCA_DIR / 'data' / 'test_seeds.csv')\n", (4789, 4831), True, 'import pandas as pd\n'), ((4913, 4927), 'optmath.HCA.record.autoscale', 'autoscale', (['raw'], {}), '(raw)\n', (4922, 4927), False, 'from optmath.HCA.record import autoscale\n'), ((4977, 4988), 'optmath.HCA.Euclidean', 'Euclidean', ([], {}), '()\n', (4986, 4988), False, 'from optmath.HCA import HCA, Chebyshev, Cluster, CompleteLinkage, Euclidean, HCAStep, Manhattan, RecordBase, SingleLinkage, Ward\n')] |
import csv
import numpy as np
import cv2
def resize_and_crop(image, img_size):
    """Rescale ``image`` so it covers ``img_size`` in both dimensions, then
    centrally crop it to exactly that size. Assumes a 3-channel image."""
    source_hw = np.array(image.shape[:2], dtype=float)
    target_hw = np.array(img_size, dtype=float)
    # Uniform scale factor large enough that both dimensions cover the target.
    factor = np.amax(target_hw / source_hw)
    scaled_hw = np.round(source_hw * factor).astype(int)
    image = cv2.resize(image, (scaled_hw[1], scaled_hw[0]))
    # Symmetric margin to trim on each side for the central crop.
    margin = np.round((source_hw * factor - target_hw) / 2.).astype(int)
    top, left = margin[0], margin[1]
    height, width = int(target_hw[0]), int(target_hw[1])
    return image[top:top + height, left:left + width, :]
def read_timestamps(text_file):
    """
    Parse a space-separated text file of image timestamps.

    Each row is expected to hold at least: image name, date, hour, minute.
    Returns a dict of parallel lists keyed by 'name', 'date', 'hour',
    'minute' and 'time' (the hour expressed as a fractional value).
    """
    timestamps = {'name': [], 'date': [], 'hour': [],
                  'minute': [], 'time': []}
    with open(text_file, 'r') as handle:
        for row in csv.reader(handle, delimiter=' '):
            hour, minute = int(row[2]), int(row[3])
            timestamps['name'].append(row[0])
            timestamps['date'].append(row[1])
            timestamps['hour'].append(hour)
            timestamps['minute'].append(minute)
            timestamps['time'].append(hour + minute / 60.)
    return timestamps
def ascii_to_string(s):
    """Convert an iterable of ASCII code points into the corresponding string."""
    return "".join(map(chr, s))
"numpy.array",
"csv.reader",
"cv2.resize",
"numpy.amax",
"numpy.round"
] | [((240, 278), 'numpy.array', 'np.array', (['image.shape[:2]'], {'dtype': 'float'}), '(image.shape[:2], dtype=float)\n', (248, 278), True, 'import numpy as np\n'), ((297, 328), 'numpy.array', 'np.array', (['img_size'], {'dtype': 'float'}), '(img_size, dtype=float)\n', (305, 328), True, 'import numpy as np\n'), ((354, 388), 'numpy.amax', 'np.amax', (['(target_size / source_size)'], {}), '(target_size / source_size)\n', (361, 388), True, 'import numpy as np\n'), ((460, 509), 'cv2.resize', 'cv2.resize', (['image', '(inter_size[1], inter_size[0])'], {}), '(image, (inter_size[1], inter_size[0]))\n', (470, 509), False, 'import cv2\n'), ((1089, 1123), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """'}), "(csvfile, delimiter=' ')\n", (1099, 1123), False, 'import csv\n'), ((406, 435), 'numpy.round', 'np.round', (['(source_size * scale)'], {}), '(source_size * scale)\n', (414, 435), True, 'import numpy as np\n'), ((540, 591), 'numpy.round', 'np.round', (['((source_size * scale - target_size) / 2.0)'], {}), '((source_size * scale - target_size) / 2.0)\n', (548, 591), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
#import matplotlib.image as img
#import PIL.Image as Image
from PIL import Image
import math
import cmath
import time
import csv
from numpy import binary_repr
from fractions import gcd
class DCT(object):
    """
    Forward and inverse 2D Discrete Cosine Transform (DCT-II) for square
    images, computed via the direct O(N^4) double sum, plus a helper that
    log-compresses DCT magnitudes into a displayable 0-255 range.

    All functionality is exposed as classmethods; instances carry no state.
    """

    def __init__(self):
        # Fixed: the original signature was ``def __init__():`` (no ``self``),
        # so instantiating ``DCT()`` raised a TypeError.
        pass

    @classmethod
    def __computeSinglePoint2DCT(cls, imge, u, v, N):
        """
        Compute a single coefficient (u, v) of the 2D DCT-II.

        Parameters
        ----------
        imge : ndarray
            The N x N spatial-domain input image.
        u : int
            Frequency index along the x-dimension.
        v : int
            Frequency index along the y-dimension.
        N : int
            Size of the (square) image.

        Returns
        -------
        float
            The DCT coefficient at (u, v).
        """
        result = 0
        # ``range`` (not Python-2-only ``xrange``) keeps this portable;
        # iteration behavior is identical.
        for x in range(N):
            for y in range(N):
                result += imge[x, y] \
                    * math.cos(((2 * x + 1) * u * math.pi) / (2 * N)) \
                    * math.cos(((2 * y + 1) * v * math.pi) / (2 * N))
        # Orthonormal scaling tau(u)*tau(v): 1/N when both indices are zero,
        # sqrt(2)/N when exactly one is zero, 2/N otherwise.
        if (u == 0) and (v == 0):
            result = result / N
        elif (u == 0) or (v == 0):
            result = (math.sqrt(2.0) * result) / N
        else:
            result = (2.0 * result) / N
        return result

    @classmethod
    def __computeSinglePointInverse2DCT(cls, imge, x, y, N):
        """
        Reconstruct a single spatial sample (x, y) from a DCT-coefficient image.

        Parameters
        ----------
        imge : ndarray
            The N x N frequency-domain (DCT coefficient) image.
        x : int
            Spatial index along the x-dimension.
        y : int
            Spatial index along the y-dimension.
        N : int
            Size of the (square) image.

        Returns
        -------
        float
            The reconstructed pixel value at (x, y).
        """
        result = 0
        for u in range(N):
            for v in range(N):
                # Same orthonormal scaling as the forward transform.
                if (u == 0) and (v == 0):
                    tau = 1.0 / N
                elif (u == 0) or (v == 0):
                    tau = math.sqrt(2.0) / N
                else:
                    tau = 2.0 / N
                result += tau * imge[u, v] \
                    * math.cos(((2 * x + 1) * u * math.pi) / (2 * N)) \
                    * math.cos(((2 * y + 1) * v * math.pi) / (2 * N))
        return result

    @classmethod
    def computeForward2DDCT(cls, imge):
        """
        Compute the full 2D DCT of a spatial-domain image.

        Parameters
        ----------
        imge : ndarray
            The input image to be transformed; assumed square (N x N).

        Returns
        -------
        ndarray
            The N x N transformed (frequency-domain) image.
        """
        N = imge.shape[0]  # assumes a square image — only the first axis is read
        final2DDCT = np.zeros([N, N], dtype=float)
        for u in range(N):
            for v in range(N):
                # One coefficient per (u, v) cell of the output.
                final2DDCT[u, v] = DCT.__computeSinglePoint2DCT(imge, u, v, N)
        return final2DDCT

    @classmethod
    def computeInverse2DDCT(cls, imge):
        """
        Compute the full inverse 2D DCT of a frequency-domain image.

        Parameters
        ----------
        imge : ndarray
            The DCT-coefficient image to be inverted; assumed square (N x N).

        Returns
        -------
        ndarray
            The N x N reconstructed spatial-domain image.
        """
        N = imge.shape[0]
        finalInverse2DDCT = np.zeros([N, N], dtype=float)
        for x in range(N):
            for y in range(N):
                # One reconstructed pixel per (x, y) cell of the output.
                finalInverse2DDCT[x, y] = DCT.__computeSinglePointInverse2DCT(imge, x, y, N)
        return finalInverse2DDCT

    @classmethod
    def normalize2DDCTByLog(cls, dctImge):
        """
        Log-compress DCT magnitudes so they fall in a displayable 0-255 range.

        Parameters
        ----------
        dctImge : ndarray
            The DCT-transformed input image.

        Returns
        -------
        ndarray
            The normalized version of the transformed image.
        """
        # Work on magnitudes; coefficients may be negative.
        dctImge = np.absolute(dctImge)
        # Rescale so the maximum magnitude maps to 255 before the
        # log10(1 + x) compression, then stretch back toward 0-255.
        dctNormImge = (255 / math.log10(255)) * np.log10(1 + (255 / (np.max(dctImge)) * dctImge))
        return dctNormImge
| [
"numpy.absolute",
"math.sqrt",
"numpy.max",
"math.cos",
"numpy.zeros",
"math.log10"
] | [((3022, 3051), 'numpy.zeros', 'np.zeros', (['[N, N]'], {'dtype': 'float'}), '([N, N], dtype=float)\n', (3030, 3051), True, 'import numpy as np\n'), ((3772, 3801), 'numpy.zeros', 'np.zeros', (['[N, N]'], {'dtype': 'float'}), '([N, N], dtype=float)\n', (3780, 3801), True, 'import numpy as np\n'), ((4628, 4648), 'numpy.absolute', 'np.absolute', (['dctImge'], {}), '(dctImge)\n', (4639, 4648), True, 'import numpy as np\n'), ((4677, 4692), 'math.log10', 'math.log10', (['(255)'], {}), '(255)\n', (4687, 4692), False, 'import math\n'), ((1245, 1290), 'math.cos', 'math.cos', (['((2 * y + 1) * v * math.pi / (2 * N))'], {}), '((2 * y + 1) * v * math.pi / (2 * N))\n', (1253, 1290), False, 'import math\n'), ((2510, 2555), 'math.cos', 'math.cos', (['((2 * y + 1) * v * math.pi / (2 * N))'], {}), '((2 * y + 1) * v * math.pi / (2 * N))\n', (2518, 2555), False, 'import math\n'), ((1205, 1250), 'math.cos', 'math.cos', (['((2 * x + 1) * u * math.pi / (2 * N))'], {}), '((2 * x + 1) * u * math.pi / (2 * N))\n', (1213, 1250), False, 'import math\n'), ((1438, 1452), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (1447, 1452), False, 'import math\n'), ((2470, 2515), 'math.cos', 'math.cos', (['((2 * x + 1) * u * math.pi / (2 * N))'], {}), '((2 * x + 1) * u * math.pi / (2 * N))\n', (2478, 2515), False, 'import math\n'), ((2342, 2356), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (2351, 2356), False, 'import math\n'), ((4715, 4730), 'numpy.max', 'np.max', (['dctImge'], {}), '(dctImge)\n', (4721, 4730), True, 'import numpy as np\n')] |
import warnings
import os.path as osp
import tensorflow as tf
import numpy as np
import time
from tflearn import is_training
from in_out import create_dir
from general_utils import iterate_in_chunks
from latent_3d_points.neural_net import Neural_Net, MODEL_SAVER_ID
try:
from latent_3d_points.structural_losses.tf_nndistance import nn_distance
from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost
except:
print('External Losses (Chamfer-EMD) cannot be loaded. Please install them first.')
exit()
class AutoEncoder(Neural_Net):
    '''
    An Auto-Encoder for point-clouds.

    The 256-d bottleneck code is split into four 64-d sub-codes.  A
    weight-shared decoder reconstructs a point cloud from each zero-padded
    sub-code (branches 1-4) and from the full code (branch 5); branch 5's
    output is used as the final reconstruction.
    '''
    def __init__(self, name, configuration, graph=None):
        # `configuration` supplies the encoder/decoder builder callables and
        # all hyper-parameters; `c` is a short alias used throughout.
        c = configuration
        self.configuration = c
        self.name = name
        Neural_Net.__init__(self, name, graph)
        self.n_input = c.n_input
        self.n_output = c.n_output
        self.batch_size=c.batch_size
        in_shape = [c.batch_size] + self.n_input
        # NOTE(review): out_shape is computed but never used below.
        out_shape = [c.batch_size] + self.n_output
        with tf.variable_scope(name):
            self.x = tf.placeholder(tf.float32, in_shape)
            # Auto-encoder: the network is trained to reproduce its own input.
            self.gt = self.x
            self.z = c.encoder(self.x, **c.encoder_args)
            # The branching below hard-codes a 256-d code = 4 sub-codes x 64.
            assert self.z.get_shape()[1]==256
            zerovector = tf.constant(0.0, dtype=tf.float32, shape=[self.z.get_shape()[0], 64])
            # Branch 1: first 64 dims of the code, remaining dims zeroed.
            # Creates the shared decoder variables (reuse=False).
            with tf.variable_scope('sharedDecoder') as scope:
                print(scope)
                subcode1 = tf.concat( [self.z[:,0:64], zerovector, zerovector, zerovector] , axis=1 )
                layer1 = c.decoder(subcode1, nameprefix='branch_5decoder', scope=scope, reuse=False, **c.decoder_args)
            # Branches 2-4: the other three 64-d sub-codes, each zero-padded,
            # reusing the decoder weights created by branch 1.
            with tf.variable_scope('sharedDecoder', reuse=True) as scope:
                print(scope)
                subcode2 = tf.concat( [zerovector, self.z[:,64:128], zerovector, zerovector] , axis=1 )
                layer2 = c.decoder(subcode2, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
            with tf.variable_scope('sharedDecoder', reuse=True) as scope:
                print(scope)
                subcode3 = tf.concat( [zerovector, zerovector, self.z[:,128:192], zerovector] , axis=1 )
                layer3 = c.decoder( subcode3, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
            with tf.variable_scope('sharedDecoder', reuse=True) as scope:
                print(scope)
                subcode4 = tf.concat( [zerovector, zerovector, zerovector, self.z[:,192:256]] , axis=1 )
                layer4 = c.decoder(subcode4, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
            # Branch 5: the complete 256-d code through the same shared decoder.
            with tf.variable_scope('sharedDecoder', reuse=True) as scope:
                print(scope)
                layer5 = c.decoder(self.z, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
            # Reshape each decoder output into (batch, n_points, point_dim).
            self.x_b1 = tf.reshape(layer1, [-1, self.n_output[0], self.n_output[1]])
            self.x_b2 = tf.reshape(layer2, [-1, self.n_output[0], self.n_output[1]])
            self.x_b3 = tf.reshape(layer3, [-1, self.n_output[0], self.n_output[1]])
            self.x_b4 = tf.reshape(layer4, [-1, self.n_output[0], self.n_output[1]])
            self.x_b5 = tf.reshape(layer5, [-1, self.n_output[0], self.n_output[1]])
            # All five reconstructions concatenated along the coordinate axis.
            self.x_all = tf.concat([self.x_b1,self.x_b2,self.x_b3,self.x_b4,self.x_b5], 2)
            # The full-code branch is the reconstruction exposed to callers.
            self.x_reconstr = self.x_b5
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=c.saver_max_to_keep)
            self._create_loss()
            self._setup_optimizer()
            # GPU configuration
            if hasattr(c, 'allow_gpu_growth'):
                growth = c.allow_gpu_growth
            else:
                growth = True
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = growth
            # Summaries
            self.merged_summaries = tf.summary.merge_all()
            self.train_writer = tf.summary.FileWriter(osp.join(configuration.train_dir, 'summaries'), self.graph)
            # Initializing the tensor flow variables
            self.init = tf.global_variables_initializer()
            # Launch the session
            self.sess = tf.Session(config=config)
            self.sess.run(self.init)
def encode_layers(self, scopename, input_pcs, reuse=False):
    """Run the configured encoder on `input_pcs` inside variable scope `scopename`.

    Set reuse=True to share variables with a previously built encoder.
    """
    cfg = self.configuration
    with tf.variable_scope(scopename, reuse=reuse):
        encoded = cfg.encoder(input_pcs, **cfg.encoder_args)
    return encoded
def decode_layers(self, scopename, input_code, reuse=False):
    """Decode a latent code into a point cloud using the shared decoder weights.

    Returns a tensor of shape (batch, n_output[0], n_output[1]).
    """
    cfg = self.configuration
    with tf.variable_scope(scopename, reuse=reuse):
        with tf.variable_scope('sharedDecoder', reuse=reuse) as scope:
            print(scope)
            decoded = cfg.decoder(input_code, nameprefix='branch_5decoder', scope=scope, reuse=reuse, **cfg.decoder_args)
            reconstruction = tf.reshape(decoded, [-1, self.n_output[0], self.n_output[1]])
    return reconstruction
def _create_loss(self):
    """Build the reconstruction loss over the five decoder branches.

    The four partial-code branches contribute with weight 0.1; the
    full-code branch (x_b5) contributes fully and also defines
    `self.match_errors`.  Regularization losses from the graph collection
    are added at the end.  The duplicated per-branch blocks of the
    original are folded into loops.
    """
    c = self.configuration
    self.loss = 0
    branches = (self.x_b1, self.x_b2, self.x_b3, self.x_b4, self.x_b5)
    if c.loss == 'chamfer':
        # Forward Chamfer term at full weight; backward term down-weighted
        # (0.1) for the partial branches, full weight for branch 5.
        for branch, weight in zip(branches, (0.1, 0.1, 0.1, 0.1, 1.0)):
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(branch, self.gt)
            self.loss += tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1) * weight
        # Per-point errors of the last (full-code) branch.
        self.match_errors = cost_p1_p2 + cost_p2_p1
    elif c.loss == 'emd':
        branch_losses = []
        for branch in branches:
            match = approx_match(branch, self.gt)
            branch_losses.append(tf.reduce_mean(match_cost(branch, self.gt, match)))
        # Keep the individual loss attributes callers may inspect.
        self.loss_1, self.loss_2, self.loss_3, self.loss_4, self.loss_5 = branch_losses
        # `match` is still the branch-5 matching from the last iteration.
        self.match_errors = match_cost(self.x_b5, self.gt, match) / self.n_input[0]
        self.loss = self.loss_1 * 0.1 + self.loss_2 * 0.1 + self.loss_3 * 0.1 + self.loss_4 * 0.1 + self.loss_5
    else:
        print("error! you must choose one!")
    # Add any regularization losses collected during graph construction.
    reg_losses = self.graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if c.exists_and_is_not_none('w_reg_alpha'):
        w_reg_alpha = c.w_reg_alpha
    else:
        w_reg_alpha = 1.0
    print('reg_losses:')
    print(reg_losses)
    print('w_reg_alpha = ', w_reg_alpha)
    for rl in reg_losses:
        self.loss += (w_reg_alpha * rl)
def _setup_optimizer(self):
    """Create the Adam training op, optionally with staircase LR decay.

    Decay is enabled only when the configuration defines both
    `exponential_decay` and `decay_steps`.
    """
    cfg = self.configuration
    self.lr = cfg.learning_rate
    use_decay = hasattr(cfg, 'exponential_decay') and hasattr(cfg, 'decay_steps')
    if use_decay:
        decayed = tf.train.exponential_decay(cfg.learning_rate, self.epoch, cfg.decay_steps, decay_rate=0.5, staircase=True, name="learning_rate_decay")
        # Never let the learning rate decay below a small floor.
        self.lr = tf.maximum(decayed, 1e-5)
        tf.summary.scalar('learning_rate', self.lr)
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
    self.train_step = self.optimizer.minimize(self.loss)
def train(self, train_data, configuration, log_file=None ):
    """Train for `configuration.training_epochs` epochs.

    Returns a list of (epoch, loss, duration) tuples; optionally logs each
    reported epoch to `log_file` and periodically saves checkpoints and
    TensorBoard summaries.
    """
    c = configuration
    stats = []
    if c.saver_step is not None:
        create_dir(c.train_dir)
    # Resume from the epoch counter stored in the graph.
    epoch = int(self.sess.run(self.epoch))
    while epoch < c.training_epochs:
        loss, duration = self._single_epoch_train(train_data, c)
        epoch = int(self.sess.run(self.increment_epoch))
        stats.append((epoch, loss, duration))
        if epoch % c.loss_display_step == 0:
            print("Epoch:", '%04d' % (epoch), 'training time (minutes)=', "{:.4f}".format(duration / 60.0), "loss=", "{:.9f}".format(loss))
            if log_file is not None:
                log_file.write('%04d\t%.9f\t%.4f\n' % (epoch, loss, duration / 60.0))
        # Save the models checkpoint periodically.
        if c.saver_step is not None and (epoch % c.saver_step == 0 or epoch - 1 == 0):
            checkpoint_path = osp.join(c.train_dir, MODEL_SAVER_ID)
            self.saver.save(self.sess, checkpoint_path, global_step=self.epoch)
        # Write TensorBoard summaries on the configured schedule.
        if c.exists_and_is_not_none('summary_step') and (epoch % c.summary_step == 0 or epoch - 1 == 0):
            summary = self.sess.run(self.merged_summaries)
            self.train_writer.add_summary(summary, epoch)
    return stats
def _single_epoch_train(self, train_data, configuration):
    """Run one full pass over `train_data`.

    Returns (mean batch loss, elapsed seconds).  For the EMD loss the
    mean is further divided by the number of points per cloud, mirroring
    the normalization used in `_create_loss`.
    """
    batch_size = configuration.batch_size
    n_batches = int(train_data.num_examples / batch_size)
    t_start = time.time()
    total_loss = 0.
    # Iterate over all full batches of the epoch.
    for _ in range(n_batches):
        batch, _, _ = train_data.next_batch(batch_size)
        _, batch_loss = self.partial_fit(batch)
        total_loss += batch_loss
    mean_loss = total_loss / n_batches
    elapsed = time.time() - t_start
    if configuration.loss == 'emd':
        mean_loss /= len(train_data.point_clouds[0])
    return mean_loss, elapsed
def partial_fit(self, X):
    '''Trains the model with one mini-batch of input data.

    Returns:
    The reconstructed (output) point-clouds.
    The loss of the mini-batch.
    '''
    # (Original docstring listed the return values in the wrong order.)
    is_training(True, session=self.sess)
    try:
        _, loss, recon = self.sess.run((self.train_step, self.loss, self.x_reconstr), feed_dict={self.x: X})
    finally:
        # Always leave the graph in inference mode, even if the step failed.
        # (The original also called is_training(False) redundantly inside the
        # try block and had a no-op `except Exception: raise`.)
        is_training(False, session=self.sess)
    return recon, loss
def reconstruct(self, X, GT=None, compute_loss=True):
    '''Use AE to reconstruct given data.

    GT will be used to measure the loss (e.g., if X is a noisy version of
    the GT).  When compute_loss is False a no-op is fetched instead of
    the loss tensor.
    '''
    loss_op = self.loss if compute_loss else self.no_op
    feed = {self.x: X}
    if GT is not None:
        feed[self.gt] = GT
    return self.sess.run((self.x_reconstr, loss_op), feed_dict=feed)
def transform(self, X):
    '''Project the input point clouds into the latent bottleneck space.'''
    feed = {self.x: X}
    return self.sess.run(self.z, feed_dict=feed)
def decode(self, z):
    '''Decode latent code(s) z back into reconstructed point clouds.

    A single 1-D code is promoted to a batch of one.
    '''
    codes = np.expand_dims(z, 0) if np.ndim(z) == 1 else z
    return self.sess.run(self.x_reconstr, {self.z: codes})
def get_latent_codes(self, pclouds):
    '''Encode a set of point clouds, batching through the network.

    Args:
    pclouds (N, K, 3) numpy array of N point clouds with K points each.

    The final partial batch is padded by tiling the leftover clouds and
    discarding the padded results afterwards.
    '''
    n = len(pclouds)
    leftover = n % self.batch_size
    whole_part = np.arange(n - leftover)
    codes = []
    for chunk in iterate_in_chunks(whole_part, self.batch_size):
        codes.append(self.transform(pclouds[chunk]))
    if leftover > 0:
        # Tile the leftover clouds up to a full batch, keep only the real ones.
        padded = np.tile(pclouds[n - leftover:n], (self.batch_size, 1, 1))
        codes.append(self.transform(padded[0:self.batch_size])[0:leftover])
    return np.vstack(codes)
def get_point_clouds(self, latentcodes):
    '''Decode a set of latent codes into point clouds, batching through the
    network; the final partial batch is padded by tiling and trimmed.'''
    n = len(latentcodes)
    leftover = n % self.batch_size
    whole_part = np.arange(n - leftover)
    clouds = []
    for chunk in iterate_in_chunks(whole_part, self.batch_size):
        clouds.append(self.decode(latentcodes[chunk]))
    if leftover > 0:
        # Tile the leftover codes to fill one batch, then drop the padding.
        padded = np.tile(latentcodes[n - leftover:n], (self.batch_size, 1))
        clouds.append(self.decode(padded[0:self.batch_size])[0:leftover])
    return np.vstack(clouds)
| [
"latent_3d_points.structural_losses.tf_nndistance.nn_distance",
"tensorflow.reduce_mean",
"latent_3d_points.structural_losses.tf_approxmatch.approx_match",
"in_out.create_dir",
"latent_3d_points.neural_net.Neural_Net.__init__",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.ndim",
"tensorflo... | [((803, 841), 'latent_3d_points.neural_net.Neural_Net.__init__', 'Neural_Net.__init__', (['self', 'name', 'graph'], {}), '(self, name, graph)\n', (822, 841), False, 'from latent_3d_points.neural_net import Neural_Net, MODEL_SAVER_ID\n'), ((7874, 7919), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (7896, 7919), True, 'import tensorflow as tf\n'), ((9571, 9582), 'time.time', 'time.time', ([], {}), '()\n', (9580, 9582), False, 'import time\n'), ((10250, 10286), 'tflearn.is_training', 'is_training', (['(True)'], {'session': 'self.sess'}), '(True, session=self.sess)\n', (10261, 10286), False, 'from tflearn import is_training\n'), ((11843, 11882), 'general_utils.iterate_in_chunks', 'iterate_in_chunks', (['idx', 'self.batch_size'], {}), '(idx, self.batch_size)\n', (11860, 11882), False, 'from general_utils import iterate_in_chunks\n'), ((12297, 12320), 'numpy.vstack', 'np.vstack', (['latent_codes'], {}), '(latent_codes)\n', (12306, 12320), True, 'import numpy as np\n'), ((12520, 12559), 'general_utils.iterate_in_chunks', 'iterate_in_chunks', (['idx', 'self.batch_size'], {}), '(idx, self.batch_size)\n', (12537, 12559), False, 'from general_utils import iterate_in_chunks\n'), ((12981, 13003), 'numpy.vstack', 'np.vstack', (['pointclouds'], {}), '(pointclouds)\n', (12990, 13003), True, 'import numpy as np\n'), ((1074, 1097), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1091, 1097), True, 'import tensorflow as tf\n'), ((1120, 1156), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'in_shape'], {}), '(tf.float32, in_shape)\n', (1134, 1156), True, 'import tensorflow as tf\n'), ((2935, 2995), 'tensorflow.reshape', 'tf.reshape', (['layer1', '[-1, self.n_output[0], self.n_output[1]]'], {}), '(layer1, [-1, self.n_output[0], self.n_output[1]])\n', (2945, 2995), True, 'import tensorflow as tf\n'), ((3020, 3080), 'tensorflow.reshape', 
'tf.reshape', (['layer2', '[-1, self.n_output[0], self.n_output[1]]'], {}), '(layer2, [-1, self.n_output[0], self.n_output[1]])\n', (3030, 3080), True, 'import tensorflow as tf\n'), ((3105, 3165), 'tensorflow.reshape', 'tf.reshape', (['layer3', '[-1, self.n_output[0], self.n_output[1]]'], {}), '(layer3, [-1, self.n_output[0], self.n_output[1]])\n', (3115, 3165), True, 'import tensorflow as tf\n'), ((3190, 3250), 'tensorflow.reshape', 'tf.reshape', (['layer4', '[-1, self.n_output[0], self.n_output[1]]'], {}), '(layer4, [-1, self.n_output[0], self.n_output[1]])\n', (3200, 3250), True, 'import tensorflow as tf\n'), ((3275, 3335), 'tensorflow.reshape', 'tf.reshape', (['layer5', '[-1, self.n_output[0], self.n_output[1]]'], {}), '(layer5, [-1, self.n_output[0], self.n_output[1]])\n', (3285, 3335), True, 'import tensorflow as tf\n'), ((3375, 3444), 'tensorflow.concat', 'tf.concat', (['[self.x_b1, self.x_b2, self.x_b3, self.x_b4, self.x_b5]', '(2)'], {}), '([self.x_b1, self.x_b2, self.x_b3, self.x_b4, self.x_b5], 2)\n', (3384, 3444), True, 'import tensorflow as tf\n'), ((3843, 3859), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3857, 3859), True, 'import tensorflow as tf\n'), ((3974, 3996), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3994, 3996), True, 'import tensorflow as tf\n'), ((4189, 4222), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4220, 4222), True, 'import tensorflow as tf\n'), ((4281, 4306), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4291, 4306), True, 'import tensorflow as tf\n'), ((4455, 4496), 'tensorflow.variable_scope', 'tf.variable_scope', (['scopename'], {'reuse': 'reuse'}), '(scopename, reuse=reuse)\n', (4472, 4496), True, 'import tensorflow as tf\n'), ((4670, 4711), 'tensorflow.variable_scope', 'tf.variable_scope', (['scopename'], {'reuse': 'reuse'}), '(scopename, reuse=reuse)\n', (4687, 4711), True, 'import 
tensorflow as tf\n'), ((5224, 5255), 'latent_3d_points.structural_losses.tf_nndistance.nn_distance', 'nn_distance', (['self.x_b1', 'self.gt'], {}), '(self.x_b1, self.gt)\n', (5235, 5255), False, 'from latent_3d_points.structural_losses.tf_nndistance import nn_distance\n'), ((5386, 5417), 'latent_3d_points.structural_losses.tf_nndistance.nn_distance', 'nn_distance', (['self.x_b2', 'self.gt'], {}), '(self.x_b2, self.gt)\n', (5397, 5417), False, 'from latent_3d_points.structural_losses.tf_nndistance import nn_distance\n'), ((5548, 5579), 'latent_3d_points.structural_losses.tf_nndistance.nn_distance', 'nn_distance', (['self.x_b3', 'self.gt'], {}), '(self.x_b3, self.gt)\n', (5559, 5579), False, 'from latent_3d_points.structural_losses.tf_nndistance import nn_distance\n'), ((5710, 5741), 'latent_3d_points.structural_losses.tf_nndistance.nn_distance', 'nn_distance', (['self.x_b4', 'self.gt'], {}), '(self.x_b4, self.gt)\n', (5721, 5741), False, 'from latent_3d_points.structural_losses.tf_nndistance import nn_distance\n'), ((5872, 5903), 'latent_3d_points.structural_losses.tf_nndistance.nn_distance', 'nn_distance', (['self.x_b5', 'self.gt'], {}), '(self.x_b5, self.gt)\n', (5883, 5903), False, 'from latent_3d_points.structural_losses.tf_nndistance import nn_distance\n'), ((7613, 7747), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['c.learning_rate', 'self.epoch', 'c.decay_steps'], {'decay_rate': '(0.5)', 'staircase': '(True)', 'name': '"""learning_rate_decay"""'}), "(c.learning_rate, self.epoch, c.decay_steps,\n decay_rate=0.5, staircase=True, name='learning_rate_decay')\n", (7639, 7747), True, 'import tensorflow as tf\n'), ((7766, 7792), 'tensorflow.maximum', 'tf.maximum', (['self.lr', '(1e-05)'], {}), '(self.lr, 1e-05)\n', (7776, 7792), True, 'import tensorflow as tf\n'), ((7804, 7847), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.lr'], {}), "('learning_rate', self.lr)\n", (7821, 7847), True, 'import tensorflow as 
tf\n'), ((8142, 8165), 'in_out.create_dir', 'create_dir', (['c.train_dir'], {}), '(c.train_dir)\n', (8152, 8165), False, 'from in_out import create_dir\n'), ((9846, 9857), 'time.time', 'time.time', ([], {}), '()\n', (9855, 9857), False, 'import time\n'), ((10425, 10462), 'tflearn.is_training', 'is_training', (['(False)'], {'session': 'self.sess'}), '(False, session=self.sess)\n', (10436, 10462), False, 'from tflearn import is_training\n'), ((10536, 10573), 'tflearn.is_training', 'is_training', (['(False)'], {'session': 'self.sess'}), '(False, session=self.sess)\n', (10547, 10573), False, 'from tflearn import is_training\n'), ((11298, 11308), 'numpy.ndim', 'np.ndim', (['z'], {}), '(z)\n', (11305, 11308), True, 'import numpy as np\n'), ((11349, 11369), 'numpy.expand_dims', 'np.expand_dims', (['z', '(0)'], {}), '(z, 0)\n', (11363, 11369), True, 'import numpy as np\n'), ((12099, 12144), 'numpy.tile', 'np.tile', (['theRestData', '(self.batch_size, 1, 1)'], {}), '(theRestData, (self.batch_size, 1, 1))\n', (12106, 12144), True, 'import numpy as np\n'), ((12788, 12830), 'numpy.tile', 'np.tile', (['theRestData', '(self.batch_size, 1)'], {}), '(theRestData, (self.batch_size, 1))\n', (12795, 12830), True, 'import numpy as np\n'), ((1408, 1442), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sharedDecoder"""'], {}), "('sharedDecoder')\n", (1425, 1442), True, 'import tensorflow as tf\n'), ((1509, 1581), 'tensorflow.concat', 'tf.concat', (['[self.z[:, 0:64], zerovector, zerovector, zerovector]'], {'axis': '(1)'}), '([self.z[:, 0:64], zerovector, zerovector, zerovector], axis=1)\n', (1518, 1581), True, 'import tensorflow as tf\n'), ((1721, 1767), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sharedDecoder"""'], {'reuse': '(True)'}), "('sharedDecoder', reuse=True)\n", (1738, 1767), True, 'import tensorflow as tf\n'), ((1834, 1908), 'tensorflow.concat', 'tf.concat', (['[zerovector, self.z[:, 64:128], zerovector, zerovector]'], {'axis': '(1)'}), '([zerovector, 
self.z[:, 64:128], zerovector, zerovector], axis=1)\n', (1843, 1908), True, 'import tensorflow as tf\n'), ((2049, 2095), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sharedDecoder"""'], {'reuse': '(True)'}), "('sharedDecoder', reuse=True)\n", (2066, 2095), True, 'import tensorflow as tf\n'), ((2162, 2237), 'tensorflow.concat', 'tf.concat', (['[zerovector, zerovector, self.z[:, 128:192], zerovector]'], {'axis': '(1)'}), '([zerovector, zerovector, self.z[:, 128:192], zerovector], axis=1)\n', (2171, 2237), True, 'import tensorflow as tf\n'), ((2378, 2424), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sharedDecoder"""'], {'reuse': '(True)'}), "('sharedDecoder', reuse=True)\n", (2395, 2424), True, 'import tensorflow as tf\n'), ((2491, 2566), 'tensorflow.concat', 'tf.concat', (['[zerovector, zerovector, zerovector, self.z[:, 192:256]]'], {'axis': '(1)'}), '([zerovector, zerovector, zerovector, self.z[:, 192:256]], axis=1)\n', (2500, 2566), True, 'import tensorflow as tf\n'), ((2706, 2752), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sharedDecoder"""'], {'reuse': '(True)'}), "('sharedDecoder', reuse=True)\n", (2723, 2752), True, 'import tensorflow as tf\n'), ((3523, 3544), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3542, 3544), True, 'import tensorflow as tf\n'), ((4051, 4097), 'os.path.join', 'osp.join', (['configuration.train_dir', '"""summaries"""'], {}), "(configuration.train_dir, 'summaries')\n", (4059, 4097), True, 'import os.path as osp\n'), ((4731, 4778), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sharedDecoder"""'], {'reuse': 'reuse'}), "('sharedDecoder', reuse=reuse)\n", (4748, 4778), True, 'import tensorflow as tf\n'), ((4970, 5030), 'tensorflow.reshape', 'tf.reshape', (['layer5', '[-1, self.n_output[0], self.n_output[1]]'], {}), '(layer5, [-1, self.n_output[0], self.n_output[1]])\n', (4980, 5030), True, 'import tensorflow as tf\n'), ((5281, 5307), 'tensorflow.reduce_mean', 
'tf.reduce_mean', (['cost_p1_p2'], {}), '(cost_p1_p2)\n', (5295, 5307), True, 'import tensorflow as tf\n'), ((5443, 5469), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p1_p2'], {}), '(cost_p1_p2)\n', (5457, 5469), True, 'import tensorflow as tf\n'), ((5605, 5631), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p1_p2'], {}), '(cost_p1_p2)\n', (5619, 5631), True, 'import tensorflow as tf\n'), ((5767, 5793), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p1_p2'], {}), '(cost_p1_p2)\n', (5781, 5793), True, 'import tensorflow as tf\n'), ((5929, 5955), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p1_p2'], {}), '(cost_p1_p2)\n', (5943, 5955), True, 'import tensorflow as tf\n'), ((5958, 5984), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p2_p1'], {}), '(cost_p2_p1)\n', (5972, 5984), True, 'import tensorflow as tf\n'), ((6094, 6126), 'latent_3d_points.structural_losses.tf_approxmatch.approx_match', 'approx_match', (['self.x_b1', 'self.gt'], {}), '(self.x_b1, self.gt)\n', (6106, 6126), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6227, 6259), 'latent_3d_points.structural_losses.tf_approxmatch.approx_match', 'approx_match', (['self.x_b2', 'self.gt'], {}), '(self.x_b2, self.gt)\n', (6239, 6259), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6360, 6392), 'latent_3d_points.structural_losses.tf_approxmatch.approx_match', 'approx_match', (['self.x_b3', 'self.gt'], {}), '(self.x_b3, self.gt)\n', (6372, 6392), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6493, 6525), 'latent_3d_points.structural_losses.tf_approxmatch.approx_match', 'approx_match', (['self.x_b4', 'self.gt'], {}), '(self.x_b4, self.gt)\n', (6505, 6525), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6626, 6658), 
'latent_3d_points.structural_losses.tf_approxmatch.approx_match', 'approx_match', (['self.x_b5', 'self.gt'], {}), '(self.x_b5, self.gt)\n', (6638, 6658), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((8942, 8979), 'os.path.join', 'osp.join', (['c.train_dir', 'MODEL_SAVER_ID'], {}), '(c.train_dir, MODEL_SAVER_ID)\n', (8950, 8979), True, 'import os.path as osp\n'), ((5310, 5336), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p2_p1'], {}), '(cost_p2_p1)\n', (5324, 5336), True, 'import tensorflow as tf\n'), ((5472, 5498), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p2_p1'], {}), '(cost_p2_p1)\n', (5486, 5498), True, 'import tensorflow as tf\n'), ((5634, 5660), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p2_p1'], {}), '(cost_p2_p1)\n', (5648, 5660), True, 'import tensorflow as tf\n'), ((5796, 5822), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_p2_p1'], {}), '(cost_p2_p1)\n', (5810, 5822), True, 'import tensorflow as tf\n'), ((6168, 6205), 'latent_3d_points.structural_losses.tf_approxmatch.match_cost', 'match_cost', (['self.x_b1', 'self.gt', 'match'], {}), '(self.x_b1, self.gt, match)\n', (6178, 6205), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6301, 6338), 'latent_3d_points.structural_losses.tf_approxmatch.match_cost', 'match_cost', (['self.x_b2', 'self.gt', 'match'], {}), '(self.x_b2, self.gt, match)\n', (6311, 6338), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6434, 6471), 'latent_3d_points.structural_losses.tf_approxmatch.match_cost', 'match_cost', (['self.x_b3', 'self.gt', 'match'], {}), '(self.x_b3, self.gt, match)\n', (6444, 6471), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6567, 6604), 'latent_3d_points.structural_losses.tf_approxmatch.match_cost', 'match_cost', (['self.x_b4', 'self.gt', 'match'], {}), 
'(self.x_b4, self.gt, match)\n', (6577, 6604), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6700, 6737), 'latent_3d_points.structural_losses.tf_approxmatch.match_cost', 'match_cost', (['self.x_b5', 'self.gt', 'match'], {}), '(self.x_b5, self.gt, match)\n', (6710, 6737), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n'), ((6785, 6822), 'latent_3d_points.structural_losses.tf_approxmatch.match_cost', 'match_cost', (['self.x_b5', 'self.gt', 'match'], {}), '(self.x_b5, self.gt, match)\n', (6795, 6822), False, 'from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost\n')] |
import argparse
import os
import time
import numpy as np
from config import Config
from dataset import DataSet
from logger import get_logger
logger = get_logger()
def decode(dataTest, config):
    """Run the trained network over every batch of `dataTest`.

    Logs per-batch loss/label-error-rate and the decoded vs. original
    transcriptions, then logs the aggregate timing and averages.

    Parameters
    ----------
    dataTest : DataSet
        Source of (mfccs, labels, seq_len, labels_len) batches.
    config : Config
        Provides `load_network` and the `symbols` decoder table.
    """
    logger.info('Batch Dimensions: ' + str(dataTest.get_feature_shape()))
    logger.info('Label Dimensions: ' + str(dataTest.get_label_shape()))
    network = config.load_network(fortraining=False)
    global_step = 0
    metrics = {'test_time_sec': 0, 'avg_loss': 0, 'avg_ler': 0}
    while dataTest.has_more_batches():
        global_step += 1
        t0 = time.time()
        mfccs, labels, seq_len, labels_len = dataTest.get_next_batch()
        output, valid_loss_val, valid_mean_ler_value = network.evaluate(
            mfccs, labels, seq_len, labels_len)
        logger.info('Valid: batch_cost = %.4f' % (valid_loss_val) +
                    ', batch_ler = %.4f' % (valid_mean_ler_value))
        # Only count evaluation time, not logging/decoding overhead below.
        metrics['test_time_sec'] += time.time() - t0
        metrics['avg_loss'] += valid_loss_val
        metrics['avg_ler'] += valid_mean_ler_value
        str_decoded = config.symbols.convert_to_str(np.asarray(output))
        logger.info('Decoded: ' + str_decoded)
        str_labels = config.symbols.convert_to_str(np.asarray(labels[0]))
        logger.info('Original: ' + str_labels)
    logger.info("Finished Decoding!!!")
    # Guard against an empty dataset so the averages don't divide by zero.
    n_batches = max(global_step, 1)
    logger.info('Decoded Time = %.4fs, avg_loss = %.4f, avg_ler = %.4f' % (
        metrics['test_time_sec'], metrics['avg_loss'] / n_batches, metrics['avg_ler'] / n_batches))
if __name__ == '__main__':
    # Parse the configuration-file path from the command line.
    parser = argparse.ArgumentParser(
        description="Decode test data using trained model.")
    parser.add_argument("config", help="Configuration file.")
    args = parser.parse_args()
    config = Config(args.config, True)
    # Decode one utterance at a time, in a single pass, without random shifts.
    config.batch_size = 1
    config.epochs = 1
    config.rand_shift = 0
    dataTest = DataSet(config.test_input, config)
    decode(dataTest, config)
| [
"argparse.ArgumentParser",
"dataset.DataSet",
"logger.get_logger",
"config.Config",
"numpy.asarray",
"time.time"
] | [((153, 165), 'logger.get_logger', 'get_logger', ([], {}), '()\n', (163, 165), False, 'from logger import get_logger\n'), ((1595, 1671), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Decode test data using trained model."""'}), "(description='Decode test data using trained model.')\n", (1618, 1671), False, 'import argparse\n'), ((1787, 1812), 'config.Config', 'Config', (['args.config', '(True)'], {}), '(args.config, True)\n', (1793, 1812), False, 'from config import Config\n'), ((1902, 1936), 'dataset.DataSet', 'DataSet', (['config.test_input', 'config'], {}), '(config.test_input, config)\n', (1909, 1936), False, 'from dataset import DataSet\n'), ((560, 571), 'time.time', 'time.time', ([], {}), '()\n', (569, 571), False, 'import time\n'), ((1144, 1162), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (1154, 1162), True, 'import numpy as np\n'), ((1262, 1283), 'numpy.asarray', 'np.asarray', (['labels[0]'], {}), '(labels[0])\n', (1272, 1283), True, 'import numpy as np\n'), ((977, 988), 'time.time', 'time.time', ([], {}), '()\n', (986, 988), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 13:56:58 2019
@author: leoska
"""
import random
import numpy as np
from tensorflow.python.keras.utils import to_categorical
def create_dataset(filepath):
    """Load the 10-class signal files under `filepath` and build shuffled
    train/val/test splits (60/20/20) with one-hot labels.

    Parameters
    ----------
    filepath : str
        Directory containing files named "1_data.txt" ... "10_data.txt",
        one signal per row.

    Returns
    -------
    tuple
        (train_signals, train_labels, val_signals, val_labels,
         test_signals, test_labels); signals have shape (n, length, 1)
        and labels are one-hot encoded.
    """
    signals = []
    labels = []
    path = filepath + "/{}_data.txt"
    for class_idx in range(10):
        data = np.loadtxt(path.format(class_idx + 1), dtype=np.float64)
        # One row per recorded signal; file "{i+1}_data.txt" holds class i.
        signals.extend(data)
        labels.extend([class_idx] * np.shape(data)[0])
    # Shuffle signals and labels together so the pairs stay aligned.
    paired = list(zip(signals, labels))
    random.shuffle(paired)
    signals, labels = zip(*paired)
    signals = np.asarray(signals, dtype=np.float64)
    labels = np.asarray(labels, dtype=np.int64)
    # Compute the 60/20/20 split boundaries once.
    n = len(signals)
    train_end = int(0.6 * n)
    val_end = int(0.8 * n)
    train_signals = _add_channel_axis(signals[:train_end])
    val_signals = _add_channel_axis(signals[train_end:val_end])
    test_signals = _add_channel_axis(signals[val_end:])
    train_labels = to_categorical(labels[:train_end])
    val_labels = to_categorical(labels[train_end:val_end])
    test_labels = to_categorical(labels[val_end:])
    return train_signals, train_labels, val_signals, val_labels, test_signals, test_labels

def _add_channel_axis(signals):
    """Reshape an (n, length) array to (n, length, 1) for Conv1D-style input."""
    nrows, ncols = signals.shape
    return signals.reshape(nrows, ncols, 1)
if __name__ == "__main__":
    # For test module: build all splits from a local data directory.
    files_path = "data_emg/" #path to your directory with 10 data.txt files
    train_signals, train_labels, val_signals, val_labels, test_signals, test_labels = create_dataset(files_path)
"tensorflow.python.keras.utils.to_categorical",
"numpy.shape",
"numpy.asarray",
"random.shuffle"
] | [((499, 516), 'random.shuffle', 'random.shuffle', (['c'], {}), '(c)\n', (513, 516), False, 'import random\n'), ((551, 584), 'numpy.asarray', 'np.asarray', (['sgn'], {'dtype': 'np.float64'}), '(sgn, dtype=np.float64)\n', (561, 584), True, 'import numpy as np\n'), ((595, 626), 'numpy.asarray', 'np.asarray', (['lbl'], {'dtype': 'np.int64'}), '(lbl, dtype=np.int64)\n', (605, 626), True, 'import numpy as np\n'), ((1229, 1257), 'tensorflow.python.keras.utils.to_categorical', 'to_categorical', (['train_labels'], {}), '(train_labels)\n', (1243, 1257), False, 'from tensorflow.python.keras.utils import to_categorical\n'), ((1275, 1301), 'tensorflow.python.keras.utils.to_categorical', 'to_categorical', (['val_labels'], {}), '(val_labels)\n', (1289, 1301), False, 'from tensorflow.python.keras.utils import to_categorical\n'), ((1320, 1347), 'tensorflow.python.keras.utils.to_categorical', 'to_categorical', (['test_labels'], {}), '(test_labels)\n', (1334, 1347), False, 'from tensorflow.python.keras.utils import to_categorical\n'), ((381, 395), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (389, 395), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from statsmodels.distributions import ECDF, monotone_fn_inverter
from joblib import dump
# Alternative estimators tried during development (kept for reference):
# model = XGBRegressor(random_state=2021)
# Hyperparameters below match values listed in param_grid -- presumably the
# winners of a previous grid search; TODO confirm.
model = XGBRegressor(random_state=2021, n_estimators=900, max_depth=9, learning_rate=0.1, subsample=0.4)
# model = LGBMRegressor(random_state=2021)
# model = LGBMRegressor(random_state=2021, n_estimators=900, max_depth=9, learning_rate=0.1, subsample=0.4)
# When True, wrap the pipeline in GridSearchCV over param_grid (see below).
grid_search = False
# When True, refit on the full dataset and persist model + predictions.
save_model = True
# Search space used only when grid_search is enabled.
param_grid = {
    'n_estimators': [300, 500, 700, 900],
    'learning_rate': [0.05, 0.1, 0.2],
    'max_depth': [5, 7, 9],
    'subsample': [0.4, 0.5, 1.0]}
def load_data(data):
    """Read a CSV dataset and split it into features and target.

    Args:
        data: path or buffer accepted by ``pd.read_csv``; must contain a
            column named ``"y"`` holding the regression target.

    Returns:
        tuple: ``(X, y)`` — the feature DataFrame with ``"y"`` removed,
        and the popped ``"y"`` column as a Series.
    """
    frame = pd.read_csv(data)
    target = frame.pop("y")
    return frame, target
def calculate_rmse(actual, predicted):
    """Return the root-mean-squared error of predictions vs. targets."""
    mse = mean_squared_error(actual, predicted)
    return np.sqrt(mse)
def calculate_percent_accuracy(actual, predicted):
    """Fraction of predictions that land within +/-3.0 of the true value."""
    within_tolerance = abs(actual - predicted) <= 3.0
    return sum(within_tolerance) / len(actual)
def generate_scalers(model, X_train, y_train):
    """Build the ECDF pair used to map raw model outputs onto the
    empirical distribution of the training targets.

    Args:
        model: fitted estimator exposing ``predict``.
        X_train: training features.
        y_train: training targets.

    Returns:
        tuple: ``(prediction_ecdf, actual_ecdf_inv)`` — the ECDF of the
        model's in-sample predictions, and the inverse ECDF of the
        observed targets (with ``bounds_error`` disabled so out-of-range
        probabilities yield NaN instead of raising).
    """
    target_ecdf_inv = monotone_fn_inverter(ECDF(y_train), y_train)
    target_ecdf_inv.bounds_error = False
    in_sample_preds = model.predict(X_train)
    return ECDF(in_sample_preds), target_ecdf_inv
def generate_finalized_model(model, X_train, y_train):
    """Fit *model* on the training split and bundle it with its ECDF
    scalers for use by ``generate_scaled_predictions``.

    Returns:
        dict: keys ``"model"``, ``"ecdf"`` and ``"inverse_ecdf"``.
    """
    model.fit(X_train, y_train)
    ecdf, inverse_ecdf = generate_scalers(model, X_train, y_train)
    return {
        "model": model,
        "ecdf": ecdf,
        "inverse_ecdf": inverse_ecdf,
    }
def generate_scaled_predictions(finalized_model, data):
    """Predict with the bundled model and rescale through the ECDF pair.

    Raw predictions are mapped to quantiles via the prediction ECDF and
    back to the target scale via the inverse target ECDF.  Quantiles
    outside the fitted range come back as NaN and are clamped to the
    smallest observed target value.
    """
    estimator = finalized_model["model"]
    to_quantile = finalized_model["ecdf"]
    from_quantile = finalized_model["inverse_ecdf"]
    scaled = from_quantile(to_quantile(estimator.predict(data)))
    floor_value = from_quantile.y.min()
    return np.nan_to_num(scaled, nan=floor_value)
def print_summary(actual, predictions, identifier):
    """Print RMSE and +/-3-point accuracy for a labelled prediction set."""
    rmse = calculate_rmse(actual, predictions)
    acc = calculate_percent_accuracy(actual, predictions)
    print("model " + identifier + " rmse: %.3f" % rmse)
    print("model " + identifier + " acc: %.3f" % acc)
def save_csv(data: np.ndarray, filename: str) -> None:
    """Write a numeric array to *filename* as comma-separated values.

    Args:
        data: 1-D or 2-D array of values to persist.
        filename: destination path of the CSV file.
    """
    np.savetxt(filename, data, delimiter=",")
    # Bug fix: the original f-string had no placeholder, so the saved
    # path was never shown in the log message.
    print(f"Saved data to {filename}")
# Load the full dataset and hold out 20% for evaluation.
X,y = load_data("./input_data.csv")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Create sklearn Pipelines for numerical features
# NOTE(review): 999999 acts as an out-of-range sentinel for missing values;
# tree models can split it off, but confirm it never collides with real data.
numeric_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='constant', fill_value=999999)),
    ('scaler', StandardScaler())])
# Define numerical features by dtype, in this dataset this is all columns
numeric_features = X_train.select_dtypes(include=['int64', 'float64']).columns
# Create sklearn ColumnTransformer
preprocessor = ColumnTransformer(
    transformers=[('num', numeric_transformer, numeric_features)])
# Create model pipeline
model = Pipeline(steps=[('preprocessor', preprocessor),
                ('regressor', model)])
# Optionally search param_grid; GridSearchCV refits the best pipeline.
if grid_search:
    model = GridSearchCV(model, param_grid, n_jobs=4)
finalized_model = generate_finalized_model(model, X_train, y_train)
if grid_search:
    print(model.best_params_)
# Evaluate ECDF-rescaled predictions on both splits.
predictions_train = generate_scaled_predictions(finalized_model, X_train)
predictions_test = generate_scaled_predictions(finalized_model, X_test)
print_summary(y_train, predictions_train, "train")
print_summary(y_test, predictions_test, "test")
# Refit on ALL data before persisting, so the shipped model sees every row.
if save_model:
    finalized_model = generate_finalized_model(model, X, y)
    predictions_final = generate_scaled_predictions(finalized_model, X)
    print_summary(y, predictions_final, "final")
    # NOTE(review): filename contains the typo "moodel"; downstream
    # consumers may already rely on it, so it is left unchanged here.
    save_csv(predictions_final, "final_moodel_predictions.csv")
    dump(finalized_model, "saved_model.joblib", compress=True)
| [
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"statsmodels.distributions.monotone_fn_inverter",
"joblib.dump",
"sklearn.metrics.mean_squared_error",
"sklearn.preprocessing.StandardScaler",
"xgboost.XGBRegressor",
"sklearn.impute.SimpleImputer"... | [((536, 636), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'random_state': '(2021)', 'n_estimators': '(900)', 'max_depth': '(9)', 'learning_rate': '(0.1)', 'subsample': '(0.4)'}), '(random_state=2021, n_estimators=900, max_depth=9,\n learning_rate=0.1, subsample=0.4)\n', (548, 636), False, 'from xgboost import XGBRegressor\n'), ((2664, 2701), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (2680, 2701), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((3105, 3190), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', ([], {'transformers': "[('num', numeric_transformer, numeric_features)]"}), "(transformers=[('num', numeric_transformer, numeric_features)]\n )\n", (3122, 3190), False, 'from sklearn.compose import ColumnTransformer\n'), ((3224, 3294), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('preprocessor', preprocessor), ('regressor', model)]"}), "(steps=[('preprocessor', preprocessor), ('regressor', model)])\n", (3232, 3294), False, 'from sklearn.pipeline import Pipeline\n'), ((1013, 1030), 'pandas.read_csv', 'pd.read_csv', (['data'], {}), '(data)\n', (1024, 1030), True, 'import pandas as pd\n'), ((1393, 1406), 'statsmodels.distributions.ECDF', 'ECDF', (['y_train'], {}), '(y_train)\n', (1397, 1406), False, 'from statsmodels.distributions import ECDF, monotone_fn_inverter\n'), ((1429, 1471), 'statsmodels.distributions.monotone_fn_inverter', 'monotone_fn_inverter', (['actual_ecdf', 'y_train'], {}), '(actual_ecdf, y_train)\n', (1449, 1471), False, 'from statsmodels.distributions import ECDF, monotone_fn_inverter\n'), ((1535, 1558), 'statsmodels.distributions.ECDF', 'ECDF', (['predictions_train'], {}), '(predictions_train)\n', (1539, 1558), False, 'from statsmodels.distributions import ECDF, monotone_fn_inverter\n'), ((2159, 2200), 'numpy.nan_to_num', 'np.nan_to_num', (['predictions'], {'nan': 
'min_score'}), '(predictions, nan=min_score)\n', (2172, 2200), True, 'import numpy as np\n'), ((2499, 2540), 'numpy.savetxt', 'np.savetxt', (['filename', 'data'], {'delimiter': '""","""'}), "(filename, data, delimiter=',')\n", (2509, 2540), True, 'import numpy as np\n'), ((3348, 3389), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['model', 'param_grid'], {'n_jobs': '(4)'}), '(model, param_grid, n_jobs=4)\n', (3360, 3389), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((4018, 4076), 'joblib.dump', 'dump', (['finalized_model', '"""saved_model.joblib"""'], {'compress': '(True)'}), "(finalized_model, 'saved_model.joblib', compress=True)\n", (4022, 4076), False, 'from joblib import dump\n'), ((1126, 1163), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['actual', 'predicted'], {}), '(actual, predicted)\n', (1144, 1163), False, 'from sklearn.metrics import mean_squared_error\n'), ((2809, 2862), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '(999999)'}), "(strategy='constant', fill_value=999999)\n", (2822, 2862), False, 'from sklearn.impute import SimpleImputer\n'), ((2880, 2896), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2894, 2896), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import numpy as np
from finitefield import GF
# Field order; 1024 = 2**10, so GF(q) is a binary extension field.
q=1024
field= GF(q)
# Index every field element with a dense integer id (iteration order of GF).
elts_map = {}
for (i, v) in enumerate(field):
    elts_map[i] =v
print(elts_map)
# Reverse lookup: field element -> integer id.
rev_elts_map = {v:k for k,v in elts_map.items()}
print(rev_elts_map)
# Addition table over the integer ids: add_table[i][j] is the id of
# elts_map[i] + elts_map[j].
# NOTE(review): np.zeros defaults to float64, so the ids are stored as
# floats; cast with dtype=int if exact integer tables are needed.
add_table = np.zeros((q,q))
for i in range(q):
    for j in range(q):
        add_table[i][j] = rev_elts_map[elts_map[i]+elts_map[j]]
# Multiplication table, same id convention as add_table.
mul_table = np.zeros((q,q))
for i in range(q):
    for j in range(q):
        mul_table[i][j] = rev_elts_map[elts_map[i]*elts_map[j]]
print(add_table)
print(mul_table)
| [
"finitefield.GF",
"numpy.zeros"
] | [((61, 66), 'finitefield.GF', 'GF', (['q'], {}), '(q)\n', (63, 66), False, 'from finitefield import GF\n'), ((229, 245), 'numpy.zeros', 'np.zeros', (['(q, q)'], {}), '((q, q))\n', (237, 245), True, 'import numpy as np\n'), ((363, 379), 'numpy.zeros', 'np.zeros', (['(q, q)'], {}), '((q, q))\n', (371, 379), True, 'import numpy as np\n')] |
"""Set of functions for converting outputs."""
import numpy as np
from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed
def convert_spatiotemporal_outputs(arr, times, roi, astype='array'):
    """Convert spatio-temporal outputs.

    Parameters
    ----------
    arr : array_like
        2d array of shape (n_times, n_roi)
    times : array_like | None
        Array of time index
    roi : array_like | None
        Array of roi names
    astype : {'array', 'dataframe', 'dataarray'}
        Convert the array either to a pandas DataFrame (requires pandas to
        be installed) or to a xarray DataArray (requires xarray to be
        installed)

    Returns
    -------
    arr_c : array_like | DataFrame | DataArray
        Converted spatio-temporal array
    """
    # sanity checks on the input and the requested output type
    assert isinstance(arr, np.ndarray) and (arr.ndim == 2)
    assert astype in ['array', 'dataframe', 'dataarray']
    assert arr.shape == (len(times), len(roi))
    # fall back to a plain NumPy array when neither backend is available
    if not is_pandas_installed() and not is_xarray_installed():
        astype = 'array'
    if astype == 'dataframe':  # pandas
        is_pandas_installed(raise_error=True)
        import pandas as pd
        return pd.DataFrame(arr, index=times, columns=roi)
    if astype == 'dataarray':  # xarray
        is_xarray_installed(raise_error=True)
        from xarray import DataArray
        return DataArray(arr, dims=('times', 'roi'), coords=(times, roi))
    return arr  # numpy
def convert_dfc_outputs(arr, times, roi, sources, targets, astype='2d_array',
                        is_pvalue=False):
    """Convert dynamic functional connectivity outputs.

    This function converts an array of dynamical functional connectivity
    (dFC) of shape (n_times, n_pairs) either into the same shape using a
    pandas DataFrame or into an array of shape
    (n_sources, n_targets, n_times). The number of pairs n_pairs is
    defined as the length of the `sources` / `targets` inputs
    (pairs = np.c_[sources, targets]).

    Parameters
    ----------
    arr : array_like
        Array of connectivity of shape (n_times, n_pairs)
    times : array_like
        Array of time points of shape (n_times,)
    roi : array_like
        Array of region of interest names of shape (n_roi,)
    sources : array_like
        Array of sources indices of shape (n_pairs,)
    targets : array_like
        Array of targets indices of shape (n_pairs,)
    astype : {2d_array, 3d_array, 2d_dataframe, 3d_dataframe, dataarray}
        String describing the output type. Use either :

            * '2d_array', '3d_array' : NumPy arrays respectively of shapes
              (n_pairs, n_times) or (n_sources, n_targets, n_times)
            * '2d_dataframe', '3d_dataframe' : Pandas DataFrame both of
              shapes (n_pairs, n_times); the 2d version uses a single
              column level (roi_source, roi_target) while the 3d version
              is a multi-level index DataFrame. Requires pandas
            * 'dataarray' : a 3d xarray DataArray of shape
              (n_sources, n_targets, n_times). Requires xarray but is the
              recommended output as slicing is much easier
    is_pvalue : bool | False
        Specify if the array contains p-values

    Returns
    -------
    arr_c : array_like | DataFrame | DataArray
        Converted dFC array
    """
    assert isinstance(arr, np.ndarray) and (arr.ndim == 2)
    assert len(sources) == len(targets)
    assert arr.shape == (len(times), len(sources))
    assert astype in ['2d_array', '3d_array', '2d_dataframe', '3d_dataframe',
                      'dataarray']
    # fill value for pairs that are absent: 1 for p-values, 0 otherwise
    empty_fcn = np.zeros if not is_pvalue else np.ones
    # get used roi names for each pair endpoint
    roi = np.asarray(roi)
    s_roi, t_roi = roi[sources], roi[targets]
    n_times = arr.shape[0]
    # NOTE: the original version also computed np.unique(sources/targets,
    # return_index=True) here, but the resulting indices were never used;
    # that dead work has been removed.
    # output conversion (fall back to numpy if neither backend is there)
    force_np = not is_pandas_installed() and not is_xarray_installed()
    astype = '2d_array' if force_np else astype
    if astype == '2d_array':
        return arr
    elif astype == '3d_array':
        out = empty_fcn((len(roi), len(roi), n_times))
        out[sources, targets, :] = arr.T
        return out
    elif astype == '2d_dataframe':
        import pandas as pd
        columns = [(s, t) for s, t in zip(s_roi, t_roi)]
        return pd.DataFrame(arr, index=times, columns=columns)
    elif astype == '3d_dataframe':
        import pandas as pd
        idx = pd.MultiIndex.from_arrays([s_roi, t_roi],
                                      names=['source', 'target'])
        return pd.DataFrame(arr, index=times, columns=idx)
    elif astype == 'dataarray':
        from xarray import DataArray
        out = empty_fcn((len(roi), len(roi), n_times))
        out[sources, targets, :] = arr.T
        da = DataArray(out, dims=('source', 'target', 'times'),
                       coords=(roi, roi, times))
        return da
| [
"numpy.unique",
"frites.io.io_dependencies.is_xarray_installed",
"numpy.asarray",
"pandas.MultiIndex.from_arrays",
"xarray.DataArray",
"pandas.DataFrame",
"frites.io.io_dependencies.is_pandas_installed"
] | [((3941, 3956), 'numpy.asarray', 'np.asarray', (['roi'], {}), '(roi)\n', (3951, 3956), True, 'import numpy as np\n'), ((4045, 4082), 'numpy.unique', 'np.unique', (['sources'], {'return_index': '(True)'}), '(sources, return_index=True)\n', (4054, 4082), True, 'import numpy as np\n'), ((4098, 4135), 'numpy.unique', 'np.unique', (['targets'], {'return_index': '(True)'}), '(targets, return_index=True)\n', (4107, 4135), True, 'import numpy as np\n'), ((1040, 1061), 'frites.io.io_dependencies.is_pandas_installed', 'is_pandas_installed', ([], {}), '()\n', (1059, 1061), False, 'from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed\n'), ((1070, 1091), 'frites.io.io_dependencies.is_xarray_installed', 'is_xarray_installed', ([], {}), '()\n', (1089, 1091), False, 'from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed\n'), ((1273, 1310), 'frites.io.io_dependencies.is_pandas_installed', 'is_pandas_installed', ([], {'raise_error': '(True)'}), '(raise_error=True)\n', (1292, 1310), False, 'from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed\n'), ((1354, 1397), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'index': 'times', 'columns': 'roi'}), '(arr, index=times, columns=roi)\n', (1366, 1397), True, 'import pandas as pd\n'), ((4180, 4201), 'frites.io.io_dependencies.is_pandas_installed', 'is_pandas_installed', ([], {}), '()\n', (4199, 4201), False, 'from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed\n'), ((4210, 4231), 'frites.io.io_dependencies.is_xarray_installed', 'is_xarray_installed', ([], {}), '()\n', (4229, 4231), False, 'from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed\n'), ((1461, 1498), 'frites.io.io_dependencies.is_xarray_installed', 'is_xarray_installed', ([], {'raise_error': '(True)'}), '(raise_error=True)\n', (1480, 1498), False, 'from frites.io.io_dependencies import is_pandas_installed, is_xarray_installed\n'), ((1551, 1609), 
'xarray.DataArray', 'DataArray', (['arr'], {'dims': "('times', 'roi')", 'coords': '(times, roi)'}), "(arr, dims=('times', 'roi'), coords=(times, roi))\n", (1560, 1609), False, 'from xarray import DataArray\n'), ((4610, 4657), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'index': 'times', 'columns': 'columns'}), '(arr, index=times, columns=columns)\n', (4622, 4657), True, 'import pandas as pd\n'), ((4735, 4804), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[s_roi, t_roi]'], {'names': "['source', 'target']"}), "([s_roi, t_roi], names=['source', 'target'])\n", (4760, 4804), True, 'import pandas as pd\n'), ((4860, 4903), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'index': 'times', 'columns': 'idx'}), '(arr, index=times, columns=idx)\n', (4872, 4903), True, 'import pandas as pd\n'), ((5082, 5158), 'xarray.DataArray', 'DataArray', (['out'], {'dims': "('source', 'target', 'times')", 'coords': '(roi, roi, times)'}), "(out, dims=('source', 'target', 'times'), coords=(roi, roi, times))\n", (5091, 5158), False, 'from xarray import DataArray\n')] |
import sys
import warnings
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
from typing import Optional, Tuple, Union
import igraph as ig
import networkx as nx
import numpy as np
from edgeseraser.misc.backend import ig_erase, ig_extract, nx_erase, nx_extract
from edgeseraser.misc.fast_math import NbGammaLnCache
from edgeseraser.misc.matrix import construct_sp_matrices
from edgeseraser.misc.typing import NpArrayEdges, NpArrayEdgesFloat, NpArrayEdgesIds
from edgeseraser.polya_tools.numba_tools import (
NbComputePolyaCacheDict,
NbComputePolyaCacheSzuszik,
)
from edgeseraser.polya_tools.statistics import polya_cdf
from scipy.sparse import csr_matrix
# NOTE(review): FutureWarnings are globally silenced at import time --
# presumably to quiet noisy dependencies; confirm this is intentional.
warnings.simplefilter("ignore", FutureWarning)
# Cache/memoization strategies accepted by the Polya filters below.
OptionsForCache = Literal["lru-nb", "lru-nb-szuszik", "lru-nbf", "lru-py-nb", "nb"]
def scores_generic_graph(
    num_vertices: int,
    edges: NpArrayEdges,
    weights: NpArrayEdgesFloat,
    a: float = 1,
    apt_lvl: int = 10,
    is_directed: bool = False,
    eps: float = 1e-20,
    optimization: OptionsForCache = "lru-nb-szuszik",
) -> NpArrayEdgesFloat:
    """Compute the probability for each edge using the Pólya-based method for
    a generic weighted graph.

    Each edge is scored from both of its endpoints via the Pólya CDF and the
    smaller (more conservative) of the two probabilities is returned.

    Args:
        num_vertices: int
            number of vertices
        edges: np.array
            (n_edges, 2) array of (source, target) vertex ids
        weights: np.array
            edge weights, aligned with ``edges``
        a: float
            Pólya urn parameter (see the filter docstrings below: 0 behaves
            like the binomial case, 1 like the disparity filter)
        apt_lvl: int
            approximation level; forced to 0 for non-integer weights
        is_directed: bool
        eps: float
            tolerance used to decide whether weights are integer-valued
        optimization: OptionsForCache
            which memoization backend to use inside ``polya_cdf``
    Returns:
        np.array:
            edge scores. Probability values

    """
    # weighted and unweighted sparse adjacency matrices of the graph
    w_adj, adj = construct_sp_matrices(
        weights, edges, num_vertices, is_directed=is_directed
    )

    def calc_degree(x: csr_matrix, i: int) -> NpArrayEdgesFloat:
        # row (axis=1) or column (axis=0) sums of a sparse matrix, flattened
        return np.asarray(x.sum(axis=i)).flatten()

    # per-edge endpoint ids: column 0 = out-endpoint, column 1 = in-endpoint
    ids_out = edges[:, 0]
    ids_in = edges[:, 1]
    # weighted and unweighted degrees gathered at each edge's endpoints
    wdegree_out = calc_degree(w_adj, 1)[ids_out]
    wdegree_in = calc_degree(w_adj, 0)[ids_in]
    degree_out = calc_degree(adj, 1)[ids_out]
    degree_in = calc_degree(adj, 0)[ids_in]
    if np.mod(weights, 1).sum() > eps:
        # non integer weights
        # the approximation level only applies to integer weights
        apt_lvl = 0
    # choose the memoization backend; "lru-py-nb" and "nb" manage their own
    # caching downstream, so cache_obj stays None for them
    if optimization == "lru-nb":
        cache_obj = NbComputePolyaCacheDict()
    elif optimization == "lru-nb-szuszik":
        cache_obj = NbComputePolyaCacheSzuszik()
    elif optimization == "lru-nbf":
        cache_obj = NbGammaLnCache()
    else:
        cache_obj = None
    p_in = polya_cdf(
        wdegree_in,
        degree_in,
        weights,
        a,
        apt_lvl,
        optimization=optimization,
        cache_obj=cache_obj,
    )
    p_out = polya_cdf(
        wdegree_out,
        degree_out,
        weights,
        a,
        apt_lvl,
        optimization=optimization,
        cache_obj=cache_obj,
    )
    # keep the smaller endpoint probability for each edge
    p: NpArrayEdgesFloat = np.minimum(p_in, p_out)
    return p
def cond_edges2erase(alphas: NpArrayEdgesFloat, thresh: float = 0.1) -> NpArrayEdgesIds:
    """Select the indices of edges whose score exceeds the threshold.

    Args:
        alphas: np.array
            edge scores
        thresh: float
            Between 0 and 1.
    Returns:
        np.array:
            indices of edges to be erased
    """
    above_threshold = alphas > thresh
    ids2erase: NpArrayEdgesIds = np.flatnonzero(above_threshold).astype("int64")
    return ids2erase
def filter_generic_graph(
    num_vertices: int,
    edges: NpArrayEdges,
    weights: NpArrayEdgesFloat,
    thresh: float = 0.4,
    a: float = 1,
    apt_lvl: int = 10,
    is_directed: bool = False,
    eps: float = 1e-20,
    optimization: OptionsForCache = "lru-nb-szuszik",
) -> Tuple[NpArrayEdgesIds, NpArrayEdgesFloat]:
    """Filter the graph using the Pólya-based method.

    Scores every edge with ``scores_generic_graph`` and selects the ones
    whose probability exceeds ``thresh``.

    Args:
        num_vertices: int
            number of vertices
        edges: np.array
            edges
        weights: np.array
            edge weights
        thresh: float
        a: float
        apt_lvl: int
        is_directed: bool
        eps: float
    Returns:
        (np.array, np.array)
            - indices of edges to be erased
            - probability for each edge
    """
    probs = scores_generic_graph(
        num_vertices,
        edges,
        weights,
        a=a,
        apt_lvl=apt_lvl,
        is_directed=is_directed,
        eps=eps,
        optimization=optimization,
    )
    return cond_edges2erase(probs, thresh=thresh), probs
def filter_nx_graph(
    g: Union[nx.Graph, nx.DiGraph],
    thresh: float = 0.5,
    field: Optional[str] = None,
    a: float = 2,
    apt_lvl: int = 10,
    remap_labels: bool = False,
    save_scores: bool = False,
    optimization: OptionsForCache = "lru-nb-szuszik",
) -> Tuple[NpArrayEdgesIds, NpArrayEdgesFloat]:
    """Filter edges from a networkx graph using the Pólya-Urn filter.

    The graph is modified in place: filtered edges are removed from ``g``.

    Parameters:
        g: networkx.Graph
            graph to be filtered
        thresh: float
        field: str
        a: float
            0 is the Binomial distribution,
            1 the filter will behave like the Disparity filter.
        apt_lvl: int
        remap_labels: bool
            If True, the labels of the nodes are remapped to consecutive
            integers.
        save_scores: bool (default: False)
            If True, the scores of the edges are saved in the graph.
    Returns:
        (np.array, np.array)
            - indices of edges erased
            - probability for each edge
    """
    edges, weights, n_nodes, opts = nx_extract(g, remap_labels, field)
    ids2erase, probs = filter_generic_graph(
        n_nodes,
        edges,
        weights,
        is_directed=opts["is_directed"],
        a=a,
        apt_lvl=apt_lvl,
        thresh=thresh,
        optimization=optimization,
    )
    if save_scores:
        # attach each edge's probability under the "prob" attribute
        score_attrs = {}
        for src, tgt, prob in zip(edges[:, 0], edges[:, 1], probs):
            score_attrs[(src, tgt)] = {"prob": prob}
        nx.set_edge_attributes(g, score_attrs)
    nx_erase(g, edges[ids2erase], opts)
    return ids2erase, probs
def filter_ig_graph(
    g: ig.Graph,
    thresh: float = 0.5,
    field: Optional[str] = None,
    a: float = 2,
    apt_lvl: int = 10,
    optimization: OptionsForCache = "lru-nb-szuszik",
) -> Tuple[NpArrayEdgesIds, NpArrayEdgesFloat]:
    """Filter edges from an igraph graph using the Pólya-Urn filter.

    The graph is modified in place: filtered edges are removed from ``g``.

    Parameters:
        g: ig.Graph
            graph to be filtered
        thresh: float
        field: str
        a: float
            0 is the Binomial distribution,
            1 the filter will behave like the Disparity filter.
        apt_lvl: int
    Return:
        (np.array, np.array)
            - indices of edges erased
            - probability for each edge
    """
    edges, weights, n_nodes, opts = ig_extract(g, field)
    ids2erase, probs = filter_generic_graph(
        n_nodes,
        edges,
        weights,
        is_directed=opts["is_directed"],
        a=a,
        apt_lvl=apt_lvl,
        thresh=thresh,
        optimization=optimization,
    )
    ig_erase(g, ids2erase)
    return ids2erase, probs
| [
"numpy.minimum",
"edgeseraser.polya_tools.statistics.polya_cdf",
"edgeseraser.misc.backend.nx_erase",
"edgeseraser.misc.backend.nx_extract",
"edgeseraser.polya_tools.numba_tools.NbComputePolyaCacheDict",
"numpy.argwhere",
"edgeseraser.polya_tools.numba_tools.NbComputePolyaCacheSzuszik",
"edgeseraser.m... | [((727, 773), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (748, 773), False, 'import warnings\n'), ((1633, 1709), 'edgeseraser.misc.matrix.construct_sp_matrices', 'construct_sp_matrices', (['weights', 'edges', 'num_vertices'], {'is_directed': 'is_directed'}), '(weights, edges, num_vertices, is_directed=is_directed)\n', (1654, 1709), False, 'from edgeseraser.misc.matrix import construct_sp_matrices\n'), ((2459, 2565), 'edgeseraser.polya_tools.statistics.polya_cdf', 'polya_cdf', (['wdegree_in', 'degree_in', 'weights', 'a', 'apt_lvl'], {'optimization': 'optimization', 'cache_obj': 'cache_obj'}), '(wdegree_in, degree_in, weights, a, apt_lvl, optimization=\n optimization, cache_obj=cache_obj)\n', (2468, 2565), False, 'from edgeseraser.polya_tools.statistics import polya_cdf\n'), ((2636, 2744), 'edgeseraser.polya_tools.statistics.polya_cdf', 'polya_cdf', (['wdegree_out', 'degree_out', 'weights', 'a', 'apt_lvl'], {'optimization': 'optimization', 'cache_obj': 'cache_obj'}), '(wdegree_out, degree_out, weights, a, apt_lvl, optimization=\n optimization, cache_obj=cache_obj)\n', (2645, 2744), False, 'from edgeseraser.polya_tools.statistics import polya_cdf\n'), ((2831, 2854), 'numpy.minimum', 'np.minimum', (['p_in', 'p_out'], {}), '(p_in, p_out)\n', (2841, 2854), True, 'import numpy as np\n'), ((5381, 5415), 'edgeseraser.misc.backend.nx_extract', 'nx_extract', (['g', 'remap_labels', 'field'], {}), '(g, remap_labels, field)\n', (5391, 5415), False, 'from edgeseraser.misc.backend import ig_erase, ig_extract, nx_erase, nx_extract\n'), ((5915, 5950), 'edgeseraser.misc.backend.nx_erase', 'nx_erase', (['g', 'edges[ids2erase]', 'opts'], {}), '(g, edges[ids2erase], opts)\n', (5923, 5950), False, 'from edgeseraser.misc.backend import ig_erase, ig_extract, nx_erase, nx_extract\n'), ((6704, 6724), 'edgeseraser.misc.backend.ig_extract', 'ig_extract', (['g', 'field'], {}), '(g, field)\n', 
(6714, 6724), False, 'from edgeseraser.misc.backend import ig_erase, ig_extract, nx_erase, nx_extract\n'), ((7007, 7029), 'edgeseraser.misc.backend.ig_erase', 'ig_erase', (['g', 'ids2erase'], {}), '(g, ids2erase)\n', (7015, 7029), False, 'from edgeseraser.misc.backend import ig_erase, ig_extract, nx_erase, nx_extract\n'), ((2222, 2247), 'edgeseraser.polya_tools.numba_tools.NbComputePolyaCacheDict', 'NbComputePolyaCacheDict', ([], {}), '()\n', (2245, 2247), False, 'from edgeseraser.polya_tools.numba_tools import NbComputePolyaCacheDict, NbComputePolyaCacheSzuszik\n'), ((2311, 2339), 'edgeseraser.polya_tools.numba_tools.NbComputePolyaCacheSzuszik', 'NbComputePolyaCacheSzuszik', ([], {}), '()\n', (2337, 2339), False, 'from edgeseraser.polya_tools.numba_tools import NbComputePolyaCacheDict, NbComputePolyaCacheSzuszik\n'), ((2087, 2105), 'numpy.mod', 'np.mod', (['weights', '(1)'], {}), '(weights, 1)\n', (2093, 2105), True, 'import numpy as np\n'), ((2396, 2412), 'edgeseraser.misc.fast_math.NbGammaLnCache', 'NbGammaLnCache', ([], {}), '()\n', (2410, 2412), False, 'from edgeseraser.misc.fast_math import NbGammaLnCache\n'), ((3192, 3220), 'numpy.argwhere', 'np.argwhere', (['(alphas > thresh)'], {}), '(alphas > thresh)\n', (3203, 3220), True, 'import numpy as np\n')] |
import os, sys, time, copy, glob
from collections import deque
import gym
from gym import spaces
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ppo.a2c_ppo_acktr import algo
from ppo.a2c_ppo_acktr.arguments import get_args
from ppo.a2c_ppo_acktr.envs import make_vec_envs
from ppo.a2c_ppo_acktr.model import Policy
from ppo.a2c_ppo_acktr.storage import RolloutStorage
from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule
from ppo.a2c_ppo_acktr.visualize import visdom_plot
# Parse command-line arguments and validate algorithm-specific constraints.
args = get_args()
assert args.algo in ['a2c', 'ppo', 'acktr']
if args.recurrent_policy:
    assert args.algo in ['a2c', 'ppo'], \
        'Recurrent policy is not implemented for ACKTR'
if args.num_rollouts > 0:
    # rollouts are collected in groups of num_processes, so the total
    # must divide evenly
    assert args.num_rollouts % args.num_processes == 0, 'num_rollouts must be divisable by num_processes'
# Total number of policy updates implied by the step budget.
num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes
# Seed both CPU and all CUDA devices for reproducibility.
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
    # trade cudnn autotuning speed for bit-exact reproducibility
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
# Ensure the monitor log dir exists; if it already does, clear stale
# *.monitor.csv files (best effort: permission errors are ignored).
try:
    os.makedirs(args.log_dir)
except OSError:
    files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))
    try:
        for f in files:
            os.remove(f)
    except PermissionError as e:
        pass
# Same setup for the separate evaluation log directory.
eval_log_dir = args.log_dir + "_eval"
try:
    os.makedirs(eval_log_dir)
except OSError:
    files = glob.glob(os.path.join(eval_log_dir, '*.monitor.csv'))
    try:
        for f in files:
            os.remove(f)
    except PermissionError as e:
        pass
def main():
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
if args.vis:
from visdom import Visdom
viz = Visdom(port=args.port)
win = None
envs = make_vec_envs(args.env_name, args.seed, 1,
args.gamma, args.log_dir, args.add_timestep, device, False)
# Determine if this is a dual robot (multi agent) environment.
obs = envs.reset()
action = torch.tensor([envs.action_space.sample()])
_, _, _, info = envs.step(action)
dual_robots = 'dual_robots' in info[0]
if dual_robots:
obs_robot_len = info[0]['obs_robot_len'] // 2
action_robot_len = info[0]['action_robot_len'] // 2
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
if len(obs_robot1[0]) != obs_robot_len or len(obs_robot2[0]) != obs_robot_len:
print('robot 1 obs shape:', len(obs_robot1[0]), 'obs space robot shape:', (obs_robot_len,))
print('robot 2 obs shape:', len(obs_robot2[0]), 'obs space robot shape:', (obs_robot_len,))
exit()
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, args.add_timestep, device, False)
if dual_robots:
# Reset environment
obs = envs.reset()
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
action_space_robot1 = spaces.Box(low=np.array([-1.0]*action_robot_len), high=np.array([1.0]*action_robot_len), dtype=np.float32)
action_space_robot2 = spaces.Box(low=np.array([-1.0]*action_robot_len), high=np.array([1.0]*action_robot_len), dtype=np.float32)
if args.load_policy is not None:
if dual_robots:
actor_critic_robot1, actor_critic_robot2, ob_rms = torch.load(args.load_policy)
else:
actor_critic, ob_rms = torch.load(args.load_policy)
vec_norm = get_vec_normalize(envs)
if vec_norm is not None:
vec_norm.ob_rms = ob_rms
else:
if dual_robots:
actor_critic_robot1 = Policy([obs_robot_len], action_space_robot1,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic_robot2 = Policy([obs_robot_len], action_space_robot2,
base_kwargs={'recurrent': args.recurrent_policy})
else:
actor_critic = Policy(envs.observation_space.shape, envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy, 'hidden_size': args.hidden_size})
if dual_robots:
actor_critic_robot1.to(device)
actor_critic_robot2.to(device)
else:
actor_critic.to(device)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, lr=args.lr,
eps=args.eps, alpha=args.alpha,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'ppo':
if dual_robots:
agent_robot1 = algo.PPO(actor_critic_robot1, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
agent_robot2 = algo.PPO(actor_critic_robot2, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
else:
agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, acktr=True)
if dual_robots:
rollouts_robot1 = RolloutStorage(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else args.num_processes,
[obs_robot_len], action_space_robot1,
actor_critic_robot1.recurrent_hidden_state_size)
rollouts_robot2 = RolloutStorage(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else args.num_processes,
[obs_robot_len], action_space_robot2,
actor_critic_robot2.recurrent_hidden_state_size)
if args.num_rollouts > 0:
rollouts_robot1.obs[0].copy_(torch.cat([obs_robot1 for _ in range(args.num_rollouts // args.num_processes)] + [obs_robot1[:(args.num_rollouts % args.num_processes)]], dim=0))
rollouts_robot2.obs[0].copy_(torch.cat([obs_robot2 for _ in range(args.num_rollouts // args.num_processes)] + [obs_robot2[:(args.num_rollouts % args.num_processes)]], dim=0))
else:
rollouts_robot1.obs[0].copy_(obs_robot1)
rollouts_robot2.obs[0].copy_(obs_robot2)
rollouts_robot1.to(device)
rollouts_robot2.to(device)
else:
rollouts = RolloutStorage(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
if args.num_rollouts > 0:
rollouts.obs[0].copy_(torch.cat([obs for _ in range(args.num_rollouts // args.num_processes)] + [obs[:(args.num_rollouts % args.num_processes)]], dim=0))
else:
rollouts.obs[0].copy_(obs)
rollouts.to(device)
deque_len = args.num_rollouts if args.num_rollouts > 0 else (args.num_processes if args.num_processes > 10 else 10)
if dual_robots:
episode_rewards_robot1 = deque(maxlen=deque_len)
episode_rewards_robot2 = deque(maxlen=deque_len)
else:
episode_rewards = deque(maxlen=deque_len)
start = time.time()
for j in range(num_updates):
if args.use_linear_lr_decay:
# decrease learning rate linearly
if args.algo == "acktr":
# use optimizer's learning rate since it's hard-coded in kfac.py
update_linear_schedule(agent.optimizer, j, num_updates, agent.optimizer.lr)
else:
if dual_robots:
update_linear_schedule(agent_robot1.optimizer, j, num_updates, args.lr)
update_linear_schedule(agent_robot2.optimizer, j, num_updates, args.lr)
else:
update_linear_schedule(agent.optimizer, j, num_updates, args.lr)
if args.algo == 'ppo' and args.use_linear_clip_decay:
if dual_robots:
agent_robot1.clip_param = args.clip_param * (1 - j / float(num_updates))
agent_robot2.clip_param = args.clip_param * (1 - j / float(num_updates))
else:
agent.clip_param = args.clip_param * (1 - j / float(num_updates))
reward_list_robot1 = [[] for _ in range(args.num_processes)]
reward_list_robot2 = [[] for _ in range(args.num_processes)]
for step in range(args.num_steps):
# Sample actions
# obs = self.apply_attack(obs, args.phi, args.epsilon)
with torch.no_grad():
if dual_robots:
value_robot1, action_robot1, action_log_prob_robot1, recurrent_hidden_states_robot1 = actor_critic_robot1.act(
rollouts_robot1.obs[step, :args.num_processes],
rollouts_robot1.recurrent_hidden_states[step, :args.num_processes],
rollouts_robot1.masks[step, :args.num_processes])
value_robot2, action_robot2, action_log_prob_robot2, recurrent_hidden_states_robot2 = actor_critic_robot2.act(
rollouts_robot2.obs[step, :args.num_processes],
rollouts_robot2.recurrent_hidden_states[step, :args.num_processes],
rollouts_robot2.masks[step, :args.num_processes])
else:
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step, :args.num_processes],
rollouts.recurrent_hidden_states[step, :args.num_processes],
rollouts.masks[step, :args.num_processes])
# Obser reward and next obs
if dual_robots:
action = torch.cat((action_robot1, action_robot2), dim=-1)
obs, reward, done, infos = envs.step(action)
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
for i, info in enumerate(infos):
reward_list_robot1[i].append(info['reward_robot1'])
reward_list_robot2[i].append(info['reward_robot2'])
reward_robot1 = torch.tensor([[info['reward_robot1']] for info in infos])
reward_robot2 = torch.tensor([[info['reward_robot2']] for info in infos])
else:
obs, reward, done, infos = envs.step(action)
for i, info in enumerate(infos):
if 'episode' in info.keys():
if dual_robots:
episode_rewards_robot1.append(np.sum(reward_list_robot1[i]))
episode_rewards_robot2.append(np.sum(reward_list_robot2[i]))
else:
episode_rewards.append(info['episode']['r'])
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0]
for done_ in done])
if dual_robots:
rollouts_robot1.insert(obs_robot1, recurrent_hidden_states_robot1, action_robot1, action_log_prob_robot1, value_robot1, reward_robot1, masks)
rollouts_robot2.insert(obs_robot2, recurrent_hidden_states_robot2, action_robot2, action_log_prob_robot2, value_robot2, reward_robot2, masks)
else:
rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks)
if args.num_rollouts > 0 and (j % (args.num_rollouts // args.num_processes) != 0):
# Only update the policies when we have performed num_rollouts simulations
continue
with torch.no_grad():
if dual_robots:
next_value_robot1 = actor_critic_robot1.get_value(rollouts_robot1.obs[-1],
rollouts_robot1.recurrent_hidden_states[-1],
rollouts_robot1.masks[-1]).detach()
next_value_robot2 = actor_critic_robot2.get_value(rollouts_robot2.obs[-1],
rollouts_robot2.recurrent_hidden_states[-1],
rollouts_robot2.masks[-1]).detach()
else:
next_value = actor_critic.get_value(rollouts.obs[-1],
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
if dual_robots:
rollouts_robot1.compute_returns(next_value_robot1, args.use_gae, args.gamma, args.tau)
rollouts_robot2.compute_returns(next_value_robot2, args.use_gae, args.gamma, args.tau)
value_loss_robot1, action_loss_robot1, dist_entropy_robot1 = agent_robot1.update(rollouts_robot1)
value_loss_robot2, action_loss_robot2, dist_entropy_robot2 = agent_robot2.update(rollouts_robot2)
rollouts_robot1.after_update()
rollouts_robot2.after_update()
else:
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save for every interval-th episode or for the last epoch
if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
if dual_robots:
save_model_robot1 = actor_critic_robot1
save_model_robot2 = actor_critic_robot2
if args.cuda:
save_model_robot1 = copy.deepcopy(actor_critic_robot1).cpu()
save_model_robot2 = copy.deepcopy(actor_critic_robot2).cpu()
save_model = [save_model_robot1, save_model_robot2,
getattr(get_vec_normalize(envs), 'ob_rms', None)]
else:
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
save_model = [save_model,
getattr(get_vec_normalize(envs), 'ob_rms', None)]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and (len(episode_rewards_robot1) > 1 if dual_robots else len(episode_rewards) > 1):
end = time.time()
if dual_robots:
print("Robot1 updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}".
format(j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards_robot1),
np.mean(episode_rewards_robot1),
np.median(episode_rewards_robot1),
np.min(episode_rewards_robot1),
np.max(episode_rewards_robot1), dist_entropy_robot1,
value_loss_robot1, action_loss_robot1))
print("Robot2 updates {}, Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n".
format(j, len(episode_rewards_robot2),
np.mean(episode_rewards_robot2),
np.median(episode_rewards_robot2),
np.min(episode_rewards_robot2),
np.max(episode_rewards_robot2), dist_entropy_robot2,
value_loss_robot2, action_loss_robot2))
else:
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n".
format(j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards), dist_entropy,
value_loss, action_loss))
sys.stdout.flush()
if (args.eval_interval is not None
and len(episode_rewards) > 1
and j % args.eval_interval == 0):
eval_envs = make_vec_envs(
args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
vec_norm = get_vec_normalize(eval_envs)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = get_vec_normalize(envs).ob_rms
if dual_robots:
eval_episode_rewards_robot1 = []
eval_episode_rewards_robot2 = []
else:
eval_episode_rewards = []
obs = eval_envs.reset()
if dual_robots:
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
eval_recurrent_hidden_states_robot1 = torch.zeros(args.num_processes,
actor_critic_robot1.recurrent_hidden_state_size, device=device)
eval_recurrent_hidden_states_robot2 = torch.zeros(args.num_processes,
actor_critic_robot2.recurrent_hidden_state_size, device=device)
else:
eval_recurrent_hidden_states = torch.zeros(args.num_processes,
actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
eval_reward_list_robot1 = [[] for _ in range(args.num_processes)]
eval_reward_list_robot2 = [[] for _ in range(args.num_processes)]
while (len(eval_episode_rewards_robot1) < 10 if dual_robots else len(eval_episode_rewards) < 10):
with torch.no_grad():
if dual_robots:
_, action_robot1, _, eval_recurrent_hidden_states_robot1 = actor_critic_robot1.act(
obs_robot1, eval_recurrent_hidden_states_robot1, eval_masks, deterministic=True)
_, action_robot2, _, eval_recurrent_hidden_states_robot2 = actor_critic_robot2.act(
obs_robot2, eval_recurrent_hidden_states_robot2, eval_masks, deterministic=True)
else:
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)
# Obser reward and next obs
if dual_robots:
action = torch.cat((action_robot1, action_robot2), dim=-1)
obs, reward, done, infos = eval_envs.step(action)
obs_robot1 = obs[:, :obs_robot_len]
obs_robot2 = obs[:, obs_robot_len:]
for i, info in enumerate(infos):
eval_reward_list_robot1[i].append(info['reward_robot1'])
eval_reward_list_robot2[i].append(info['reward_robot2'])
else:
obs, reward, done, infos = eval_envs.step(action)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0]
for done_ in done])
reset_rewards = False
for info in infos:
if 'episode' in info.keys():
if dual_robots:
reset_rewards = True
eval_episode_rewards_robot1.append(np.sum(eval_reward_list_robot1[i]))
eval_episode_rewards_robot2.append(np.sum(eval_reward_list_robot2[i]))
else:
eval_episode_rewards.append(info['episode']['r'])
if reset_rewards:
eval_reward_list_robot1 = [[] for _ in range(args.num_processes)]
eval_reward_list_robot2 = [[] for _ in range(args.num_processes)]
eval_envs.close()
if dual_robots:
print(" Evaluation using {} episodes: robot1 mean reward {:.5f}, robot2 mean reward {:.5f}\n".
format(len(eval_episode_rewards_robot1),
np.mean(eval_episode_rewards_robot1), np.mean(eval_episode_rewards_robot2)))
else:
print(" Evaluation using {} episodes: mean reward {:.5f}\n".
format(len(eval_episode_rewards),
np.mean(eval_episode_rewards)))
sys.stdout.flush()
if args.vis and j % args.vis_interval == 0:
try:
# Sometimes monitor doesn't properly flush the outputs
win = visdom_plot(viz, win, args.log_dir, args.env_name,
args.algo, args.num_env_steps)
except IOError:
pass
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"numpy.array",
"torch.cuda.is_available",
"ppo.a2c_ppo_acktr.algo.A2C_ACKTR",
"copy.deepcopy",
"ppo.a2c_ppo_acktr.utils.get_vec_normalize",
"visdom.Visdom",
"os.remove",
"numpy.mean",
"collections.deque",
"ppo.a2c_ppo_acktr.model.Policy",
"torch.set_num_threads",
"ppo.a2c_ppo_acktr.algo.PPO",
... | [((581, 591), 'ppo.a2c_ppo_acktr.arguments.get_args', 'get_args', ([], {}), '()\n', (589, 591), False, 'from ppo.a2c_ppo_acktr.arguments import get_args\n'), ((974, 1002), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (991, 1002), False, 'import torch\n'), ((1003, 1040), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (1029, 1040), False, 'import torch\n'), ((1059, 1084), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1082, 1084), False, 'import torch\n'), ((1213, 1238), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (1224, 1238), False, 'import os, sys, time, copy, glob\n'), ((1475, 1500), 'os.makedirs', 'os.makedirs', (['eval_log_dir'], {}), '(eval_log_dir)\n', (1486, 1500), False, 'import os, sys, time, copy, glob\n'), ((1706, 1730), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1727, 1730), False, 'import torch\n'), ((1744, 1790), 'torch.device', 'torch.device', (["('cuda:0' if args.cuda else 'cpu')"], {}), "('cuda:0' if args.cuda else 'cpu')\n", (1756, 1790), False, 'import torch\n'), ((1911, 2018), 'ppo.a2c_ppo_acktr.envs.make_vec_envs', 'make_vec_envs', (['args.env_name', 'args.seed', '(1)', 'args.gamma', 'args.log_dir', 'args.add_timestep', 'device', '(False)'], {}), '(args.env_name, args.seed, 1, args.gamma, args.log_dir, args.\n add_timestep, device, False)\n', (1924, 2018), False, 'from ppo.a2c_ppo_acktr.envs import make_vec_envs\n'), ((2814, 2937), 'ppo.a2c_ppo_acktr.envs.make_vec_envs', 'make_vec_envs', (['args.env_name', 'args.seed', 'args.num_processes', 'args.gamma', 'args.log_dir', 'args.add_timestep', 'device', '(False)'], {}), '(args.env_name, args.seed, args.num_processes, args.gamma,\n args.log_dir, args.add_timestep, device, False)\n', (2827, 2937), False, 'from ppo.a2c_ppo_acktr.envs import make_vec_envs\n'), ((7898, 7909), 'time.time', 'time.time', ([], {}), '()\n', (7907, 
7909), False, 'import os, sys, time, copy, glob\n'), ((1857, 1879), 'visdom.Visdom', 'Visdom', ([], {'port': 'args.port'}), '(port=args.port)\n', (1863, 1879), False, 'from visdom import Visdom\n'), ((3647, 3670), 'ppo.a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['envs'], {}), '(envs)\n', (3664, 3670), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((4445, 4597), 'ppo.a2c_ppo_acktr.algo.A2C_ACKTR', 'algo.A2C_ACKTR', (['actor_critic', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'alpha': 'args.alpha', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic, args.value_loss_coef, args.entropy_coef, lr=\n args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm)\n', (4459, 4597), False, 'from ppo.a2c_ppo_acktr import algo\n'), ((5879, 6072), 'ppo.a2c_ppo_acktr.storage.RolloutStorage', 'RolloutStorage', (['args.num_steps', '(args.num_rollouts if args.num_rollouts > 0 else args.num_processes)', '[obs_robot_len]', 'action_space_robot1', 'actor_critic_robot1.recurrent_hidden_state_size'], {}), '(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else\n args.num_processes, [obs_robot_len], action_space_robot1,\n actor_critic_robot1.recurrent_hidden_state_size)\n', (5893, 6072), False, 'from ppo.a2c_ppo_acktr.storage import RolloutStorage\n'), ((6147, 6340), 'ppo.a2c_ppo_acktr.storage.RolloutStorage', 'RolloutStorage', (['args.num_steps', '(args.num_rollouts if args.num_rollouts > 0 else args.num_processes)', '[obs_robot_len]', 'action_space_robot2', 'actor_critic_robot2.recurrent_hidden_state_size'], {}), '(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else\n args.num_processes, [obs_robot_len], action_space_robot2,\n actor_critic_robot2.recurrent_hidden_state_size)\n', (6161, 6340), False, 'from ppo.a2c_ppo_acktr.storage import RolloutStorage\n'), ((7016, 7213), 'ppo.a2c_ppo_acktr.storage.RolloutStorage', 'RolloutStorage', 
(['args.num_steps', '(args.num_rollouts if args.num_rollouts > 0 else args.num_processes)', 'envs.observation_space.shape', 'envs.action_space', 'actor_critic.recurrent_hidden_state_size'], {}), '(args.num_steps, args.num_rollouts if args.num_rollouts > 0 else\n args.num_processes, envs.observation_space.shape, envs.action_space,\n actor_critic.recurrent_hidden_state_size)\n', (7030, 7213), False, 'from ppo.a2c_ppo_acktr.storage import RolloutStorage\n'), ((7744, 7767), 'collections.deque', 'deque', ([], {'maxlen': 'deque_len'}), '(maxlen=deque_len)\n', (7749, 7767), False, 'from collections import deque\n'), ((7801, 7824), 'collections.deque', 'deque', ([], {'maxlen': 'deque_len'}), '(maxlen=deque_len)\n', (7806, 7824), False, 'from collections import deque\n'), ((7861, 7884), 'collections.deque', 'deque', ([], {'maxlen': 'deque_len'}), '(maxlen=deque_len)\n', (7866, 7884), False, 'from collections import deque\n'), ((1277, 1320), 'os.path.join', 'os.path.join', (['args.log_dir', '"""*.monitor.csv"""'], {}), "(args.log_dir, '*.monitor.csv')\n", (1289, 1320), False, 'import os, sys, time, copy, glob\n'), ((1539, 1582), 'os.path.join', 'os.path.join', (['eval_log_dir', '"""*.monitor.csv"""'], {}), "(eval_log_dir, '*.monitor.csv')\n", (1551, 1582), False, 'import os, sys, time, copy, glob\n'), ((3521, 3549), 'torch.load', 'torch.load', (['args.load_policy'], {}), '(args.load_policy)\n', (3531, 3549), False, 'import torch\n'), ((3599, 3627), 'torch.load', 'torch.load', (['args.load_policy'], {}), '(args.load_policy)\n', (3609, 3627), False, 'import torch\n'), ((3809, 3908), 'ppo.a2c_ppo_acktr.model.Policy', 'Policy', (['[obs_robot_len]', 'action_space_robot1'], {'base_kwargs': "{'recurrent': args.recurrent_policy}"}), "([obs_robot_len], action_space_robot1, base_kwargs={'recurrent': args\n .recurrent_policy})\n", (3815, 3908), False, 'from ppo.a2c_ppo_acktr.model import Policy\n'), ((3954, 4053), 'ppo.a2c_ppo_acktr.model.Policy', 'Policy', (['[obs_robot_len]', 
'action_space_robot2'], {'base_kwargs': "{'recurrent': args.recurrent_policy}"}), "([obs_robot_len], action_space_robot2, base_kwargs={'recurrent': args\n .recurrent_policy})\n", (3960, 4053), False, 'from ppo.a2c_ppo_acktr.model import Policy\n'), ((4106, 4249), 'ppo.a2c_ppo_acktr.model.Policy', 'Policy', (['envs.observation_space.shape', 'envs.action_space'], {'base_kwargs': "{'recurrent': args.recurrent_policy, 'hidden_size': args.hidden_size}"}), "(envs.observation_space.shape, envs.action_space, base_kwargs={\n 'recurrent': args.recurrent_policy, 'hidden_size': args.hidden_size})\n", (4112, 4249), False, 'from ppo.a2c_ppo_acktr.model import Policy\n'), ((11640, 11706), 'torch.FloatTensor', 'torch.FloatTensor', (['[([0.0] if done_ else [1.0]) for done_ in done]'], {}), '([([0.0] if done_ else [1.0]) for done_ in done])\n', (11657, 11706), False, 'import torch\n'), ((12431, 12446), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12444, 12446), False, 'import torch\n'), ((14208, 14246), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.algo'], {}), '(args.save_dir, args.algo)\n', (14220, 14246), False, 'import os, sys, time, copy, glob\n'), ((15464, 15475), 'time.time', 'time.time', ([], {}), '()\n', (15473, 15475), False, 'import os, sys, time, copy, glob\n'), ((17312, 17330), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17328, 17330), False, 'import os, sys, time, copy, glob\n'), ((17494, 17638), 'ppo.a2c_ppo_acktr.envs.make_vec_envs', 'make_vec_envs', (['args.env_name', '(args.seed + args.num_processes)', 'args.num_processes', 'args.gamma', 'eval_log_dir', 'args.add_timestep', 'device', '(True)'], {}), '(args.env_name, args.seed + args.num_processes, args.\n num_processes, args.gamma, eval_log_dir, args.add_timestep, device, True)\n', (17507, 17638), False, 'from ppo.a2c_ppo_acktr.envs import make_vec_envs\n'), ((17691, 17719), 'ppo.a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['eval_envs'], {}), '(eval_envs)\n', 
(17708, 17719), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((18785, 18834), 'torch.zeros', 'torch.zeros', (['args.num_processes', '(1)'], {'device': 'device'}), '(args.num_processes, 1, device=device)\n', (18796, 18834), False, 'import torch\n'), ((21889, 21907), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (21905, 21907), False, 'import os, sys, time, copy, glob\n'), ((1367, 1379), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1376, 1379), False, 'import os, sys, time, copy, glob\n'), ((1629, 1641), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1638, 1641), False, 'import os, sys, time, copy, glob\n'), ((3167, 3202), 'numpy.array', 'np.array', (['([-1.0] * action_robot_len)'], {}), '([-1.0] * action_robot_len)\n', (3175, 3202), True, 'import numpy as np\n'), ((3207, 3241), 'numpy.array', 'np.array', (['([1.0] * action_robot_len)'], {}), '([1.0] * action_robot_len)\n', (3215, 3241), True, 'import numpy as np\n'), ((3304, 3339), 'numpy.array', 'np.array', (['([-1.0] * action_robot_len)'], {}), '([-1.0] * action_robot_len)\n', (3312, 3339), True, 'import numpy as np\n'), ((3344, 3378), 'numpy.array', 'np.array', (['([1.0] * action_robot_len)'], {}), '([1.0] * action_robot_len)\n', (3352, 3378), True, 'import numpy as np\n'), ((4766, 4959), 'ppo.a2c_ppo_acktr.algo.PPO', 'algo.PPO', (['actor_critic_robot1', 'args.clip_param', 'args.ppo_epoch', 'args.num_mini_batch', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic_robot1, args.clip_param, args.ppo_epoch, args.\n num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr,\n eps=args.eps, max_grad_norm=args.max_grad_norm)\n', (4774, 4959), False, 'from ppo.a2c_ppo_acktr import algo\n'), ((5077, 5270), 'ppo.a2c_ppo_acktr.algo.PPO', 'algo.PPO', (['actor_critic_robot2', 'args.clip_param', 'args.ppo_epoch', 'args.num_mini_batch', 'args.value_loss_coef', 
'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic_robot2, args.clip_param, args.ppo_epoch, args.\n num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr,\n eps=args.eps, max_grad_norm=args.max_grad_norm)\n', (5085, 5270), False, 'from ppo.a2c_ppo_acktr import algo\n'), ((5395, 5580), 'ppo.a2c_ppo_acktr.algo.PPO', 'algo.PPO', (['actor_critic', 'args.clip_param', 'args.ppo_epoch', 'args.num_mini_batch', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,\n args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps,\n max_grad_norm=args.max_grad_norm)\n', (5403, 5580), False, 'from ppo.a2c_ppo_acktr import algo\n'), ((5719, 5805), 'ppo.a2c_ppo_acktr.algo.A2C_ACKTR', 'algo.A2C_ACKTR', (['actor_critic', 'args.value_loss_coef', 'args.entropy_coef'], {'acktr': '(True)'}), '(actor_critic, args.value_loss_coef, args.entropy_coef, acktr\n =True)\n', (5733, 5805), False, 'from ppo.a2c_ppo_acktr import algo\n'), ((8161, 8236), 'ppo.a2c_ppo_acktr.utils.update_linear_schedule', 'update_linear_schedule', (['agent.optimizer', 'j', 'num_updates', 'agent.optimizer.lr'], {}), '(agent.optimizer, j, num_updates, agent.optimizer.lr)\n', (8183, 8236), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((9246, 9261), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9259, 9261), False, 'import torch\n'), ((10498, 10547), 'torch.cat', 'torch.cat', (['(action_robot1, action_robot2)'], {'dim': '(-1)'}), '((action_robot1, action_robot2), dim=-1)\n', (10507, 10547), False, 'import torch\n'), ((10938, 10995), 'torch.tensor', 'torch.tensor', (["[[info['reward_robot1']] for info in infos]"], {}), "([[info['reward_robot1']] for info in infos])\n", (10950, 10995), False, 'import torch\n'), ((11028, 11085), 'torch.tensor', 
'torch.tensor', (["[[info['reward_robot2']] for info in infos]"], {}), "([[info['reward_robot2']] for info in infos])\n", (11040, 11085), False, 'import torch\n'), ((14280, 14302), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (14291, 14302), False, 'import os, sys, time, copy, glob\n'), ((15202, 15248), 'os.path.join', 'os.path.join', (['save_path', "(args.env_name + '.pt')"], {}), "(save_path, args.env_name + '.pt')\n", (15214, 15248), False, 'import os, sys, time, copy, glob\n'), ((18264, 18364), 'torch.zeros', 'torch.zeros', (['args.num_processes', 'actor_critic_robot1.recurrent_hidden_state_size'], {'device': 'device'}), '(args.num_processes, actor_critic_robot1.\n recurrent_hidden_state_size, device=device)\n', (18275, 18364), False, 'import torch\n'), ((18446, 18546), 'torch.zeros', 'torch.zeros', (['args.num_processes', 'actor_critic_robot2.recurrent_hidden_state_size'], {'device': 'device'}), '(args.num_processes, actor_critic_robot2.\n recurrent_hidden_state_size, device=device)\n', (18457, 18546), False, 'import torch\n'), ((18639, 18731), 'torch.zeros', 'torch.zeros', (['args.num_processes', 'actor_critic.recurrent_hidden_state_size'], {'device': 'device'}), '(args.num_processes, actor_critic.recurrent_hidden_state_size,\n device=device)\n', (18650, 18731), False, 'import torch\n'), ((20494, 20560), 'torch.FloatTensor', 'torch.FloatTensor', (['[([0.0] if done_ else [1.0]) for done_ in done]'], {}), '([([0.0] if done_ else [1.0]) for done_ in done])\n', (20511, 20560), False, 'import torch\n'), ((22071, 22157), 'ppo.a2c_ppo_acktr.visualize.visdom_plot', 'visdom_plot', (['viz', 'win', 'args.log_dir', 'args.env_name', 'args.algo', 'args.num_env_steps'], {}), '(viz, win, args.log_dir, args.env_name, args.algo, args.\n num_env_steps)\n', (22082, 22157), False, 'from ppo.a2c_ppo_acktr.visualize import visdom_plot\n'), ((8307, 8378), 'ppo.a2c_ppo_acktr.utils.update_linear_schedule', 'update_linear_schedule', (['agent_robot1.optimizer', 
'j', 'num_updates', 'args.lr'], {}), '(agent_robot1.optimizer, j, num_updates, args.lr)\n', (8329, 8378), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((8399, 8470), 'ppo.a2c_ppo_acktr.utils.update_linear_schedule', 'update_linear_schedule', (['agent_robot2.optimizer', 'j', 'num_updates', 'args.lr'], {}), '(agent_robot2.optimizer, j, num_updates, args.lr)\n', (8421, 8470), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((8513, 8577), 'ppo.a2c_ppo_acktr.utils.update_linear_schedule', 'update_linear_schedule', (['agent.optimizer', 'j', 'num_updates', 'args.lr'], {}), '(agent.optimizer, j, num_updates, args.lr)\n', (8535, 8577), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((17823, 17846), 'ppo.a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['envs'], {}), '(envs)\n', (17840, 17846), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((19123, 19138), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19136, 19138), False, 'import torch\n'), ((19924, 19973), 'torch.cat', 'torch.cat', (['(action_robot1, action_robot2)'], {'dim': '(-1)'}), '((action_robot1, action_robot2), dim=-1)\n', (19933, 19973), False, 'import torch\n'), ((14846, 14869), 'ppo.a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['envs'], {}), '(envs)\n', (14863, 14869), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((15125, 15148), 'ppo.a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['envs'], {}), '(envs)\n', (15142, 15148), False, 'from ppo.a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((15861, 15892), 'numpy.mean', 'np.mean', (['episode_rewards_robot1'], {}), '(episode_rewards_robot1)\n', (15868, 15892), True, 'import numpy as np\n'), ((15921, 15954), 'numpy.median', 'np.median', (['episode_rewards_robot1'], {}), 
'(episode_rewards_robot1)\n', (15930, 15954), True, 'import numpy as np\n'), ((15983, 16013), 'numpy.min', 'np.min', (['episode_rewards_robot1'], {}), '(episode_rewards_robot1)\n', (15989, 16013), True, 'import numpy as np\n'), ((16042, 16072), 'numpy.max', 'np.max', (['episode_rewards_robot1'], {}), '(episode_rewards_robot1)\n', (16048, 16072), True, 'import numpy as np\n'), ((16384, 16415), 'numpy.mean', 'np.mean', (['episode_rewards_robot2'], {}), '(episode_rewards_robot2)\n', (16391, 16415), True, 'import numpy as np\n'), ((16444, 16477), 'numpy.median', 'np.median', (['episode_rewards_robot2'], {}), '(episode_rewards_robot2)\n', (16453, 16477), True, 'import numpy as np\n'), ((16506, 16536), 'numpy.min', 'np.min', (['episode_rewards_robot2'], {}), '(episode_rewards_robot2)\n', (16512, 16536), True, 'import numpy as np\n'), ((16565, 16595), 'numpy.max', 'np.max', (['episode_rewards_robot2'], {}), '(episode_rewards_robot2)\n', (16571, 16595), True, 'import numpy as np\n'), ((17048, 17072), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (17055, 17072), True, 'import numpy as np\n'), ((17101, 17127), 'numpy.median', 'np.median', (['episode_rewards'], {}), '(episode_rewards)\n', (17110, 17127), True, 'import numpy as np\n'), ((17156, 17179), 'numpy.min', 'np.min', (['episode_rewards'], {}), '(episode_rewards)\n', (17162, 17179), True, 'import numpy as np\n'), ((17208, 17231), 'numpy.max', 'np.max', (['episode_rewards'], {}), '(episode_rewards)\n', (17214, 17231), True, 'import numpy as np\n'), ((21592, 21628), 'numpy.mean', 'np.mean', (['eval_episode_rewards_robot1'], {}), '(eval_episode_rewards_robot1)\n', (21599, 21628), True, 'import numpy as np\n'), ((21630, 21666), 'numpy.mean', 'np.mean', (['eval_episode_rewards_robot2'], {}), '(eval_episode_rewards_robot2)\n', (21637, 21666), True, 'import numpy as np\n'), ((21845, 21874), 'numpy.mean', 'np.mean', (['eval_episode_rewards'], {}), '(eval_episode_rewards)\n', (21852, 21874), True, 
'import numpy as np\n'), ((11346, 11375), 'numpy.sum', 'np.sum', (['reward_list_robot1[i]'], {}), '(reward_list_robot1[i])\n', (11352, 11375), True, 'import numpy as np\n'), ((11431, 11460), 'numpy.sum', 'np.sum', (['reward_list_robot2[i]'], {}), '(reward_list_robot2[i])\n', (11437, 11460), True, 'import numpy as np\n'), ((14618, 14652), 'copy.deepcopy', 'copy.deepcopy', (['actor_critic_robot1'], {}), '(actor_critic_robot1)\n', (14631, 14652), False, 'import os, sys, time, copy, glob\n'), ((14699, 14733), 'copy.deepcopy', 'copy.deepcopy', (['actor_critic_robot2'], {}), '(actor_critic_robot2)\n', (14712, 14733), False, 'import os, sys, time, copy, glob\n'), ((15011, 15038), 'copy.deepcopy', 'copy.deepcopy', (['actor_critic'], {}), '(actor_critic)\n', (15024, 15038), False, 'import os, sys, time, copy, glob\n'), ((20882, 20916), 'numpy.sum', 'np.sum', (['eval_reward_list_robot1[i]'], {}), '(eval_reward_list_robot1[i])\n', (20888, 20916), True, 'import numpy as np\n'), ((20981, 21015), 'numpy.sum', 'np.sum', (['eval_reward_list_robot2[i]'], {}), '(eval_reward_list_robot2[i])\n', (20987, 21015), True, 'import numpy as np\n')] |
from __future__ import print_function
import argparse
import torch
import os
import numpy as np
import torch.utils.data
from torch import nn, optim, save
from PIL import Image
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader
# Select GPU when available; use a larger working image size on GPU than CPU.
# NOTE(review): `device` is re-assigned after argparse further down this file,
# so this binding only matters for code that runs before that point.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
imsize = 256 if torch.cuda.is_available() else 64
# Preprocessing pipeline: resize (an int argument scales the shorter edge to
# `imsize`, preserving aspect ratio), then convert to a float tensor in [0, 1].
loader = transforms.Compose([
    transforms.Resize(imsize),
    transforms.ToTensor()])
def image_loader(image_name):
    """Load an image as grayscale, apply the module-level transforms, and
    return it as a float tensor with a leading batch dimension on `device`."""
    grayscale = Image.open(image_name).convert('L')
    batched = loader(grayscale).unsqueeze(0)
    return batched.to(device, torch.float)
# Command-line arguments for this VAE run.
# Fix: the help strings previously claimed "(default: 128)" and "(default: 10)"
# while the actual defaults are 50 — the messages now match the code.
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                    help='input batch size for training (default: 50)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
                    help='number of epochs to train (default: 50)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=20, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
# Use CUDA only when it is available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed the RNG so stochastic ops (e.g. reparameterization noise) are repeatable.
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
class VAE(nn.Module):
    """Fully connected variational autoencoder over flattened 6144-element
    inputs with a 100-dimensional latent space.

    Architecture: 6144 -> 400 -> (mu, logvar) heads of size 100 each, and a
    mirrored decoder 100 -> 400 -> 6144 with a sigmoid output.
    """

    def __init__(self):
        super(VAE, self).__init__()
        # Encoder: shared hidden layer feeding two latent heads.
        self.fc1 = nn.Linear(6144, 400)
        self.fc21 = nn.Linear(400, 100)   # latent mean head
        self.fc22 = nn.Linear(400, 100)   # latent log-variance head
        # Decoder: latent code back to a flat reconstruction.
        self.fc3 = nn.Linear(100, 400)
        self.fc4 = nn.Linear(400, 6144)

    def encode(self, x):
        """Map a flat input batch to (mu, logvar) of the latent Gaussian."""
        hidden = F.relu(self.fc1(x))
        return self.fc21(hidden), self.fc22(hidden)

    def reparameterize(self, mu, logvar):
        """Draw z ~ N(mu, sigma^2) with the reparameterization trick.

        In eval mode this returns the mean deterministically.
        """
        if not self.training:
            return mu
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return noise.mul(sigma).add_(mu)

    def decode(self, z):
        """Map a latent code to a sigmoid-activated reconstruction in (0, 1)."""
        hidden = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(hidden))

    def forward(self, x):
        """Encode, sample, decode; returns (reconstruction, mu, logvar)."""
        mu, logvar = self.encode(x.view(-1, 6144))
        latent = self.reparameterize(mu, logvar)
        return self.decode(latent), mu, logvar
# Build the model and restore pretrained weights for inference only.
# NOTE(review): torch.load without map_location will fail on a CPU-only machine
# if the checkpoint was saved from GPU — confirm the deployment target.
model = VAE().to(device)
model.load_state_dict(torch.load('./models/last_model' ))
model.eval()
# NOTE(review): this optimizer is created but never used below — the script
# only encodes data; consider removing it.
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    """Return the summed VAE loss: binary-cross-entropy reconstruction term
    plus the analytic KL divergence.

    KL term follows Appendix B of Kingma & Welling, "Auto-Encoding
    Variational Bayes" (ICLR 2014), https://arxiv.org/abs/1312.6114:
    -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2).
    """
    reconstruction = F.binary_cross_entropy(recon_x, x.view(-1, 6144), reduction='sum')
    kl_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction + kl_divergence
if __name__ == "__main__":
    # Encode 500 sequences of 150 frames each into 100-d latent vectors and
    # save them per-sequence plus as one concatenated dump.
    PATH = 'sequences/1'
    global_output_data = np.array([])
    for j in range(500):
        LPATH = PATH + '/seq_' + str(j)
        # NOTE(review): `actions` is read but never used in the visible code;
        # the read is kept so a missing actions.txt still fails loudly.
        with open(LPATH + '/actions.txt', 'r') as f:
            actions = f.read()
        local_data = []
        output_data = np.array([])
        for i in range(150):
            local_data.append(image_loader(LPATH + '/' + str(i) + '.png'))
            # Fix: the original ran model.encode twice per frame (once into an
            # unused `encoded` variable); encode once and detach the mean.
            no_grad = model.encode(local_data[i].view(-1, 6144))[0][0].detach()
            output_data = np.append(output_data, no_grad)
        seq = torch.from_numpy(output_data).view(150, 100)
        global_output_data = np.append(global_output_data, seq)
        torch.save(seq, LPATH + '/encoded.txt')
        print("Sequence ", str(j), " finished")
    torch.save(global_output_data, PATH + '/encoded.txt')
| [
"torch.manual_seed",
"PIL.Image.open",
"argparse.ArgumentParser",
"torch.load",
"torch.exp",
"torch.from_numpy",
"numpy.append",
"numpy.array",
"torch.randn_like",
"torch.cuda.is_available",
"torch.save",
"torch.nn.Linear",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"... | [((735, 791), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""VAE MNIST Example"""'}), "(description='VAE MNIST Example')\n", (758, 791), False, 'import argparse\n'), ((1491, 1519), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1508, 1519), False, 'import torch\n'), ((1530, 1574), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (1542, 1574), False, 'import torch\n'), ((436, 461), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (459, 461), False, 'import torch\n'), ((1464, 1489), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1487, 1489), False, 'import torch\n'), ((2572, 2605), 'torch.load', 'torch.load', (['"""./models/last_model"""'], {}), "('./models/last_model')\n", (2582, 2605), False, 'import torch\n'), ((3230, 3242), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3238, 3242), True, 'import numpy as np\n'), ((3985, 4038), 'torch.save', 'torch.save', (['global_output_data', "(PATH + '/encoded.txt')"], {}), "(global_output_data, PATH + '/encoded.txt')\n", (3995, 4038), False, 'import torch\n'), ((381, 406), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (404, 406), False, 'import torch\n'), ((505, 530), 'torchvision.transforms.Resize', 'transforms.Resize', (['imsize'], {}), '(imsize)\n', (522, 530), False, 'from torchvision import datasets, transforms\n'), ((537, 558), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (556, 558), False, 'from torchvision import datasets, transforms\n'), ((1748, 1768), 'torch.nn.Linear', 'nn.Linear', (['(6144)', '(400)'], {}), '(6144, 400)\n', (1757, 1768), False, 'from torch import nn, optim, save\n'), ((1789, 1808), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(100)'], {}), '(400, 100)\n', (1798, 1808), False, 'from torch import nn, optim, save\n'), 
((1829, 1848), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(100)'], {}), '(400, 100)\n', (1838, 1848), False, 'from torch import nn, optim, save\n'), ((1868, 1887), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(400)'], {}), '(100, 400)\n', (1877, 1887), False, 'from torch import nn, optim, save\n'), ((1907, 1927), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(6144)'], {}), '(400, 6144)\n', (1916, 1927), False, 'from torch import nn, optim, save\n'), ((3438, 3450), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3446, 3450), True, 'import numpy as np\n'), ((3850, 3884), 'numpy.append', 'np.append', (['global_output_data', 'seq'], {}), '(global_output_data, seq)\n', (3859, 3884), True, 'import numpy as np\n'), ((3893, 3932), 'torch.save', 'torch.save', (['seq', "(LPATH + '/encoded.txt')"], {}), "(seq, LPATH + '/encoded.txt')\n", (3903, 3932), False, 'import torch\n'), ((607, 629), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (617, 629), False, 'from PIL import Image\n'), ((2118, 2141), 'torch.exp', 'torch.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (2127, 2141), False, 'import torch\n'), ((2158, 2179), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (2174, 2179), False, 'import torch\n'), ((3730, 3761), 'numpy.append', 'np.append', (['output_data', 'no_grad'], {}), '(output_data, no_grad)\n', (3739, 3761), True, 'import numpy as np\n'), ((3776, 3805), 'torch.from_numpy', 'torch.from_numpy', (['output_data'], {}), '(output_data)\n', (3792, 3805), False, 'import torch\n')] |
import networkx as nx
from networkx.algorithms import isomorphism
from networkx.algorithms.approximation import ramsey
import re
import os
import csv
import matplotlib.pyplot as plt
import sys
from networkx.algorithms.traversal.depth_first_search import dfs_tree
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
import numpy as np
from graphviz import Digraph
# Print full numpy arrays (no truncation) when dumping the distance matrix.
np.set_printoptions(threshold=sys.maxsize)
# NOTE(review): these module-level graphs are reassigned inside the main
# loop below (create_graph results) before any use; they appear redundant.
G1 = nx.Graph()
G2 = nx.Graph()
def read_txt(path):
    """Return the newline-stripped lines of *path* as a list of strings.

    Used to read the 'sort.txt' index of family/app file names.
    """
    # A context manager guarantees the handle is closed even if reading
    # raises (the original open()/close() pair leaked it on error).
    with open(path) as f1:
        return [line.rstrip("\n") for line in f1]
def read_file(path):
    """Read one graph dump file and return its lines, newline-stripped."""
    handle = open(path)
    lines = [str(entry.rstrip("\n")) for entry in handle]
    handle.close()
    return lines
def create_graph(graph, parent):
    """Build a networkx MultiDiGraph (plus a graphviz Digraph) from dump lines
    of the form 'caller ----> [callee1, callee2, ...]'.

    Returns the graph and the list of callees reached directly from any node
    whose name appears in *parent* (identifier sources such as 'IMEI').
    """
    l = len(graph)
    neighbors = []
    #G = nx.DiGraph()
    G = nx.MultiDiGraph()
    G1 = Digraph(comment = '')
    for i in range(0, l):
        string1 = graph[i]
        # Text before '---->' is the caller node; strip all spaces.
        node = str(string1.split("---->")[0])
        node = node.replace(" ", "")
        # Skip lines with no edge part or with an empty ' []' edge list.
        if len(string1.split("---->")) > 1 and str(string1.split("---->")[1]).split(",") != [' []']:
            #print(string1)
            edges = str(string1.split("---->")[1]).split(",")
            if not G.has_node(node):
                G.add_node(node)
                G1.node(node)
            for j in range(0, len(edges)):
                edge = edges[j]
                # Strip the list brackets / quotes / spaces left over from the
                # dump format (count -1 removes every ']' occurrence).
                edge = edge.replace("[","",1)
                edge = edge.replace("]","",-1)
                edge = edge.replace("'", "")
                edge = edge.replace(" ", "")
                #nx.add_path(G, [node, edge])
                #if 'com/scvngr/levelup/' in edge:
                G.add_edge(node, edge)
                G1.edge(node, edge)
                # Record direct callees of the tracked identifier sources.
                if node in parent:
                    neighbors.append(edge)
    #print(G.edges('Lcom/popdeem/sdk/core/utils/PDUniqueIdentifierUtils$GenerateUIDAsync;.doInBackground:([Ljava/lang/Void;)Ljava/lang/String;'))
    return G, neighbors
def create_subtree(Graph, node):
    """Return the depth-first-search tree of *Graph* rooted at *node*."""
    return nx.dfs_tree(Graph, source=node)
def print_loop(neighbor1, neighbor2, names1, names2, G1, G2):
    """Score the similarity between two apps' identifier-usage call graphs.

    For every callee in *neighbor1*, find the best-matching callee in
    *neighbor2* (same argument signature plus package-prefix heuristics that
    tolerate single-letter obfuscated path segments), compare their DFS
    subtrees via keep_argument/de_obfuscate, and accumulate a match score.
    High-confidence pairs (>= 0.8 overlap) are appended to matches2.csv and
    recorded in the module-level global `dic`. Returns a distance in [0, 1]
    (0 when both graphs have no edges).
    """
    # NOTE(review): sum1 is initialised as a list and immediately rebound to 0;
    # sum2/index1/index2/io/count/owncode/sums1/sums2/n_max/i_max are never
    # used afterwards — they look like leftovers.
    sum1 = []
    sum2 = []
    index1 = []
    index2 = []
    sum1 = 0
    io = False
    count = 0
    owncode = False
    #print(neighbor1)
    #print(neighbor2)
    sums1 = len(neighbor1)
    sums2 = len(neighbor2)
    for i1 in range(0, len(neighbor1)):
        T1 = create_subtree(G1, neighbor1[i1])
        n1 = neighbor1[i1]
        match = False
        n_max = ""
        obfuscate = False
        i_max = -1
        ll2 = 0
        # dd maps n1 -> best subtree-overlap count seen so far;
        # nn / ll2 remember the matching n2 and its subtree size.
        dd = {}
        nn = 0
        for i2 in range(0, len(neighbor2)):
            T2 = create_subtree(G2, neighbor2[i2])
            n1 = neighbor1[i1]
            n2 = neighbor2[i2]
            #if n1.split(":")[-1] == n2.split(":")[-1] and n1.split(":")[-1] == "(Landroid/content/Context;)Ljava/lang/String;":
            # Candidate match only if the argument/return signatures agree.
            if n1.split(":")[-1] == n2.split(":")[-1]:
                if len(n1.split("/")) <= 2 or len(n2.split("/")) <= 2:
                    continue
                # First three package path segments of each method name.
                f11 = n1.split("/")[0]
                f12 = n1.split("/")[1]
                f13 = n1.split("/")[2]
                f21 = n2.split("/")[0]
                f22 = n2.split("/")[1]
                f23 = n2.split("/")[2]
                # A single-letter path segment suggests name obfuscation.
                l1_local = str(n1.split(";")[0])
                for ii in range(1, len(l1_local.split("/"))):
                    f = l1_local.split("/")[ii]
                    if len(f) == 1:
                        obfuscate = True
                l2_local = str(n2.split(";")[0])
                for ii in range(1, len(l2_local.split("/"))):
                    f = l2_local.split("/")[ii]
                    if len(f) == 1:
                        obfuscate = True
                # Both class names readable -> treat as not obfuscated.
                if len(l1_local.split("/")[len(l1_local.split("/")) - 1]) != 1 and len(l2_local.split("/")[len(l2_local.split("/")) - 1]) != 1:
                    #print(l1_local.split("/")[len(l1_local.split("/")) - 1])
                    obfuscate = False
                # Same top-level package and one side obfuscated at level 2.
                if f11 == f21:
                    if len(f12) == 1 and len(f22) >= 1:
                        match = True
                    elif len(f12) >= 1 and len(f22) == 1:
                        match = True
                #nx.draw_networkx(T1)
                #nx.draw_networkx(T2)
                #plt.show()
                #print(sorted(T1.degree, key=lambda x: x[1], reverse=True))
                if f11 == f21 and f12 == f22:
                    match = True
                # Readable but different third-level packages -> reject.
                if f13 != f23 and len(f13) > 1 and len(f23) > 1:
                    match = False
                if len(str(l1_local).split("/")) != len(str(l2_local).split("/")):
                    match = False
                # Method (not class) names: both readable -> must not differ.
                m1 = str(str(str(n1.split(";")[1]).split(":")[0]).split(".")[1])
                m2 = str(str(str(n2.split(";")[1]).split(":")[0]).split(".")[1])
                #print(m1, m2)
                if len(m1) > 1 and len(m2) > 1:
                    match = False
                if match == True:
                    # Compare the obfuscation-resistant edge signatures of
                    # both DFS subtrees and keep the best-overlap candidate.
                    T1_deob = keep_argument(T1)
                    T2_deob = keep_argument(T2)
                    common = de_obfuscate(T1_deob, T2_deob)
                    if n1 in dd:
                        if dd[n1] < common:
                            dd[n1] = common
                            nn = n2
                            ll2 = len(T2)
                    else:
                        dd[n1] = common
                        nn =n2
                        ll2 = len(T2)
        # NOTE(review): these m1/m2 use the loop-final n2 (NameError if
        # neighbor2 is empty) and are never read afterwards — likely leftovers.
        m1 = str(n1.split(";")[1]).split(":")[0]
        m2 = str(n2.split(";")[1]).split(":")[0]
        if n1 in dd:
            sum1 = sum1 + dd[n1]*2 + 2
            # Dice-style overlap of the two subtrees; >= 0.8 is reported.
            if (dd[n1]*2)/(len(T1) + ll2) >= 0.8:
                with open('/Users/sujon335/PycharmProjects/CFG_analyzer/TopFreeApps/matches2.csv', 'a') as file:
                    file.write("{},{},{},{}".format(names1, names2, n1, nn))
                    file.write("\n")
                    # Redundant: the with-block already closes the file.
                    file.close()
                #print(names1, names2, n1, nn, (dd[n1]*2)/(len(T1) + ll2))
                # Record the matched method names per app in the global dic.
                if names1 in dic:
                    ll = dic[names1]
                    ll.append(n1)
                    dic[names1] = ll
                else:
                    ll = [n1]
                    dic[names1] = ll
                if names2 in dic:
                    ll = dic[names2]
                    ll.append(nn)
                    dic[names2] = ll
                else:
                    ll = [nn]
                    dic[names2] = ll
    if len(G1.edges())+ len(G2.edges()) == 0:
        #print("{},{},{}".format(names1, names2, 0))
        #print(0)
        return 0
    else:
        #print("{},{},{}".format(names1, names2, sum1/(len(G1.edges())+ len(G2.edges()))))
        #print(sum1/(len(G1.edges())+ len(G2.edges())))
        return 1 - sum1/(len(G1.edges())+ len(G2.edges()))
#### remove obfuscated part
def keep_argument(Graph):
    """Reduce each edge of *Graph* to the pair of method-signature suffixes.

    For every edge (u, v), keep only the text after the last ':' in each
    endpoint (the argument/return signature), which survives identifier
    obfuscation. Returns a list of (s1, s2) tuples in edge order.
    """
    # Fix: removed the unused `G = nx.DiGraph()` local the original created.
    return [(edge[0].split(":")[-1], edge[1].split(":")[-1])
            for edge in Graph.edges()]
#### deobfuscate graph
def de_obfuscate(list1, list2):
    """Count how many entries of *list1* also appear in *list2*.

    Duplicates in *list1* each count once, matching the original
    list-intersection semantics. Membership is tested against a set, so the
    cost is O(len(list1) + len(list2)) instead of the original O(n*m);
    entries are the hashable (str, str) tuples produced by keep_argument.
    Also removes the two unused `visited` lists the original built.
    """
    lookup = set(list2)
    return sum(1 for entry in list1 if entry in lookup)
#print(edges2)
#path1 = '/Users/xinyin/Downloads/PunchhTech/AndroidID/'
#path1 = '/Users/xinyin/Downloads/PunchhTech/IMEI/'
#path1 = '/Users/xinyin/Downloads/'
#path2 = '/Users/xinyin/Downloads/Graphs2/Paytronix/AndroidID/'
#path1= '/Users/xinyin/Downloads/LevelUp/IMEI/'
#path1 = '/Users/xinyin/Downloads/Combined_Graphs_With_All_IDs/Levelup/'
path1 = '/Users/sujon335/PycharmProjects/CFG_analyzer/TopFreeApps/Combined/'
#Families1 = ['CPKRewards.txt', 'DogfishOff.txt']
#Families1 = ['BlueSushiSakeGrill.txt', 'BluestoneLane.txt']
# sort.txt lists the per-app graph dump file names to compare pairwise.
Families1 = read_txt(path1 + 'sort.txt')
#Families1 = ['DeliDoughRewards.txt', 'JPLicksRewards.txt', 'com.thanx.club801.txt']
#Families2 = read_txt(path2 + 'sort.txt')
#Families1 = ['graph_ML_CitybBQ_punchh.txt', 'graph_ML_freebirdrestaurant_punchh.txt']
#Families1 = ["graph_ML_SnappySalads_Punchh.txt"]
#Families2 = ["CityGreens.txt"]
#Families1 = ["DogfishOff.txt", "CPKRewards.txt"]
#Families1 = read_txt(path1 + 'sort.txt')
#Families2 = ['graph_ML_costavida_punchh.txt']
#Families = ['graph_ML_LYFEKITCHENRewards.txt', 'Beefsteak1.txt']
#Families1 = ['graph_ML_PieFivePizza_Punchh.txt', 'graph_ML_FarmerBoys_punchh.txt']
#Families = ['graph_ML_LYFEKITCHENRewards.txt', 'graph_ML_MoesRewards_Punchh.txt']
#two_d_array = [[0 for j in range(len(Families))] for i in range(len(Families))]
#print(Families)
# Symmetric pairwise distance matrix over all apps. `dic` collects matched
# method names per app and is mutated inside print_loop (module-level global).
distance_matrix = np.zeros((len(Families1), len(Families1)))
dic = {}
for i in range(0, len(Families1)):
    list1 =[]
    #print(Families[i])
    # Upper triangle only; the result is mirrored into [j][i] below.
    for j in range(i + 1, len(Families1)):
        if i == j:
            edit_distance = 0
            continue
        graph1 = read_file(path1 + Families1[i])
        #print(Families1[j])
        graph2 = read_file(path1 + Families1[j])
        # Identifier sources whose direct callees anchor the comparison.
        parent1 = ['IMEI', 'IMSI', 'AndroidID', 'SERIAL', 'MacAddress', 'AdvertisingID']
        parent2 = ['IMEI', 'IMSI', 'AndroidID', 'SERIAL', 'MacAddress', 'AdvertisingID']
        G1, nei1 = create_graph(graph1, parent1)
        G2, nei2 = create_graph(graph2, parent2)
        #print(Families1[j])
        #nx.draw_networkx(G1)
        #plt.show()
        #nx.draw_networkx(G2)
        #plt.show()
        names1 = str(Families1[i].split(".txt")[0])
        names2 = str(Families1[j].split(".txt")[0])
        edit_distance = print_loop(nei1, nei2, names1, names2, G1, G2)
        # Clamp: print_loop can return slightly negative distances.
        if edit_distance < 0:
            edit_distance = 0
        distance_matrix[i][j] = edit_distance
        distance_matrix[j][i] = edit_distance
#print(dic)
# NOTE(review): csv is re-imported here; it is already imported at the top.
import csv
# Similarity = 1 - distance; written row-by-row for offline inspection.
row_list = 1 - distance_matrix
#############write similarity matrix
with open('/Users/sujon335/PycharmProjects/CFG_analyzer/TopFreeApps/similarity2.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerows(row_list)
    # Redundant: the with-block already closes the file.
    file.close()
############clustering ##############
############set eps #############
# metric="precomputed" makes DBSCAN consume the distance matrix directly.
clustering = DBSCAN(eps = 0.5, min_samples = 2, metric = "precomputed").fit(distance_matrix)
print(clustering.labels_)
# Print members of the first 30 cluster labels (label -1 = noise is skipped).
for j in range(0, 30):
    print("clustering ",j+1)
    for i in range(0, len(Families1)):
        if clustering.labels_[i] == j:
            print(Families1[i].replace(".txt", ""))
            #if Families1[i].replace(".txt", "") in dic:
            #    print(set(dic[Families1[i].replace(".txt", "")]))
#print(dic)
| [
"networkx.MultiDiGraph",
"networkx.DiGraph",
"csv.writer",
"networkx.Graph",
"networkx.dfs_tree",
"graphviz.Digraph",
"sklearn.cluster.DBSCAN",
"numpy.set_printoptions"
] | [((413, 455), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (432, 455), True, 'import numpy as np\n'), ((462, 472), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (470, 472), True, 'import networkx as nx\n'), ((479, 489), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (487, 489), True, 'import networkx as nx\n'), ((985, 1002), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (1000, 1002), True, 'import networkx as nx\n'), ((1013, 1032), 'graphviz.Digraph', 'Digraph', ([], {'comment': '""""""'}), "(comment='')\n", (1020, 1032), False, 'from graphviz import Digraph\n'), ((2365, 2396), 'networkx.dfs_tree', 'nx.dfs_tree', (['Graph'], {'source': 'node'}), '(Graph, source=node)\n', (2376, 2396), True, 'import networkx as nx\n'), ((7675, 7687), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (7685, 7687), True, 'import networkx as nx\n'), ((11060, 11076), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (11070, 11076), False, 'import csv\n'), ((11213, 11265), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.5)', 'min_samples': '(2)', 'metric': '"""precomputed"""'}), "(eps=0.5, min_samples=2, metric='precomputed')\n", (11219, 11265), False, 'from sklearn.cluster import DBSCAN\n')] |
import numpy as np
from hyper_opt import create_mask,model_1D,model_reduced,model_1D_calibrate
import load_data
import data_preprocessing
import generate_result
from sklearn.model_selection import train_test_split
def main():
    """Three-stage classification pipeline for 3-D brain volumes.

    Loads control/patient volumes and a brain mask, splits off a test set,
    separates in-/outliers, augments the outliers with noise and filter
    variants, then trains and evaluates:
      stage 1 - a high-confidence model on masked inlier features,
      stage 2 - a fallback model for low-confidence stage-1 predictions,
      stage 3 - a model trained with augmented outliers for outlier test data.
    Results and a textual report are written via generate_result.
    """
    # define input file names, directories, and parameters
    train_Con_file_name = 'CV_con.npz'
    train_AD_file_name = 'CV_pat.npz'
    #test_Con_file_name = 'CV_ADNI_CON.npz'
    #test_AD_file_name = 'CV_ADNI_AD.npz'
    mask_name = '4mm_brain_mask_bin_epl.nii.gz'
    results_directory = 'Output_results_directory'
    results_path = load_data.find_path(results_directory)
    number_of_cv = 5
    feature_selection_type = 'L2_penality'
    Hyperparameter_model__1 = 1000
    Hyperparameter_model__3 = 1000
    number_of_neighbours = 1
    model_name = 'gaussian_process'
    # loading input data and mask
    train_data,train_labels=load_data.train_data_3d(train_Con_file_name,train_AD_file_name)
    #test_data, test_labels = load_data.test_data_3d(test_Con_file_name, test_AD_file_name)
    mask_4mm = load_data.mask(mask_name)
    original_mask=mask_4mm.get_fdata()
    # data preprocessing
    # Move the sample axis first: (x, y, z, n) -> (n, x, y, z), then mask.
    train_data = np.moveaxis(train_data.copy(), 3, 0)
    #test_data = np.moveaxis(test_data.copy(), 3, 0)
    train_data = train_data * original_mask
    #test_data = test_data * original_mask
    shape = np.shape(train_data)
    train_data_flattened = data_preprocessing.flatten(train_data.copy())
    #test_data_flattened = data_preprocessing.flatten(test_data.copy())
    orignal_mask_flatten = data_preprocessing.flatten(original_mask[np.newaxis, :, :, :].copy())
    orignal_mask_flatten = np.reshape(orignal_mask_flatten, (-1))
    train_data_flattened = data_preprocessing.MinMax_scaler(train_data_flattened.copy())
    #test_data_flattened = data_preprocessing.MinMax_scaler(test_data_flattened.copy())
    # train_data_flattened, test_data_flattened=data_preprocessing.MinMax_scaler_correct(train_data_flattened, test_data_flattened)
    # NOTE(review): the test split is carved out of the training data here;
    # the external ADNI test set above is commented out.
    train_data_flattened, test_data_flattened, train_labels, test_labels = train_test_split(train_data_flattened, train_labels, test_size=.2, random_state=42)
    train_data_inlier, train_labels_inlier, outlier_indices_train = data_preprocessing.outliers(train_data_flattened,
                                                                                                train_labels,
                                                                                                number_of_neighbours)
    test_data_inlier, test_labels_inlier, outlier_indices_test = data_preprocessing.novelty(train_data_inlier,
                                                                                            train_labels_inlier,
                                                                                            test_data_flattened,
                                                                                            test_labels,
                                                                                            number_of_neighbours)
    train_data_inlier_unflattened = data_preprocessing.deflatten(train_data_inlier, shape)
    train_data_outlier_unflattened = data_preprocessing.deflatten(train_data_flattened[outlier_indices_train], shape)
    train_data_inlier_unflattened = np.moveaxis(train_data_inlier_unflattened.copy(), 0, 3)
    train_data_outlier_unflattened = np.moveaxis(train_data_outlier_unflattened.copy(), 0, 3)
    trian_labels_outliers = train_labels[outlier_indices_train]
    # Augmentation: create noise and filter variants of each volume.
    train_data_inlier_noised = data_preprocessing.apply_noise_manytypes(train_data_inlier_unflattened.copy())
    train_data_inlier_filtered = data_preprocessing.apply_filter_manytypes(train_data_inlier_unflattened.copy())
    train_data_inlier_more = data_preprocessing.concatination(train_data_inlier_noised, train_data_inlier_filtered)
    #train_labels_inlier_more = data_preprocessing.dublicate(train_labels_inlier.copy(), 29) #to match length of data
    train_data_outlier_noised = data_preprocessing.apply_noise_manytypes(train_data_outlier_unflattened.copy())
    train_data_outlier_filtered = data_preprocessing.apply_filter_manytypes(train_data_outlier_unflattened.copy())
    train_data_outlier_more = data_preprocessing.concatination(train_data_outlier_noised, train_data_outlier_filtered)
    train_labels_outlier_more = data_preprocessing.dublicate(trian_labels_outliers[:, np.newaxis].copy(), 29)#to match length of data
    train_data_inlier_more = np.moveaxis(train_data_inlier_more.copy(), 3, 0)
    train_data_outlier_more = np.moveaxis(train_data_outlier_more.copy(), 3, 0)
    train_data_outlier_more_flattened = data_preprocessing.flatten(train_data_outlier_more.copy())
    # train_data_inlier_more_flattened = data_preprocessing.flatten(train_data_inlier_more.copy()) #uncomment to use noised inliers
    # train_data_inlier_inlier, train_labels_inlier_inlier, inlier_outlier_indices_train = data_preprocessing.novelty(
    #     train_data_inlier, train_labels_inlier,
    #     train_data_inlier_more_flattened,
    #     train_labels_inlier_more,
    #     number_of_neighbours)
    # Keep only synthesized outliers that are "novel-consistent" with the
    # real training outliers.
    train_data_outlier_inlier, train_labels_outlier_inlier, outlier_outlier_indices_train = data_preprocessing.novelty(
        train_data_flattened[outlier_indices_train], train_labels[outlier_indices_train],
        train_data_outlier_more_flattened,
        train_labels_outlier_more,
        number_of_neighbours)
    train_data_inlier, train_labels_inlier = data_preprocessing.upsampling(train_data_inlier,train_labels_inlier[:, np.newaxis])
    train_data_inlier, train_labels_inlier = data_preprocessing.shuffling(train_data_inlier,train_labels_inlier)
    train_data_outlier_inlier, train_labels_outlier_inlier = data_preprocessing.upsampling(
        train_data_outlier_inlier,
        train_labels_outlier_inlier)
    train_data_outlier_inlier, train_labels_outlier_inlier = data_preprocessing.shuffling(train_data_outlier_inlier,
                                                                                          train_labels_outlier_inlier)
    # Brain extraction of data: keep only voxels inside the brain mask.
    train_data_inlier_brain=train_data_inlier[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)]
    test_data_inlier_brain=test_data_inlier[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)]
    train_data_outlier_inlier_brain=train_data_outlier_inlier[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)]
    test_data_outlier_brain=(test_data_flattened[outlier_indices_test])[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)]
    concated_data = data_preprocessing.concat(train_data_inlier, train_data_outlier_inlier)
    concated_labels = data_preprocessing.concat(train_labels_inlier[:, np.newaxis],
                                                train_labels_outlier_inlier[:, np.newaxis])
    # Model stage 1 with high certainty
    model1_created_mask, model1_, model1_name, model1_weights = create_mask(train_data_inlier_brain, train_labels_inlier,
                                                                           number_of_cv, feature_selection_type,
                                                                           Hyperparameter_model__1, mask_threshold=4,
                                                                           model_type=model_name)
    #train_data_inlier_CVspace = data_preprocessing.coefficient_of_variance(train_data_inlier_brain * model1_created_mask)[:,np.newaxis]
    #test_data_inlier_CVspace = data_preprocessing.coefficient_of_variance(test_data_inlier_brain * model1_created_mask)[:,np.newaxis]
    #train_data_inlier_CVspace = np.sum(train_data_inlier_brain * model1_created_mask, axis=1)[:,np.newaxis]
    #test_data_inlier_CVspace = np.sum(test_data_inlier_brain * model1_created_mask, axis=1)[:,np.newaxis]
    train_data_inlier_CVspace = (train_data_inlier_brain * model1_created_mask)
    test_data_inlier_CVspace = (test_data_inlier_brain * model1_created_mask)
    model1_, model1_name = model_reduced(train_data_inlier_CVspace, train_labels_inlier, model1_created_mask,
                                         data_validation=None, labels_validation=None,
                                         model_type='gaussian_process')
    model1_test_accuracy, model1_F1_score, model1_auc,low_confidence_indices=generate_result.out_result_highprob(test_data_inlier_CVspace,
                                                                                                                 test_labels_inlier,
                                                                                                                 original_mask,model1_created_mask,
                                                                                                                 model1_)
    # Model stage 2 with low certainty
    model2_, model2_name = model_reduced(train_data_inlier_CVspace, train_labels_inlier, model1_created_mask,
                                         data_validation=None, labels_validation=None,
                                         model_type=model_name)
    model2_test_accuracy, model2_F1_score, model2_auc = generate_result.out_result(test_data_inlier_CVspace[low_confidence_indices],
                                                                                   test_labels_inlier[low_confidence_indices],
                                                                                   original_mask,
                                                                                   model1_created_mask,
                                                                                   model2_)
    # Model stage 3 with outliers
    model3_created_mask, model3_, model3_name, model3_weights = create_mask(concated_data,
                                                                            concated_labels, number_of_cv,
                                                                            feature_selection_type, Hyperparameter_model__3,
                                                                            mask_threshold=3,
                                                                            model_type=model_name)
    #concated_data_cv = data_preprocessing.coefficient_of_variance(
    #    concated_data[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)].copy() * model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)])[:, np.newaxis]
    #test_data_outlier_cv = data_preprocessing.coefficient_of_variance(
    #    test_data_outlier_brain *model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)])[:, np.newaxis]
    #concated_data_cv = np.sum(
    #    concated_data[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)].copy() * model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)], axis=1)[:, np.newaxis]
    #test_data_outlier_cv = np.sum(
    #    test_data_outlier_brain *model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)], axis=1)[:, np.newaxis]
    concated_data_cv = (concated_data[:,np.squeeze(np.where(orignal_mask_flatten>0),axis=0)].copy() *
                        model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)])
    test_data_outlier_cv = (
            test_data_outlier_brain *model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)])
    model3_, model3_name = model_reduced(concated_data_cv, concated_labels, model3_created_mask,
                                         data_validation=None, labels_validation=None, model_type=model_name)
    model3_test_accuracy,model3_F1_score,model3_auc = generate_result.out_result(np.nan_to_num(test_data_outlier_cv) ,
                                                                                 np.nan_to_num(test_labels[outlier_indices_test]), np.nan_to_num(original_mask),
                                                                                 np.nan_to_num(model3_created_mask[np.squeeze(np.where(orignal_mask_flatten > 0), axis=0)]), model3_)
    # Fractions of the test set handled by each stage (for the report).
    testnum=len(test_labels)
    highcernum=(len(test_labels_inlier)-len(test_labels_inlier[low_confidence_indices]))/testnum
    lowcernum=(len(test_labels_inlier[low_confidence_indices]))/testnum
    outnum=(len(test_labels[outlier_indices_test]))/testnum
    data_preprocessing_method = "Seperating outlier of training set and test set, then synthethise more data from training-outliers, then appling probability predictions. High probability " \
                                "samples model is used with predictions with high probability, then apply low probability model. Finally add noise to outliers and concatinate with inlier data " \
                                "to be used for outlier model"
    generate_result.print_result_3models(mask_4mm, results_path, model3_created_mask[np.squeeze(np.where(orignal_mask_flatten>0),axis=0)],model3_,model3_name,
                                         model3_weights[np.squeeze(np.where(orignal_mask_flatten>0),axis=0)], model3_test_accuracy,
                                         model3_auc, model3_F1_score, Hyperparameter_model__3,
                                         model2_,model2_name, model2_test_accuracy, model2_auc, model2_F1_score,
                                         model1_,model1_created_mask, model1_name, model1_weights, model1_test_accuracy, model1_auc, model1_F1_score,
                                         Hyperparameter_model__1,
                                         feature_selection_type, data_preprocessing_method,highcernum,lowcernum,outnum)
if __name__=='__main__':
    main()
| [
"data_preprocessing.concatination",
"numpy.reshape",
"data_preprocessing.shuffling",
"generate_result.out_result_highprob",
"numpy.where",
"data_preprocessing.concat",
"data_preprocessing.upsampling",
"generate_result.out_result",
"load_data.find_path",
"sklearn.model_selection.train_test_split",
... | [((567, 605), 'load_data.find_path', 'load_data.find_path', (['results_directory'], {}), '(results_directory)\n', (586, 605), False, 'import load_data\n'), ((868, 932), 'load_data.train_data_3d', 'load_data.train_data_3d', (['train_Con_file_name', 'train_AD_file_name'], {}), '(train_Con_file_name, train_AD_file_name)\n', (891, 932), False, 'import load_data\n'), ((1039, 1064), 'load_data.mask', 'load_data.mask', (['mask_name'], {}), '(mask_name)\n', (1053, 1064), False, 'import load_data\n'), ((1337, 1357), 'numpy.shape', 'np.shape', (['train_data'], {}), '(train_data)\n', (1345, 1357), True, 'import numpy as np\n'), ((1627, 1663), 'numpy.reshape', 'np.reshape', (['orignal_mask_flatten', '(-1)'], {}), '(orignal_mask_flatten, -1)\n', (1637, 1663), True, 'import numpy as np\n'), ((2050, 2138), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_data_flattened', 'train_labels'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(train_data_flattened, train_labels, test_size=0.2,\n random_state=42)\n', (2066, 2138), False, 'from sklearn.model_selection import train_test_split\n'), ((2202, 2291), 'data_preprocessing.outliers', 'data_preprocessing.outliers', (['train_data_flattened', 'train_labels', 'number_of_neighbours'], {}), '(train_data_flattened, train_labels,\n number_of_neighbours)\n', (2229, 2291), False, 'import data_preprocessing\n'), ((2545, 2671), 'data_preprocessing.novelty', 'data_preprocessing.novelty', (['train_data_inlier', 'train_labels_inlier', 'test_data_flattened', 'test_labels', 'number_of_neighbours'], {}), '(train_data_inlier, train_labels_inlier,\n test_data_flattened, test_labels, number_of_neighbours)\n', (2571, 2671), False, 'import data_preprocessing\n'), ((3077, 3131), 'data_preprocessing.deflatten', 'data_preprocessing.deflatten', (['train_data_inlier', 'shape'], {}), '(train_data_inlier, shape)\n', (3105, 3131), False, 'import data_preprocessing\n'), ((3169, 3254), 'data_preprocessing.deflatten', 
'data_preprocessing.deflatten', (['train_data_flattened[outlier_indices_train]', 'shape'], {}), '(train_data_flattened[outlier_indices_train], shape\n )\n', (3197, 3254), False, 'import data_preprocessing\n'), ((3752, 3842), 'data_preprocessing.concatination', 'data_preprocessing.concatination', (['train_data_inlier_noised', 'train_data_inlier_filtered'], {}), '(train_data_inlier_noised,\n train_data_inlier_filtered)\n', (3784, 3842), False, 'import data_preprocessing\n'), ((4214, 4306), 'data_preprocessing.concatination', 'data_preprocessing.concatination', (['train_data_outlier_noised', 'train_data_outlier_filtered'], {}), '(train_data_outlier_noised,\n train_data_outlier_filtered)\n', (4246, 4306), False, 'import data_preprocessing\n'), ((5199, 5399), 'data_preprocessing.novelty', 'data_preprocessing.novelty', (['train_data_flattened[outlier_indices_train]', 'train_labels[outlier_indices_train]', 'train_data_outlier_more_flattened', 'train_labels_outlier_more', 'number_of_neighbours'], {}), '(train_data_flattened[outlier_indices_train],\n train_labels[outlier_indices_train], train_data_outlier_more_flattened,\n train_labels_outlier_more, number_of_neighbours)\n', (5225, 5399), False, 'import data_preprocessing\n'), ((5470, 5559), 'data_preprocessing.upsampling', 'data_preprocessing.upsampling', (['train_data_inlier', 'train_labels_inlier[:, np.newaxis]'], {}), '(train_data_inlier, train_labels_inlier[:, np.\n newaxis])\n', (5499, 5559), False, 'import data_preprocessing\n'), ((5601, 5669), 'data_preprocessing.shuffling', 'data_preprocessing.shuffling', (['train_data_inlier', 'train_labels_inlier'], {}), '(train_data_inlier, train_labels_inlier)\n', (5629, 5669), False, 'import data_preprocessing\n'), ((5730, 5819), 'data_preprocessing.upsampling', 'data_preprocessing.upsampling', (['train_data_outlier_inlier', 'train_labels_outlier_inlier'], {}), '(train_data_outlier_inlier,\n train_labels_outlier_inlier)\n', (5759, 5819), False, 'import data_preprocessing\n'), 
((6063, 6151), 'data_preprocessing.shuffling', 'data_preprocessing.shuffling', (['train_data_outlier_inlier', 'train_labels_outlier_inlier'], {}), '(train_data_outlier_inlier,\n train_labels_outlier_inlier)\n', (6091, 6151), False, 'import data_preprocessing\n'), ((6735, 6806), 'data_preprocessing.concat', 'data_preprocessing.concat', (['train_data_inlier', 'train_data_outlier_inlier'], {}), '(train_data_inlier, train_data_outlier_inlier)\n', (6760, 6806), False, 'import data_preprocessing\n'), ((6829, 6938), 'data_preprocessing.concat', 'data_preprocessing.concat', (['train_labels_inlier[:, np.newaxis]', 'train_labels_outlier_inlier[:, np.newaxis]'], {}), '(train_labels_inlier[:, np.newaxis],\n train_labels_outlier_inlier[:, np.newaxis])\n', (6854, 6938), False, 'import data_preprocessing\n'), ((7087, 7256), 'hyper_opt.create_mask', 'create_mask', (['train_data_inlier_brain', 'train_labels_inlier', 'number_of_cv', 'feature_selection_type', 'Hyperparameter_model__1'], {'mask_threshold': '(4)', 'model_type': 'model_name'}), '(train_data_inlier_brain, train_labels_inlier, number_of_cv,\n feature_selection_type, Hyperparameter_model__1, mask_threshold=4,\n model_type=model_name)\n', (7098, 7256), False, 'from hyper_opt import create_mask, model_1D, model_reduced, model_1D_calibrate\n'), ((8151, 8318), 'hyper_opt.model_reduced', 'model_reduced', (['train_data_inlier_CVspace', 'train_labels_inlier', 'model1_created_mask'], {'data_validation': 'None', 'labels_validation': 'None', 'model_type': '"""gaussian_process"""'}), "(train_data_inlier_CVspace, train_labels_inlier,\n model1_created_mask, data_validation=None, labels_validation=None,\n model_type='gaussian_process')\n", (8164, 8318), False, 'from hyper_opt import create_mask, model_1D, model_reduced, model_1D_calibrate\n'), ((8480, 8610), 'generate_result.out_result_highprob', 'generate_result.out_result_highprob', (['test_data_inlier_CVspace', 'test_labels_inlier', 'original_mask', 'model1_created_mask', 'model1_'], 
{}), '(test_data_inlier_CVspace,\n test_labels_inlier, original_mask, model1_created_mask, model1_)\n', (8515, 8610), False, 'import generate_result\n'), ((8945, 9104), 'hyper_opt.model_reduced', 'model_reduced', (['train_data_inlier_CVspace', 'train_labels_inlier', 'model1_created_mask'], {'data_validation': 'None', 'labels_validation': 'None', 'model_type': 'model_name'}), '(train_data_inlier_CVspace, train_labels_inlier,\n model1_created_mask, data_validation=None, labels_validation=None,\n model_type=model_name)\n', (8958, 9104), False, 'from hyper_opt import create_mask, model_1D, model_reduced, model_1D_calibrate\n'), ((9225, 9398), 'generate_result.out_result', 'generate_result.out_result', (['test_data_inlier_CVspace[low_confidence_indices]', 'test_labels_inlier[low_confidence_indices]', 'original_mask', 'model1_created_mask', 'model2_'], {}), '(test_data_inlier_CVspace[low_confidence_indices],\n test_labels_inlier[low_confidence_indices], original_mask,\n model1_created_mask, model2_)\n', (9251, 9398), False, 'import generate_result\n'), ((9860, 10015), 'hyper_opt.create_mask', 'create_mask', (['concated_data', 'concated_labels', 'number_of_cv', 'feature_selection_type', 'Hyperparameter_model__3'], {'mask_threshold': '(3)', 'model_type': 'model_name'}), '(concated_data, concated_labels, number_of_cv,\n feature_selection_type, Hyperparameter_model__3, mask_threshold=3,\n model_type=model_name)\n', (9871, 10015), False, 'from hyper_opt import create_mask, model_1D, model_reduced, model_1D_calibrate\n'), ((11592, 11734), 'hyper_opt.model_reduced', 'model_reduced', (['concated_data_cv', 'concated_labels', 'model3_created_mask'], {'data_validation': 'None', 'labels_validation': 'None', 'model_type': 'model_name'}), '(concated_data_cv, concated_labels, model3_created_mask,\n data_validation=None, labels_validation=None, model_type=model_name)\n', (11605, 11734), False, 'from hyper_opt import create_mask, model_1D, model_reduced, model_1D_calibrate\n'), ((11848, 
11883), 'numpy.nan_to_num', 'np.nan_to_num', (['test_data_outlier_cv'], {}), '(test_data_outlier_cv)\n', (11861, 11883), True, 'import numpy as np\n'), ((11967, 12015), 'numpy.nan_to_num', 'np.nan_to_num', (['test_labels[outlier_indices_test]'], {}), '(test_labels[outlier_indices_test])\n', (11980, 12015), True, 'import numpy as np\n'), ((12017, 12045), 'numpy.nan_to_num', 'np.nan_to_num', (['original_mask'], {}), '(original_mask)\n', (12030, 12045), True, 'import numpy as np\n'), ((6330, 6364), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (6338, 6364), True, 'import numpy as np\n'), ((6429, 6463), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (6437, 6463), True, 'import numpy as np\n'), ((6546, 6580), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (6554, 6580), True, 'import numpy as np\n'), ((6673, 6707), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (6681, 6707), True, 'import numpy as np\n'), ((11353, 11387), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (11361, 11387), True, 'import numpy as np\n'), ((11518, 11552), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (11526, 11552), True, 'import numpy as np\n'), ((13035, 13069), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (13043, 13069), True, 'import numpy as np\n'), ((13166, 13200), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (13174, 13200), True, 'import numpy as np\n'), ((12173, 12207), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], {}), '(orignal_mask_flatten > 0)\n', (12181, 12207), True, 'import numpy as np\n'), ((11245, 11279), 'numpy.where', 'np.where', (['(orignal_mask_flatten > 0)'], 
{}), '(orignal_mask_flatten > 0)\n', (11253, 11279), True, 'import numpy as np\n')] |
import torch
from torch._C import _parse_source_def
import torch.nn as nn
import torch.nn.functional as F
import gym
import random
from collections import deque
import numpy as np
class SimpleNet(nn.Module):
    """Two-layer MLP policy network mapping a state vector to a probability
    distribution over discrete actions.

    Parameters
    ----------
    input_size : int
        Dimension of the observation vector.
    hidden_size : int
        Number of hidden units.
    action_size : int
        Number of discrete actions.
    """

    def __init__(self, input_size, hidden_size, action_size):
        super(SimpleNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, action_size)
        self.action_size = action_size

    def forward(self, x):
        """Return action probabilities for state ``x``."""
        h = F.relu(self.fc1(x))
        # dim=-1 normalises over the action dimension; the implicit-dim form
        # is deprecated and ambiguous for batched inputs.
        return F.softmax(self.fc2(h), dim=-1)

    def get_action(self, x, eps):
        """Select an action epsilon-greedily from the policy distribution.

        Parameters
        ----------
        x : torch.Tensor
            State vector.
        eps : float
            Probability of taking a uniformly random action.

        Returns
        -------
        int
            The chosen action index.
        """
        action_prob = self.forward(x)
        # BUG FIX: the original compared the *function object* ``random.random``
        # to ``eps`` (a TypeError in Python 3) and called ``torch.randint``
        # without the required size argument.
        if random.random() < eps:
            action = int(torch.randint(0, self.action_size, (1,)).item())
        else:
            action = int(torch.argmax(action_prob))
        return action
class Trainer:
    """Collect episodes from a gym environment with a stochastic policy and
    train the policy network with a Monte-Carlo policy gradient.

    Parameters
    ----------
    agent : nn.Module
        Policy network exposing ``forward`` (action probabilities) and
        ``get_action(state, eps)``.
    optimizer : callable
        Optimizer *class* (e.g. ``torch.optim.Adam``); it is instantiated
        here with the agent parameters and ``learning_rate``.
    learning_rate : float
        Learning rate passed to the optimizer.
    epoch_size : int
        Number of batch updates per training epoch.
    batch_size : int
        Number of episodes per batch.
    sample_size : int
        Number of episodes collected per data-collection phase.
    nr_epochs : int
        Number of collect/train cycles.
    mem_cap : int
        Capacity of the episode replay memory.
    discount_rate : float
        Reward discount factor gamma.
    max_length : int
        Maximum number of steps per episode.
    """

    def __init__(self, agent, optimizer, learning_rate, epoch_size, batch_size, sample_size, nr_epochs, mem_cap, discount_rate, max_length):
        self.agent = agent
        self.optimizer = optimizer(self.agent.parameters(), learning_rate)
        self.env = gym.make('CartPole-v0')
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.epoch_size = epoch_size
        self.sample_size = sample_size
        self.nr_epochs = nr_epochs
        self.discount_rate = discount_rate
        self.max_length = max_length
        # BUG FIX: the original bound the replay memory to a *local* variable,
        # so ``self.memory`` never existed.
        self.memory = deque(maxlen=mem_cap)

    def _collect_episode(self, env, agent, max_length, eps=0.0, render=False):
        """Roll out one episode; return ``[observations, actions, rewards]``.

        BUG FIX: the original definition lacked ``self`` although it was
        called as a bound method, and it recorded the *post-step* observation
        with the current action/reward, misaligning the transitions.
        """
        observation = env.reset()
        observations = []
        actions = []
        rewards = []
        for _ in range(max_length):
            action = int(agent.get_action(torch.as_tensor(observation, dtype=torch.float32), eps))
            # Record the state the action was taken *in*, not the successor.
            observations.append(np.asarray(observation, dtype=np.float32))
            observation, reward, done, info = env.step(action)
            actions.append(action)
            rewards.append(reward)
            if render:
                env.render()
            if done:
                break
        return [observations, actions, rewards]

    def train(self):
        """Alternate data collection and training for ``nr_epochs`` epochs.

        Returns
        -------
        tuple of list
            (average sampled episode rewards, per-epoch training losses).
        """
        sampled_av_reward = []
        losses = []
        for n in range(self.nr_epochs):
            # BUG FIX: ``torch.no_grad`` must be *called* to get a context
            # manager; the bare class raised at runtime.
            with torch.no_grad():
                current_av_reward = self._collect_data()
            sampled_av_reward.append(current_av_reward)
            # BUG FIX: '%s'.format(...) printed the literal '%s'.
            print('The current sampled return is: {}'.format(current_av_reward))
            current_loss = self.train_epoch()
            losses.append(current_loss)
            # BUG FIX: the original printed the reward here instead of the loss.
            print('The current training loss is: {}'.format(current_loss))
        return sampled_av_reward, losses

    def test(self, render=False):
        pass

    def train_epoch(self):
        """Run ``epoch_size`` batch updates; return the mean per-sample loss."""
        av_loss = 0
        for i in range(self.epoch_size):
            batch = self._sample_batch()
            self.optimizer.zero_grad()
            av_loss += self.train_batch(batch)
        return av_loss / self.epoch_size / self.batch_size

    def train_batch(self, batch):
        """Perform one policy-gradient step on a batch; return the scalar loss.

        BUG FIX: the original multiplied the returns by the log of the *whole*
        probability matrix instead of the taken action's probability, omitted
        the sign flip needed for gradient descent, called the non-existent
        ``self.optimizer_step`` and returned ``None``.
        """
        ob_batch, actions_batch, discounted_returns = batch
        action_prob = self.agent(ob_batch)
        # Log-probability of the action actually taken at each step.
        log_prob = torch.log(action_prob.gather(1, actions_batch.unsqueeze(1)).squeeze(1))
        # Negate so that minimising the loss maximises expected return.
        loss = -torch.sum(discounted_returns * log_prob)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def _sample_batch(self):
        """Sample ``batch_size`` episodes and concatenate them into tensors."""
        episodes = random.sample(self.memory, self.batch_size)
        observations, actions, rewards = zip(*episodes)
        ob_batch = torch.as_tensor(np.concatenate([np.stack(o) for o in observations]), dtype=torch.float32)
        actions_batch = torch.as_tensor(np.concatenate([np.asarray(a) for a in actions]), dtype=torch.long)
        returns = np.concatenate([self._estimate_return(r, self.discount_rate) for r in rewards])
        discounted_returns = torch.as_tensor(returns, dtype=torch.float32)
        return ob_batch, actions_batch, discounted_returns

    def _estimate_return(self, rewards, gamma):
        """Discounted returns-to-go G_t for one episode's reward sequence.

        Accepts any sequence of rewards (BUG FIX: the original assumed an
        ndarray and crashed on the lists stored in memory).
        """
        rewards = np.asarray(rewards, dtype=float)
        t_steps = np.arange(rewards.size)
        r = rewards * gamma ** t_steps
        # Reversed cumulative sum of discounted rewards, rescaled to each step.
        r = r[::-1].cumsum()[::-1] / gamma ** t_steps
        return r

    def _collect_data(self):
        """Collect ``sample_size`` episodes into memory.

        Returns the mean episode reward (BUG FIX: the original returned
        ``None``, which ``train`` then logged as the sampled return).
        """
        total_reward = 0.0
        for i in range(self.sample_size):
            episode = self._collect_episode(self.env, self.agent, self.max_length)
            self.memory.append(episode)
            total_reward += sum(episode[2])
        return total_reward / self.sample_size
# Script entry point: wire up a policy network and a trainer for CartPole.
if __name__ == '__main__':
    # 4-dim observation -> 16 hidden units -> 2 discrete actions (CartPole).
    net = SimpleNet(4,16,2)
    adam = torch.optim.Adam  # optimizer *class*; Trainer instantiates it
    learning_rate = 0.01
    epoch_size = 10       # batch updates per training epoch
    batch_size = 16       # episodes per batch
    sample_size = 100     # episodes collected per data-collection phase
    nr_epochs = 10
    mem_cap = 2000        # replay memory capacity (in episodes)
    discount_rate = 0.99  # reward discount factor gamma
    max_length = 200      # maximum steps per episode
    # NOTE(review): the Trainer instance is discarded and .train() is never
    # called, so running this script only constructs the objects — confirm intent.
    Trainer(net, adam, learning_rate, epoch_size, batch_size, sample_size, nr_epochs, mem_cap, discount_rate, max_length)
| [
"random.sample",
"collections.deque",
"torch.log",
"numpy.arange",
"torch.randint",
"torch.nn.Linear",
"gym.make",
"torch.cat",
"torch.argmax"
] | [((333, 367), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (342, 367), True, 'import torch.nn as nn\n'), ((387, 422), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'action_size'], {}), '(hidden_size, action_size)\n', (396, 422), True, 'import torch.nn as nn\n'), ((1168, 1191), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (1176, 1191), False, 'import gym\n'), ((1481, 1502), 'collections.deque', 'deque', ([], {'maxlen': 'mem_cap'}), '(maxlen=mem_cap)\n', (1486, 1502), False, 'from collections import deque\n'), ((3400, 3423), 'torch.cat', 'torch.cat', (['observations'], {}), '(observations)\n', (3409, 3423), False, 'import torch\n'), ((3448, 3466), 'torch.cat', 'torch.cat', (['actions'], {}), '(actions)\n', (3457, 3466), False, 'import torch\n'), ((3571, 3600), 'torch.cat', 'torch.cat', (['discounted_returns'], {}), '(discounted_returns)\n', (3580, 3600), False, 'import torch\n'), ((3727, 3750), 'numpy.arange', 'np.arange', (['rewards.size'], {}), '(rewards.size)\n', (3736, 3750), True, 'import numpy as np\n'), ((723, 757), 'torch.randint', 'torch.randint', (['(0)', 'self.action_size'], {}), '(0, self.action_size)\n', (736, 757), False, 'import torch\n'), ((792, 817), 'torch.argmax', 'torch.argmax', (['action_prob'], {}), '(action_prob)\n', (804, 817), False, 'import torch\n'), ((3137, 3159), 'torch.log', 'torch.log', (['action_prob'], {}), '(action_prob)\n', (3146, 3159), False, 'import torch\n'), ((3279, 3322), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (3292, 3322), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2019-02-19
# @Filename: target.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: <NAME> (<EMAIL>)
# @Last modified time: 2019-09-25 15:20:31
import os
import pathlib
from copy import copy
import astropy
import numpy
import yaml
import warnings
from lvmsurveysim.ifu import IFU
from lvmsurveysim.utils import plot as lvm_plot
import lvmsurveysim.utils.spherical
from lvmsurveysim.exceptions import LVMSurveyOpsError, LVMSurveyOpsWarning
from lvmsurveysim.utils.plot import __MOLLWEIDE_ORIGIN__, get_axes, transform_patch_mollweide, convert_to_mollweide
from .. import config
from ..telescope import Telescope
from .skyregion import SkyRegion
from .tile import Tile
__all__ = ['Target', 'TargetList']
class Target(object):
    """A `.Region` with additional observing information.

    A `.Target` object is similar to a `.SkyRegion` but it is named and contains
    information about what telescope will observe it, its observing
    priority relative to all other targets, and a set of observing constraints
    and strategies to be implemented during scheduling of the target (airmass,
    lunation, shadow height, tile order, ...).

    There is a special kind of target not represented internally as
    a `.SkyRegion`, the fullsky target, which represents a (sparse) grid of tiles
    on the whole sky.

    The Target constructor accepts the following keyword parameters, which are
    also available as keywords from a list read by `.from_list`. Typically a Target
    will not be instantiated individually. The typical use case will involve the
    `.TargetList` class which is initialized via a yaml configuration file, the
    survey 'target list'.

    Parameters
    ----------
    name : str
        The name of the target.
    priority : int
        The priority at which this target should be observed. Higher numbers
        mean higher priority.
    telescope : str
        The telescope that will observe the target. Must be a string that
        matches a telescope entry in the configuration file or a
        `~lvmsurveysim.telescope.Telescope` instance.
    max_airmass : float
        Maximum air mass to observe the given target
    min_shadowheight : float
        Minimum shadow height in km to observe the given target
    exptime : float
        Exposure time of an individual pointing
    n_exposures : int
        Number of individual pointings to reach desired S/N
    min_exposures : int
        Minimum number of exposures to make a "good visit"
    min_moon_dist : float
        Minimum moon distance between target before observations are
        called off.
    max_lunation : float
        The maximum lunation (fraction of moon illuminated,
        number between 0 and 1)
    overhead : float
        The overhead factor per exposure quantum for this target's observing
        scheme.
    overlap : bool
        calculate overlap between this target and others and discard, defaults to true
    tile_union : str
        tile_union that the target belongs to, if any; that is an area of sky that is tiled
        from a single hexagon grid to ensure gapless tiling of overlapping regions.
    tile_overlap : float
        fraction of tile separation to overlap with neighboring tiles (ignored for sparse targets)
    geodesic : bool
        geodesic tiling of the full sphere instead of region
    sparse : float
        sparse tiling factor, or depth value (number of subdivisions) in case of geodesic tiling
    group : list
        (list of) group names the target belongs to (e.g. MilkyWay). used for aggregating
        survey statistics and plotting survey progress.

    Attributes
    ----------
    region : `.SkyRegion`
        The `.SkyRegion` object associated with this target.
    """

    def __init__(self, *args, **kwargs):
        self.name = kwargs.pop('name', '')
        self.priority = kwargs.pop('priority', 1)
        self.observatory = kwargs.pop('observatory', 'BOTH')
        self.max_airmass = kwargs.pop('max_airmass', 1.75)
        self.min_shadowheight = kwargs.pop('min_shadowheight', 1000.0)
        self.exptime = kwargs.pop('exptime', 900)
        self.n_exposures = kwargs.pop('n_exposures', 9)
        self.min_exposures = kwargs.pop('min_exposures', 3)
        self.min_moon_dist = kwargs.pop('min_moon_dist', 90)
        self.max_lunation = kwargs.pop('max_lunation', 1.0)
        self.overhead = kwargs.pop('overhead', 1.0)
        self.groups = kwargs.pop('group', [])
        self.tiling_strategy = kwargs.pop('tiling_strategy', 'lowest_airmass')
        self.tile_union = kwargs.pop('tile_union', None)
        self.tile_overlap = kwargs.pop('tile_overlap', None)
        self.overlap = kwargs.pop('overlap', True)
        self.geodesic = kwargs.pop('geodesic', False)  # full sky tiling, use sparse for depth
        self.sparse = kwargs.pop('sparse', None)

        telescope = kwargs.pop('telescope', None)
        assert telescope is not None, 'must specify a telescope keyword.'
        if isinstance(telescope, Telescope):
            # BUG FIX: the original assigned the ``Telescope`` *class* here
            # rather than the instance passed by the caller.
            self.telescope = telescope
        else:
            self.telescope = Telescope.from_config(telescope)

        self.region = SkyRegion(*args, **kwargs)
        self.frame = self.region.frame
        # Tiling results; populated by `.tile` / `.get_tiles_from_union`.
        self.tiles = None
        self.tile_priorities = None

    def __repr__(self):
        return (f'<Target (name={self.name!r}, telescope={self.telescope.name!r}, '
                f'region_type={self.region.region_type!r})>')

    @classmethod
    def from_list(cls, name, targets=None):
        """Returns an instance of `.Target` from a target list.

        Initialises a new `.Target` whose parameters have been previously
        defined in a target list. Target lists must be YAML files in which each
        target defines region and the telescope that will observe it, as
        detailed in :ref:`target-defining`. For example:

        .. code-block:: yaml

            M81:
                coords: [148.888333, 69.0652778]
                region_type: ellipse
                frame: icrs
                region_params:
                    a: 0.209722
                    b: 0.106958333
                    pa: 149
                priority: 1
                observatory: APO {LCO, BOTH}
                telecope: LVM-1m {LVM-160}
                max_airmass: 1.75
                min_shadowheight: 1000.0
                exptime: 900
                n_exposures: 1
                min_exposures: 1
                ...

        Parameters
        ----------
        name : str
            The identifier for the target. Must be defined in the region
            list file.
        targets : dict
            Dictionary of target definitions (parsed from the YAML target
            list) in which ``name`` must be a key.

        Example:
            >>> from lvmsurveysim.target import Target
            >>> m81 = Target.from_list('M81')

        """
        assert targets is not None, "target dictionary not defined"
        assert name in targets, 'target not found in target list.'

        target = targets[name]

        region_type = target.pop('region_type')
        coords = target.pop('coords')
        region_params = target.pop('region_params', {})
        target.update(region_params)

        return cls(region_type, coords, name=name, **target)

    @classmethod
    def supertarget(cls, targets):
        '''Create a new target from a list of targets forming a tile union.

        This method takes a list of targets and returns a new target object
        whose region is the union of the regions of the targets in the list.
        This is used in tiling tile unions, which are multiple distinct targets
        that overlap or at least share an edge and need to be tiled uniformly across
        these interfaces. This is achieved by tiling them as if they were a single
        target and then redistributing the tiles back to the original targets according
        to their boundaries.

        Parameters:
        -----------
        targets : list of `~lvmsurveysim.target.Target`
            list of targets forming a tile union.

        Returns:
        --------
        target : `~lvmsurveysim.target.Target`
            'supertarget' consisting of the union of the input targets
        '''
        uregion = SkyRegion.multi_union([t.get_skyregion() for t in targets])
        # Copy the first target so observing parameters carry over, then
        # swap in the union region and a descriptive name.
        supertarget = copy(targets[0])
        supertarget.region = uregion
        supertarget.name = 'TileUnion ' + targets[0].tile_union
        return supertarget

    def get_pixarea(self, pixarea=None, ifu=None, telescope=None):
        """Gets the size of the tile in square degrees."""

        telescope = telescope or self.telescope

        if ifu is None:
            ifu = IFU.from_config()
            # warnings.warn(f'target {self.name}: no IFU provided. '
            #              f'Using default IFU {ifu.name!r}.', LVMSurveyOpsWarning)

        assert pixarea is not None or (ifu is not None and telescope is not None), \
            'either pixarea or ifu and telescope need to be defined.'

        if pixarea is None:
            # Area of one fibre on sky times the number of fibres.
            pixarea = (ifu.fibre_size / 2. * telescope.plate_scale).to('degree')**2 * numpy.pi
            pixarea *= ifu.n_fibres
            pixarea = pixarea.value

        return pixarea

    def tile(self, ifu=None, telescope=None, to_frame=None):
        """Tessellates the target region and populates the tiles, pa and
        tile_priorities fields.

        Parameters
        ----------
        ifu : ~lvmsurveysim.tiling.IFU
            The IFU used for tiling the region. If not provided, the default
            one is used.
        telescope : ~lvmsurveysim.telescope.Telescope
            The telescope on which the IFU is mounted. Defaults to the object
            ``telescope`` attribute.
        to_frame : str
            The reference frame in which the coordinates should be returned.
            If `None`, defaults to the region internal reference frame.
        """
        telescope = telescope or self.telescope

        if ifu is None:
            ifu = IFU.from_config()
            # warnings.warn(f'target {self.name}: no IFU provided. '
            #              f'Using default IFU {ifu.name!r}.', LVMSurveyOpsWarning)

        print('Tiling target ' + self.name)
        coords, pa = ifu.get_tile_grid(self.region, telescope.plate_scale,
                                       tile_overlap=self.tile_overlap,
                                       sparse=self.sparse, geodesic=self.geodesic)

        # Convert to skycoords and optionally transform into the requested
        # frame, most likely 'icrs'.
        tiles, pa2 = self.transform_skycoords(coords[:, 0], coords[:, 1],
                                              unit='deg', to_frame=to_frame)
        # Total position angle = tiling PA plus rotation induced by the
        # frame transform.
        self.pa = pa + pa2

        # Cache the new tiles and the priorities.
        self.tiles = tiles
        self.tile_priorities = self.get_tile_priorities()

    def make_tiles(self):
        """ Return a list of `~lvmsurveysim.schedule.Tile` tile objects for this target.

        Requires the self.tiles, self.pa and self.tile_priorities arrays to have
        been calculated using the `.tile` method.
        """
        return [Tile(self.tiles[i], self.pa[i], self.tile_priorities[i])
                for i in range(len(self.tiles))]

    def get_tiles_from_union(self, coords, pa):
        """ Select tiles belonging to this target from a list of coordinates.

        This method is used to select and assign the tiles belonging to this target
        from a list of coordinates of a tile union.

        Parameters
        ----------
        coords, pa : ~numpy.array
            Vectors of coordinates and PAs of the tile union before selection. Assumed
            to be in the ICRS frame.

        Returns
        -------
        coords, pa : ~numpy.array
            Vectors of coordinates and PAs remaining in tile union after selection.
        """
        # mask[i] stays True for tiles *outside* this target's region.
        mask = numpy.full(len(coords), True)
        icrs_r = self.region.icrs_region()
        for i, c in enumerate(coords):
            if icrs_r.contains_point(c.ra.deg, c.dec.deg):
                mask[i] = False

        # Keep the contained tiles for this target ...
        self.tiles = coords[~mask]
        self.pa = pa[~mask]
        self.tile_priorities = self.get_tile_priorities()
        # ... and return the remainder for the other targets of the union.
        return coords[mask], pa[mask]

    def get_tile_priorities(self):
        """Return an array with tile priorities according to the tiling
        strategy defined for this target.

        Returns
        -------
        priorities : ~numpy.array
            Array of length of number of tiles with the priority for each tile.
        """
        if len(self.tiles) == 0:
            warnings.warn(f'target {self.name}: no tiles when calling get_tile_priorities(). ', LVMSurveyOpsWarning)
            return numpy.array([])

        if self.tiling_strategy == 'lowest_airmass':
            self.tile_priorities = numpy.ones(len(self.tiles), dtype=int)
        elif self.tiling_strategy == 'center_first':
            self.tile_priorities = self.center_first_priorities_()
        else:
            raise ValueError(f'invalid tiling strategy: {self.tiling_strategy}.')

        return self.tile_priorities

    def center_first_priorities_(self):
        """Return an array with tile priorities according for the center-first
        tiling strategy.

        Tiles are prioritized according to the distance from the region
        barycenter. Priorities are equal along lines of constant distance
        from the barycenter, quantized in units of the tile diameter.

        Returns
        -------
        priorities : ~numpy.array
            Array of length of number of tiles with the priority for each tile.
        """
        if self.tiles.frame.name == 'icrs':
            r, d = self.tiles.ra.deg, self.tiles.dec.deg
        else:
            r, d = self.tiles.l.deg, self.tiles.b.deg

        # TODO: proper calculation of barycenter on the sphere!
        rc = numpy.average(r)
        dc = numpy.average(d)
        dist = lvmsurveysim.utils.spherical.great_circle_distance(r, d, rc, dc)

        field = numpy.sqrt(self.get_pixarea() / numpy.pi)  # TODO: better way to get field size!!!

        p = numpy.floor(dist / field).astype(int)
        return numpy.max(p) - p + 1  # invert since priorities increase with value

    def transform_skycoords(self, lat, lon, unit, to_frame):
        '''
        Construct `~astropy.coordinates.SkyCoord` from a set of longitude and latitude
        spherical coordinates. Optionally transform to a different frame and calculate
        the change of position angle resulting from that transform.

        The output is a set of SkyCoords and the position angle N through E.

        Parameters:
        -----------
        lat, lon : array-like
            latitude and longitude input coordinates
        unit : str
            the unit of lat and lon
        to_frame : str
            optional, the name of a new frame of reference for the output values

        Return:
        -------
        sk : `~astropy.coordinates.SkyCoord`
            output coordinates
        pa : `~numpy.array`
            position angle at new coordinates relative to the old coordinates
        '''
        tiles = astropy.coordinates.SkyCoord(lat, lon, frame=self.frame, unit=unit)
        pa = numpy.zeros(len(lat))
        # Transform not only centers, but also a second set of coordinates
        # slightly north, then compute the angle between the two.
        if to_frame:
            tiles = tiles.transform_to(to_frame)
            # Second set offset in dec to find position angle after transform.
            tiles2 = astropy.coordinates.SkyCoord(lat, lon + 1. / 3600, frame=self.frame, unit=unit)
            tiles2 = tiles2.transform_to(to_frame)
            pa = tiles.position_angle(tiles2)
        return tiles, pa

    def get_skyregion(self):
        """ Return the `.SkyRegion` of the target.
        """
        return self.region

    def is_sparse(self):
        '''Return True if the Target is sparse.
        '''
        return self.sparse is not None

    def density(self):
        '''Return the tile density of the target.

        The tile density is 1 if the target is not sparse, and
        1/sparse otherwise.
        '''
        if self.is_sparse():
            return 1.0 / self.sparse
        else:
            return 1.0

    def in_tile_union_with(self, other):
        '''Return True if `self` is a member of the same tile union as `other`.
        '''
        return (self.tile_union is not None) and (self.tile_union == other.tile_union)

    def plot(self, *args, **kwargs):
        """Plots the region. An alias for ``.SkyRegion.plot``.
        """
        return self.region.plot(*args, **kwargs)

    def plot_tiling(self, projection='rectangular', ifu=None, frame=None, fig=None, **kwargs):
        """Plots the tiles within the region.

        Parameters
        ----------
        ifu : ~lvmsurveysim.tiling.IFU
            The IFU used for tiling the region. If not provided, the default
            one is used.
        frame : str
            The reference frame on which the pixels will be displayed. Defaults
            to the internal frame of the target.
        fig : ~matplotlib.figure.Figure
            A Matplotlib `~matplotlib.figure.Figure` whose first axes will be
            reused. Otherwise, a new one will be created.
        kwargs : dict
            Parameters to be passed to `~matplotlib.axes.scatter`.

        Returns
        -------
        figure, axes : `~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`
            The Matplotlib figure and axes used.
        """
        frame = frame or self.frame

        ifu = ifu or IFU.from_config()

        if self.tiles is None:
            self.tile(ifu=ifu, to_frame=frame)

        if frame == 'icrs':
            lon, lat = self.tiles.ra.deg, self.tiles.dec.deg
        elif frame == 'galactic':
            lon, lat = self.tiles.l.deg, self.tiles.b.deg

        if fig is None:
            fig, ax = lvm_plot.get_axes(projection=projection, frame=frame)
        else:
            ax = fig.axes[0]

        if projection == 'mollweide':
            c1, c2 = lvm_plot.convert_to_mollweide(lon, lat)
        else:
            c1, c2 = lon, lat

        patches = [ifu.get_patch(scale=self.telescope.plate_scale, centre=[c1[p], c2[p]], pa=self.pa[p],
                                 edgecolor='r', linewidth=1, alpha=0.5)[0]
                   for p in range(len(c1))]

        if projection == 'mollweide':
            patches = [transform_patch_mollweide(patch) for patch in patches]

        for patch in patches:
            ax.add_patch(patch)

        ax.scatter(c1, c2, s=1, **kwargs)

        return fig, ax
class TargetList(list):
    """A list of all the targets to observe.

    Parameters
    ----------
    targets : list
        A list of `.Target` instances to populate the list with. If not
        provided, the targets are read from ``target_file``.
    target_file : str
        The YAML file with all the targets to observe. Defaults to the
        ``lvmcore`` target list.

    Returns
    -------
    target_set : list
        A list of `.Target` instances.
    """

    def __init__(self, targets=None, target_file=None):
        self.filename = None
        if targets:
            self._names = [target.name for target in targets]
            super().__init__(targets)
        else:
            if target_file is None:
                target_file = pathlib.Path(
                    os.path.expanduser(os.path.expandvars(config['tiledb']['target_file'])))
            else:
                target_file = pathlib.Path(target_file)
            assert target_file.exists()
            self.filename = target_file

            targets_dict = yaml.load(open(str(target_file)), Loader=yaml.FullLoader)

            self._names = list(targets_dict.keys())

            targets = [Target.from_list(name, targets=targets_dict)
                       for name in self._names]
            super().__init__(targets)

    def get_target(self, name):
        """Returns the target whose name corresponds to ``name``."""
        return self[self._names.index(name)]

    def get_group_targets(self, group, primary=True):
        """Returns the targets that are in a group.

        Parameters
        ----------
        group : str
            The group name.
        primary : bool
            Return only the target if ``group`` is the primary group to which
            the target belongs (i.e., the first one in the list).

        Returns
        -------
        targets : `list`
            A list of target names that are included in ``group``.
        """
        targets = []
        for target in self:
            if group in target.groups:
                if (primary and group == target.groups[0]) or (not primary):
                    targets.append(target.name)
        return targets

    def get_groups(self):
        """Returns a list of all the groups for all the targets in the list."""
        groups = set()
        for target in self:
            groups.update(target.groups)
        return list(groups)

    def get_tile_unions(self):
        """Returns a list of all the tile unions in the target list."""
        unions = set()
        for target in self:
            if target.tile_union:
                unions.update([target.tile_union])
        return list(unions)

    def get_union_targets(self, tile_union):
        """Returns the targets that are in a tile union.

        Parameters
        ----------
        tile_union : str
            The tile union name.

        Returns
        -------
        targets : `.TargetList`
            The targets that are included in ``tile_union``.
        """
        ut = []
        for target in self:
            if tile_union == target.tile_union:
                ut.append(target)
        return TargetList(targets=ut)

    def order_by_priority(self):
        """ Return a copy of the target list ordered by priority, highest to lowest.
        """
        return sorted(self, key=lambda t: t.priority, reverse=True)

    def plot_tiling(self, frame='icrs', **kwargs):
        """Plots all the target pixels in a single Mollweide projection.

        Parameters
        ----------
        frame : str
            The coordinate frame to which all the pixel centres will be
            converted.
        kwargs : dict
            Parameters to be passed to `.Target.plot_tiling`. By default, each
            target will be plotted in a different colour.

        Returns
        -------
        figure : `~matplotlib.figure.Figure`
            The Matplotlib `~matplotlib.figure.Figure`.
        """
        assert len(self) > 0, 'no targets in list.'

        zorder = 100
        # BUG FIX: `Target.plot_tiling` returns a (figure, axes) tuple; the
        # original stored the tuple in ``fig`` and passed it back via ``fig=``,
        # which crashed on ``fig.axes[0]`` for lists with more than one target.
        fig, ax = self[0].plot_tiling(frame=frame, zorder=zorder, **kwargs)
        for target in self[1:]:
            zorder -= 1
            fig, ax = target.plot_tiling(fig=fig, frame=frame, zorder=zorder, **kwargs)
        return fig
| [
"lvmsurveysim.utils.plot.transform_patch_mollweide",
"numpy.average",
"pathlib.Path",
"os.path.expandvars",
"numpy.floor",
"astropy.coordinates.SkyCoord",
"lvmsurveysim.utils.plot.convert_to_mollweide",
"numpy.max",
"numpy.array",
"lvmsurveysim.ifu.IFU.from_config",
"warnings.warn",
"copy.copy... | [((8520, 8536), 'copy.copy', 'copy', (['targets[0]'], {}), '(targets[0])\n', (8524, 8536), False, 'from copy import copy\n'), ((14020, 14036), 'numpy.average', 'numpy.average', (['r'], {}), '(r)\n', (14033, 14036), False, 'import numpy\n'), ((14050, 14066), 'numpy.average', 'numpy.average', (['d'], {}), '(d)\n', (14063, 14066), False, 'import numpy\n'), ((15303, 15370), 'astropy.coordinates.SkyCoord', 'astropy.coordinates.SkyCoord', (['lat', 'lon'], {'frame': 'self.frame', 'unit': 'unit'}), '(lat, lon, frame=self.frame, unit=unit)\n', (15331, 15370), False, 'import astropy\n'), ((8885, 8902), 'lvmsurveysim.ifu.IFU.from_config', 'IFU.from_config', ([], {}), '()\n', (8900, 8902), False, 'from lvmsurveysim.ifu import IFU\n'), ((10226, 10243), 'lvmsurveysim.ifu.IFU.from_config', 'IFU.from_config', ([], {}), '()\n', (10241, 10243), False, 'from lvmsurveysim.ifu import IFU\n'), ((12730, 12843), 'warnings.warn', 'warnings.warn', (['f"""target {self.name}: no tiles when calling get_tile_priorities(). """', 'LVMSurveyOpsWarning'], {}), "(\n f'target {self.name}: no tiles when calling get_tile_priorities(). 
',\n LVMSurveyOpsWarning)\n", (12743, 12843), False, 'import warnings\n'), ((12854, 12869), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (12865, 12869), False, 'import numpy\n'), ((15687, 15772), 'astropy.coordinates.SkyCoord', 'astropy.coordinates.SkyCoord', (['lat', '(lon + 1.0 / 3600)'], {'frame': 'self.frame', 'unit': 'unit'}), '(lat, lon + 1.0 / 3600, frame=self.frame, unit=unit\n )\n', (15715, 15772), False, 'import astropy\n'), ((17797, 17814), 'lvmsurveysim.ifu.IFU.from_config', 'IFU.from_config', ([], {}), '()\n', (17812, 17814), False, 'from lvmsurveysim.ifu import IFU\n'), ((18123, 18176), 'lvmsurveysim.utils.plot.get_axes', 'lvm_plot.get_axes', ([], {'projection': 'projection', 'frame': 'frame'}), '(projection=projection, frame=frame)\n', (18140, 18176), True, 'from lvmsurveysim.utils import plot as lvm_plot\n'), ((18277, 18316), 'lvmsurveysim.utils.plot.convert_to_mollweide', 'lvm_plot.convert_to_mollweide', (['lon', 'lat'], {}), '(lon, lat)\n', (18306, 18316), True, 'from lvmsurveysim.utils import plot as lvm_plot\n'), ((14259, 14284), 'numpy.floor', 'numpy.floor', (['(dist / field)'], {}), '(dist / field)\n', (14270, 14284), False, 'import numpy\n'), ((14312, 14324), 'numpy.max', 'numpy.max', (['p'], {}), '(p)\n', (14321, 14324), False, 'import numpy\n'), ((18654, 18686), 'lvmsurveysim.utils.plot.transform_patch_mollweide', 'transform_patch_mollweide', (['patch'], {}), '(patch)\n', (18679, 18686), False, 'from lvmsurveysim.utils.plot import __MOLLWEIDE_ORIGIN__, get_axes, transform_patch_mollweide, convert_to_mollweide\n'), ((19608, 19633), 'pathlib.Path', 'pathlib.Path', (['target_file'], {}), '(target_file)\n', (19620, 19633), False, 'import pathlib\n'), ((19506, 19557), 'os.path.expandvars', 'os.path.expandvars', (["config['tiledb']['target_file']"], {}), "(config['tiledb']['target_file'])\n", (19524, 19557), False, 'import os\n')] |
# Sarsa is a value iteration algorithm that learns Q(s,a)
# Combined with some policy that is based on Q-values (like eps-greedy)
# A task that is somewhat problem-dependent in linear SARSA is how to pick the right features
# Specifically, these features should predict the value of a particular action given the state description by features
# These features could just be the state description, but also should probably be augmented.
# Polynomial features: this is adding polynomial terms and interaction terms
# References and Plan
# Can use the textbook RLbook2020.pdf to get pseudocode
# Algorithm: n-step semi-gradient SARSA (pg 244)
# This algorithm is not going to be truly online. Will have to go
# Input: function approximation and featurization of observation space.
# Tilings of the position-velocity space give the feature vector x.
# q function is linear in these tiling features
# q function is exactly the form in Equation 10.3 on page 246
# Basically all of the work is in defining how q works and how the features work
# Other than this, can follow the algorithm exactly
# Need to understand exactly the end of the algorithm; would also like to understand, but probably not implement,
# The difference that eligibility traces would make- why is it not truly online without eligibility traces?
# Write out the whole algorithm here, with classes for the Q function approximation and the policy
import click
import gym
import numpy as np
from math import log, ceil, exp
from tiles3 import IHT
from QFunctions import LinearTilingQApproximator
# python EpisodicNStepSarsa.py --episodes 1 --alpha 0.03 --init_epsilon 0.1 --eps_decay_factor 0.0 --n 1 --gamma 0.9 --tile_resolution 8 --n_tilings 8 --d 3 --render
@click.command()
@click.option('--episodes', default=10)
@click.option('--alpha', default=0.1)
@click.option('--init_epsilon', default=0.1)
@click.option('--eps_decay_factor', default=0.1) # epsilon_t = init_epsilon*exp(-eps_decay_factor*t) so decay = 0 is constant
@click.option('--n', default=1)
@click.option('--gamma', default=0.5) # reward discounting factor
@click.option('--tile_resolution', default=8) # number of tiles in each dimension of feature space
@click.option('--n_tilings', default=8) # number of overlapping tilings
@click.option('--d', default=3) # either 2 if (position,velocity) and 3 if (position,velocity,acceleration)
@click.option('--render/--no-render', default=True)
def main(episodes, alpha, init_epsilon, eps_decay_factor,
         n, gamma, tile_resolution, n_tilings, d, render):
    """Run episodic n-step semi-gradient SARSA on MountainCar-v0.

    Follows the algorithm on p. 244 referenced in the header comments: a
    linear Q-function over tile-coded features is updated from n-step
    returns.  The arrays A, S and R act as circular buffers of length n+1
    that are indexed modulo (n+1), so only the last n+1 actions, states and
    rewards are kept in memory at any time.
    """
    # Instantiate the environment
    env = gym.make('MountainCar-v0')
    n_actions = 3 # Action space is 0,1,2 for mountain car
    # Initialize the hash table to store the tiling
    n_tiles = tile_resolution ** d * n_tilings * n_actions
    iht = IHT(2**(1+ceil(log(n_tiles, 2)))) # should be double the hash table size that we need
    # Initialize the Q function
    q_hat = LinearTilingQApproximator(iht, n_tilings, tile_resolution)
    # Initialize arrays to store actions, states and rewards
    # n-step SARSA means storing current info AND info for n more steps
    A = np.zeros(n+1, dtype=int)
    S = np.zeros((n+1,d)) # each row is either [position, velocity] or [position, velocity, acceleration]
    R = np.zeros(n+1)
    # Loop over episodes
    for episode in range(episodes):
        # Initial observation
        # For MountainCar, always starts with 0 velocity and append 0 acceleration
        observation = list(env.reset())
        if d == 2:
            S[0] = observation
        else:
            S[0] = observation + [0]
        # epsilon-greedy action based on initial state
        if np.random.uniform() <= init_epsilon:
            A[0] = env.action_space.sample()
        else:
            A[0] = np.argmax([q_hat(S[0], a) for a in range(n_actions)])
        # Set termination time to infinity to start
        # Initialize time counter
        t = 0
        T = np.inf
        # render
        if render: env.render()
        ### Print initial state
        print('t = {:d}'.format(t))
        print('S[t] = ' + np.array2string(S[0], precision=2))
        print('A[0] = {:d}'.format(A[0]))
        # Loop over time periods within an episode
        while True:
            # If we haven't terminated, then take an action
            # Store the next state and reward
            if t < T:
                observation, reward, done, info = env.step(A[t % (n+1)])
                if render: env.render()
                R[(t+1) % (n+1)] = reward
                if d == 2:
                    S[(t+1) % (n+1)] = list(observation)
                else:
                    # Third feature: finite-difference acceleration from the
                    # velocity change since the previous stored state.
                    S[(t+1) % (n+1)] = list(observation) + [observation[1] - S[t % (n+1), 1]]
                if done:
                    T = t + 1
                else:
                    # Exponentially decaying exploration rate within the episode.
                    epsilon = init_epsilon*exp(-eps_decay_factor*t)
                    if np.random.uniform() <= epsilon:
                        A[(t+1) % (n+1)] = env.action_space.sample()
                    else:
                        A[(t+1) % (n+1)] = np.argmax([q_hat(S[(t+1) % (n+1)], a) for a in range(n_actions)])
                ### Print state for t + 1
                print('After taking A[{:d}], the info for t+1 = {:d} is:'.format(t, t+1))
                print('t + 1 = {:d}'.format(t+1))
                print('S[t+1] = ' + np.array2string(S[(t+1) % (n+1)], precision=2))
                print('R[t+1] = {:f}'.format(R[(t+1) % (n+1)]))
                print('A[t+1] chosen based on S[t+1] and R[t+1].')
                print('A[t+1] = {:d}'.format(A[(t+1) % (n+1)]))
            # Set the period for which we are updating the weights
            # E.g. if n = 1, then can start updating at t = 0 because we have stored S_1 and R_1
            tau = t - n + 1
            # If we are ready to update the first state, then go ahead
            if tau >= 0:
                # discounted n-step return
                G = sum([gamma**(i-tau-1)*R[(i % (n+1))] for i in range(tau+1, min(tau+n, T) + 1)])
                # if you haven't terminated within n steps, then add the estimated return to go
                if tau + n < T:
                    G = G + gamma**n * q_hat(S[(tau+n) % (n+1)], A[(tau+n) % (n+1)])
                # Adjust the weights based on gradient of the error
                # The update function takes the state and the action to find the active tiles
                # Then updates each tile by alpha * error
                q_hat.update(S[tau % (n+1)],
                             A[tau % (n+1)],
                             alpha,
                             G - q_hat(S[tau % (n+1)], A[tau % (n+1)]))
            # Stop once the weight update for the last pre-terminal state is done.
            if tau == T - 1:
                print('Exiting at tau = {:d}'.format(tau))
                break
            t += 1
# Script entry point: run the n-step SARSA experiment defined above.
if __name__ == '__main__':
    main()
| [
"click.option",
"numpy.array2string",
"QFunctions.LinearTilingQApproximator",
"math.log",
"numpy.zeros",
"numpy.random.uniform",
"math.exp",
"click.command",
"gym.make"
] | [((1727, 1742), 'click.command', 'click.command', ([], {}), '()\n', (1740, 1742), False, 'import click\n'), ((1744, 1782), 'click.option', 'click.option', (['"""--episodes"""'], {'default': '(10)'}), "('--episodes', default=10)\n", (1756, 1782), False, 'import click\n'), ((1784, 1820), 'click.option', 'click.option', (['"""--alpha"""'], {'default': '(0.1)'}), "('--alpha', default=0.1)\n", (1796, 1820), False, 'import click\n'), ((1822, 1865), 'click.option', 'click.option', (['"""--init_epsilon"""'], {'default': '(0.1)'}), "('--init_epsilon', default=0.1)\n", (1834, 1865), False, 'import click\n'), ((1867, 1914), 'click.option', 'click.option', (['"""--eps_decay_factor"""'], {'default': '(0.1)'}), "('--eps_decay_factor', default=0.1)\n", (1879, 1914), False, 'import click\n'), ((1993, 2023), 'click.option', 'click.option', (['"""--n"""'], {'default': '(1)'}), "('--n', default=1)\n", (2005, 2023), False, 'import click\n'), ((2025, 2061), 'click.option', 'click.option', (['"""--gamma"""'], {'default': '(0.5)'}), "('--gamma', default=0.5)\n", (2037, 2061), False, 'import click\n'), ((2091, 2135), 'click.option', 'click.option', (['"""--tile_resolution"""'], {'default': '(8)'}), "('--tile_resolution', default=8)\n", (2103, 2135), False, 'import click\n'), ((2190, 2228), 'click.option', 'click.option', (['"""--n_tilings"""'], {'default': '(8)'}), "('--n_tilings', default=8)\n", (2202, 2228), False, 'import click\n'), ((2262, 2292), 'click.option', 'click.option', (['"""--d"""'], {'default': '(3)'}), "('--d', default=3)\n", (2274, 2292), False, 'import click\n'), ((2370, 2420), 'click.option', 'click.option', (['"""--render/--no-render"""'], {'default': '(True)'}), "('--render/--no-render', default=True)\n", (2382, 2420), False, 'import click\n'), ((2582, 2608), 'gym.make', 'gym.make', (['"""MountainCar-v0"""'], {}), "('MountainCar-v0')\n", (2590, 2608), False, 'import gym\n'), ((2921, 2979), 'QFunctions.LinearTilingQApproximator', 'LinearTilingQApproximator', 
(['iht', 'n_tilings', 'tile_resolution'], {}), '(iht, n_tilings, tile_resolution)\n', (2946, 2979), False, 'from QFunctions import LinearTilingQApproximator\n'), ((3122, 3148), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {'dtype': 'int'}), '(n + 1, dtype=int)\n', (3130, 3148), True, 'import numpy as np\n'), ((3155, 3175), 'numpy.zeros', 'np.zeros', (['(n + 1, d)'], {}), '((n + 1, d))\n', (3163, 3175), True, 'import numpy as np\n'), ((3261, 3276), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (3269, 3276), True, 'import numpy as np\n'), ((3659, 3678), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3676, 3678), True, 'import numpy as np\n'), ((4093, 4127), 'numpy.array2string', 'np.array2string', (['S[0]'], {'precision': '(2)'}), '(S[0], precision=2)\n', (4108, 4127), True, 'import numpy as np\n'), ((2805, 2820), 'math.log', 'log', (['n_tiles', '(2)'], {}), '(n_tiles, 2)\n', (2808, 2820), False, 'from math import log, ceil, exp\n'), ((4848, 4874), 'math.exp', 'exp', (['(-eps_decay_factor * t)'], {}), '(-eps_decay_factor * t)\n', (4851, 4874), False, 'from math import log, ceil, exp\n'), ((4896, 4915), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4913, 4915), True, 'import numpy as np\n'), ((5351, 5401), 'numpy.array2string', 'np.array2string', (['S[(t + 1) % (n + 1)]'], {'precision': '(2)'}), '(S[(t + 1) % (n + 1)], precision=2)\n', (5366, 5401), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from numpy import arange, ones, zeros
from numpy import sum as npsum
from scipy.optimize import least_squares
from scipy.special import gamma
from autocorrelation import autocorrelation
def FitFractionalIntegration(dx, l_, d0):
    """Fit a fractional integration process to the increments dx.

    Minimizes the sum of squared autocorrelations of the residuals
    eps = (1-L)**d dx, where the operator (1-L)**d is approximated by the
    first l_ terms of its Taylor expansion (see FractIntegrProcess).

    Parameters
    ----------
    dx : [vector] (1 x t_end) increments of the data, dx = diff(x)
    l_ : [scalar] truncation order of the Taylor expansion of (1-L)**d
    d0 : [scalar] initial guess for the parameter d

    Returns
    -------
    d        : [scalar] estimate of d (d+1 is the order of the process)
    epsFI    : [vector] (1 x t_end) residuals of the fitted process
    coeff    : [vector] (l_+1 x 1) Taylor coefficients (coeff_0 = 1 included)
    exitFlag : termination status reported by scipy least_squares
    resNorm  : None (kept for interface compatibility with legacy callers)

    Note: with L the lag operator, eps = (1-L)**{1+d} X = (1-L)**d dX,
    where (1-L)**{d} \approx \sum_{l=0}**{l_} coeff_l L**l.
    """
    # d is constrained to the stationary/invertible range [-0.5, 0.5]
    lb = -0.5
    ub = 0.5
    res = least_squares(objective,d0,args=(dx,l_),bounds=(lb,ub),ftol=1e-9,xtol=1e-9)
    # resNorm is not provided by scipy's result in this form; kept as None
    d, exitFlag, resNorm = res.x, res.status, None
    # Residuals computed with l_+1 coefficients (including coeff_0)
    epsFI, coeff = FractIntegrProcess(d,dx,l_+1)
    return d, epsFI, coeff, exitFlag, resNorm
def objective(d, dx, l_):
    """Loss for the fractional-integration fit: sum of squared
    autocorrelations (lags up to 10) of the model residuals."""
    residuals, _coeff = FractIntegrProcess(d, dx, l_)
    acf = autocorrelation(residuals, 10)
    return npsum(acf ** 2)
def FractIntegrProcess(d, x, l_):
    """Taylor-truncated fractional differencing of the series x.

    Returns (eps, coeff): coeff holds the first l_ Taylor coefficients of
    (1-L)**d (with coeff[0, 0] = 1) and eps the approximated residuals
    eps_t = sum_l coeff_l * x_{t-1-l}.
    """
    n_obs = x.shape[0]
    lags = arange(1, l_)
    # Taylor coefficients of (1-L)**d: signed generalized binomial terms.
    coeff = ones((1, l_))
    coeff[0, 1:l_] = (-1) ** lags * gamma(1 + d) / gamma(lags + 1) / gamma(1 + d - lags)
    eps = zeros((1, n_obs - l_ + 1))
    for t in range(l_, n_obs):
        # The most recent l_ observations, newest first.
        lagged = x[t - 1::-1] if t == l_ else x[t - 1:t - l_ - 1:-1]
        eps[0, t - l_] = coeff @ lagged.T
    return eps, coeff
| [
"scipy.optimize.least_squares",
"numpy.ones",
"autocorrelation.autocorrelation",
"numpy.zeros",
"scipy.special.gamma",
"numpy.arange"
] | [((1530, 1618), 'scipy.optimize.least_squares', 'least_squares', (['objective', 'd0'], {'args': '(dx, l_)', 'bounds': '(lb, ub)', 'ftol': '(1e-09)', 'xtol': '(1e-09)'}), '(objective, d0, args=(dx, l_), bounds=(lb, ub), ftol=1e-09,\n xtol=1e-09)\n', (1543, 1618), False, 'from scipy.optimize import least_squares\n'), ((2100, 2113), 'numpy.arange', 'arange', (['(1)', 'l_'], {}), '(1, l_)\n', (2106, 2113), False, 'from numpy import arange, ones, zeros\n'), ((2125, 2138), 'numpy.ones', 'ones', (['(1, l_)'], {}), '((1, l_))\n', (2129, 2138), False, 'from numpy import arange, ones, zeros\n'), ((2212, 2235), 'numpy.zeros', 'zeros', (['(1, t_ - l_ + 1)'], {}), '((1, t_ - l_ + 1))\n', (2217, 2235), False, 'from numpy import arange, ones, zeros\n'), ((2188, 2204), 'scipy.special.gamma', 'gamma', (['(1 + d - l)'], {}), '(1 + d - l)\n', (2193, 2204), False, 'from scipy.special import gamma\n'), ((1836, 1860), 'autocorrelation.autocorrelation', 'autocorrelation', (['eps', '(10)'], {}), '(eps, 10)\n', (1851, 1860), False, 'from autocorrelation import autocorrelation\n'), ((2177, 2189), 'scipy.special.gamma', 'gamma', (['(l + 1)'], {}), '(l + 1)\n', (2182, 2189), False, 'from scipy.special import gamma\n'), ((2166, 2178), 'scipy.special.gamma', 'gamma', (['(1 + d)'], {}), '(1 + d)\n', (2171, 2178), False, 'from scipy.special import gamma\n')] |
"""
Path tracking simulation with pure pursuit steering and PID speed control.
author: <NAME> (@Atsushi_twi)
<NAME> (@Gjacquenot)
modified by: <NAME> (@tkortz)
Original source: https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathTracking/pure_pursuit/pure_pursuit.py
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import liblitmus
# Pure-pursuit / controller tuning parameters
k = 0.1  # look forward gain (scales look-ahead distance with speed)
Lfc = 2.0  # [m] base look-ahead distance
Kp = 1.0  # speed proportional gain
dt = 0.1  # [s] time tick
WB = 2.9  # [m] wheel base of vehicle
# Toggle live matplotlib visualisation of each simulation step.
show_animation = False
class State:
    """Kinematic bicycle-model vehicle state (pose and speed).

    rear_x / rear_y track the rear-axle position, offset WB/2 behind the
    (x, y) reference point along the heading.
    """

    def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
        self.x = x
        self.y = y
        self.yaw = yaw
        self.v = v
        self._sync_rear_axle()

    def _sync_rear_axle(self):
        # Rear axle lags the reference point by half the wheel base.
        half_wb = WB / 2
        self.rear_x = self.x - half_wb * math.cos(self.yaw)
        self.rear_y = self.y - half_wb * math.sin(self.yaw)

    def update(self, a, delta):
        """Advance the state one tick dt given acceleration a and steering delta."""
        heading = self.yaw
        self.x += self.v * math.cos(heading) * dt
        self.y += self.v * math.sin(heading) * dt
        self.yaw += self.v / WB * math.tan(delta) * dt
        self.v += a * dt
        self._sync_rear_axle()

    def calc_distance(self, point_x, point_y):
        """Euclidean distance from the rear axle to (point_x, point_y)."""
        return math.hypot(self.rear_x - point_x, self.rear_y - point_y)
class States:
    """Time-indexed history of vehicle states, stored as parallel lists."""

    def __init__(self):
        # Entry i of every list describes the state recorded at time t[i].
        self.x, self.y, self.yaw, self.v, self.t = [], [], [], [], []

    def append(self, t, state):
        """Record a snapshot of *state* taken at simulation time *t*."""
        for trace, value in ((self.x, state.x),
                             (self.y, state.y),
                             (self.yaw, state.yaw),
                             (self.v, state.v),
                             (self.t, t)):
            trace.append(value)
def proportional_control(target, current):
    """P-controller on speed: acceleration proportional to the speed error."""
    return Kp * (target - current)
class TargetCourse:
    """Reference path (cx, cy) with a cached nearest-point search and a
    speed-dependent look-ahead target index for pure pursuit."""

    def __init__(self, cx, cy):
        self.cx = cx
        self.cy = cy
        # Cache of the last nearest index so later searches only scan forward.
        self.old_nearest_point_index = None

    def search_target_index(self, state):
        """Return (index of the look-ahead target point, look-ahead distance Lf)."""
        # To speed up nearest point search, doing it at only first time.
        if self.old_nearest_point_index is None:
            # search nearest point index (full scan, first call only)
            dx = [state.rear_x - icx for icx in self.cx]
            dy = [state.rear_y - icy for icy in self.cy]
            d = np.hypot(dx, dy)
            ind = np.argmin(d)
            self.old_nearest_point_index = ind
        else:
            # Walk forward from the cached index while the distance keeps shrinking.
            ind = self.old_nearest_point_index
            distance_this_index = state.calc_distance(self.cx[ind],
                                                      self.cy[ind])
            while True:
                distance_next_index = state.calc_distance(self.cx[ind + 1],
                                                          self.cy[ind + 1])
                if distance_this_index < distance_next_index:
                    break
                ind = ind + 1 if (ind + 1) < len(self.cx) else ind
                distance_this_index = distance_next_index
            self.old_nearest_point_index = ind
        Lf = k * state.v + Lfc  # update look ahead distance (grows with speed)
        # search look ahead target point index: first course point beyond Lf
        while Lf > state.calc_distance(self.cx[ind], self.cy[ind]):
            if (ind + 1) >= len(self.cx):
                break  # not exceed goal
            ind += 1
        return ind, Lf
def pure_pursuit_steer_control(state, trajectory, pind):
    """Compute the pure-pursuit steering angle and the target index.

    The target index never moves backwards past pind and is clamped to
    the last course point once the goal has been reached.
    """
    ind, Lf = trajectory.search_target_index(state)
    ind = max(ind, pind)
    last = len(trajectory.cx) - 1
    if ind > last:  # past the course end: steer toward the goal point
        ind = last
    tx = trajectory.cx[ind]
    ty = trajectory.cy[ind]
    # Heading error from the rear axle to the look-ahead point.
    alpha = math.atan2(ty - state.rear_y, tx - state.rear_x) - state.yaw
    # Pure-pursuit steering law for a bicycle model with wheel base WB.
    delta = math.atan2(2.0 * WB * math.sin(alpha) / Lf, 1.0)
    return delta, ind
def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"):
    """Draw a heading arrow at pose (x, y, yaw).

    Accepts either scalar floats or parallel sequences of poses; in the
    sequence case one arrow is drawn per pose.
    """
    if not isinstance(x, float):
        # Sequence input: recurse on each pose with default styling.
        for ix, iy, iyaw in zip(x, y, yaw):
            plot_arrow(ix, iy, iyaw)
    else:
        plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
                  fc=fc, ec=ec, head_width=width, head_length=width)
        plt.plot(x, y)
# https://en.wikipedia.org/wiki/Perlin_noise
def interpolate(a0, a1, w):
    """Smoothstep-weighted interpolation between a0 and a1 for w in [0, 1]."""
    smooth_w = (3.0 - 2.0 * w) * w * w  # 3w^2 - 2w^3, the smoothstep easing
    return a0 + (a1 - a0) * smooth_w
def randomGradient(ix, iy):
    """Deterministic pseudo-random unit gradient for lattice point (ix, iy)."""
    # Hash the integer coordinates into an angle via incommensurate trig waves.
    angle = 2920.0 * math.sin(ix * 21942.0 + iy * 171324.0 + 8912.0) \
                   * math.cos(ix * 23157.0 * iy * 217832.0 + 9758.0)
    return (math.cos(angle), math.sin(angle))
def dotGridGradient(ix, iy, x, y):
    """Dot product of the lattice gradient at (ix, iy) with the offset to (x, y)."""
    gx, gy = randomGradient(ix, iy)
    return (x - ix) * gx + (y - iy) * gy
def perlin(x, y):
    """2D Perlin noise value at (x, y).

    Interpolates the dot products of the four surrounding lattice
    gradients with smoothstep weights (see the Wikipedia reference above).
    """
    x0, y0 = int(x), int(y)
    x1, y1 = x0 + 1, y0 + 1
    sx, sy = x - x0, y - y0
    # Blend along x on the lower and upper lattice edges, then along y.
    lower = interpolate(dotGridGradient(x0, y0, x, y),
                        dotGridGradient(x1, y0, x, y), sx)
    upper = interpolate(dotGridGradient(x0, y1, x, y),
                        dotGridGradient(x1, y1, x, y), sx)
    return interpolate(lower, upper, sy)
def main():
    """Run the pure-pursuit tracking simulation as a LITMUS^RT real-time
    task: one control-loop iteration per real-time job, sleeping until
    the next period after each job."""
    max_num_jobs = 20000
    """
    max_num_jobs = 100   --> 34 jobs
    max_num_jobs = 1000  --> 203 jobs
    max_num_jobs = 10000 --> 1883 jobs
    max_num_jobs = 20000 --> 3750 jobs
    """
    # Simulated horizon (seconds) and course length derived from the job budget.
    T = float(max_num_jobs * dt)
    xmax = int(T / 2)
    print("Num jobs: {0} (T={1}, xmax={2})".format(max_num_jobs, T, xmax))
    # target course: a wavy sine curve with small Perlin-noise perturbation
    cx = np.arange(0, xmax, 0.5)
    cy_f = lambda ix: math.sin(ix / 10.0) * (math.sin(ix/5)) * 2.0
    cy = [cy_f(ix) + 0.05 * perlin(ix, cy_f(ix)) for ix in cx]
    target_speed = 10.0 / 3.6  # [m/s]
    # initial state
    state = State(x=-0.0, y=-3.0, yaw=0.0, v=0.0)
    lastIndex = len(cx) - 1
    time = 0.0
    states = States()
    states.append(time, state)
    target_course = TargetCourse(cx, cy)
    target_ind, _ = target_course.search_target_index(state)
    # Set up litmus task (times presumably in ms -- TODO confirm units)
    wcet = 150
    period = 200
    deadline = 200
    phase = 0
    early = False
    liblitmus.call_init_litmus()
    print("\nPure pursuit called init_litmus.\n")
    liblitmus.call_set_rt_task_param(wcet, period, deadline, phase, early)
    print("\nPure pursuit finished setting rt params.\n")
    liblitmus.set_task_mode_litmusrt()
    print("\nPure pursuit is now a real-time task.\n")
    print("\nPure pursuit is about to wait for synchronous release.\n")
    liblitmus.call_wait_for_ts_release()
    num_jobs = 0
    # One control iteration per real-time job until the horizon or the goal.
    while T >= time and lastIndex > target_ind:
        # Start job
        print("Starting job {0}".format(time / dt))
        # Calc control input
        ai = proportional_control(target_speed, state.v)
        di, target_ind = pure_pursuit_steer_control(
            state, target_course, target_ind)
        state.update(ai, di)  # Control vehicle
        time += dt
        states.append(time, state)
        if show_animation:  # pragma: no cover
            plt.cla()
            # for stopping simulation with the esc key.
            plt.gcf().canvas.mpl_connect(
                'key_release_event',
                lambda event: [exit(0) if event.key == 'escape' else None])
            plot_arrow(state.x, state.y, state.yaw)
            plt.plot(cx, cy, "-r", label="course")
            plt.plot(states.x, states.y, "-b", label="trajectory")
            plt.plot(cx[target_ind], cy[target_ind], "xg", label="target")
            plt.axis("equal")
            plt.grid(True)
            plt.title("Speed[km/h]:" + str(state.v * 3.6)[:4])
            plt.pause(0.001)
        # End job
        num_jobs += 1
        print("Finished PP job #{0}".format(num_jobs))
        liblitmus.call_sleep_next_period()
    # Clean up litmus task
    liblitmus.set_task_mode_background()
    print("\nPure pursuit is now a background task again.\n")
    # Test
    assert lastIndex >= target_ind, "Cannot goal"
    if show_animation:  # pragma: no cover
        plt.cla()
        plt.plot(cx, cy, ".r", label="course")
        plt.plot(states.x, states.y, "-b", label="trajectory")
        plt.legend()
        plt.xlabel("x[m]")
        plt.ylabel("y[m]")
        plt.axis("equal")
        plt.grid(True)
        plt.subplots(1)
        plt.plot(states.t, [iv * 3.6 for iv in states.v], "-r")
        plt.xlabel("Time[s]")
        plt.ylabel("Speed[km/h]")
        plt.grid(True)
        plt.show()
# Script entry point: run the simulation when executed directly.
if __name__ == '__main__':
    print("Pure pursuit path tracking simulation start")
    main()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"liblitmus.set_task_mode_background",
"math.cos",
"math.hypot",
"numpy.arange",
"liblitmus.set_task_mode_litmusrt",
"math.tan",
"liblitmus.call_wait_for_ts_release",
"liblitmus.call_sleep_next_period",
"matplotlib.pyplot.xlabel",
"matplotli... | [((5401, 5424), 'numpy.arange', 'np.arange', (['(0)', 'xmax', '(0.5)'], {}), '(0, xmax, 0.5)\n', (5410, 5424), True, 'import numpy as np\n'), ((5979, 6007), 'liblitmus.call_init_litmus', 'liblitmus.call_init_litmus', ([], {}), '()\n', (6005, 6007), False, 'import liblitmus\n'), ((6063, 6133), 'liblitmus.call_set_rt_task_param', 'liblitmus.call_set_rt_task_param', (['wcet', 'period', 'deadline', 'phase', 'early'], {}), '(wcet, period, deadline, phase, early)\n', (6095, 6133), False, 'import liblitmus\n'), ((6197, 6231), 'liblitmus.set_task_mode_litmusrt', 'liblitmus.set_task_mode_litmusrt', ([], {}), '()\n', (6229, 6231), False, 'import liblitmus\n'), ((6364, 6400), 'liblitmus.call_wait_for_ts_release', 'liblitmus.call_wait_for_ts_release', ([], {}), '()\n', (6398, 6400), False, 'import liblitmus\n'), ((7676, 7712), 'liblitmus.set_task_mode_background', 'liblitmus.set_task_mode_background', ([], {}), '()\n', (7710, 7712), False, 'import liblitmus\n'), ((1330, 1348), 'math.hypot', 'math.hypot', (['dx', 'dy'], {}), '(dx, dy)\n', (1340, 1348), False, 'import math\n'), ((3667, 3715), 'math.atan2', 'math.atan2', (['(ty - state.rear_y)', '(tx - state.rear_x)'], {}), '(ty - state.rear_y, tx - state.rear_x)\n', (3677, 3715), False, 'import math\n'), ((4186, 4200), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4194, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4419, 4466), 'math.cos', 'math.cos', (['(ix * 23157.0 * iy * 217832.0 + 9758.0)'], {}), '(ix * 23157.0 * iy * 217832.0 + 9758.0)\n', (4427, 4466), False, 'import math\n'), ((4479, 4490), 'math.cos', 'math.cos', (['r'], {}), '(r)\n', (4487, 4490), False, 'import math\n'), ((4492, 4503), 'math.sin', 'math.sin', (['r'], {}), '(r)\n', (4500, 4503), False, 'import math\n'), ((7609, 7643), 'liblitmus.call_sleep_next_period', 'liblitmus.call_sleep_next_period', ([], {}), '()\n', (7641, 7643), False, 'import liblitmus\n'), ((7889, 7898), 'matplotlib.pyplot.cla', 'plt.cla', ([], 
{}), '()\n', (7896, 7898), True, 'import matplotlib.pyplot as plt\n'), ((7907, 7945), 'matplotlib.pyplot.plot', 'plt.plot', (['cx', 'cy', '""".r"""'], {'label': '"""course"""'}), "(cx, cy, '.r', label='course')\n", (7915, 7945), True, 'import matplotlib.pyplot as plt\n'), ((7954, 8008), 'matplotlib.pyplot.plot', 'plt.plot', (['states.x', 'states.y', '"""-b"""'], {'label': '"""trajectory"""'}), "(states.x, states.y, '-b', label='trajectory')\n", (7962, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8017, 8029), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8027, 8029), True, 'import matplotlib.pyplot as plt\n'), ((8038, 8056), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x[m]"""'], {}), "('x[m]')\n", (8048, 8056), True, 'import matplotlib.pyplot as plt\n'), ((8065, 8083), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y[m]"""'], {}), "('y[m]')\n", (8075, 8083), True, 'import matplotlib.pyplot as plt\n'), ((8092, 8109), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (8100, 8109), True, 'import matplotlib.pyplot as plt\n'), ((8118, 8132), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8126, 8132), True, 'import matplotlib.pyplot as plt\n'), ((8142, 8157), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (8154, 8157), True, 'import matplotlib.pyplot as plt\n'), ((8166, 8223), 'matplotlib.pyplot.plot', 'plt.plot', (['states.t', '[(iv * 3.6) for iv in states.v]', '"""-r"""'], {}), "(states.t, [(iv * 3.6) for iv in states.v], '-r')\n", (8174, 8223), True, 'import matplotlib.pyplot as plt\n'), ((8230, 8251), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time[s]"""'], {}), "('Time[s]')\n", (8240, 8251), True, 'import matplotlib.pyplot as plt\n'), ((8260, 8285), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speed[km/h]"""'], {}), "('Speed[km/h]')\n", (8270, 8285), True, 'import matplotlib.pyplot as plt\n'), ((8294, 8308), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], 
{}), '(True)\n', (8302, 8308), True, 'import matplotlib.pyplot as plt\n'), ((8317, 8327), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8325, 8327), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2263), 'numpy.hypot', 'np.hypot', (['dx', 'dy'], {}), '(dx, dy)\n', (2255, 2263), True, 'import numpy as np\n'), ((2282, 2294), 'numpy.argmin', 'np.argmin', (['d'], {}), '(d)\n', (2291, 2294), True, 'import numpy as np\n'), ((4369, 4416), 'math.sin', 'math.sin', (['(ix * 21942.0 + iy * 171324.0 + 8912.0)'], {}), '(ix * 21942.0 + iy * 171324.0 + 8912.0)\n', (4377, 4416), False, 'import math\n'), ((6890, 6899), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6897, 6899), True, 'import matplotlib.pyplot as plt\n'), ((7175, 7213), 'matplotlib.pyplot.plot', 'plt.plot', (['cx', 'cy', '"""-r"""'], {'label': '"""course"""'}), "(cx, cy, '-r', label='course')\n", (7183, 7213), True, 'import matplotlib.pyplot as plt\n'), ((7226, 7280), 'matplotlib.pyplot.plot', 'plt.plot', (['states.x', 'states.y', '"""-b"""'], {'label': '"""trajectory"""'}), "(states.x, states.y, '-b', label='trajectory')\n", (7234, 7280), True, 'import matplotlib.pyplot as plt\n'), ((7293, 7355), 'matplotlib.pyplot.plot', 'plt.plot', (['cx[target_ind]', 'cy[target_ind]', '"""xg"""'], {'label': '"""target"""'}), "(cx[target_ind], cy[target_ind], 'xg', label='target')\n", (7301, 7355), True, 'import matplotlib.pyplot as plt\n'), ((7368, 7385), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (7376, 7385), True, 'import matplotlib.pyplot as plt\n'), ((7398, 7412), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7406, 7412), True, 'import matplotlib.pyplot as plt\n'), ((7488, 7504), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (7497, 7504), True, 'import matplotlib.pyplot as plt\n'), ((773, 791), 'math.cos', 'math.cos', (['self.yaw'], {}), '(self.yaw)\n', (781, 791), False, 'import math\n'), ((836, 854), 'math.sin', 
'math.sin', (['self.yaw'], {}), '(self.yaw)\n', (844, 854), False, 'import math\n'), ((916, 934), 'math.cos', 'math.cos', (['self.yaw'], {}), '(self.yaw)\n', (924, 934), False, 'import math\n'), ((967, 985), 'math.sin', 'math.sin', (['self.yaw'], {}), '(self.yaw)\n', (975, 985), False, 'import math\n'), ((1025, 1040), 'math.tan', 'math.tan', (['delta'], {}), '(delta)\n', (1033, 1040), False, 'import math\n'), ((1114, 1132), 'math.cos', 'math.cos', (['self.yaw'], {}), '(self.yaw)\n', (1122, 1132), False, 'import math\n'), ((1177, 1195), 'math.sin', 'math.sin', (['self.yaw'], {}), '(self.yaw)\n', (1185, 1195), False, 'import math\n'), ((3763, 3778), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (3771, 3778), False, 'import math\n'), ((4070, 4083), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (4078, 4083), False, 'import math\n'), ((4094, 4107), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (4102, 4107), False, 'import math\n'), ((5447, 5466), 'math.sin', 'math.sin', (['(ix / 10.0)'], {}), '(ix / 10.0)\n', (5455, 5466), False, 'import math\n'), ((5470, 5486), 'math.sin', 'math.sin', (['(ix / 5)'], {}), '(ix / 5)\n', (5478, 5486), False, 'import math\n'), ((6968, 6977), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6975, 6977), True, 'import matplotlib.pyplot as plt\n')] |
import conx as cx
import numpy as np
from keras.datasets import mnist
from keras.utils import (to_categorical, get_file)
description = """
Original source: http://yann.lecun.com/exdb/mnist/
The MNIST dataset contains 70,000 images of handwritten digits (zero
to nine) that have been size-normalized and centered in a square grid
of pixels. Each image is a 28 × 28 × 1 array of floating-point numbers
representing grayscale intensities ranging from 0 (black) to 1
(white). The target data consists of one-hot binary vectors of size
10, corresponding to the digit classification categories zero through
nine. Some example MNIST images are shown below:

"""
def mnist_h5(*args, **kwargs):
    """
    Load the Keras MNIST dataset from an H5 file.

    Downloads mnist.h5 from the conx-data repository (cached locally by
    keras get_file) and wraps its 'inputs', 'targets' and 'labels'
    datasets in a conx Dataset.  The open h5py.File handle is kept on
    dataset.h5 so the underlying arrays stay readable.
    """
    import h5py
    path = "mnist.h5"
    url = "https://raw.githubusercontent.com/Calysto/conx-data/master/mnist/mnist.h5"
    path = get_file(path, origin=url)
    h5 = h5py.File(path, "r")
    dataset = cx.Dataset()
    # Point the dataset at the on-disk HDF5 arrays, then cache them in memory.
    dataset._inputs = h5["inputs"]
    dataset._targets = h5["targets"]
    dataset._labels = h5["labels"]
    dataset.h5 = h5
    dataset.name = "MNIST-H5"
    dataset.description = description
    dataset._cache_values()
    return dataset
def mnist(*args, **kwargs):
    """Load the Keras MNIST dataset (70,000 28x28 grayscale digit images)
    into a conx Dataset.

    Inputs are float16 arrays of shape (28, 28, 1) scaled to [0, 1];
    targets are one-hot uint8 vectors of length 10; labels are the digit
    characters '0'..'9'.  Train and test splits are concatenated.
    """
    from keras.datasets import mnist
    # input image dimensions
    img_rows, img_cols = 28, 28
    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    x_train = x_train.astype('float16')
    x_test = x_test.astype('float16')
    # Scale pixel intensities from [0, 255] down to [0, 1]
    inputs = np.concatenate((x_train,x_test)) / 255
    labels = np.concatenate((y_train,y_test)) # ints, 0 to 10
    ###########################################
    # fix mis-labeled image(s) in Keras dataset
    labels[10994] = 9
    ###########################################
    targets = to_categorical(labels).astype("uint8")
    labels = np.array([str(label) for label in labels], dtype=str)
    dataset = cx.Dataset()
    dataset.load_direct([inputs], [targets], [labels])
    return dataset
| [
"keras.datasets.mnist.load_data",
"conx.Dataset",
"h5py.File",
"keras.utils.to_categorical",
"keras.utils.get_file",
"numpy.concatenate"
] | [((982, 1008), 'keras.utils.get_file', 'get_file', (['path'], {'origin': 'url'}), '(path, origin=url)\n', (990, 1008), False, 'from keras.utils import to_categorical, get_file\n'), ((1018, 1038), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (1027, 1038), False, 'import h5py\n'), ((1053, 1065), 'conx.Dataset', 'cx.Dataset', ([], {}), '()\n', (1063, 1065), True, 'import conx as cx\n'), ((1572, 1589), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1587, 1589), False, 'from keras.datasets import mnist\n'), ((1914, 1947), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (1928, 1947), True, 'import numpy as np\n'), ((2263, 2275), 'conx.Dataset', 'cx.Dataset', ([], {}), '()\n', (2273, 2275), True, 'import conx as cx\n'), ((1862, 1895), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (1876, 1895), True, 'import numpy as np\n'), ((2143, 2165), 'keras.utils.to_categorical', 'to_categorical', (['labels'], {}), '(labels)\n', (2157, 2165), False, 'from keras.utils import to_categorical, get_file\n')] |
import sys
sys.path.insert(0, "\\Users\\Gebruiker\\Documents\\GitHub\\parcels\\") # Set path to find the newest parcels code
import time as ostime
import numpy as np
from parcels import FieldSet, ParticleSet, AdvectionRK4_3D, ErrorCode
from datetime import timedelta
from netCDF4 import Dataset,num2date,date2num
from functions import deleteparticle, removeNaNs, DistParticle, FinalDistance, Samples, boundary_advectionRK4_3D
def run(flow,dt,bconstant,init=False,repeat=True,repeatdt = 0.1,foldername ='21objects',spinup=6):
    """Advect particles through a precomputed 2-D coral flow field with
    Parcels and write trajectories plus run metadata to a NetCDF file.

    Parameters
    ----------
    flow       : name of the flow-field NetCDF file (without extension)
    dt         : particle integration time step in seconds
    bconstant  : beaching strategy constant stored on the fieldset
    init       : if True, also seed particles at every grid point at t=0
    repeat     : if True, re-release particles at the inflow column every repeatdt
    repeatdt   : release interval in seconds
    foldername : folder holding the input data and preprocessed maps
    spinup     : unused in this function -- TODO confirm intended use
    """
    DistParticle.setLastID(0)
    filename = flow
    fb = 'forward' # variable to determine whether the flowfields are analysed 'forward' or 'backward' in time
    bconstant = bconstant  # NOTE(review): self-assignment is a no-op
    corald = Dataset(foldername + '/' + filename + '.nc', 'r+') # read netcdf file with input
    # Extract all variables into np arrays --> in the future xarray will be used
    T = corald.variables['T'][:]
    X = corald.variables['X'][:]
    Y = corald.variables['Y'][:]
    U = corald.variables['U'][:]
    V = corald.variables['V'][:]
    corald.close()
    U = np.asarray(U)
    U = np.expand_dims(U, 2) # add a third dimension
    V = np.asarray(V)
    V = np.expand_dims(V, 2) # add a third dimension
    # Only the first 61 time slices are used (matches runtime of 60 s below).
    t = num2date(T[:61], units='seconds since 2000-01-01 00:00:00.0') # make t a datetime object
    t = date2num(t, units='seconds since 2000-01-01 00:00:00.0')
    times = t
    xs = X
    ys = np.asarray([-1, 0,
                    1]) # third dimension with length 3. 2D flow field will be inserted on the middle value to ensure the AdvectionRK4_3D works correctly
    depths = -Y # Y was height, but parcels expects depth
    u = np.zeros((61,U.shape[1],U.shape[2],U.shape[3]))
    u = np.concatenate((u,u,u),axis=2) # add the third dimension
    u[:,:,1,:] = U[:61,:,0,:] # add the data to the middle value of the third dimension
    v = np.zeros(u.shape)
    w = np.zeros(u.shape)
    w[:,:,1,:] = -V[:61,:,0,:] # because depth = -Y, w = -V
    # Auxiliary fields sampled along trajectories: distance to nearest object,
    # id of the closest object, and a border mask (from preprocessed maps).
    dist = np.zeros(u.shape)
    distancemap = np.load(foldername + '/preprocessed/' + 'distancemap.npy')
    dist[:, :, 1, :] = np.asarray([distancemap] * len(u))
    closest = np.zeros(u.shape)
    closestobject = np.load(foldername + '/preprocessed/' + 'closestobject.npy')
    closest[:, :, 1, :] = np.asarray([closestobject] * len(u))
    border = np.zeros(u.shape)
    bordermap = np.load(foldername + '/preprocessed/' + 'bordermap.npy')
    border[:, :, 1, :] = np.asarray([bordermap] * len(u))
    data = {'U': u,
            'V': v,
            'W': w,
            'B': border,
            'C': closest,
            'D': dist}
    dimensions = {'lon': xs,
                  'lat': ys,
                  'depth': depths,
                  'time': times}
    fieldset = FieldSet.from_data(data=data, dimensions=dimensions, mesh='flat', allow_time_extrapolation=True)
    # Nearest-neighbour interpolation for the discrete map fields.
    fieldset.B.interp_method = 'nearest'
    fieldset.C.interp_method = 'nearest'
    fieldset.add_constant('dx', X[1] - X[0])
    if repeat:
        fieldset.add_constant('repeatdt', repeatdt)
    fieldset.add_constant('beaching', bconstant)
    fieldset.add_constant('x0', xs[0])
    fieldset.add_constant('y0', ys[0])
    fieldset.add_constant('z0', depths[0])
    lons, ds = np.meshgrid(xs, depths) # meshgrid at all gridpoints in the flow data
    um = np.ma.masked_invalid(u[0, :, 1, :]) # retrieve mask from flowfield to take out points over coral objects
    lons = np.ma.masked_array(lons, mask=um.mask) # mask points in meshgrid
    loni = lons.flatten()
    ds = np.ma.masked_array(ds, mask=um.mask) # mask points in meshgrid
    di = ds.flatten()
    outputdt = timedelta(seconds=0.1) # timesteps to create output at
    dt = timedelta(seconds=dt) # timesteps to calculate particle trajectories
    runtime = timedelta(seconds=60) # total time to execute the particleset
    lati = np.asarray([0] * len(loni)) # all particles must start and stay on the middle value of the extra dimension
    inittime = np.asarray([0] * len(loni)).flatten() # default time to start the particles is zero
    if repeat:
        # Re-release a column of particles (first grid column) every repeatdt.
        repeatdt = repeatdt
        lonr = np.ma.array(lons[:,0],mask=[False]*len(lons[:,0]))
        dr = np.ma.array(ds[:,0],mask=[False]*len(ds[:,0]))
        r_steps = int((runtime.total_seconds())/ repeatdt)
        if not init:
            # Without full initialisation, start only with the release column.
            loni = lonr
            di = dr
            lati = np.ma.array(np.asarray([0] * len(lonr)), mask=[False]* len(lonr))
            inittime = np.ma.array(np.asarray([0] * len(lonr)), mask=[False]* len(lonr))
        for i in range(r_steps):
            loni = np.ma.concatenate((loni, lonr))
            di = np.ma.concatenate((di, dr))
            lati = np.concatenate((lati, np.asarray([0] * len(lonr))))
            inittime = np.concatenate((inittime, np.asarray([inittime[-1] + repeatdt] * len(lonr))))
    pset = ParticleSet(fieldset=fieldset, pclass=DistParticle, lon=loni, lat=lati, depth=di,time=inittime)
    n1_part = pset.size
    k_removeNaNs = pset.Kernel(removeNaNs)
    k_sample = pset.Kernel(Samples)  # Casting the SampleP function to a kernel.
    # Zero-length run: drop NaN particles and take the initial samples.
    pset.execute(k_removeNaNs+k_sample, runtime = timedelta(seconds=0))
    n2_part = pset.size
    k_dist = pset.Kernel(FinalDistance)  # Casting the FinalDistance function to a kernel.
    k_bound = pset.Kernel(boundary_advectionRK4_3D)  # Casting the Boundary_Advection function to a kernel.
    output_file=pset.ParticleFile(name=foldername+'/pfiles/'+'r'+str(repeatdt)[2:]+'-B'+str(fieldset.beaching)+'-'+flow+'-'+str(abs(dt.total_seconds()))[2:]+'-'+fb, outputdt=outputdt)
    stime = ostime.time()
    pset.execute(k_bound + k_dist + k_sample,
                 runtime=runtime,
                 dt=dt,
                 recovery = {ErrorCode.ErrorOutOfBounds:deleteparticle},
                 output_file=output_file)
    etime = ostime.time()
    # Record run parameters on the output file for later analysis.
    output_file.add_metadata('outputdt',str(outputdt.total_seconds())+' in seconds')
    output_file.add_metadata('repeatdt', str(repeatdt) + ' in seconds')
    output_file.add_metadata('runtime',str(runtime.total_seconds())+' in seconds')
    output_file.add_metadata('dt',str(dt.total_seconds())+' in seconds')
    output_file.add_metadata('dx', float(np.abs(X[1] - X[0])))
    output_file.add_metadata('executiontime',str(etime-stime)+' in seconds')
    output_file.add_metadata('beaching_strategy',fieldset.beaching)
    output_file.close()
    n3_part = pset.size
    print('Amount of particles at initialisation, 0th timestep and after execution respectively:' + str(
        n1_part) + ', ' + str(n2_part) + ', ' + str(n3_part))
# Run two example simulations when executed as a script.
if __name__ == "__main__":
    run('waveparabolic', 0.001, 2,foldername='21objects')
    run('parabolic', 0.001, 2, foldername='21objects')
| [
"sys.path.insert",
"datetime.timedelta",
"functions.DistParticle.setLastID",
"netCDF4.num2date",
"netCDF4.Dataset",
"numpy.asarray",
"numpy.concatenate",
"numpy.ma.masked_invalid",
"numpy.meshgrid",
"numpy.ma.masked_array",
"netCDF4.date2num",
"numpy.abs",
"parcels.FieldSet.from_data",
"nu... | [((11, 81), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""\\\\Users\\\\Gebruiker\\\\Documents\\\\GitHub\\\\parcels\\\\"""'], {}), "(0, '\\\\Users\\\\Gebruiker\\\\Documents\\\\GitHub\\\\parcels\\\\')\n", (26, 81), False, 'import sys\n'), ((533, 558), 'functions.DistParticle.setLastID', 'DistParticle.setLastID', (['(0)'], {}), '(0)\n', (555, 558), False, 'from functions import deleteparticle, removeNaNs, DistParticle, FinalDistance, Samples, boundary_advectionRK4_3D\n'), ((731, 781), 'netCDF4.Dataset', 'Dataset', (["(foldername + '/' + filename + '.nc')", '"""r+"""'], {}), "(foldername + '/' + filename + '.nc', 'r+')\n", (738, 781), False, 'from netCDF4 import Dataset, num2date, date2num\n'), ((1089, 1102), 'numpy.asarray', 'np.asarray', (['U'], {}), '(U)\n', (1099, 1102), True, 'import numpy as np\n'), ((1111, 1131), 'numpy.expand_dims', 'np.expand_dims', (['U', '(2)'], {}), '(U, 2)\n', (1125, 1131), True, 'import numpy as np\n'), ((1165, 1178), 'numpy.asarray', 'np.asarray', (['V'], {}), '(V)\n', (1175, 1178), True, 'import numpy as np\n'), ((1187, 1207), 'numpy.expand_dims', 'np.expand_dims', (['V', '(2)'], {}), '(V, 2)\n', (1201, 1207), True, 'import numpy as np\n'), ((1241, 1302), 'netCDF4.num2date', 'num2date', (['T[:61]'], {'units': '"""seconds since 2000-01-01 00:00:00.0"""'}), "(T[:61], units='seconds since 2000-01-01 00:00:00.0')\n", (1249, 1302), False, 'from netCDF4 import Dataset, num2date, date2num\n'), ((1339, 1395), 'netCDF4.date2num', 'date2num', (['t'], {'units': '"""seconds since 2000-01-01 00:00:00.0"""'}), "(t, units='seconds since 2000-01-01 00:00:00.0')\n", (1347, 1395), False, 'from netCDF4 import Dataset, num2date, date2num\n'), ((1431, 1453), 'numpy.asarray', 'np.asarray', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (1441, 1453), True, 'import numpy as np\n'), ((1674, 1724), 'numpy.zeros', 'np.zeros', (['(61, U.shape[1], U.shape[2], U.shape[3])'], {}), '((61, U.shape[1], U.shape[2], U.shape[3]))\n', (1682, 1724), True, 'import 
numpy as np\n'), ((1730, 1763), 'numpy.concatenate', 'np.concatenate', (['(u, u, u)'], {'axis': '(2)'}), '((u, u, u), axis=2)\n', (1744, 1763), True, 'import numpy as np\n'), ((1894, 1911), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (1902, 1911), True, 'import numpy as np\n'), ((1920, 1937), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (1928, 1937), True, 'import numpy as np\n'), ((2020, 2037), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (2028, 2037), True, 'import numpy as np\n'), ((2056, 2114), 'numpy.load', 'np.load', (["(foldername + '/preprocessed/' + 'distancemap.npy')"], {}), "(foldername + '/preprocessed/' + 'distancemap.npy')\n", (2063, 2114), True, 'import numpy as np\n'), ((2188, 2205), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (2196, 2205), True, 'import numpy as np\n'), ((2226, 2286), 'numpy.load', 'np.load', (["(foldername + '/preprocessed/' + 'closestobject.npy')"], {}), "(foldername + '/preprocessed/' + 'closestobject.npy')\n", (2233, 2286), True, 'import numpy as np\n'), ((2364, 2381), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (2372, 2381), True, 'import numpy as np\n'), ((2398, 2454), 'numpy.load', 'np.load', (["(foldername + '/preprocessed/' + 'bordermap.npy')"], {}), "(foldername + '/preprocessed/' + 'bordermap.npy')\n", (2405, 2454), True, 'import numpy as np\n'), ((2790, 2890), 'parcels.FieldSet.from_data', 'FieldSet.from_data', ([], {'data': 'data', 'dimensions': 'dimensions', 'mesh': '"""flat"""', 'allow_time_extrapolation': '(True)'}), "(data=data, dimensions=dimensions, mesh='flat',\n allow_time_extrapolation=True)\n", (2808, 2890), False, 'from parcels import FieldSet, ParticleSet, AdvectionRK4_3D, ErrorCode\n'), ((3267, 3290), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'depths'], {}), '(xs, depths)\n', (3278, 3290), True, 'import numpy as np\n'), ((3347, 3382), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['u[0, :, 1, :]'], {}), '(u[0, :, 1, 
:])\n', (3367, 3382), True, 'import numpy as np\n'), ((3465, 3503), 'numpy.ma.masked_array', 'np.ma.masked_array', (['lons'], {'mask': 'um.mask'}), '(lons, mask=um.mask)\n', (3483, 3503), True, 'import numpy as np\n'), ((3566, 3602), 'numpy.ma.masked_array', 'np.ma.masked_array', (['ds'], {'mask': 'um.mask'}), '(ds, mask=um.mask)\n', (3584, 3602), True, 'import numpy as np\n'), ((3668, 3690), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0.1)'}), '(seconds=0.1)\n', (3677, 3690), False, 'from datetime import timedelta\n'), ((3733, 3754), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'dt'}), '(seconds=dt)\n', (3742, 3754), False, 'from datetime import timedelta\n'), ((3817, 3838), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(60)'}), '(seconds=60)\n', (3826, 3838), False, 'from datetime import timedelta\n'), ((4883, 4983), 'parcels.ParticleSet', 'ParticleSet', ([], {'fieldset': 'fieldset', 'pclass': 'DistParticle', 'lon': 'loni', 'lat': 'lati', 'depth': 'di', 'time': 'inittime'}), '(fieldset=fieldset, pclass=DistParticle, lon=loni, lat=lati,\n depth=di, time=inittime)\n', (4894, 4983), False, 'from parcels import FieldSet, ParticleSet, AdvectionRK4_3D, ErrorCode\n'), ((5625, 5638), 'time.time', 'ostime.time', ([], {}), '()\n', (5636, 5638), True, 'import time as ostime\n'), ((5870, 5883), 'time.time', 'ostime.time', ([], {}), '()\n', (5881, 5883), True, 'import time as ostime\n'), ((4621, 4652), 'numpy.ma.concatenate', 'np.ma.concatenate', (['(loni, lonr)'], {}), '((loni, lonr))\n', (4638, 4652), True, 'import numpy as np\n'), ((4670, 4697), 'numpy.ma.concatenate', 'np.ma.concatenate', (['(di, dr)'], {}), '((di, dr))\n', (4687, 4697), True, 'import numpy as np\n'), ((5181, 5201), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (5190, 5201), False, 'from datetime import timedelta\n'), ((6240, 6259), 'numpy.abs', 'np.abs', (['(X[1] - X[0])'], {}), '(X[1] - X[0])\n', (6246, 6259), True, 'import numpy as np\n')] |
# <https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html>
import cv2
import numpy as np
# Load the OpenCV logo as grayscale, smooth it, and detect circles with the
# Hough transform, then draw and display the detections.
gray = cv2.imread('../src/opencv_logo.png', 0)
gray = cv2.medianBlur(gray, 5)
display = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
detected = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20,
                            param1=50, param2=30, minRadius=0, maxRadius=0)
detected = np.uint16(np.around(detected))
for cx, cy, radius in detected[0, :]:
    # Outline each circle in green and mark its centre with a red dot.
    cv2.circle(display, (cx, cy), radius, (0, 255, 0), 2)
    cv2.circle(display, (cx, cy), 2, (0, 0, 255), 3)
cv2.imshow('detected circles', display)
cv2.waitKey(0)
cv2.destroyAllWindows()
"cv2.medianBlur",
"cv2.HoughCircles",
"cv2.imshow",
"cv2.waitKey",
"cv2.circle",
"cv2.destroyAllWindows",
"numpy.around",
"cv2.cvtColor",
"cv2.imread"
] | [((141, 180), 'cv2.imread', 'cv2.imread', (['"""../src/opencv_logo.png"""', '(0)'], {}), "('../src/opencv_logo.png', 0)\n", (151, 180), False, 'import cv2\n'), ((186, 208), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(5)'], {}), '(img, 5)\n', (200, 208), False, 'import cv2\n'), ((215, 252), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (227, 252), False, 'import cv2\n'), ((263, 363), 'cv2.HoughCircles', 'cv2.HoughCircles', (['img', 'cv2.HOUGH_GRADIENT', '(1)', '(20)'], {'param1': '(50)', 'param2': '(30)', 'minRadius': '(0)', 'maxRadius': '(0)'}), '(img, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30,\n minRadius=0, maxRadius=0)\n', (279, 363), False, 'import cv2\n'), ((573, 609), 'cv2.imshow', 'cv2.imshow', (['"""detected circles"""', 'cimg'], {}), "('detected circles', cimg)\n", (583, 609), False, 'import cv2\n'), ((609, 623), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (620, 623), False, 'import cv2\n'), ((624, 647), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (645, 647), False, 'import cv2\n'), ((373, 391), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (382, 391), True, 'import numpy as np\n'), ((445, 497), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (455, 497), False, 'import cv2\n'), ((527, 576), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (537, 576), False, 'import cv2\n')] |
import os
import rnnSMAP
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy
import imp
import statsmodels.api as sm
from rnnSMAP import funPost
# Reload so interactive edits to the rnnSMAP package take effect.
imp.reload(rnnSMAP)
rnnSMAP.reload()
# Model/output configuration: network trained on CONUSv2f1, 2015 forcing.
trainName = 'CONUSv2f1'
out = trainName + '_y15_Forcing_dr60'
rootDB = rnnSMAP.kPath['DB_L3_NA']
rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
saveFolder = os.path.join(rnnSMAP.kPath['dirResult'], 'paperSigma', 'regComb')
# Sections of this script to run; uncomment entries to enable more.
doOpt = []
doOpt.append('loadData')
doOpt.append('print')
# doOpt.append('plotConf')
# doOpt.append('doTest')
# doOpt.append('plotCorr')
# doOpt.append('plotTemp')
# doOpt.append('plotBin')
# doOpt.append('plotProb')
# Regression options to evaluate (passed to statRegSigma2 below).
optLst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# optLst = [5, 6]
# Human-readable equation template for each regression option; filled in
# with the fitted coefficients when printing results.
optEquLst = [
    '{:.2f} sigma_mc^2 + {:.2f} sigma_mc * sigma_x^2 + sigma_x^2',
    '{:.2f} sigma_mc^2 + sigma_x^2',
    '{:.2f} sigma_mc^2 + {:.2f} sigma_x^2 + {:.2f} sigma_mc * sigma_x + {:.5f}',
    '{:.2f} sigma_mc^2 + {:.2f} sigma_x^2 + {:.5f}',
    '{:.2f} sigma_mc^2 + {:.2f} sigma_x^2 + {:.2f} sigma_mc * sigma_x',
    '{:.2f} sigma_mc^2 + {:.2f} sigma_x^2', '{:.2f} sigma_mc^2',
    '{:.2f} sigma_x^2', 'sigma_mc^2 + sigma_x^2 + {:.8f}'
]
fTestLst = [[1, 1, 0, 0], [1, 1, 0]]
# Global matplotlib styling for the paper figures.
matplotlib.rcParams.update({'font.size': 12})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
# NOTE(review): tight_layout() here acts on the (empty) current figure;
# the real figure calls fig.tight_layout() again after plotting.
plt.tight_layout()
#################################################
# load data
# Build the test/validation datasets, read SMAP observations and LSTM
# predictions, then collect sigma matrices and confidence statistics for
# the baseline estimators (sigmaMC, sigmaX, sigma) and every regression
# option in optLst.
if 'loadData' in doOpt:
    testSigmaLst = list()
    testConfLst = list()
    valSigmaLst = list()
    valConfLst = list()
    modelLst = list()
    # Test on CONUSv2f2; fit the regression on CONUSv2fy2 (validation).
    testName = 'CONUSv2f2'
    yr = [2015]
    valName = 'CONUSv2fy2'
    valYr = [2015]
    ds = rnnSMAP.classDB.DatasetPost(rootDB=rootDB,
                                     subsetName=testName,
                                     yrLst=yr)
    ds.readData(var='SMAP_AM', field='SMAP')
    ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
    dsVal = rnnSMAP.classDB.DatasetPost(rootDB=rootDB,
                                        subsetName=valName,
                                        yrLst=valYr)
    dsVal.readData(var='SMAP_AM', field='SMAP')
    dsVal.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
    testErr = ds.statCalError(predField='LSTM', targetField='SMAP')
    valErr = dsVal.statCalError(predField='LSTM', targetField='SMAP')
    for (dsTemp, sigmaTempLst, confTempLst) in zip([ds, dsVal],
                                                   [testSigmaLst, valSigmaLst],
                                                   [testConfLst, valConfLst]):
        sigmaTemp = dsTemp.statCalSigma(field='LSTM')
        confTemp = dsTemp.statCalConf(predField='LSTM', targetField='SMAP')
        # Baseline sigma estimates and their confidence curves.
        for sigmaStr in ['sigmaMC', 'sigmaX', 'sigma']:
            sigmaTempLst.append(getattr(sigmaTemp, sigmaStr + '_mat'))
            confTempLst.append(getattr(confTemp, 'conf_' + sigmaStr))
        # Regression-combined sigma for each option; the regression is fit
        # on dsVal and applied to dsTemp.
        for opt in optLst:
            print('doing option ' + str(opt))
            sigmaTemp, model = dsTemp.statRegSigma2(dsVal, opt=opt)
            confTemp = dsTemp.statCalConf(predField='LSTM', targetField='SMAP')
            sigmaTempLst.append(sigmaTemp.sigmaReg_mat)
            confTempLst.append(confTemp.conf_sigmaReg)
            modelLst.append(model)
# Summarise the loaded statistics: fitted equations, residuals, and the
# correlation between per-pixel ubRMSE and each sigma estimate.
if 'print' in doOpt:
    labelLst = ['sigmaMC', 'sigmaX', 'sigmaComb'] +\
        ['sigmaReg opt '+str(x) for x in optLst]
    # calculate cdf distance
    testRmseLst, testKsdLst = funPost.distCDF(testConfLst)
    valRmseLst, valKsdLst = funPost.distCDF(valConfLst)
    print('# Regressed equation')
    # Print the fitted coefficients of each regression option using its
    # equation template.
    for k in range(len(optLst)):
        wLst = modelLst[k].params.tolist()
        print('opt {}: '.format(optLst[k]) + optEquLst[k].format(*wLst))
    # residual
    # Sum of squared residuals between squared error and squared sigma.
    valSsrLst = list()
    testSsrLst = list()
    for (ssrLst, sigmaLst, dsTemp) in zip([testSsrLst, valSsrLst],
                                          [testSigmaLst, valSigmaLst],
                                          [ds, dsVal]):
        for (sigma, label) in zip(sigmaLst, labelLst):
            res = np.square(dsTemp.LSTM - dsTemp.SMAP) - np.square(sigma)
            ssr = np.nansum(np.square(res.flatten()))
            ssrLst.append(ssr)
    # corr
    # Correlation between ubRMSE and the time-averaged sigma per pixel.
    valCorrLst = list()
    testCorrLst = list()
    for (corrLst, sigmaLst, err) in zip([testCorrLst, valCorrLst],
                                        [testSigmaLst, valSigmaLst],
                                        [testErr, valErr]):
        for (sigma, label) in zip(sigmaLst, labelLst):
            ubRMSE = err.ubRMSE
            corr = np.corrcoef(ubRMSE, np.nanmean(sigma, axis=1))[0, 1]
            corrLst.append(corr)
            # print('{}: R(ubRMSE,sigma) = {}'.format(label, corr))
    print('# Validation set')
    for k in range(len(valSigmaLst)):
        print('validation {}: R = {:.4f}, KS = {:.4f}'.format(
            labelLst[k], valCorrLst[k], valKsdLst[k]))
    print('# testing set')
    for k in range(len(testSigmaLst)):
        print('test {}: R = {:.4f}, KS = {:.4f}'.format(labelLst[k],
                                                        testCorrLst[k],
                                                        testKsdLst[k]))
#################################################
# plot confidence figure
# Side-by-side CDF plots of the confidence curves (validation | test).
if 'plotConf' in doOpt:
    figTitleLst = ['Validation yr' + str(valYr[0]), 'Test yr' + str(yr[0])]
    fig, axes = plt.subplots(ncols=len(figTitleLst),
                             figsize=(12, 6),
                             sharey=True)
    sigmaStrLst = ['sigmaX', 'sigmaMC', 'sigma']
    legLst = [r'$p_{mc}$', r'$p_{x}$', r'$p_{comb}$'] +\
        [r'$p_{reg}$ opt '+str(x) for x in optLst]
    for iFig in range(len(figTitleLst)):
        # Panel 0 shows validation, panel 1 shows the test set.
        statConfLst = testConfLst if iFig == 1 else valConfLst
        # NOTE(review): 'out' below shadows the model-output folder name
        # defined near the top of the script.
        _, _, out = rnnSMAP.funPost.plotCDF(
            statConfLst,
            ax=axes[iFig],
            legendLst=legLst,
            xlabel='Error Exceedance Probablity',
            ylabel=None,
            showDiff='KS')
        axes[iFig].set_title(figTitleLst[iFig])
        print(out['rmseLst'])
    axes[0].set_ylabel('Frequency')
    # axes[1].get_legend().remove()
    fig.tight_layout()
    fig.show()
    saveFile = os.path.join(saveFolder, 'regComb_conf_reg')
    fig.savefig(saveFile)
    # fig.savefig(saveFile+'.eps')
# Standalone experiment: ordinary least squares regression of the LSTM
# error against sigmaMC-derived predictors on a different year split.
if 'doTest' in doOpt:
    # import statsmodels.api as sm
    testName = 'CONUSv2f1'
    yr = [2016]
    valName = 'CONUSv2f1'
    valYr = [2017]
    trainName = 'CONUSv2f1'
    out = trainName + '_y15_Forcing_dr60'
    rootDB = rnnSMAP.kPath['DB_L3_NA']
    rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
    ds = rnnSMAP.classDB.DatasetPost(rootDB=rootDB,
                                     subsetName=testName,
                                     yrLst=yr)
    ds.readData(var='SMAP_AM', field='SMAP')
    ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
    dsVal = rnnSMAP.classDB.DatasetPost(rootDB=rootDB,
                                        subsetName=valName,
                                        yrLst=valYr)
    dsVal.readData(var='SMAP_AM', field='SMAP')
    dsVal.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
    dsReg = dsVal
    predField = 'LSTM'
    targetField = 'SMAP'
    statSigma = dsReg.statCalSigma(field=predField)
    statErr = ds.statCalError(predField=predField, targetField=targetField)
    # Predictors: sigmaMC^2 and the sigmaMC*sigmaX cross term.
    y = statErr.ubRMSE
    x1 = np.square(statSigma.sigmaMC)
    x2 = statSigma.sigmaMC * statSigma.sigmaX
    xx = np.stack((x1, x2), axis=1)
    # NOTE(review): yy subtracts sigmaX^2 from ubRMSE (not ubRMSE^2) --
    # looks dimensionally inconsistent; confirm intent before reuse.
    yy = y - np.square(statSigma.sigmaX)
    # Drop NaN rows before fitting the OLS model (no intercept).
    ind = np.where(~np.isnan(yy))[0]
    xf = xx[ind, :]
    yf = yy[ind]
    model = sm.OLS(yf, xf)
    result = model.fit()
    yp = result.predict(xf).flatten().astype(np.float32)
    yf = yf.flatten().astype(np.float32)
| [
"matplotlib.rcParams.update",
"rnnSMAP.reload",
"imp.reload",
"os.path.join",
"rnnSMAP.funPost.distCDF",
"numpy.square",
"numpy.stack",
"numpy.nanmean",
"rnnSMAP.funPost.plotCDF",
"numpy.isnan",
"matplotlib.pyplot.tight_layout",
"statsmodels.api.OLS",
"rnnSMAP.classDB.DatasetPost"
] | [((176, 195), 'imp.reload', 'imp.reload', (['rnnSMAP'], {}), '(rnnSMAP)\n', (186, 195), False, 'import imp\n'), ((196, 212), 'rnnSMAP.reload', 'rnnSMAP.reload', ([], {}), '()\n', (210, 212), False, 'import rnnSMAP\n'), ((366, 431), 'os.path.join', 'os.path.join', (["rnnSMAP.kPath['dirResult']", '"""paperSigma"""', '"""regComb"""'], {}), "(rnnSMAP.kPath['dirResult'], 'paperSigma', 'regComb')\n", (378, 431), False, 'import os\n'), ((1193, 1238), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (1219, 1238), False, 'import matplotlib\n'), ((1239, 1289), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.linewidth': 2}"], {}), "({'lines.linewidth': 2})\n", (1265, 1289), False, 'import matplotlib\n'), ((1290, 1341), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.markersize': 6}"], {}), "({'lines.markersize': 6})\n", (1316, 1341), False, 'import matplotlib\n'), ((1342, 1360), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1358, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1742), 'rnnSMAP.classDB.DatasetPost', 'rnnSMAP.classDB.DatasetPost', ([], {'rootDB': 'rootDB', 'subsetName': 'testName', 'yrLst': 'yr'}), '(rootDB=rootDB, subsetName=testName, yrLst=yr)\n', (1696, 1742), False, 'import rnnSMAP\n'), ((1941, 2016), 'rnnSMAP.classDB.DatasetPost', 'rnnSMAP.classDB.DatasetPost', ([], {'rootDB': 'rootDB', 'subsetName': 'valName', 'yrLst': 'valYr'}), '(rootDB=rootDB, subsetName=valName, yrLst=valYr)\n', (1968, 2016), False, 'import rnnSMAP\n'), ((3454, 3482), 'rnnSMAP.funPost.distCDF', 'funPost.distCDF', (['testConfLst'], {}), '(testConfLst)\n', (3469, 3482), False, 'from rnnSMAP import funPost\n'), ((3511, 3538), 'rnnSMAP.funPost.distCDF', 'funPost.distCDF', (['valConfLst'], {}), '(valConfLst)\n', (3526, 3538), False, 'from rnnSMAP import funPost\n'), ((6185, 6229), 'os.path.join', 'os.path.join', (['saveFolder', 
'"""regComb_conf_reg"""'], {}), "(saveFolder, 'regComb_conf_reg')\n", (6197, 6229), False, 'import os\n'), ((6604, 6677), 'rnnSMAP.classDB.DatasetPost', 'rnnSMAP.classDB.DatasetPost', ([], {'rootDB': 'rootDB', 'subsetName': 'testName', 'yrLst': 'yr'}), '(rootDB=rootDB, subsetName=testName, yrLst=yr)\n', (6631, 6677), False, 'import rnnSMAP\n'), ((6876, 6951), 'rnnSMAP.classDB.DatasetPost', 'rnnSMAP.classDB.DatasetPost', ([], {'rootDB': 'rootDB', 'subsetName': 'valName', 'yrLst': 'valYr'}), '(rootDB=rootDB, subsetName=valName, yrLst=valYr)\n', (6903, 6951), False, 'import rnnSMAP\n'), ((7377, 7405), 'numpy.square', 'np.square', (['statSigma.sigmaMC'], {}), '(statSigma.sigmaMC)\n', (7386, 7405), True, 'import numpy as np\n'), ((7461, 7487), 'numpy.stack', 'np.stack', (['(x1, x2)'], {'axis': '(1)'}), '((x1, x2), axis=1)\n', (7469, 7487), True, 'import numpy as np\n'), ((7616, 7630), 'statsmodels.api.OLS', 'sm.OLS', (['yf', 'xf'], {}), '(yf, xf)\n', (7622, 7630), True, 'import statsmodels.api as sm\n'), ((5773, 5912), 'rnnSMAP.funPost.plotCDF', 'rnnSMAP.funPost.plotCDF', (['statConfLst'], {'ax': 'axes[iFig]', 'legendLst': 'legLst', 'xlabel': '"""Error Exceedance Probablity"""', 'ylabel': 'None', 'showDiff': '"""KS"""'}), "(statConfLst, ax=axes[iFig], legendLst=legLst,\n xlabel='Error Exceedance Probablity', ylabel=None, showDiff='KS')\n", (5796, 5912), False, 'import rnnSMAP\n'), ((7501, 7528), 'numpy.square', 'np.square', (['statSigma.sigmaX'], {}), '(statSigma.sigmaX)\n', (7510, 7528), True, 'import numpy as np\n'), ((4049, 4085), 'numpy.square', 'np.square', (['(dsTemp.LSTM - dsTemp.SMAP)'], {}), '(dsTemp.LSTM - dsTemp.SMAP)\n', (4058, 4085), True, 'import numpy as np\n'), ((4088, 4104), 'numpy.square', 'np.square', (['sigma'], {}), '(sigma)\n', (4097, 4104), True, 'import numpy as np\n'), ((7550, 7562), 'numpy.isnan', 'np.isnan', (['yy'], {}), '(yy)\n', (7558, 7562), True, 'import numpy as np\n'), ((4573, 4598), 'numpy.nanmean', 'np.nanmean', (['sigma'], {'axis': 
'(1)'}), '(sigma, axis=1)\n', (4583, 4598), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import os
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from fbprophet import Prophet
# Canonical source of the Oxford COVID-19 Government Response Tracker data.
DATA_URL = "https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv"
# Local data locations, resolved relative to this file.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(ROOT_DIR, 'data')
DATA_FILE_PATH = os.path.join(DATA_PATH, 'OxCGRT_latest.csv')
# Auxiliary population tables (global countries, US states, UK).
ADDITIONAL_CONTEXT_FILE = os.path.join(DATA_PATH, "Additional_Context_Data_Global.csv")
ADDITIONAL_US_STATES_CONTEXT = os.path.join(DATA_PATH, "US_states_populations.csv")
ADDITIONAL_UK_CONTEXT = os.path.join(DATA_PATH, "uk_populations.csv")
# Non-pharmaceutical intervention columns used as model "actions".
NPI_COLUMNS = ['C1_School closing',
               'C2_Workplace closing',
               'C3_Cancel public events',
               'C4_Restrictions on gatherings',
               'C5_Close public transport',
               'C6_Stay at home requirements',
               'C7_Restrictions on internal movement',
               'C8_International travel controls',
               'H1_Public information campaigns',
               'H2_Testing policy',
               'H3_Contact tracing',
               'H6_Facial Coverings']
# Descriptive/context columns kept alongside the NPI columns.
CONTEXT_COLUMNS = ['CountryName',
                   'RegionName',
                   'GeoID',
                   'Date',
                   'ConfirmedCases',
                   'ConfirmedDeaths',
                   'Population']
# Days of history fed to the model per training sample.
NB_LOOKBACK_DAYS = 21
# Days held out at the end of each series for testing.
NB_TEST_DAYS = 14
# Rolling-window length (days) for smoothing new cases/deaths.
WINDOW_SIZE = 7
# Prefix that turns a US state name into its GeoID.
US_PREFIX = "United States / "
NUM_TRIALS = 1
MAX_NB_COUNTRIES = 20
class DataProcessor:
    """Loads the OxCGRT dataset, engineers prediction columns, and builds
    per-geo numpy train/test samples for a Keras-style model."""
    def __init__(self, data_url='data/OxCGRT_latest.csv'):
        # Cleaned dataframe with engineered columns (see _prepare_dataframe).
        self.df = self._prepare_dataframe(data_url)
        geos = self.df.GeoID.unique()
        # print(geos)
        self._geo_id_encoder = None
        # Assigning the geo list triggers the property setter below, which
        # fits a LabelEncoder over all GeoIDs.
        self.geo_id_encoder = geos
        # Dict of per-geo numpy arrays (lookback windows and targets).
        self.country_samples = self._create_country_samples(self.df, geos)
    def _prepare_dataframe(self, data_url: str) -> pd.DataFrame:
        """
        Loads the Oxford dataset, cleans it up and prepares the necessary columns. Depending on options, also
        loads the Johns Hopkins dataset and merges that in.
        :param data_url: the url containing the original data
        :return: a Pandas DataFrame with the historical data
        """
        # Original df from Oxford
        df = self._load_original_data(data_url)
        df = self._attach_population_context_df(df)
        # Drop countries with no population data
        df.dropna(subset=['Population'], inplace=True)
        # Keep only needed columns
        columns = CONTEXT_COLUMNS + NPI_COLUMNS
        df = df[columns]
        # Fill in missing values
        self._fill_missing_values(df)
        # Optionally start counting from the first confirmed case:
        # df = df[df['ConfirmedCases'] > 0]
        # Compute number of new cases and deaths each day
        df['NewCases'] = df.groupby('GeoID').ConfirmedCases.diff().fillna(0)
        df['NewDeaths'] = df.groupby('GeoID').ConfirmedDeaths.diff().fillna(0)
        # Replace negative values (which do not make sense for these columns) with 0
        df['NewCases'] = df['NewCases'].clip(lower=0)
        df['NewDeaths'] = df['NewDeaths'].clip(lower=0)
        # Compute smoothed versions of new cases and deaths each day
        df['SmoothNewCases'] = df.groupby('GeoID')['NewCases'].rolling(
            WINDOW_SIZE, center=False).mean().fillna(0).reset_index(0, drop=True)
        df['SmoothNewDeaths'] = df.groupby('GeoID')['NewDeaths'].rolling(
            WINDOW_SIZE, center=False).mean().fillna(0).reset_index(0, drop=True)
        # Compute percent change in new cases and deaths each day
        df['CaseRatio'] = df.groupby('GeoID').SmoothNewCases.pct_change(
        ).fillna(0).replace(np.inf, 0) + 1
        df['DeathRatio'] = df.groupby('GeoID').SmoothNewDeaths.pct_change(
        ).fillna(0).replace(np.inf, 0) + 1
        # Add column for proportion of population infected
        df['ProportionInfected'] = df['ConfirmedCases'] / df['Population']
        # Create column of value to predict
        # This is the quantity the model will ultimately predict: the case
        # ratio rescaled by the remaining susceptible fraction.
        df['PredictionRatio'] = df['CaseRatio'] / (1 - df['ProportionInfected'])
        return df
    @staticmethod
    def _load_original_data(data_url):
        """Read the raw OxCGRT CSV and add a GeoID column."""
        # NOTE(review): error_bad_lines was removed in pandas 2.0 -- this
        # call assumes an older pandas; confirm the pinned version.
        latest_df = pd.read_csv(data_url,
                                parse_dates=['Date'],
                                encoding="ISO-8859-1",
                                dtype={"RegionName": str,
                                       "RegionCode": str},
                                error_bad_lines=False)
        has_region_countries = latest_df[latest_df['RegionName'].notnull()]['CountryName'].unique()
        # ['Brazil', 'United Kingdom', 'United States']
        print(has_region_countries)
        # GeoID is CountryName / RegionName
        # np.where usage: if A then B else C
        latest_df["GeoID"] = np.where(latest_df["RegionName"].isnull(),
                                      latest_df["CountryName"],
                                      latest_df["CountryName"] + ' / ' + latest_df["RegionName"])
        return latest_df
    @staticmethod
    def _fill_missing_values(df):
        """
        # Fill missing values by interpolation, ffill, and filling NaNs
        :param df: Dataframe to be filled
        """
        df.update(df.groupby('GeoID').ConfirmedCases.apply(
            lambda group: group.interpolate(limit_area='inside')))
        # Drop country / regions for which no number of cases is available
        df.dropna(subset=['ConfirmedCases'], inplace=True)
        df.update(df.groupby('GeoID').ConfirmedDeaths.apply(
            lambda group: group.interpolate(limit_area='inside')))
        # Drop country / regions for which no number of deaths is available
        df.dropna(subset=['ConfirmedDeaths'], inplace=True)
        # NPI levels: carry the last known policy forward; unknown start = 0.
        for npi_column in NPI_COLUMNS:
            df.update(df.groupby('GeoID')[npi_column].ffill().fillna(0))
    @staticmethod
    def _attach_population_context_df(df):
        """Left-join per-GeoID population figures onto the main frame."""
        # File containing the population for each country
        # Note: this file contains only countries population, not regions
        additional_context_df = pd.read_csv(ADDITIONAL_CONTEXT_FILE,
                                            usecols=['CountryName', 'Population'])
        additional_context_df['GeoID'] = additional_context_df['CountryName']
        # US states population
        additional_us_states_df = pd.read_csv(ADDITIONAL_US_STATES_CONTEXT,
                                              usecols=['NAME', 'POPESTIMATE2019'])
        # Rename the columns to match measures_df ones
        additional_us_states_df.rename(columns={'POPESTIMATE2019': 'Population'}, inplace=True)
        # Prefix with country name to match measures_df
        additional_us_states_df['GeoID'] = US_PREFIX + additional_us_states_df['NAME']
        # Append the new data to additional_df
        # NOTE(review): DataFrame.append was removed in pandas 2.0 (use
        # pd.concat on newer versions).
        additional_context_df = additional_context_df.append(additional_us_states_df)
        # UK population
        additional_uk_df = pd.read_csv(ADDITIONAL_UK_CONTEXT)
        # Append the new data to additional_df
        additional_context_df = additional_context_df.append(additional_uk_df)
        # Merge the 2 DataFrames
        df = df.merge(additional_context_df, on=['GeoID'], how='left', suffixes=('', '_y'))
        return df
    @staticmethod
    def _create_country_samples(df: pd.DataFrame, geos: list) -> dict:
        """
        For each country, creates numpy arrays for Keras
        :param df: a Pandas DataFrame with historical data for countries (the "Oxford" dataset)
        :param geos: a list of geo names
        :return: a dictionary of train and test sets, for each specified country
        """
        context_column = 'PredictionRatio'
        action_columns = NPI_COLUMNS
        outcome_column = 'PredictionRatio'
        country_samples = {}
        for g in geos:
            cdf = df[df.GeoID == g]
            cdf = cdf[cdf.ConfirmedCases.notnull()]
            context_data = np.array(cdf[context_column])
            action_data = np.array(cdf[action_columns])
            outcome_data = np.array(cdf[outcome_column])
            context_samples = []
            action_samples = []
            outcome_samples = []
            nb_total_days = outcome_data.shape[0]
            # Sliding windows: NB_LOOKBACK_DAYS of history predict day d.
            for d in range(NB_LOOKBACK_DAYS, nb_total_days):
                context_samples.append(context_data[d - NB_LOOKBACK_DAYS:d])
                action_samples.append(action_data[d - NB_LOOKBACK_DAYS:d])
                outcome_samples.append(outcome_data[d])
            if len(outcome_samples) > 0:
                X_context = np.expand_dims(np.stack(context_samples, axis=0), axis=2)
                X_action = np.stack(action_samples, axis=0)
                y = np.stack(outcome_samples, axis=0)
                # Last NB_TEST_DAYS samples of each geo are held out for test.
                country_samples[g] = {
                    'X_context': X_context,
                    'X_action': X_action,
                    'y': y,
                    'X_train_context': X_context[:-NB_TEST_DAYS],
                    'X_train_action': X_action[:-NB_TEST_DAYS],
                    'y_train': y[:-NB_TEST_DAYS],
                    'X_test_context': X_context[-NB_TEST_DAYS:],
                    'X_test_action': X_action[-NB_TEST_DAYS:],
                    'y_test': y[-NB_TEST_DAYS:],
                }
        return country_samples
    @property
    def geo_id_encoder(self, ):
        # Fitted LabelEncoder mapping GeoID strings to integer labels.
        return self._geo_id_encoder
    @geo_id_encoder.setter
    def geo_id_encoder(self, geos):
        # The setter takes the raw list of GeoIDs and fits the encoder.
        encoder = preprocessing.LabelEncoder()
        encoder.fit(geos)
        self._geo_id_encoder = encoder
class RandomForestPredictor:
    """Pairs a scikit-learn random forest regressor with a DataProcessor."""
    def __init__(self, processor):
        # Hyper-parameters appear hand-picked -- TODO confirm tuning source.
        self.model = RandomForestRegressor(max_depth=10, max_features=4, n_estimators=500)
        self.data_processor = processor
class ProphetPredictor:
    """Facebook Prophet forecaster for daily new COVID-19 cases.

    Uses logistic growth (the training frame must carry 'cap' and 'floor'
    columns), a custom half-yearly seasonality, and US public holidays.
    """
    def __init__(self):
        m = Prophet(growth='logistic', weekly_seasonality=False)
        # BUG FIX: the original called m.add_regressor() with no arguments,
        # which raises TypeError because Prophet.add_regressor requires a
        # column name. Extra regressors are now registered explicitly via
        # the add_regressor() method below, before train() is called.
        m.add_seasonality(name='halfly', period=180, fourier_order=5)
        m.add_country_holidays('US')
        self.model = m
    def train(self, df):
        """Fit the model on a frame with 'Date' and 'NewCases' columns.

        Prophet expects the timestamp column named 'ds' and the target
        named 'y', so both are added to *df* in place before fitting.
        """
        df['ds'] = df['Date']
        df['y'] = df['NewCases']
        self.model.fit(df)
    def add_regressor(self, column_name):
        """Register an extra regressor column for fitting.

        Must be called before train(); the named column must be present in
        the frame passed to train().
        """
        self.model.add_regressor(column_name)
if __name__ == '__main__':
    # Demo: fit a Prophet forecaster on New York daily new cases and plot
    # a 30-day forecast with components.
    data_processor = DataProcessor()
    df = data_processor.df
    df = df[df['GeoID'] == 'United States / New York']
    predictor = ProphetPredictor()
    # Logistic-growth capacity: at least 0.1% of the population, or 20%
    # above the historical daily maximum, whichever is larger.
    cap = max(df.iloc[0]['Population'] * 0.001, df['NewCases'].max()*1.2)
    df['cap'] = cap
    df['floor'] = 0
    predictor.train(df)
    future = predictor.model.make_future_dataframe(periods=30)
    print('cap:', cap)
    # The future frame also needs cap/floor columns for logistic growth.
    future['cap'] = cap
    future['floor'] = 0
    future.tail()
    forecast = predictor.model.predict(future)
    forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
    fig1 = predictor.model.plot(forecast)
    fig1.show()
    fig2 = predictor.model.plot_components(forecast)
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_csv",
"os.path.join",
"numpy.array",
"numpy.stack",
"fbprophet.Prophet",
"os.path.abspath"
] | [((337, 367), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data"""'], {}), "(ROOT_DIR, 'data')\n", (349, 367), False, 'import os\n'), ((385, 429), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""OxCGRT_latest.csv"""'], {}), "(DATA_PATH, 'OxCGRT_latest.csv')\n", (397, 429), False, 'import os\n'), ((456, 517), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""Additional_Context_Data_Global.csv"""'], {}), "(DATA_PATH, 'Additional_Context_Data_Global.csv')\n", (468, 517), False, 'import os\n'), ((549, 601), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""US_states_populations.csv"""'], {}), "(DATA_PATH, 'US_states_populations.csv')\n", (561, 601), False, 'import os\n'), ((626, 671), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""uk_populations.csv"""'], {}), "(DATA_PATH, 'uk_populations.csv')\n", (638, 671), False, 'import os\n'), ((298, 323), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (313, 323), False, 'import os\n'), ((4229, 4369), 'pandas.read_csv', 'pd.read_csv', (['data_url'], {'parse_dates': "['Date']", 'encoding': '"""ISO-8859-1"""', 'dtype': "{'RegionName': str, 'RegionCode': str}", 'error_bad_lines': '(False)'}), "(data_url, parse_dates=['Date'], encoding='ISO-8859-1', dtype={\n 'RegionName': str, 'RegionCode': str}, error_bad_lines=False)\n", (4240, 4369), True, 'import pandas as pd\n'), ((6126, 6201), 'pandas.read_csv', 'pd.read_csv', (['ADDITIONAL_CONTEXT_FILE'], {'usecols': "['CountryName', 'Population']"}), "(ADDITIONAL_CONTEXT_FILE, usecols=['CountryName', 'Population'])\n", (6137, 6201), True, 'import pandas as pd\n'), ((6390, 6468), 'pandas.read_csv', 'pd.read_csv', (['ADDITIONAL_US_STATES_CONTEXT'], {'usecols': "['NAME', 'POPESTIMATE2019']"}), "(ADDITIONAL_US_STATES_CONTEXT, usecols=['NAME', 'POPESTIMATE2019'])\n", (6401, 6468), True, 'import pandas as pd\n'), ((6995, 7029), 'pandas.read_csv', 'pd.read_csv', (['ADDITIONAL_UK_CONTEXT'], {}), '(ADDITIONAL_UK_CONTEXT)\n', (7006, 7029), True, 'import 
pandas as pd\n'), ((9504, 9532), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (9530, 9532), False, 'from sklearn import preprocessing\n'), ((9687, 9756), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'max_depth': '(10)', 'max_features': '(4)', 'n_estimators': '(500)'}), '(max_depth=10, max_features=4, n_estimators=500)\n', (9708, 9756), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((9859, 9911), 'fbprophet.Prophet', 'Prophet', ([], {'growth': '"""logistic"""', 'weekly_seasonality': '(False)'}), "(growth='logistic', weekly_seasonality=False)\n", (9866, 9911), False, 'from fbprophet import Prophet\n'), ((7979, 8008), 'numpy.array', 'np.array', (['cdf[context_column]'], {}), '(cdf[context_column])\n', (7987, 8008), True, 'import numpy as np\n'), ((8035, 8064), 'numpy.array', 'np.array', (['cdf[action_columns]'], {}), '(cdf[action_columns])\n', (8043, 8064), True, 'import numpy as np\n'), ((8092, 8121), 'numpy.array', 'np.array', (['cdf[outcome_column]'], {}), '(cdf[outcome_column])\n', (8100, 8121), True, 'import numpy as np\n'), ((8693, 8725), 'numpy.stack', 'np.stack', (['action_samples'], {'axis': '(0)'}), '(action_samples, axis=0)\n', (8701, 8725), True, 'import numpy as np\n'), ((8746, 8779), 'numpy.stack', 'np.stack', (['outcome_samples'], {'axis': '(0)'}), '(outcome_samples, axis=0)\n', (8754, 8779), True, 'import numpy as np\n'), ((8623, 8656), 'numpy.stack', 'np.stack', (['context_samples'], {'axis': '(0)'}), '(context_samples, axis=0)\n', (8631, 8656), True, 'import numpy as np\n')] |
"""
Adapted from NiftyNet
"""
import numpy as np
import numpy.ma as ma
# Default (low, high) percentile fractions used when no cutoff is supplied.
DEFAULT_CUTOFF = (0.01, 0.99)
# Functions from NiftyNet
def __compute_percentiles(img, mask, cutoff):
"""
Creates the list of percentile values to be used as landmarks for the
linear fitting.
:param img: Image on which to determine the percentiles
:param mask: Mask to use over the image to constraint to the relevant
information
:param cutoff: Values of the minimum and maximum percentiles to use for
the linear fitting
:return perc_results: list of percentiles value for the given image over
the mask
"""
perc = [cutoff[0],
0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.9,
cutoff[1]]
masked_img = ma.masked_array(img, np.logical_not(mask)).compressed()
perc_results = np.percentile(masked_img, 100 * np.array(perc))
return perc_results
def __standardise_cutoff(cutoff, type_hist='percentile'):
"""
Standardises the cutoff values given in the configuration
:param cutoff:
:param type_hist: Type of landmark normalisation chosen (median,
quartile, percentile)
:return cutoff: cutoff with appropriate adapted values
"""
cutoff = np.asarray(cutoff)
if cutoff is None:
return DEFAULT_CUTOFF
if len(cutoff) > 2:
cutoff = np.unique([np.min(cutoff), np.max(cutoff)])
if len(cutoff) < 2:
return DEFAULT_CUTOFF
if cutoff[0] > cutoff[1]:
cutoff[0], cutoff[1] = cutoff[1], cutoff[0]
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
if type_hist == 'quartile':
cutoff[0] = np.min([cutoff[0], 0.24])
cutoff[1] = np.max([cutoff[1], 0.76])
else:
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def create_standard_range():
return 0., 100.
def __averaged_mapping(perc_database, s1, s2):
"""
Map the landmarks of the database to the chosen range
:param perc_database: perc_database over which to perform the averaging
:param s1, s2: limits of the mapping range
:return final_map: the average mapping
"""
# assuming shape: n_data_points = perc_database.shape[0]
# n_percentiles = perc_database.shape[1]
slope = (s2 - s1) / (perc_database[:, -1] - perc_database[:, 0])
slope = np.nan_to_num(slope)
final_map = slope.dot(perc_database) / perc_database.shape[0]
intercept = np.mean(s1 - slope * perc_database[:, 0])
final_map = final_map + intercept
return final_map
def normalize(data, landmarks, cutoff=DEFAULT_CUTOFF, masking_function=None):
mapping = landmarks
img = data
image_shape = img.shape
img = img.reshape(-1).astype(np.float32)
if masking_function is not None:
mask = masking_function(img)
else:
mask = np.ones_like(img, dtype=np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
cutoff = __standardise_cutoff(cutoff)
perc = __compute_percentiles(img, mask, cutoff)
# Apply linear histogram standardisation
range_mapping = mapping[range_to_use]
range_perc = perc[range_to_use]
diff_mapping = range_mapping[1:] - range_mapping[:-1]
diff_perc = range_perc[1:] - range_perc[:-1]
# handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc == 0] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(img, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * img + aff_img
new_img = new_img.reshape(image_shape)
return new_img
| [
"numpy.mean",
"numpy.ones_like",
"numpy.digitize",
"numpy.logical_not",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.nan_to_num"
] | [((1233, 1251), 'numpy.asarray', 'np.asarray', (['cutoff'], {}), '(cutoff)\n', (1243, 1251), True, 'import numpy as np\n'), ((2383, 2403), 'numpy.nan_to_num', 'np.nan_to_num', (['slope'], {}), '(slope)\n', (2396, 2403), True, 'import numpy as np\n'), ((2486, 2527), 'numpy.mean', 'np.mean', (['(s1 - slope * perc_database[:, 0])'], {}), '(s1 - slope * perc_database[:, 0])\n', (2493, 2527), True, 'import numpy as np\n'), ((3807, 3854), 'numpy.digitize', 'np.digitize', (['img', 'range_perc[1:-1]'], {'right': '(False)'}), '(img, range_perc[1:-1], right=False)\n', (3818, 3854), True, 'import numpy as np\n'), ((1648, 1673), 'numpy.min', 'np.min', (['[cutoff[0], 0.24]'], {}), '([cutoff[0], 0.24])\n', (1654, 1673), True, 'import numpy as np\n'), ((1694, 1719), 'numpy.max', 'np.max', (['[cutoff[1], 0.76]'], {}), '([cutoff[1], 0.76])\n', (1700, 1719), True, 'import numpy as np\n'), ((1750, 1775), 'numpy.min', 'np.min', (['[cutoff[0], 0.09]'], {}), '([cutoff[0], 0.09])\n', (1756, 1775), True, 'import numpy as np\n'), ((1796, 1821), 'numpy.max', 'np.max', (['[cutoff[1], 0.91]'], {}), '([cutoff[1], 0.91])\n', (1802, 1821), True, 'import numpy as np\n'), ((2880, 2912), 'numpy.ones_like', 'np.ones_like', (['img'], {'dtype': 'np.bool'}), '(img, dtype=np.bool)\n', (2892, 2912), True, 'import numpy as np\n'), ((868, 882), 'numpy.array', 'np.array', (['perc'], {}), '(perc)\n', (876, 882), True, 'import numpy as np\n'), ((782, 802), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (796, 802), True, 'import numpy as np\n'), ((1357, 1371), 'numpy.min', 'np.min', (['cutoff'], {}), '(cutoff)\n', (1363, 1371), True, 'import numpy as np\n'), ((1373, 1387), 'numpy.max', 'np.max', (['cutoff'], {}), '(cutoff)\n', (1379, 1387), True, 'import numpy as np\n')] |
from __future__ import print_function
from orphics import maps,io,cosmology,symcoupling as sc,stats,lensing
from enlib import enmap,bench
import numpy as np
import os,sys
cache = True
hdv = False
deg = 5
px = 1.5
shape,wcs = maps.rect_geometry(width_deg = deg,px_res_arcmin=px)
mc = sc.LensingModeCoupling(shape,wcs)
pol = "TE"
# for t in mc.integrands['test']:
# print(t['l1'])
# print(t['l2'])
# print(t['other'])
# print("----")
# print(len(mc.integrands['test']))
theory = cosmology.default_theory(lpad=20000)
noise_t = 27.0
noise_p = 40.0*np.sqrt(2.)
fwhm = 7.0
# noise_t = 10.0
# noise_p = 14.0*np.sqrt(2.)
# fwhm = 2.0
kbeam = maps.gauss_beam(fwhm,mc.modlmap)
ells = np.arange(0,3000,1)
lbeam = maps.gauss_beam(fwhm,ells)
ntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./kbeam**2.)
nee = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
nbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
lntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./lbeam**2.)
lnee = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
lnbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
ellmin = 20
ellmax = 3000
xmask = maps.mask_kspace(shape,wcs,lmin=ellmin,lmax=ellmax)
ymask = xmask
with bench.show("ALcalc"):
AL = mc.AL(pol,xmask,ymask,ntt,nee,nbb,theory=theory,hdv=hdv,cache=cache)
val = mc.NL_from_AL(AL)
bin_edges = np.arange(10,2000,40)
cents,nkk = stats.bin_in_annuli(val,mc.modlmap,bin_edges)
ls,hunls = np.loadtxt("../alhazen/data/hu_"+pol.lower()+".csv",delimiter=',',unpack=True)
pl = io.Plotter(yscale='log')
pl.add(ells,theory.gCl('kk',ells),lw=3,color='k')
pl.add(cents,nkk,ls="--")
pl.add(ls,hunls*2.*np.pi/4.,ls="-.")
oest = ['TE','ET'] if pol=='TE' else [pol]
ls,nlkks,theory,qest = lensing.lensing_noise(ells,lntt,lnee,lnbb,
ellmin,ellmin,ellmin,
ellmax,ellmax,ellmax,
bin_edges,
theory=theory,
estimators = oest,
unlensed_equals_lensed=False,
width_deg=10.,px_res_arcmin=1.0)
pl.add(ls,nlkks['mv'],ls="-")
with bench.show("ALcalc"):
cross = mc.cross(pol,pol,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
ynoise_t=None,ynoise_e=None,ynoise_b=None,
cross_xnoise_t=None,cross_ynoise_t=None,
cross_xnoise_e=None,cross_ynoise_e=None,
cross_xnoise_b=None,cross_ynoise_b=None,
theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
# cross = mc.cross(pol,pol,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
# ynoise_t=None,ynoise_e=None,ynoise_b=None,
# cross_xnoise_t=0,cross_ynoise_t=0,
# cross_xnoise_e=0,cross_ynoise_e=0,
# cross_xnoise_b=0,cross_ynoise_b=0,
# theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
Nlalt = mc.NL(AL,AL,cross)
cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)
pl.add(cents,nkkalt,marker="o",alpha=0.2)
pl.done()
print("nffts : ",mc.nfft,mc.nifft)
| [
"orphics.lensing.lensing_noise",
"numpy.sqrt",
"numpy.nan_to_num",
"enlib.bench.show",
"orphics.maps.rect_geometry",
"orphics.maps.mask_kspace",
"orphics.stats.bin_in_annuli",
"orphics.io.Plotter",
"orphics.cosmology.default_theory",
"orphics.symcoupling.LensingModeCoupling",
"orphics.maps.gauss... | [((228, 279), 'orphics.maps.rect_geometry', 'maps.rect_geometry', ([], {'width_deg': 'deg', 'px_res_arcmin': 'px'}), '(width_deg=deg, px_res_arcmin=px)\n', (246, 279), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((286, 320), 'orphics.symcoupling.LensingModeCoupling', 'sc.LensingModeCoupling', (['shape', 'wcs'], {}), '(shape, wcs)\n', (308, 320), True, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((498, 534), 'orphics.cosmology.default_theory', 'cosmology.default_theory', ([], {'lpad': '(20000)'}), '(lpad=20000)\n', (522, 534), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((655, 688), 'orphics.maps.gauss_beam', 'maps.gauss_beam', (['fwhm', 'mc.modlmap'], {}), '(fwhm, mc.modlmap)\n', (670, 688), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((695, 716), 'numpy.arange', 'np.arange', (['(0)', '(3000)', '(1)'], {}), '(0, 3000, 1)\n', (704, 716), True, 'import numpy as np\n'), ((723, 750), 'orphics.maps.gauss_beam', 'maps.gauss_beam', (['fwhm', 'ells'], {}), '(fwhm, ells)\n', (738, 750), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((756, 825), 'numpy.nan_to_num', 'np.nan_to_num', (['((noise_t * np.pi / 180.0 / 60.0) ** 2.0 / kbeam ** 2.0)'], {}), '((noise_t * np.pi / 180.0 / 60.0) ** 2.0 / kbeam ** 2.0)\n', (769, 825), True, 'import numpy as np\n'), ((816, 885), 'numpy.nan_to_num', 'np.nan_to_num', (['((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / kbeam ** 2.0)'], {}), '((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / kbeam ** 2.0)\n', (829, 885), True, 'import numpy as np\n'), ((876, 945), 'numpy.nan_to_num', 'np.nan_to_num', (['((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / kbeam ** 2.0)'], {}), '((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / kbeam ** 2.0)\n', (889, 945), True, 'import numpy as np\n'), ((937, 1006), 'numpy.nan_to_num', 'np.nan_to_num', 
(['((noise_t * np.pi / 180.0 / 60.0) ** 2.0 / lbeam ** 2.0)'], {}), '((noise_t * np.pi / 180.0 / 60.0) ** 2.0 / lbeam ** 2.0)\n', (950, 1006), True, 'import numpy as np\n'), ((998, 1067), 'numpy.nan_to_num', 'np.nan_to_num', (['((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / lbeam ** 2.0)'], {}), '((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / lbeam ** 2.0)\n', (1011, 1067), True, 'import numpy as np\n'), ((1059, 1128), 'numpy.nan_to_num', 'np.nan_to_num', (['((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / lbeam ** 2.0)'], {}), '((noise_p * np.pi / 180.0 / 60.0) ** 2.0 / lbeam ** 2.0)\n', (1072, 1128), True, 'import numpy as np\n'), ((1149, 1203), 'orphics.maps.mask_kspace', 'maps.mask_kspace', (['shape', 'wcs'], {'lmin': 'ellmin', 'lmax': 'ellmax'}), '(shape, wcs, lmin=ellmin, lmax=ellmax)\n', (1165, 1203), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((1358, 1381), 'numpy.arange', 'np.arange', (['(10)', '(2000)', '(40)'], {}), '(10, 2000, 40)\n', (1367, 1381), True, 'import numpy as np\n'), ((1392, 1439), 'orphics.stats.bin_in_annuli', 'stats.bin_in_annuli', (['val', 'mc.modlmap', 'bin_edges'], {}), '(val, mc.modlmap, bin_edges)\n', (1411, 1439), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((1534, 1558), 'orphics.io.Plotter', 'io.Plotter', ([], {'yscale': '"""log"""'}), "(yscale='log')\n", (1544, 1558), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((1739, 1948), 'orphics.lensing.lensing_noise', 'lensing.lensing_noise', (['ells', 'lntt', 'lnee', 'lnbb', 'ellmin', 'ellmin', 'ellmin', 'ellmax', 'ellmax', 'ellmax', 'bin_edges'], {'theory': 'theory', 'estimators': 'oest', 'unlensed_equals_lensed': '(False)', 'width_deg': '(10.0)', 'px_res_arcmin': '(1.0)'}), '(ells, lntt, lnee, lnbb, ellmin, ellmin, ellmin,\n ellmax, ellmax, ellmax, bin_edges, theory=theory, estimators=oest,\n unlensed_equals_lensed=False, width_deg=10.0, px_res_arcmin=1.0)\n', (1760, 
1948), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((2996, 3045), 'orphics.stats.bin_in_annuli', 'stats.bin_in_annuli', (['Nlalt', 'mc.modlmap', 'bin_edges'], {}), '(Nlalt, mc.modlmap, bin_edges)\n', (3015, 3045), False, 'from orphics import maps, io, cosmology, symcoupling as sc, stats, lensing\n'), ((565, 577), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (572, 577), True, 'import numpy as np\n'), ((1221, 1241), 'enlib.bench.show', 'bench.show', (['"""ALcalc"""'], {}), "('ALcalc')\n", (1231, 1241), False, 'from enlib import enmap, bench\n'), ((2101, 2121), 'enlib.bench.show', 'bench.show', (['"""ALcalc"""'], {}), "('ALcalc')\n", (2111, 2121), False, 'from enlib import enmap, bench\n')] |
import os
import sys
import pickle
import matplotlib.pyplot as plt
import numpy as np
import h5py
import argparse
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from matplotlib.colors import Normalize
sys.path.insert(0, os.getcwd())
from src.utils.helpers import *
import statsmodels.stats.multitest as smm
from gprofiler import GProfiler
import multiprocess as mp
from tqdm import tqdm
import time
from pebble import ProcessPool, ProcessExpired
from concurrent.futures import TimeoutError
GTEx_directory = '/hps/nobackup/research/stegle/users/willj/GTEx'
parser = argparse.ArgumentParser(description='Collection of experiments. Runs on the cluster.')
parser.add_argument('-g', '--group', help='Experiment group', required=True)
parser.add_argument('-n', '--name', help='Experiment name', required=True)
parser.add_argument('-p', '--params', help='Parameters')
parser.add_argument('-l', '--parallel', help='Parallel')
args = vars(parser.parse_args())
group = args['group']
name = args['name']
parameter_key = args['params']
parallel = args['parallel']
class TFCorrectedFeatureAssociations():
@staticmethod
def compute_pvalues():
os.makedirs(GTEx_directory + '/intermediate_results/{}'.format(group), exist_ok=True)
M = 2000
k = 1
print('Computing technical factor corrected associations')
t, a, m, s = parameter_key.split('_')
Y, X, dIDs, filt_tIDs, tfs, ths, t_idx = filter_and_correct_expression_and_image_features(t, m, a, s, M, k, pc_correction=False, tf_correction=True)
N = Y.shape[1]
print('Computing {} x {} = {} associations for: '.format(N, M, N*M), t, a, m, s)
print ("Normalising data")
n_Y = np.zeros_like(Y)
for i in range(1024):
original_feature = Y[:,i]
normalized_feature = normalize_feature(original_feature)
n_Y[:,i] = normalized_feature
res = compute_pearsonR(n_Y, X)
results = [res, filt_tIDs]
pickle.dump(results, open(GTEx_directory + '/intermediate_results/{group}/{name}_{key}.pickle'.format(group=group, name=name, key=parameter_key), 'wb'))
Rs_real, pvs_real, pvs_1 = res
Rs_real[np.isnan(Rs_real)] = 0
sorted_idx = np.argsort(Rs_real.flatten()**2)[::-1]
top10associations = []
for i in range(10):
position = sorted_idx[i]
expected_R = Rs_real.flatten()[sorted_idx[i]]
f, t = np.argwhere(Rs_real == expected_R)[0]
print (f,t)
feature = n_Y[:,f]
transcript = X[:,t]
R, pv = pearsonr(feature, transcript)
assert R == expected_R
transcript_name = get_gene_name(filt_tIDs[t])
association_data = [feature, f, transcript, transcript_name, pv, R]
top10associations.append(association_data)
pickle.dump(top10associations, open(GTEx_directory + '/results/{group}/top10associations_{key}.pickle'.format(group=group, key=parameter_key), 'wb'))
@staticmethod
def top10associations():
t, a, m, s = parameter_key.split('_')
results = pickle.load(open(GTEx_directory + '/intermediate_results/{group}/compute_pvalues_{key}.pickle'.format(group=group, key=parameter_key), 'rb'))
res, filt_tIDs = results
Rs_real, pvs_real, pvs_1 = res
Rs_real[np.isnan(Rs_real)] = 0
sorted_idx = np.argsort(Rs_real.flatten()**2)[::-1]
import pdb; pdb.set_trace()
top10associations = []
for i in range(10):
position = sorted_idx[i]
expected_R = Rs_real.flatten()[sorted_idx[i]]
f, t = np.argwhere(Rs_real == expected_R)[0]
print (f,t)
feature = n_Y[:,f]
transcript = X[:,t]
R, pv = pearsonr(feature, transcript)
assert R == expected_R
transcript_name = get_gene_name(filt_tIDs[t])
association_data = [feature, f, transcript, transcript_name, pv, R]
top10associations.append(association_data)
pickle.dump(top10associations, open(GTEx_directory + '/results/{group}/top10associations_{key}.pickle'.format(group=group, key=parameter_key), 'wb'))
@staticmethod
def associations_across_patchsizes():
import statsmodels.stats.multitest as smm
os.makedirs(GTEx_directory + '/results/{}'.format(group), exist_ok=True)
print ("Loading association data")
association_results, most_varying_feature_idx, filt_transcriptIDs = pickle.load(open(GTEx_directory + '/intermediate_results/TFCorrectedFeatureAssociations/corrected_pvalues.pickle', 'rb'))
SIZES = [128, 256, 512, 1024, 2048, 4096]
ALPHAS = [0.01, 0.001, 0.0001,0.00001]
print ("Calculating Bonferroni significant associations:")
all_counts = []
for alph in ALPHAS:
print ("Alpha: ", alph)
size_counts = []
for s in SIZES:
print ("Patch size: ", s)
pvalues = association_results['{}_{}_{}_{}'.format('Lung','mean','retrained',s)][1].flatten()
counts = sum(smm.multipletests(pvalues, method='bonferroni',alpha=alph)[0])
size_counts.append(counts)
all_counts.append(size_counts)
print ("Saving results")
pickle.dump(all_counts, open(GTEx_directory + '/results/{group}/{name}.pickle'.format(group=group, name=name), 'wb'))
@staticmethod
def associations_raw_vs_retrained():
import statsmodels.stats.multitest as smm
os.makedirs(GTEx_directory + '/results/{}'.format(group), exist_ok=True)
print ("Loading association data")
association_results, most_varying_feature_idx, filt_transcriptIDs = pickle.load(open(GTEx_directory + '/intermediate_results/TFCorrectedFeatureAssociations/corrected_pvalues.pickle', 'rb'))
alpha = 0.0001
SIZES = [128, 256, 512, 1024, 2048, 4096]
MODELS = ['retrained', 'raw']
print ("Calculating Bonferroni significant associations:")
all_counts = []
for m in MODELS:
print ("Model: ", m)
model_counts = []
for s in SIZES:
print ("Patch size: ", s)
pvalues = association_results['{}_{}_{}_{}'.format('Lung','mean',m,s)][1].flatten()
counts = sum(smm.multipletests(pvalues, method='bonferroni',alpha=alpha)[0])
model_counts.append(counts)
all_counts.append(model_counts)
print ("Saving results")
pickle.dump(all_counts, open(GTEx_directory + '/results/{group}/{name}.pickle'.format(group=group, name=name), 'wb'))
@staticmethod
def associations_mean_vs_median():
import statsmodels.stats.multitest as smm
os.makedirs(GTEx_directory + '/results/{}'.format(group), exist_ok=True)
print ("Loading association data")
association_results, most_varying_feature_idx, filt_transcriptIDs = pickle.load(open(GTEx_directory + '/intermediate_results/TFCorrectedFeatureAssociations/corrected_pvalues.pickle', 'rb'))
alpha = 0.0001
SIZES = [128, 256, 512, 1024, 2048, 4096]
AGGREGATIONS = ['mean', 'median']
print ("Calculating Bonferroni significant associations:")
all_counts = []
for a in AGGREGATIONS:
print ("Aggregation: ", a)
aggregation_counts = []
for s in SIZES:
print ("Patch size: ", s)
pvalues = association_results['{}_{}_{}_{}'.format('Lung',a,'retrained',s)][1].flatten()
counts = sum(smm.multipletests(pvalues, method='bonferroni',alpha=alpha)[0])
aggregation_counts.append(counts)
all_counts.append(aggregation_counts)
print ("Saving results")
pickle.dump(all_counts, open(GTEx_directory + '/results/{group}/{name}.pickle'.format(group=group, name=name), 'wb'))
@staticmethod
def features_with_significant_transcripts():
import statsmodels.stats.multitest as smm
os.makedirs(GTEx_directory + '/results/{}'.format(group), exist_ok=True)
print ("Loading association data")
association_results, most_varying_feature_idx, filt_transcriptIDs = pickle.load(open(GTEx_directory + '/intermediate_results/TFCorrectedFeatureAssociations/corrected_pvalues.pickle', 'rb'))
alpha = 0.0001
SIZES = [128, 256, 512, 1024, 2048, 4096]
print ("Calculating Bonferroni significant associations:")
size_counts = []
for s in SIZES:
print ("Patch size: ", s)
pvalues = association_results['{}_{}_{}_{}'.format('Lung','mean','retrained',s)][1]
original_shape = pvalues.shape
counts = sum(np.sum(smm.multipletests(pvalues.flatten(),method='bonferroni',alpha=alpha)[0].reshape(original_shape),axis=1) > 0)
size_counts.append(counts)
print ("Saving results")
pickle.dump(size_counts, open(GTEx_directory + '/results/{group}/{name}.pickle'.format(group=group, name=name), 'wb'))
@staticmethod
def transcripts_with_significant_features():
import statsmodels.stats.multitest as smm
os.makedirs(GTEx_directory + '/results/{}'.format(group), exist_ok=True)
print ("Loading association data")
association_results, most_varying_feature_idx, filt_transcriptIDs = pickle.load(open(GTEx_directory + '/intermediate_results/TFCorrectedFeatureAssociations/corrected_pvalues.pickle', 'rb'))
alpha = 0.0001
SIZES = [128, 256, 512, 1024, 2048, 4096]
print ("Calculating Bonferroni significant associations:")
size_counts = []
for s in SIZES:
print ("Patch size: ", s)
pvalues = association_results['{}_{}_{}_{}'.format('Lung','mean','retrained',s)][1]
original_shape = pvalues.shape
counts = sum(np.sum(smm.multipletests(pvalues.flatten(),method='bonferroni',alpha=alpha)[0].reshape(original_shape),axis=0) > 0)
size_counts.append(counts)
print ("Saving results")
pickle.dump(size_counts, open(GTEx_directory + '/results/{group}/{name}.pickle'.format(group=group, name=name), 'wb'))
if __name__ == '__main__':
eval(group + '().' + name + '()')
| [
"argparse.ArgumentParser",
"statsmodels.stats.multitest.multipletests",
"os.getcwd",
"numpy.argwhere",
"numpy.isnan",
"pdb.set_trace",
"numpy.zeros_like"
] | [((609, 700), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collection of experiments. Runs on the cluster."""'}), "(description=\n 'Collection of experiments. Runs on the cluster.')\n", (632, 700), False, 'import argparse\n'), ((261, 272), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (270, 272), False, 'import os\n'), ((1747, 1763), 'numpy.zeros_like', 'np.zeros_like', (['Y'], {}), '(Y)\n', (1760, 1763), True, 'import numpy as np\n'), ((3510, 3525), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3523, 3525), False, 'import pdb\n'), ((2236, 2253), 'numpy.isnan', 'np.isnan', (['Rs_real'], {}), '(Rs_real)\n', (2244, 2253), True, 'import numpy as np\n'), ((3406, 3423), 'numpy.isnan', 'np.isnan', (['Rs_real'], {}), '(Rs_real)\n', (3414, 3423), True, 'import numpy as np\n'), ((2495, 2529), 'numpy.argwhere', 'np.argwhere', (['(Rs_real == expected_R)'], {}), '(Rs_real == expected_R)\n', (2506, 2529), True, 'import numpy as np\n'), ((3702, 3736), 'numpy.argwhere', 'np.argwhere', (['(Rs_real == expected_R)'], {}), '(Rs_real == expected_R)\n', (3713, 3736), True, 'import numpy as np\n'), ((5198, 5257), 'statsmodels.stats.multitest.multipletests', 'smm.multipletests', (['pvalues'], {'method': '"""bonferroni"""', 'alpha': 'alph'}), "(pvalues, method='bonferroni', alpha=alph)\n", (5215, 5257), True, 'import statsmodels.stats.multitest as smm\n'), ((6432, 6492), 'statsmodels.stats.multitest.multipletests', 'smm.multipletests', (['pvalues'], {'method': '"""bonferroni"""', 'alpha': 'alpha'}), "(pvalues, method='bonferroni', alpha=alpha)\n", (6449, 6492), True, 'import statsmodels.stats.multitest as smm\n'), ((7694, 7754), 'statsmodels.stats.multitest.multipletests', 'smm.multipletests', (['pvalues'], {'method': '"""bonferroni"""', 'alpha': 'alpha'}), "(pvalues, method='bonferroni', alpha=alpha)\n", (7711, 7754), True, 'import statsmodels.stats.multitest as smm\n')] |
# -*- coding: utf-8 -*-
"""
Extract slides from course video
Method: detect frame difference
Pckage need to be installed:
opencv:
opt 1: conda install -c menpo opencv
opt 2: conda install -c conda-forge opencv
<NAME>, 2020-04-15
"""
import os
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import glob
from pathlib import Path
import img2pdf
import argparse
# setup parameters
pattern = 'slide-{0:03}.jpg'
blk_size = 500
def showimg(img):
image = Image.fromarray(img, 'RGB');
image.show()
def imgs2gif(imgs, gifname):
images = [Image.open(img).convert('RGB') for img in imgs]
images[0].save(gifname, save_all=True, append_images=images[1:])
def imgs2pdf(imgs, pdfname, verbose=True):
with open(pdfname, 'wb') as f:
f.write(img2pdf.convert(imgs))
if verbose:
print('wrote images to {}'.format(pdfname))
def plot_diff(diffs, pngname, verbose=True):
plt.plot(diffs)
plt.xlabel('frame index (1 frame / sec)')
plt.ylabel('mean difference')
plt.title('frame differences')
plt.savefig(pngname)
#diffs_sorted = sorted(diffs)[::-1]
#plt.plot(diffs_sorted)
if verbose:
print('wrote diff plot to {}'.format(pngname))
def extract_slides(video_path, pdfname):
"""
check frames at rate of 1 frame per second, and if diff between previous and
current frame is greater than 0, extract that frame as slide, merge all
extracted slide images into pdf file.
"""
# get output dir
output_dir = os.path.splitext(video_path)[0]
# get video file handler
vidcap = cv2.VideoCapture(video_path)
nframes = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = vidcap.get(cv2.CAP_PROP_FPS)
idxs = list(range(0,nframes,int(fps))) # get 1 image per second
nidxs = len(idxs)
# read the first image
success, img1 = vidcap.read()
#showimg(img1)
#height, width = img1.shape[:2]
# write the first slide
nslides = 0 # count #slides extracted
print('writing slide {} (frame {}) ...'.format(nslides, 0))
output_path = os.path.join(output_dir, pattern.format(nslides))
cv2.imwrite(output_path, img1);
diffs = []
for i in range(1,nidxs):
# track status
if i % blk_size == 1:
lower, upper = i, min(i+blk_size-1, nidxs)
print('processing: {}/{} ~ {}/{} ...'.format(lower, nidxs, upper, nidxs))
# extract frame with specific frame index
vidcap.set(cv2.CAP_PROP_POS_FRAMES, idxs[i])
sucess, img2 = vidcap.read()
#showimg(img2)
# pass black screen
if np.max(img2) <= 1:
continue
# write frame as slide if mean diff > 0
# note: np.mean() != sum(sum())/(width x height)
diff = np.mean(abs(img1 - img2))
if diff > 0:
nslides += 1
print('writing slide {} (frame {}) ...'.format(nslides, idxs[i]))
output_path = os.path.join(output_dir, pattern.format(nslides))
cv2.imwrite(output_path, img2);
# post-processing
diffs.append(diff)
img1 = img2[:]
# get smallest non-zero diff value (diff between the 2 most similar slides)
diffs_no_zeros = [d for d in diffs if d!=0]
print('smallest non-zero diff: {}'.format(min(diffs_no_zeros)))
# plot and save diff plot
pngname = os.path.join(output_dir, 'diff.png')
plot_diff(diffs, pngname)
# merge slide images into pdf file
imgs = glob.glob(os.path.join(output_dir, 'slide*.jpg'))
imgs2pdf(imgs, pdfname)
def parse_args():
usage = "usage: extract slides from video frames by comparing the difference" \
+ " within the adjacent frames"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument('--video-dir', default=os.getcwd())
return parser.parse_args()
def main():
args = parse_args()
# setup dir and file path (paramenters need to be specified)
video_dir = args.video_dir
#video_dir = r'C:\Users\zge\Dropbox\Video\Courses\edX_IBM_DeepLearning2'
print('processing videos in video dir: {}'.format(video_dir))
video_paths = glob.glob(os.path.join(video_dir, '*.mp4'))
nvideos = len(video_paths)
for i, video_path in enumerate(video_paths):
print('[{}/{}] processing {} ...'.format(i+1, nvideos, video_path))
# specify the output dir
output_dir = os.path.splitext(video_path)[0]
if not os.path.isdir(output_dir):
print('creating dir: {}'.format(output_dir))
os.makedirs(output_dir)
# get the target pdf name
pdfname = os.path.join(str(Path(output_dir).parent),
'{}.pdf'.format(os.path.basename(output_dir)))
# extract slides if the target pdf file does not exist
if not os.path.isfile(pdfname):
extract_slides(video_path, pdfname)
else:
print('{} already exist, skip!'.format(pdfname))
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.ylabel",
"argparse.ArgumentParser",
"pathlib.Path",
"img2pdf.convert",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"os.path.isdir",
"matplotlib.pyplot.savefig",
"os.path.splitext",
"os.path.isfile",
"matplotlib.pyplot.title",
"cv2.imwrite",
"PIL.Im... | [((502, 529), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (517, 529), False, 'from PIL import Image\n'), ((925, 940), 'matplotlib.pyplot.plot', 'plt.plot', (['diffs'], {}), '(diffs)\n', (933, 940), True, 'import matplotlib.pyplot as plt\n'), ((943, 984), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frame index (1 frame / sec)"""'], {}), "('frame index (1 frame / sec)')\n", (953, 984), True, 'import matplotlib.pyplot as plt\n'), ((987, 1016), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean difference"""'], {}), "('mean difference')\n", (997, 1016), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1049), 'matplotlib.pyplot.title', 'plt.title', (['"""frame differences"""'], {}), "('frame differences')\n", (1028, 1049), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1072), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pngname'], {}), '(pngname)\n', (1063, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1555, 1583), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (1571, 1583), False, 'import cv2\n'), ((2067, 2097), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'img1'], {}), '(output_path, img1)\n', (2078, 2097), False, 'import cv2\n'), ((3177, 3213), 'os.path.join', 'os.path.join', (['output_dir', '"""diff.png"""'], {}), "(output_dir, 'diff.png')\n", (3189, 3213), False, 'import os\n'), ((3514, 3556), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'usage'}), '(description=usage)\n', (3537, 3556), False, 'import argparse\n'), ((1484, 1512), 'os.path.splitext', 'os.path.splitext', (['video_path'], {}), '(video_path)\n', (1500, 1512), False, 'import os\n'), ((3299, 3337), 'os.path.join', 'os.path.join', (['output_dir', '"""slide*.jpg"""'], {}), "(output_dir, 'slide*.jpg')\n", (3311, 3337), False, 'import os\n'), ((3939, 3971), 'os.path.join', 'os.path.join', (['video_dir', '"""*.mp4"""'], {}), "(video_dir, '*.mp4')\n", (3951, 3971), 
False, 'import os\n'), ((792, 813), 'img2pdf.convert', 'img2pdf.convert', (['imgs'], {}), '(imgs)\n', (807, 813), False, 'import img2pdf\n'), ((2495, 2507), 'numpy.max', 'np.max', (['img2'], {}), '(img2)\n', (2501, 2507), True, 'import numpy as np\n'), ((2848, 2878), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'img2'], {}), '(output_path, img2)\n', (2859, 2878), False, 'import cv2\n'), ((3602, 3613), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3611, 3613), False, 'import os\n'), ((4170, 4198), 'os.path.splitext', 'os.path.splitext', (['video_path'], {}), '(video_path)\n', (4186, 4198), False, 'import os\n'), ((4213, 4238), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (4226, 4238), False, 'import os\n'), ((4297, 4320), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4308, 4320), False, 'import os\n'), ((4533, 4556), 'os.path.isfile', 'os.path.isfile', (['pdfname'], {}), '(pdfname)\n', (4547, 4556), False, 'import os\n'), ((588, 603), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (598, 603), False, 'from PIL import Image\n'), ((4431, 4459), 'os.path.basename', 'os.path.basename', (['output_dir'], {}), '(output_dir)\n', (4447, 4459), False, 'import os\n'), ((4383, 4399), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (4387, 4399), False, 'from pathlib import Path\n')] |
from typing import Callable, Any, List
import numpy as np
import math
import autograd
from .dataset import Dataset
class DataLoader(object):
    """
    Iterates over a Dataset and groups its per-field sample values into batches.
    """

    def __init__(self, dataset: Dataset, batch_size: int = 1, shuffle: bool = True,
                 collate_fn: Callable[[List], Any] = None) -> None:
        """
        Constructor
        :param dataset: (Dataset) Dataset to draw samples from
        :param batch_size: (int) Batch size to be utilized, must be > 0
        :param shuffle: (bool) If true the sample order is randomized once at construction
        :param collate_fn: (Callable) Optional function merging the list of per-field
                           sample lists into a batch; defaults to stacking via autograd
        """
        # Check parameter
        assert batch_size > 0, 'Batch size must be bigger than 0.'
        # Save parameters
        self.dataset = dataset
        self.dataset_len = len(dataset)
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.collate_fn = collate_fn
        # Make indexes
        self.indexes = np.arange(self.dataset_len)
        # Shuffle indexes if utilized
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __len__(self) -> int:
        """
        Returns the number of full batches. Note that __iter__ additionally
        yields a trailing partial batch, which is not counted here.
        :return: (int) Length
        """
        return len(self.dataset) // self.batch_size

    def __iter__(self) -> Any:
        """
        Iterates over the whole dataset and yields batched dataset outputs.

        Bug fix: the previous implementation reused the loop variable name
        ``index`` in two inner loops, clobbering the outer batch index and
        thereby fetching wrong samples for every batch position after the
        first within the same batch.
        :return: (Any) Batch objects
        """
        for batch_index in range(math.ceil(self.dataset_len / self.batch_size)):
            return_values = None
            for offset in range(self.batch_size):
                sample_position = batch_index * self.batch_size + offset
                # The last batch may be partial; skip out-of-range positions
                if sample_position < self.dataset_len:
                    instances = self.dataset[self.indexes[sample_position]]
                    if return_values is None:
                        # First sample of the batch: one list per returned field
                        return_values = [[field] for field in instances]
                    else:
                        for field_index, instance in enumerate(instances):
                            return_values[field_index].append(instance)
            # Apply collate operation
            if self.collate_fn is None:
                yield tuple(autograd.stack(field_values) for field_values in return_values)
            else:
                yield self.collate_fn(return_values)
| [
"autograd.stack",
"math.ceil",
"numpy.arange",
"numpy.random.shuffle"
] | [((947, 974), 'numpy.arange', 'np.arange', (['self.dataset_len'], {}), '(self.dataset_len)\n', (956, 974), True, 'import numpy as np\n'), ((1050, 1081), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (1067, 1081), True, 'import numpy as np\n'), ((1467, 1512), 'math.ceil', 'math.ceil', (['(self.dataset_len / self.batch_size)'], {}), '(self.dataset_len / self.batch_size)\n', (1476, 1512), False, 'import math\n'), ((2324, 2360), 'autograd.stack', 'autograd.stack', (['return_values[index]'], {}), '(return_values[index])\n', (2338, 2360), False, 'import autograd\n')] |
import numpy as np
from yellowbrick.cluster import KElbowVisualizer
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
class AnalyzerSNR:
    """Computes a per-pixel signal-to-noise-ratio (SNR) image from trial data
    and groups the pixels into clusters by their SNR value."""

    def __init__(self, data):
        """
        :param data: trial data; get_snr() reduces over axis 2, so the last
                     axis is assumed to hold repeated samples per pixel
        """
        self.data = data
        self.snr = None                     # SNR image, filled by get_snr()
        self.k_clusters = None              # number of requested clusters
        self.snr_cutoff = None              # absolute SNR cutoff value
        self.clustered = None               # cluster label image (labels start at 1)
        self.cluster_indices_by_snr = None  # cluster labels sorted by mean SNR

    def get_snr(self, plot=False):
        """ Given a single trial, compute the SNR image for this trial """
        # SNR = temporal mean / temporal std, per pixel
        self.snr = np.mean(self.data, axis=2) / np.std(self.data, axis=2)
        if plot:
            plt.imshow(self.snr, cmap='jet', interpolation='nearest')
            plt.show()
        return self.snr

    def cluster_on_snr(self, k_clusters=3, snr_cutoff=0.7, plot=False):
        """ Perform 1-D clustering on SNR after masking out the pixels
        whose snr is below snr_cutoff (a percentile in range [0,1]) """
        # Validate before touching self.snr -- the original checked for None
        # only after already using self.snr, making the check unreachable.
        if self.snr is None:
            raise ValueError("No SNR data found.")
        self.k_clusters = k_clusters
        self.snr_cutoff = np.percentile(self.snr, snr_cutoff * 100)
        # np.float / np.int were removed in NumPy 1.24 -> use the builtins
        mask = (self.snr >= self.snr_cutoff).astype(float)
        if plot:
            # masked image: reasonability check
            plt.imshow(self.snr * mask, cmap='jet', interpolation='nearest')
            plt.show()
        # +1 cluster to absorb the masked 0's
        km = KMeans(n_clusters=k_clusters + 1).fit(self.snr.reshape(-1, 1))
        # Shift labels by one so cluster ids start at 1
        self.clustered = np.array(km.labels_).reshape(self.snr.shape) + 1
        self.clustered = self.clustered.astype(int)
        if plot:
            plt.imshow(self.clustered * mask, cmap='viridis', interpolation='nearest')
            plt.show()
        return self.clustered

    def get_average_snr_by_cluster(self):
        """ Returns a list of average SNR values by cluster, where
        the float at index i is the average SNR for cluster i+1 """
        if self.k_clusters is None:
            raise ValueError("must call method cluster_on_snr() before getting average SNRs for clusters")
        # NOTE(review): np.where(...)[0] keeps only the row indices of the 2-D
        # match, so this averages whole rows rather than the matched pixels.
        # Kept as-is to preserve existing behaviour -- verify intent.
        return [np.average(self.snr[np.where(self.clustered == i)[0]])
                for i in range(1, self.k_clusters + 2)]

    def get_kth_cluster(self, k, plot=False):
        """
        Returns iterable of indexes of pixels in the kth cluster
        (k=0,...,k_clusters), where k=0 is the cluster with the highest
        average SNR
        """
        if self.k_clusters is None:
            raise ValueError("must call method cluster_on_snr() before getting kth cluster")
        if k > self.k_clusters:
            raise ValueError("k is greater than number of clusters")
        # Sort clusters by SNR (which can differ from cluster label); cached
        if self.cluster_indices_by_snr is None:
            # average SNR by cluster
            avg_snr_by_cluster = self.get_average_snr_by_cluster()
            self.cluster_indices_by_snr = np.argsort(np.array(avg_snr_by_cluster)) + 1
        k_selection = self.cluster_indices_by_snr[-1 - k]
        mask = (self.snr >= self.snr_cutoff).astype(float)
        # Select the pixels in this SNR cluster, above SNR cutoff
        arg_selection = np.stack(np.where(self.clustered * mask == k_selection))
        if plot:
            for i in range(arg_selection.shape[1]):
                x_max = arg_selection[0][i]
                y_max = arg_selection[1][i]
                mask[x_max, y_max] *= 3  # highlight
            plt.imshow(self.clustered * mask, cmap='jet', interpolation='nearest')
            plt.show()
        return arg_selection

    def get_silhouette_score(self, plot_elbow=True):
        """ Return silhouette score and plot Elbow plot for this K-means clustering """
        raise NotImplementedError
        # Unreachable scaffold retained from the original implementation;
        # `features` and `label` are not defined anywhere in this class.
        print("Silhouette score:", silhouette_score(features, label))
        # Instantiate a scikit-learn K-Means model
        model = KMeans(random_state=0)
        # Instantiate the KElbowVisualizer with the number of clusters and the metric
        visualizer = KElbowVisualizer(model, k=(2, 6), metric='silhouette', timings=False)
        # Fit the data and visualize
        visualizer.fit(features)
        visualizer.poof()
| [
"sklearn.cluster.KMeans",
"numpy.mean",
"matplotlib.pyplot.imshow",
"numpy.where",
"numpy.array",
"yellowbrick.cluster.KElbowVisualizer",
"numpy.std",
"numpy.percentile",
"sklearn.metrics.silhouette_score",
"matplotlib.pyplot.show"
] | [((1129, 1170), 'numpy.percentile', 'np.percentile', (['self.snr', '(snr_cutoff * 100)'], {}), '(self.snr, snr_cutoff * 100)\n', (1142, 1170), True, 'import numpy as np\n'), ((4215, 4237), 'sklearn.cluster.KMeans', 'KMeans', ([], {'random_state': '(0)'}), '(random_state=0)\n', (4221, 4237), False, 'from sklearn.cluster import KMeans\n'), ((4347, 4416), 'yellowbrick.cluster.KElbowVisualizer', 'KElbowVisualizer', (['model'], {'k': '(2, 6)', 'metric': '"""silhouette"""', 'timings': '(False)'}), "(model, k=(2, 6), metric='silhouette', timings=False)\n", (4363, 4416), False, 'from yellowbrick.cluster import KElbowVisualizer\n'), ((626, 652), 'numpy.mean', 'np.mean', (['self.data'], {'axis': '(2)'}), '(self.data, axis=2)\n', (633, 652), True, 'import numpy as np\n'), ((655, 680), 'numpy.std', 'np.std', (['self.data'], {'axis': '(2)'}), '(self.data, axis=2)\n', (661, 680), True, 'import numpy as np\n'), ((719, 776), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.snr'], {'cmap': '"""jet"""', 'interpolation': '"""nearest"""'}), "(self.snr, cmap='jet', interpolation='nearest')\n", (729, 776), True, 'import matplotlib.pyplot as plt\n'), ((789, 799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (797, 799), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1476), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(self.snr * mask)'], {'cmap': '"""jet"""', 'interpolation': '"""nearest"""'}), "(self.snr * mask, cmap='jet', interpolation='nearest')\n", (1422, 1476), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1497, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1987), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(self.clustered * mask)'], {'cmap': '"""viridis"""', 'interpolation': '"""nearest"""'}), "(self.clustered * mask, cmap='viridis', interpolation='nearest')\n", (1923, 1987), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2010), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (2008, 2010), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3526), 'numpy.where', 'np.where', (['(self.clustered * mask == k_selection)'], {}), '(self.clustered * mask == k_selection)\n', (3488, 3526), True, 'import numpy as np\n'), ((3760, 3830), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(self.clustered * mask)'], {'cmap': '"""jet"""', 'interpolation': '"""nearest"""'}), "(self.clustered * mask, cmap='jet', interpolation='nearest')\n", (3770, 3830), True, 'import matplotlib.pyplot as plt\n'), ((3843, 3853), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3851, 3853), True, 'import matplotlib.pyplot as plt\n'), ((4112, 4145), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['features', 'label'], {}), '(features, label)\n', (4128, 4145), False, 'from sklearn.metrics import silhouette_samples, silhouette_score\n'), ((1626, 1659), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(k_clusters + 1)'}), '(n_clusters=k_clusters + 1)\n', (1632, 1659), False, 'from sklearn.cluster import KMeans\n'), ((1721, 1741), 'numpy.array', 'np.array', (['km.labels_'], {}), '(km.labels_)\n', (1729, 1741), True, 'import numpy as np\n'), ((3210, 3238), 'numpy.array', 'np.array', (['avg_snr_by_cluster'], {}), '(avg_snr_by_cluster)\n', (3218, 3238), True, 'import numpy as np\n'), ((2415, 2444), 'numpy.where', 'np.where', (['(self.clustered == i)'], {}), '(self.clustered == i)\n', (2423, 2444), True, 'import numpy as np\n')] |
import numpy as np
def predict_density(model, Xtest, feature=0, data_gen_fun=None, mean=False, density=False, burnin=0, MC=None, save=None):
    """
    Predict the density and compute error for a DP or EDP model for the Isotropic example, where the data generating
    function is a function of the mean of the input samples.

    :param model: EDP or DP model (must expose y, x, ydim, states and the
                  _predict_expectation / _predict_density methods)
    :param Xtest: The matrix of test covariates
    :param feature: The feature to plot, (one dimensional example so default 0)
    :param data_gen_fun: The true data generating function
    :param mean: Boolean for if we should plot the model predictive mean
    :param density: Boolean for if we should plot the model predictive density
    :param burnin: The burn-in used from the MCMC chain (that must already have been ran)
    :param MC: How far along the chain to go, if None defaults to the full chain
    :param save: Save or plot the results
    :return:
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError as err:
        # Fix: the original bare `except:` swallowed every exception type;
        # only a failed import should be reported as an import problem.
        raise ImportError('cannot import matplotlib') from err
    # Default y-axis limits from the observed data (may be replaced below)
    boundl = np.min(model.y[:, feature])
    boundu = np.max(model.y[:, feature])
    numStates = len(model.states)
    assert numStates > 0, 'plot_predict: Need to iterate over points at least once.'  # otherwise theta == 0
    assert model.ydim > feature, 'plot_predict: Cannot plot feature {0} as we have {1} features'.format(
        feature, model.ydim)
    # data generating function
    if data_gen_fun is not None:
        plt.plot(np.mean(Xtest, axis=1), data_gen_fun(Xtest, 0), 'k--')
    # mean predictions of the state
    if mean is True:
        M = model._predict_expectation(Xtest, burnin=burnin, MC=MC)
        plt.plot(np.mean(Xtest, axis=1), M[feature], 'b')
    if density is True:
        [Xgrid, Ygrid, D] = model._predict_density(Xtest, burnin=burnin, MC=MC)
        Dgrid = D[feature]
        plt.pcolormesh(np.asarray(Xgrid), Ygrid, Dgrid, cmap='Reds')
        plt.colorbar()
        # The density grid determines the y-limits when plotted
        boundl = np.min(Ygrid[:, 0])
        boundu = np.max(Ygrid[:, 0])
    # scatter plot data points
    plt.scatter([np.mean(model.x, axis=1)], [model.y[:, feature]], c='k')
    plt.axis([np.min(model.x), np.max(model.x), boundl, boundu])
    plt.title('Monte Carlo predictive density')
    plt.xlabel('x mean')
    plt.ylabel('y_{0}'.format(feature))
    if save is not None:
        plt.savefig(save)
    else:
        plt.show()
    plt.close()
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1292, 1319), 'numpy.min', 'np.min', (['model.y[:, feature]'], {}), '(model.y[:, feature])\n', (1298, 1319), True, 'import numpy as np\n'), ((1333, 1360), 'numpy.max', 'np.max', (['model.y[:, feature]'], {}), '(model.y[:, feature])\n', (1339, 1360), True, 'import numpy as np\n'), ((2434, 2477), 'matplotlib.pyplot.title', 'plt.title', (['"""Monte Carlo predictive density"""'], {}), "('Monte Carlo predictive density')\n", (2443, 2477), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2502), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x mean"""'], {}), "('x mean')\n", (2492, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2629, 2640), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2638, 2640), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2183), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2181, 2183), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2220), 'numpy.min', 'np.min', (['Ygrid[:, 0]'], {}), '(Ygrid[:, 0])\n', (2207, 2220), True, 'import numpy as np\n'), ((2238, 2257), 'numpy.max', 'np.max', (['Ygrid[:, 0]'], {}), '(Ygrid[:, 0])\n', (2244, 2257), True, 'import numpy as np\n'), ((2577, 2594), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (2588, 2594), True, 'import matplotlib.pyplot as plt\n'), ((2613, 2623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2621, 2623), True, 'import matplotlib.pyplot as plt\n'), ((1721, 1743), 'numpy.mean', 'np.mean', (['Xtest'], {'axis': '(1)'}), '(Xtest, axis=1)\n', (1728, 1743), True, 'import numpy as np\n'), ((1919, 1941), 'numpy.mean', 'np.mean', (['Xtest'], {'axis': '(1)'}), '(Xtest, axis=1)\n', (1926, 1941), True, 'import numpy as np\n'), ((2115, 2132), 'numpy.asarray', 'np.asarray', (['Xgrid'], {}), '(Xgrid)\n', (2125, 2132), True, 'import numpy as np\n'), ((2307, 2331), 'numpy.mean', 'np.mean', (['model.x'], {'axis': '(1)'}), '(model.x, axis=1)\n', (2314, 2331), True, 'import numpy as np\n'), ((2379, 2394), 'numpy.min', 
'np.min', (['model.x'], {}), '(model.x)\n', (2385, 2394), True, 'import numpy as np\n'), ((2396, 2411), 'numpy.max', 'np.max', (['model.x'], {}), '(model.x)\n', (2402, 2411), True, 'import numpy as np\n')] |
# Tutorial "Regresion Basica: Predecir eficiencia de gasolina"
# (Basic regression tutorial: predicting fuel efficiency)
# https://www.tensorflow.org/tutorials/keras/regression?hl=es-419
import os
import sys
# Silence TensorFlow's C++ log output (3 = errors only); must be set before
# TensorFlow/Keras are imported, hence the noqa'd late imports below.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np  # noqa: E402
# from scipy import stats  # noqa: E402
import matplotlib.pyplot as plt  # noqa: E402
import pandas as pd  # noqa: E402
# import tensorflow as tf  # noqa: E402
from keras import layers, backend  # noqa: E402
from keras.utils.vis_utils import plot_model  # noqa: E402
from tensorflow import keras  # noqa: E402
from keras_visualizer import visualizer  # noqa: E402
# Output directory for all generated plots/result files; set by main()
output_dir = ""
def c_mae(p_true, p_pred):
    """
    Mean absolute error between predictions and targets.
    https://stackoverflow.com/questions/69240680/how-to-get-mean-absolute-errors-mae-for-deep-learning-model
    :param p_true: original target values
    :param p_pred: predicted values
    :return: MAE over all elements
    """
    absolute_errors = np.abs(p_true - p_pred)
    return np.mean(absolute_errors)
def c_mse(p_true, p_pred):
    """
    Mean squared error between predictions and targets.
    https://stackoverflow.com/questions/69240680/how-to-get-mean-absolute-errors-mae-for-deep-learning-model
    :param p_true: original target values
    :param p_pred: predicted values
    :return: MSE over all elements
    """
    squared_errors = np.square(p_true - p_pred)
    return np.mean(squared_errors)
def c_determination(p_true, p_pred):
    """
    Coefficient of determination (R^2).
    Original posted in: https://jmlb.github.io/ml/2017/03/20/CoeffDetermination_CustomMetric4Keras/
    Fix: SS_tot must measure the spread of the *true* values around their own
    mean (the previous code centred on the mean of the predictions, which is
    not the R^2 definition and yields wrong scores).
    :param p_true: original target values
    :param p_pred: predicted values
    :return: R^2 coefficient
    """
    ss_res = np.sum(np.square(p_true - p_pred))
    ss_tot = np.sum(np.square(p_true - np.mean(p_true)))
    # backend.epsilon() guards against division by zero for constant targets
    return 1 - ss_res / (ss_tot + backend.epsilon())
def preprocess_data(p_x, p_stats):
    """
    Make data standard/normal.
    More info: https://dataakkadian.medium.com/standardization-vs-normalization-da7a3a308c64
    https://www.kdnuggets.com/2020/04/data-transformation-standardization-normalization.html
    :param p_x: input data
    :param p_stats: input data statistics (must provide "mean", "std", "min", "max")
    :return: tuple (standardized, normalized) versions of the data
    """
    center, spread = p_stats["mean"], p_stats["std"]
    lower, upper = p_stats["min"], p_stats["max"]
    standardized = (p_x - center) / spread    # mean = 0, std = 1
    normalized = (p_x - lower) / (upper - lower)  # range 0-1
    return standardized, normalized
def build_rnn(p_input, p_output):
    """
    Build Keras Recurrent Neural Network model.

    The GRU layer width sits halfway between the input and output widths
    (rounded up), followed by a ReLU output layer.
    :param p_input: input size
    :param p_output: output size
    :return: compiled model
    """
    hidden_units = int(p_output + np.ceil(np.abs(p_input - p_output) / 2))
    p_model = keras.Sequential([
        layers.GRU(hidden_units, activation='linear', input_shape=(2, p_input)),
        layers.Dense(p_output, activation='relu')
    ])
    p_model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae', 'mse'])
    print_model(p_model)
    return p_model
def build_ffnn(p_input, p_output):
    """
    Build Keras Feed-Forward Neural Network model.

    The hidden layer width sits halfway between the input and output widths
    (rounded up), followed by a ReLU output layer.
    :param p_input: input size
    :param p_output: output size
    :return: compiled model
    """
    n_inter = int(np.abs(p_input - p_output) / 2)
    # Fix: np.ceil returns a float, but Dense expects an integer unit count;
    # cast to int, matching what build_rnn already does.
    p_model = keras.Sequential([
        layers.Dense(int(p_output + np.ceil(n_inter)), activation='linear', input_shape=[p_input]),
        layers.Dense(p_output, activation='relu')
    ])
    # optimizer = tf.keras.optimizers.RMSprop(0.001)
    optimizer = keras.optimizers.Adam(learning_rate=0.001)
    p_model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])
    print_model(p_model)
    return p_model
def print_model(p_model):
    """
    Export the Keras model as a layer scheme image and a node-graph image.
    Export failures (e.g. missing graphviz) are reported but not fatal.
    :param p_model: Keras model
    """
    try:
        scheme_path = output_dir + "/model_scheme.png"
        plot_model(p_model, to_file=scheme_path, show_shapes=True,
                   show_layer_names=True)
        visualizer(p_model, filename=output_dir + "/model_graph", format='png')
    except Exception as ex:
        # Graph export is best-effort only; report and continue.
        print(ex)
class PrintDot(keras.callbacks.Callback):
    """
    Display training progress by printing a single dot for each completed
    epoch, wrapping to a fresh line every 100 epochs.
    """

    @staticmethod
    def on_epoch_end(epoch, _logs):
        # Line break every 100 epochs keeps the dot stream readable
        if epoch % 100 == 0:
            print('')
        print('.', end='')
def plot_history(p_history, p_k):
    """
    Plot the train/validation error curves (MAE and MSE) over epochs and
    save the figure for climatic zone ``p_k``.
    :param p_history: Keras History object returned by model.fit
    :param p_k: climatic zone (used in the output file name)
    """
    hist = pd.DataFrame(p_history.history)
    hist['epoch'] = p_history.epoch
    fig, axes = plt.subplots(1, 2)
    # (train metric key, validation metric key, y-axis label) per subplot
    metric_specs = [
        ('mae', 'val_mae', "Mean Abs Error [PCI]"),
        ('mse', 'val_mse', "Mean Square Error [$PCI^2$]"),
    ]
    for ax, (train_key, val_key, y_label) in zip(axes, metric_specs):
        plt.setp(ax, xlabel="Epoch", ylabel=y_label)
        ax.plot(hist['epoch'], hist[train_key], label='Train Error')
        ax.plot(hist['epoch'], hist[val_key], label='Val Error')
        ax.legend()
        ax.set_xlim([min(hist['epoch']), max(hist['epoch'])])
        # y-limits follow the validation curve
        ax.set_ylim([min(hist[val_key]), max(hist[val_key])])
    plt.tight_layout()
    plt.savefig(output_dir + "/model_history_" + p_k + ".png")
    plt.clf()
def plot_data(p_1, p_2, p_3, p_4, p_5, p_6, p_k):
    """
    Plot PCI histograms for the six train/test data variants (raw,
    standardized, normalized) and save the figure for climatic zone ``p_k``.
    :param p_1: raw train data
    :param p_2: raw test data
    :param p_3: standardized train data
    :param p_4: standardized test data
    :param p_5: normalized train data
    :param p_6: normalized test data
    :param p_k: climatic zone (used in the output file name)
    """
    fig, axes = plt.subplots(3, 2)
    x_labels = ["Train (PCI)", "Test (PCI)", "Train standard (PCI)",
                "Test standard (PCI)", "Train normal (PCI)", "Test normal (PCI)"]
    frames = [p_1, p_2, p_3, p_4, p_5, p_6]
    # axes.flatten() yields the subplots row-major, matching the label order
    for ax, x_label, frame in zip(axes.flatten(), x_labels, frames):
        plt.setp(ax, xlabel=x_label, ylabel="Count")
        ax.hist(frame["PCI"], bins=25, rwidth=0.8)
    plt.tight_layout()
    plt.savefig(output_dir + "/model_train_" + p_k + ".png")
    plt.clf()
def plot_evaluation(p_test_predictions, p_test_labels, p_k):
    """
    Plot predicted-vs-true PCI values and the prediction-error histogram,
    then save the figure for climatic zone ``p_k``.
    :param p_test_predictions: predicted PCI_F values
    :param p_test_labels: frame holding the true 'PCI_F' column
    :param p_k: climatic zone (used in the output file name)
    """
    fig, (scatter_ax, hist_ax) = plt.subplots(1, 2)
    plt.setp(scatter_ax, xlabel="True Values [PCI_F]", ylabel="Predictions [PCI_F]")
    plt.setp(hist_ax, xlabel="Prediction Error [PCI_F]", ylabel="Count")
    true_values = p_test_labels["PCI_F"]
    scatter_ax.scatter(true_values, p_test_predictions, c="r", marker="2")
    # Identity line: points on it are perfect predictions
    scatter_ax.plot([0, 100], [0, 100])
    scatter_ax.set_xlim([0, 100])
    scatter_ax.set_ylim([0, 100])
    # https://stackoverflow.com/questions/27872723/is-there-a-clean-way-to-generate-a-line-histogram-chart-in-python
    hist_ax.hist(p_test_predictions - true_values, bins=25, rwidth=0.8, color="blue")
    plt.tight_layout()
    plt.savefig(output_dir + "/model_evaluation_" + p_k + ".png")
    plt.clf()
def model_parameters(model, p_columns, p_k):
    """
    Write the first-layer weight of each input feature to a text summary
    file for climatic zone ``p_k``.
    :param model: trained Keras model
    :param p_columns: input column names, aligned with the first layer's rows
    :param p_k: climatic zone (used in the output file name)
    """
    first_layer_weights = model.layers[0].get_weights()[0]
    summary_path = output_dir + "/weight_" + p_k + ".txt"
    with open(summary_path, "w", encoding="utf-8") as txt_file:
        for i, row in enumerate(first_layer_weights):
            txt_file.write(p_columns[i] + ", " + np.array2string(row[0]) + "\n")
def main(p_output, p_table, p_columns, p_targets, keras_model="ffnn", n_tests=1):
    """
    Train and evaluate a model per climatic zone and write the averaged
    metrics to a results file.
    :param p_output: output directory for plots and result files
    :param p_table: path to the input CSV table (';'-separated)
    :param p_columns: input feature columns (extended in place with p_targets)
    :param p_targets: target columns (e.g. ["PCI_F"])
    :param keras_model: keras model type, "rnn" or "ffnn" (default)
    :param n_tests: number of train/evaluate repetitions per zone
    :return:
    """
    global output_dir
    output_dir = p_output
    p_columns.extend(p_targets)
    dataset_raw = pd.read_csv(p_table, sep=";", encoding="unicode_escape", low_memory=False)
    # Filter desired columns
    dataset_raw = dataset_raw[p_columns + ["CLIMATIC ZONE"]]
    # Delete unknown data
    dataset = dataset_raw.dropna()
    print("- Original size:", np.shape(dataset_raw)[0], "rows\n- After drop NA:", np.shape(dataset)[0], "rows")
    climatic_zones = [
        "ALL",
        "LLUVIOSA - CÁLIDA",
        "LLUVIOSA - MEDIA",
        "LLUVIOSA - TEMPLADA",
        "LLUVIOSA - FUERA DE RANGO",
        "POCO LLUVIOSA - CÁLIDA",
        "POCO LLUVIOSA - MEDIA",
        "POCO LLUVIOSA - TEMPLADA",
        "POCO LLUVIOSA - FUERA DE RANGO",
    ]
    train_perc = 0.7
    array_results = []
    for zone in climatic_zones:
        # Accumulators for the averages over n_tests repetitions
        n_mae = 0
        n_mse = 0
        n_det = 0
        # Fix: work on a copy. DataFrame.pop() below mutates its frame in
        # place, and the original code popped "CLIMATIC ZONE" from the shared
        # `dataset` when zone == "ALL", breaking the filter for later zones.
        if zone != "ALL":
            dataset_cz = dataset[dataset["CLIMATIC ZONE"] == zone].copy()
        else:
            dataset_cz = dataset.copy()
        print("Number of rows:", np.shape(dataset_cz)[0], "rows")
        dataset_cz.pop("CLIMATIC ZONE")
        # dataset_cz.pop("AADT_CUM")
        # dataset_cz.pop("AADTT_CUM")
        # dataset_cz.pop("KESAL_CUM")
        if np.shape(dataset_cz)[0] > 1:
            # Convert data to float (decimal comma -> decimal point)
            dataset_cz = dataset_cz.applymap(str).replace([","], ["."], regex=True).applymap(float)
            # Divide dataset in train and test groups
            train_dataset = dataset_cz.sample(frac=train_perc)
            test_dataset = dataset_cz.drop(train_dataset.index)
            # General statistics (targets excluded)
            train_stats = train_dataset.describe()
            for x in p_targets:
                train_stats.pop(x)
            train_stats = train_stats.transpose()
            # Objective value
            train_labels = pd.concat([train_dataset.pop(x) for x in p_targets], axis=1)
            test_labels = pd.concat([test_dataset.pop(x) for x in p_targets], axis=1)
            # Normalising data
            # normed_train_data = preprocess_data(train_dataset, train_stats)[0].fillna(0)  # Standardization
            # normed_test_data = preprocess_data(test_dataset, train_stats)[0].fillna(0)
            normed_train_data = preprocess_data(train_dataset, train_stats)[1].fillna(0)  # Normalization
            normed_test_data = preprocess_data(test_dataset, train_stats)[1].fillna(0)
            plot_data(train_dataset, test_dataset,
                      preprocess_data(train_dataset, train_stats)[0].fillna(0),
                      preprocess_data(test_dataset, train_stats)[0].fillna(0),
                      preprocess_data(train_dataset, train_stats)[1].fillna(0),
                      preprocess_data(test_dataset, train_stats)[1].fillna(0), zone)
            for n in range(0, n_tests):
                print("[[%s (%d/%d)]]" % (zone, n + 1, n_tests))
                # Keras model
                if keras_model == "rnn":
                    model = build_rnn(len(normed_train_data.keys()), len(p_targets))
                else:
                    model = build_ffnn(len(normed_train_data.keys()), len(p_targets))
                # model_parameters(model, p_columns, k)
                # The patience parameter is the amount of epochs to check for improvement
                early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=100)
                # Number of rows accessed in each epoch (capped at 500)
                batch_size = np.shape(normed_train_data)[0] if np.shape(normed_train_data)[0] < 500 else 500
                count = 0
                while True:
                    count += 1
                    history = model.fit(normed_train_data, train_labels,
                                        epochs=5000,
                                        batch_size=batch_size,
                                        validation_split=(1 - train_perc),
                                        verbose=0,  # to see the training process
                                        callbacks=[early_stop, PrintDot()])
                    # Model evaluation
                    print("Model evaluation")
                    # loss, mae, mse, c_det = model.evaluate(normed_test_data, test_labels, verbose=2)
                    # Predictions: keep the last field of each output row
                    test_predictions = model.predict(normed_test_data)
                    predictions_array = []
                    for row in test_predictions:
                        predictions_array.append(row[-1])
                    test_predictions = np.array(predictions_array)
                    mae = c_mae(test_labels["PCI_F"], test_predictions)
                    mse = c_mse(test_labels["PCI_F"], test_predictions)
                    det = c_determination(test_labels["PCI_F"], test_predictions)
                    # Retry (up to 5 fits) while the metric is NaN/Inf.
                    # Fix: parenthesised the NaN/Inf check -- the original
                    # `not np.isnan(mae) or np.isinf(mae)` stopped retrying
                    # as soon as mae was infinite.
                    if not (np.isnan(mae) or np.isinf(mae)) or count > 5:
                        break
                if np.isnan(mae) or np.isinf(mae):
                    print("Fit unreachable on %s (%d/%d)" % (zone, n + 1, n_tests))
                    break
                n_mae = n_mae + mae
                # Fix: accumulate mse into n_mse (the original added it to n_mae)
                n_mse = n_mse + mse
                n_det = n_det + det
                # Visualise training progress
                try:
                    plot_history(history, zone)
                    plot_evaluation(test_predictions, test_labels, zone)
                except Exception as e:
                    print(e)
            array_results.append([zone, np.shape(dataset_cz)[0],
                                  float(n_mae / n_tests),
                                  float(n_mse / n_tests),
                                  float(n_det / n_tests)])
    # Redirect stdout to the results file while printing the summary tree
    original_stdout = sys.stdout
    with open(output_dir + "/results.txt", "w", encoding="utf-8") as f:
        sys.stdout = f
        for k in array_results:
            print("\U0000256D\U00002500Climatic zone:", k[0])
            print("\U0000251C\U00002500Number of rows:", k[1])
            print("\U0000251C\U00002500MAE: %.2f PCI" % k[2])
            print("\U0000251C\U00002500MSE: %.2f PCI" % k[3])
            print("\U00002570\U00002500DET: R\U000000B2 = %.2f" % k[4], "\n")
    sys.stdout = original_stdout
| [
"pandas.read_csv",
"keras_visualizer.visualizer",
"numpy.array2string",
"keras.utils.vis_utils.plot_model",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.array",
"keras.layers.Dense",
"numpy.mean",
"pandas.DataFrame",
"keras.backend.epsilon",
"numpy.isinf",
"numpy.abs",
"numpy.ceil",
... | [((3318, 3360), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3339, 3360), False, 'from tensorflow import keras\n'), ((4321, 4352), 'pandas.DataFrame', 'pd.DataFrame', (['p_history.history'], {}), '(p_history.history)\n', (4333, 4352), True, 'import pandas as pd\n'), ((4412, 4430), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (4424, 4430), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4496), 'matplotlib.pyplot.setp', 'plt.setp', (['ax1'], {'xlabel': '"""Epoch"""', 'ylabel': '"""Mean Abs Error [PCI]"""'}), "(ax1, xlabel='Epoch', ylabel='Mean Abs Error [PCI]')\n", (4444, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4568), 'matplotlib.pyplot.setp', 'plt.setp', (['ax2'], {'xlabel': '"""Epoch"""', 'ylabel': '"""Mean Square Error [$PCI^2$]"""'}), "(ax2, xlabel='Epoch', ylabel='Mean Square Error [$PCI^2$]')\n", (4509, 4568), True, 'import matplotlib.pyplot as plt\n'), ((5106, 5124), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5122, 5124), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5187), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir + '/model_history_' + p_k + '.png')"], {}), "(output_dir + '/model_history_' + p_k + '.png')\n", (5140, 5187), True, 'import matplotlib.pyplot as plt\n'), ((5192, 5201), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5199, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5444, 5462), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {}), '(3, 2)\n', (5456, 5462), True, 'import matplotlib.pyplot as plt\n'), ((5467, 5518), 'matplotlib.pyplot.setp', 'plt.setp', (['ax1'], {'xlabel': '"""Train (PCI)"""', 'ylabel': '"""Count"""'}), "(ax1, xlabel='Train (PCI)', ylabel='Count')\n", (5475, 5518), True, 'import matplotlib.pyplot as plt\n'), ((5523, 5573), 'matplotlib.pyplot.setp', 'plt.setp', (['ax2'], {'xlabel': '"""Test (PCI)"""', 
'ylabel': '"""Count"""'}), "(ax2, xlabel='Test (PCI)', ylabel='Count')\n", (5531, 5573), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5638), 'matplotlib.pyplot.setp', 'plt.setp', (['ax3'], {'xlabel': '"""Train standard (PCI)"""', 'ylabel': '"""Count"""'}), "(ax3, xlabel='Train standard (PCI)', ylabel='Count')\n", (5586, 5638), True, 'import matplotlib.pyplot as plt\n'), ((5643, 5702), 'matplotlib.pyplot.setp', 'plt.setp', (['ax4'], {'xlabel': '"""Test standard (PCI)"""', 'ylabel': '"""Count"""'}), "(ax4, xlabel='Test standard (PCI)', ylabel='Count')\n", (5651, 5702), True, 'import matplotlib.pyplot as plt\n'), ((5707, 5765), 'matplotlib.pyplot.setp', 'plt.setp', (['ax5'], {'xlabel': '"""Train normal (PCI)"""', 'ylabel': '"""Count"""'}), "(ax5, xlabel='Train normal (PCI)', ylabel='Count')\n", (5715, 5765), True, 'import matplotlib.pyplot as plt\n'), ((5770, 5827), 'matplotlib.pyplot.setp', 'plt.setp', (['ax6'], {'xlabel': '"""Test normal (PCI)"""', 'ylabel': '"""Count"""'}), "(ax6, xlabel='Test normal (PCI)', ylabel='Count')\n", (5778, 5827), True, 'import matplotlib.pyplot as plt\n'), ((6312, 6330), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6328, 6330), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6392), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir + '/model_train_' + p_k + '.png')"], {}), "(output_dir + '/model_train_' + p_k + '.png')\n", (6347, 6392), True, 'import matplotlib.pyplot as plt\n'), ((6397, 6406), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6404, 6406), True, 'import matplotlib.pyplot as plt\n'), ((6673, 6691), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (6685, 6691), True, 'import matplotlib.pyplot as plt\n'), ((6696, 6769), 'matplotlib.pyplot.setp', 'plt.setp', (['ax1'], {'xlabel': '"""True Values [PCI_F]"""', 'ylabel': '"""Predictions [PCI_F]"""'}), "(ax1, xlabel='True Values [PCI_F]', ylabel='Predictions [PCI_F]')\n", (6704, 6769), True, 
'import matplotlib.pyplot as plt\n'), ((6774, 6838), 'matplotlib.pyplot.setp', 'plt.setp', (['ax2'], {'xlabel': '"""Prediction Error [PCI_F]"""', 'ylabel': '"""Count"""'}), "(ax2, xlabel='Prediction Error [PCI_F]', ylabel='Count')\n", (6782, 6838), True, 'import matplotlib.pyplot as plt\n'), ((7440, 7458), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7456, 7458), True, 'import matplotlib.pyplot as plt\n'), ((7463, 7524), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir + '/model_evaluation_' + p_k + '.png')"], {}), "(output_dir + '/model_evaluation_' + p_k + '.png')\n", (7474, 7524), True, 'import matplotlib.pyplot as plt\n'), ((7529, 7538), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7536, 7538), True, 'import matplotlib.pyplot as plt\n'), ((8649, 8723), 'pandas.read_csv', 'pd.read_csv', (['p_table'], {'sep': '""";"""', 'encoding': '"""unicode_escape"""', 'low_memory': '(False)'}), "(p_table, sep=';', encoding='unicode_escape', low_memory=False)\n", (8660, 8723), True, 'import pandas as pd\n'), ((875, 898), 'numpy.abs', 'np.abs', (['(p_pred - p_true)'], {}), '(p_pred - p_true)\n', (881, 898), True, 'import numpy as np\n'), ((1224, 1250), 'numpy.square', 'np.square', (['(p_pred - p_true)'], {}), '(p_pred - p_true)\n', (1233, 1250), True, 'import numpy as np\n'), ((1534, 1560), 'numpy.square', 'np.square', (['(p_true - p_pred)'], {}), '(p_true - p_pred)\n', (1543, 1560), True, 'import numpy as np\n'), ((2467, 2493), 'numpy.abs', 'np.abs', (['(p_input - p_output)'], {}), '(p_input - p_output)\n', (2473, 2493), True, 'import numpy as np\n'), ((3625, 3732), 'keras.utils.vis_utils.plot_model', 'plot_model', (['p_model'], {'to_file': "(output_dir + '/model_scheme.png')", 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(p_model, to_file=output_dir + '/model_scheme.png', show_shapes=\n True, show_layer_names=True)\n", (3635, 3732), False, 'from keras.utils.vis_utils import plot_model\n'), ((3755, 3826), 
'keras_visualizer.visualizer', 'visualizer', (['p_model'], {'filename': "(output_dir + '/model_graph')", 'format': '"""png"""'}), "(p_model, filename=output_dir + '/model_graph', format='png')\n", (3765, 3826), False, 'from keras_visualizer import visualizer\n'), ((2641, 2682), 'keras.layers.Dense', 'layers.Dense', (['p_output'], {'activation': '"""relu"""'}), "(p_output, activation='relu')\n", (2653, 2682), False, 'from keras import layers, backend\n'), ((3030, 3056), 'numpy.abs', 'np.abs', (['(p_input - p_output)'], {}), '(p_input - p_output)\n', (3036, 3056), True, 'import numpy as np\n'), ((3199, 3240), 'keras.layers.Dense', 'layers.Dense', (['p_output'], {'activation': '"""relu"""'}), "(p_output, activation='relu')\n", (3211, 3240), False, 'from keras import layers, backend\n'), ((8907, 8928), 'numpy.shape', 'np.shape', (['dataset_raw'], {}), '(dataset_raw)\n', (8915, 8928), True, 'import numpy as np\n'), ((8959, 8976), 'numpy.shape', 'np.shape', (['dataset'], {}), '(dataset)\n', (8967, 8976), True, 'import numpy as np\n'), ((1601, 1616), 'numpy.mean', 'np.mean', (['p_pred'], {}), '(p_pred)\n', (1608, 1616), True, 'import numpy as np\n'), ((1653, 1670), 'keras.backend.epsilon', 'backend.epsilon', ([], {}), '()\n', (1668, 1670), False, 'from keras import layers, backend\n'), ((7975, 7998), 'numpy.array2string', 'np.array2string', (['row[0]'], {}), '(row[0])\n', (7990, 7998), True, 'import numpy as np\n'), ((9595, 9615), 'numpy.shape', 'np.shape', (['dataset_cz'], {}), '(dataset_cz)\n', (9603, 9615), True, 'import numpy as np\n'), ((9794, 9814), 'numpy.shape', 'np.shape', (['dataset_cz'], {}), '(dataset_cz)\n', (9802, 9814), True, 'import numpy as np\n'), ((11882, 11945), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(100)'}), "(monitor='val_loss', patience=100)\n", (11911, 11945), False, 'from tensorflow import keras\n'), ((3128, 3144), 'numpy.ceil', 'np.ceil', (['n_inter'], {}), 
'(n_inter)\n', (3135, 3144), True, 'import numpy as np\n'), ((13107, 13134), 'numpy.array', 'np.array', (['predictions_array'], {}), '(predictions_array)\n', (13115, 13134), True, 'import numpy as np\n'), ((13485, 13498), 'numpy.isnan', 'np.isnan', (['mae'], {}), '(mae)\n', (13493, 13498), True, 'import numpy as np\n'), ((13502, 13515), 'numpy.isinf', 'np.isinf', (['mae'], {}), '(mae)\n', (13510, 13515), True, 'import numpy as np\n'), ((14029, 14049), 'numpy.shape', 'np.shape', (['dataset_cz'], {}), '(dataset_cz)\n', (14037, 14049), True, 'import numpy as np\n'), ((2566, 2582), 'numpy.ceil', 'np.ceil', (['n_inter'], {}), '(n_inter)\n', (2573, 2582), True, 'import numpy as np\n'), ((12032, 12059), 'numpy.shape', 'np.shape', (['normed_train_data'], {}), '(normed_train_data)\n', (12040, 12059), True, 'import numpy as np\n'), ((13407, 13420), 'numpy.isinf', 'np.isinf', (['mae'], {}), '(mae)\n', (13415, 13420), True, 'import numpy as np\n'), ((12066, 12093), 'numpy.shape', 'np.shape', (['normed_train_data'], {}), '(normed_train_data)\n', (12074, 12093), True, 'import numpy as np\n'), ((13390, 13403), 'numpy.isnan', 'np.isnan', (['mae'], {}), '(mae)\n', (13398, 13403), True, 'import numpy as np\n')] |
__author__ = 'Dante'
import os
import math
import machines
import density_weight as dw
from structures.isozyme import BrendaIsozyme as bi
from databases import db_queries as dbq
from structures import fingerprinter as fptr
import numpy as np
import routines
import pybel
# Absolute path to the sibling "chem" data directory (<repo root>/chem),
# resolved relative to this file's location; SDF experiment files live there.
CHEMPATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "chem")
def al_run(org, ec, neg, k, beta=1, pos=None, ent=False, kernel='rbf', degree=3, zinc=True, zinc_tol_l=1, zinc_tol_r=1, greedy=False, vl=None, simfp=fptr.integer_sim, C=5, target_bits=None, screen=None):
	"""Score and rank candidate substrates for isozyme ``k`` of ``ec`` in ``org``.

	Trains an SVM on the known positive/negative substrate fingerprints,
	pulls candidate molecules from ZINC (default) or KEGG, weights each
	candidate's uncertainty score by its density (mean fingerprint
	similarity to the candidate pool), then writes a ranked SDF report and
	a plain-text list of the raw scores.

	:param neg: SDF file of negative training examples.
	:param pos: optional SDF file of additional positive examples.
	:param beta: density-weight exponent forwarded to dw.weight.
	:param ent: use entropy of class probabilities as the uncertainty
		measure instead of distance to the separating hyperplane.
	:param zinc: pull candidates from ZINC when True, otherwise from KEGG.
	:param zinc_tol_l, zinc_tol_r: left/right mass-tolerance factors for
		the ZINC query window (ZINC branch only).
	:param greedy: multiply each weight by the predicted label, favouring
		predicted positives (exploitation over exploration).
	:param vl: vendor list forwarded to dw.generate_report (ZINC only).
	:param simfp: pairwise fingerprint similarity function.
	:param C, kernel, degree: SVM hyperparameters.
	:param target_bits: fingerprint-bit query for the database pull.
	:param screen: optional 'SMARTS|SMARTS' pair; a candidate passes if it
		matches at least one of the two patterns (OR filter, max two).
	"""
	#Collects isozyme data into the Isozyme class.
	a = bi(org, ec)
	if pos:
		a.add_from_sdf(pos, k, pos=True)
	a.add_from_sdf(neg, k, pos=False)
	#Two branches here; one pulls potential test data from ZINC, another pulls from KEGG.
	if zinc:
		# Keep only pages that have vendor info and no undefined R-groups.
		res_ = [(page["smiles"], fptr.integer_fp(str(page["smiles"])), page["vendors"], page["_id"]) for page in dbq.zinc_pull(target_bits, a.mass_avg[k], a.mass_std[k], zinc_tol_l=zinc_tol_l, zinc_tol_r=zinc_tol_r) if u'R' not in page["smiles"] and 'vendors' in page]
		# Drop candidates whose fingerprint could not be computed.
		res_s = [rr for rr in res_ if rr[1] is not None]
		if screen is not None:
			patt = [pybel.Smarts(smarts) for smarts in screen.split('|')]
			if len(patt) > 2:
				raise IOError('al_run only supports OR filters for two SMARTS queries at this time.')
			res = [rr for rr in res_s if len(patt[0].findall(pybel.readstring('smi', str(rr[0])))) > 0 or len(patt[1].findall(pybel.readstring('smi', str(rr[0])))) > 0]
		else:
			res = res_s
	else:
		res = [(page["SMILES"], np.array(fptr.integer_fp(str(page["SMILES"])))) for page in dbq.kegg_pull(target_bits) if u'R' not in page["SMILES"] and np.array(fptr.integer_fp(str(page["SMILES"]))) is not None]
	# Train on the curated examples and score every candidate.
	labels = machines.svm_clf(a.pos[k], a.neg[k], res, kernel=kernel, degree=degree, ent=ent, C=C)
	# Candidate feature matrix; 313 is the expected fingerprint length.
	test_a = np.vstack(tuple([np.array(x[1]) for x in res if x[1] is not None and len(x[1]) == 313]))
	# tc_u[i] is the density of candidate i (mean similarity to the pool).
	tc_u = dw.avg_proximity(test_a, test_a, f=simfp)
	if greedy:
		if ent:
			xis = [l * dw.weight(dw.entropy(p), tc_u[i], beta=beta) for i, (l, p) in enumerate(labels)]
		else:
			xis = [l * dw.weight(dw.hyper_distance(d), tc_u[i], beta=beta) for i, (l, d) in enumerate(labels)]
	else:
		if ent:
			xis = [dw.weight(dw.entropy(p), tc_u[i], beta=beta) for i, (l, p) in enumerate(labels)]
		else:
			xis = [dw.weight(dw.hyper_distance(d), tc_u[i], beta=beta) for i, (l, d) in enumerate(labels)]
	# Emit the ranked SDF report (highest score first) plus raw scores.
	if zinc:
		dw.generate_report(sorted(zip([s for s, fp, vend, z in res if fp is not None], xis, [lab[0] for lab in labels], [vend for s, fp, vend, z in res if fp is not None], [z for s, fp, vend, z in res if fp is not None]), key=lambda y: y[1], reverse=True), vendors_list=vl, outfile="%s_ec%s_beta%s_%s_zinc%s%s_C%s.sdf" % (org, ec.replace('.', '_'), str(beta), kernel, str(zinc_tol_l).replace('.', '_'), str(zinc_tol_r).replace('.', '_'), str(C)))
		f = open("%s_ec%s_beta%s_%s_zinc%s%s_C%s.txt" % (org, ec.replace('.', '_'), str(beta), kernel, str(zinc_tol_l).replace('.', '_'), str(zinc_tol_r).replace('.', '_'), str(C)), 'w')
	else:
		dw.generate_report(sorted(zip([s for s, fp in res], xis, [lab[0] for lab in labels]), key=lambda y: y[1], reverse=True), outfile="%s_ec%s_beta%s_%s.sdf" % (org, ec.replace('.', '_'), str(beta), kernel), zinc=False)
		f = open("%s_ec%s_beta%s_%s.txt" % (org, ec.replace('.', '_'), str(beta), kernel), 'w')
	for score in xis:
		f.write(str(score) + '\n')
	f.close()
def dissim_run(org, ec, neg, k, pos=None, zinc=False, zinc_tol_l=1, zinc_tol_r=1, vl=None, simfp=fptr.integer_sim, target_bits=None, screen=None):
	"""Rank ZINC candidates by dissimilarity to the training-set centroid.

	Writes two ranked SDF reports: one ordered by raw similarity to the
	centroid of the known substrates (``*_dissim_*``) and one by a
	density-weighted log-dissimilarity (``*_dissimcentral_*``).

	NOTE(review): the ``zinc`` parameter is accepted but never read —
	candidates are always pulled from ZINC here. ``bits`` and
	``test_centroid`` below are also computed but unused; presumably
	leftovers — confirm before removing.
	"""
	# Collects isozyme data into the Isozyme class.
	a = bi(org, ec)
	bits = a.analyze_reactions()
	if pos:
		a.add_from_sdf(pos, k, pos=True)
	a.add_from_sdf(neg, k, pos=False)
	#Two branches here; one pulls potential test data from ZINC, another pulls from KEGG.
	res_ = [(page["smiles"], fptr.integer_fp(str(page["smiles"])), page["vendors"], page["_id"]) for page in dbq.zinc_pull(target_bits, a.mass_avg[k], a.mass_std[k], zinc_tol_l=zinc_tol_l, zinc_tol_r=zinc_tol_r) if u'R' not in page["smiles"] and 'vendors' in page]
	res_s = [rr for rr in res_ if rr[1] is not None]
	if screen is not None:
		patt = [pybel.Smarts(smarts) for smarts in screen.split('|')]
		if len(patt) > 2:
			raise IOError('al_run only supports OR filters for two SMARTS queries at this time.')
		res = [rr for rr in res_s if len(patt[0].findall(pybel.readstring('smi', str(rr[0])))) > 0 or len(patt[1].findall(pybel.readstring('smi', str(rr[0])))) > 0]
	else:
		res = res_s
	# Centroid of all known positive and negative fingerprints.
	x_pos_array = np.vstack(tuple([t[1] for t in a.pos[k]]))
	x_neg_array = np.vstack(tuple([t[1] for t in a.neg[k]]))
	x_array = np.vstack((x_pos_array, x_neg_array))
	centroid = np.mean(x_array, axis=0)
	test_a = np.vstack(tuple([np.array(x[1]) for x in res if x[1] is not None]))
	test_centroid = np.mean(test_a, axis=0)
	# Density of each candidate within the candidate pool.
	tc_u = dw.avg_proximity(test_a, test_a, f=simfp)
	# xis_a: similarity to centroid; xis_b: density-weighted -log2 similarity.
	xis_a = [(x[0], fptr.integer_sim(centroid, x[1]), 1, x[2], x[3]) for x in res if x[1] is not None]
	xis_b = [(x[0], tc_u[i] * (-math.log(fptr.integer_sim(centroid, x[1]), 2)), 1, x[2], x[3]) for i, x in enumerate(res) if x[1] is not None]
	dw.generate_report(sorted(xis_a, key=lambda y: y[1]), vendors_list=vl, outfile="%s_ec%s_dissim_zinc%s%s.sdf" % (org, ec.replace('.', '_'), str(zinc_tol_l).replace('.', '_'), str(zinc_tol_r).replace('.', '_')))
	dw.generate_report(sorted(xis_b, key=lambda y: y[1]), vendors_list=vl, outfile="%s_ec%s_dissimcentral_zinc%s%s.sdf" % (org, ec.replace('.', '_'), str(zinc_tol_l).replace('.', '_'), str(zinc_tol_r).replace('.', '_')))
def al_xval_ins(org, ec, k, neg=None, pos=None, beta=1.0, kernel='rbf', gamma=0.005, iterations=100, batch=1, C=1.0, initial=2, decf=True, simfp=fptr.integer_sim):
	"""Run cross-validated active-learning selection on isozyme ``k``.

	With curated negatives (``neg``), loads the SDF training data and runs
	the density-weighted selection; without them, falls back to the
	random-selection baseline on the isozyme's own data.
	"""
	isozyme = bi(org, ec)
	# Parameters shared by both selection strategies.
	sel_kwargs = dict(beta=beta, batch=batch, kernel=kernel, iterations=iterations, initial=initial, c=C, gamma=gamma, decf=decf, simfp=simfp)
	if neg is None:
		# No curated negatives: random-selection baseline.
		isozyme.xval_selection_random(k, **sel_kwargs)
		return
	if pos:
		isozyme.add_from_sdf(pos, k, pos=True)
	isozyme.add_from_sdf(neg, k, pos=False)
	isozyme.xval_selection(k, **sel_kwargs)
def al_exp_val(org, ec, k, exp, neg=None, beta=1.0, kernel='rbf', degree=3, gamma=0.005, iterations=100, batch=1, C=1.0, initial=2, decf=False, random_seed=None, pos=None, simfp=fptr.integer_sim):
	"""Validate active learning against an experimentally-labelled SDF.

	Reads ``exp`` (an SDF under CHEMPATH whose molecules carry an integer
	``label`` property), adds those molecules to the isozyme's data, and
	runs experimental-validation selection with them excluded from the
	query pool. Without curated negatives the random baseline is used.
	"""
	isozyme = bi(org, ec)
	# Parameters shared by both selection strategies.
	sel_kwargs = dict(c=C, gamma=gamma, iterations=iterations, batch=batch, degree=degree, kernel=kernel, beta=beta, decf=decf, seed=random_seed, simfp=simfp, initial=initial)
	if neg is None:
		isozyme.expval_selection_random(k, exp, **sel_kwargs)
		return
	if pos:
		isozyme.add_from_sdf(pos, k, pos=True)
	isozyme.add_from_sdf(neg, k, pos=False)
	excluded = []
	for mol in pybel.readfile('sdf', os.path.join(CHEMPATH, exp)):
		canonical = mol.write('can').strip()
		isozyme.add_from_smiles(canonical, k, int(mol.data['label']))
		excluded.append(canonical)
	isozyme.expval_selection(k, excluded, **sel_kwargs)
def al_exp_ins(org, ec, k, exp, neg=None, beta=1.0, kernel='rbf', degree=3, gamma=0.005, iterations=100, batch=1, C=1.0, initial=2, decf=False, random_seed=None, fp='FP4', simfp=fptr.integer_sim):
	"""Inspect experimental active-learning selection on isozyme ``k``.

	Loads curated negatives (or random ones), appends the experimentally
	labelled molecules from the SDF ``exp`` (under CHEMPATH, each carrying
	an integer ``label`` property), builds the labelled fingerprint matrix
	and delegates to routines.dw_exp_ins, whose result is returned.

	:param fp: fingerprint type, 'FP4' (precomputed on the isozyme data)
		or 'FP2' (reconstructed from SMILES).
	:raises IOError: if ``fp`` is neither 'FP2' nor 'FP4'.
	"""
	a = bi(org, ec)
	if neg is not None:
		a.add_from_sdf(neg, k, pos=False)
	else:
		a.random_negatives(k)
	excl = []
	for mol in pybel.readfile('sdf', os.path.join(CHEMPATH, exp)):
		smi = mol.write('can').strip()
		a.add_from_smiles(smi, k, int(mol.data['label']))
		excl.append(smi)
	smiles_access = [t[0] for t in a.pos[k]] + [t[0] for t in a.neg[k]]
	# Longest SMILES determines the fixed-width string field in y.
	n = max(len(str(x)) for x in smiles_access)
	# Build the feature matrix once per fingerprint type; the label/target
	# assembly below is shared (previously duplicated in both branches).
	if fp == 'FP4':
		# FP4 fingerprints are already stored on the isozyme tuples.
		x_pos_array = np.vstack(tuple(t[1] for t in a.pos[k]))
		x_neg_array = np.vstack(tuple(t[1] for t in a.neg[k]))
	elif fp == 'FP2':
		# FP2 fingerprints must be reconstructed from the SMILES strings.
		x_pos_array = np.vstack(tuple(np.array(fptr.reconstruct_fp(t[0], fptype='FP2')) for t in a.pos[k]))
		x_neg_array = np.vstack(tuple(np.array(fptr.reconstruct_fp(t[0], fptype='FP2')) for t in a.neg[k]))
	else:
		raise IOError("Valid values for fp are FP2 and FP4.")
	y_obj = [1] * x_pos_array.shape[0] + [-1] * x_neg_array.shape[0]
	x = np.vstack((x_pos_array, x_neg_array))
	# list(...) keeps this working on Python 3, where zip is an iterator.
	y = np.array(list(zip(y_obj, smiles_access)), dtype=[('label', 'i4'), ('smiles', '|S%s' % str(n))])
	outfile = "al_expins_%s_%s_beta%s_batch%s_%s_rseed%s" % (org, ec, str(beta).replace('.', ''), str(batch), kernel, str(random_seed))
	# Return the routine's result instead of silently discarding it.
	return routines.dw_exp_ins(x, y, outfile, smiles_access, excl, C=C, gamma=gamma, iterations=iterations, batch=batch, degree=degree, kernel=kernel, beta=beta, decf=decf, seed=random_seed, simfp=simfp, initial=initial)
if __name__ == "__main__":
	# Earlier experiment invocations kept for reference; only the final
	# al_run call is active.
	#prog = al_xval("Xanthomonas citri", "3.1.1.43", 0, neg="AAEHNegatives.sdf", C=5, batch=1, beta=1, iterations=1000, total_tc=True, fp='FP4', kernel='rbf', decf=True)
	#al_inspect("rhodochrous", "4.2.1.84", "NHaseNegatives.sdf", 0, C=5, batch=1, beta=1, iterations=1000)
	#al_inspect("Xanthomonas citri", "3.1.1.43", "AAEHNegatives.sdf", 0, C=5, batch=1, beta=1, iterations=1000, decf=True)
	#al_xval_ins("<NAME>", "2.2.1.9", 0, neg="MenDNegatives.sdf", pos="MenDPositives_extra.sdf", C=5, batch=1, beta=1, iterations=1000, kernel='rbf', decf=True)
	#al_xval_ins("iowensis", "172.16.58.3", 0, neg="CarNegatives.sdf", pos="CarPositives.sdf", C=5, batch=1, beta=1, iterations=1000, kernel='rbf', decf=True)
	#al_xval_ins("putida", "1.14.13.84", 0, neg="HAPMONegatives.sdf", C=5, batch=1, beta=1, iterations=1000, kernel='rbf', decf=True)
	#al_xval_ins("<NAME>", "3.1.1.43", 0, neg="AAEHNegatives.sdf", C=5, batch=1, beta=1, iterations=1000, kernel='rbf', decf=True)
	#al_inspect("<NAME>", "2.2.1.9", "MenDNegatives.sdf", 0, C=5, batch=1, beta=1, iterations=1000, decf=True)
	#al_exp_val("<NAME>", "2.2.1.9", 0, "MenD_rd1.sdf", neg="MenDNegatives_extra.sdf", pos="MenDPositives_extra.sdf", C=5, batch=1, beta=1, iterations=1000, decf=True, kernel='rbf')
	#al_exp_ins("<NAME>", "2.2.1.9", 0, "MenD_rd1.sdf", neg="MenDNegatives.sdf", C=5, batch=1, beta=1, iterations=1000, decf=True)
	al_run("iowensis", "1.2.99.6", "CarNegatives.sdf", 0, pos="CarPositives.sdf", zinc=False, target_bits='84') | [
"pybel.Smarts",
"numpy.mean",
"routines.dw_exp_ins",
"os.path.join",
"structures.fingerprinter.reconstruct_fp",
"structures.isozyme.BrendaIsozyme",
"os.path.dirname",
"numpy.array",
"machines.svm_clf",
"numpy.vstack",
"structures.fingerprinter.integer_sim",
"density_weight.hyper_distance",
"... | [((626, 637), 'structures.isozyme.BrendaIsozyme', 'bi', (['org', 'ec'], {}), '(org, ec)\n', (628, 637), True, 'from structures.isozyme import BrendaIsozyme as bi\n'), ((1763, 1853), 'machines.svm_clf', 'machines.svm_clf', (['a.pos[k]', 'a.neg[k]', 'res'], {'kernel': 'kernel', 'degree': 'degree', 'ent': 'ent', 'C': 'C'}), '(a.pos[k], a.neg[k], res, kernel=kernel, degree=degree, ent\n =ent, C=C)\n', (1779, 1853), False, 'import machines\n'), ((1962, 2003), 'density_weight.avg_proximity', 'dw.avg_proximity', (['test_a', 'test_a'], {'f': 'simfp'}), '(test_a, test_a, f=simfp)\n', (1978, 2003), True, 'import density_weight as dw\n'), ((3705, 3716), 'structures.isozyme.BrendaIsozyme', 'bi', (['org', 'ec'], {}), '(org, ec)\n', (3707, 3716), True, 'from structures.isozyme import BrendaIsozyme as bi\n'), ((4771, 4808), 'numpy.vstack', 'np.vstack', (['(x_pos_array, x_neg_array)'], {}), '((x_pos_array, x_neg_array))\n', (4780, 4808), True, 'import numpy as np\n'), ((4823, 4847), 'numpy.mean', 'np.mean', (['x_array'], {'axis': '(0)'}), '(x_array, axis=0)\n', (4830, 4847), True, 'import numpy as np\n'), ((4949, 4972), 'numpy.mean', 'np.mean', (['test_a'], {'axis': '(0)'}), '(test_a, axis=0)\n', (4956, 4972), True, 'import numpy as np\n'), ((4983, 5024), 'density_weight.avg_proximity', 'dw.avg_proximity', (['test_a', 'test_a'], {'f': 'simfp'}), '(test_a, test_a, f=simfp)\n', (4999, 5024), True, 'import density_weight as dw\n'), ((5881, 5892), 'structures.isozyme.BrendaIsozyme', 'bi', (['org', 'ec'], {}), '(org, ec)\n', (5883, 5892), True, 'from structures.isozyme import BrendaIsozyme as bi\n'), ((6513, 6524), 'structures.isozyme.BrendaIsozyme', 'bi', (['org', 'ec'], {}), '(org, ec)\n', (6515, 6524), True, 'from structures.isozyme import BrendaIsozyme as bi\n'), ((7446, 7457), 'structures.isozyme.BrendaIsozyme', 'bi', (['org', 'ec'], {}), '(org, ec)\n', (7448, 7457), True, 'from structures.isozyme import BrendaIsozyme as bi\n'), ((8952, 9170), 'routines.dw_exp_ins', 
'routines.dw_exp_ins', (['x', 'y', 'outfile', 'smiles_access', 'excl'], {'C': 'C', 'gamma': 'gamma', 'iterations': 'iterations', 'batch': 'batch', 'degree': 'degree', 'kernel': 'kernel', 'beta': 'beta', 'decf': 'decf', 'seed': 'random_seed', 'simfp': 'simfp', 'initial': 'initial'}), '(x, y, outfile, smiles_access, excl, C=C, gamma=gamma,\n iterations=iterations, batch=batch, degree=degree, kernel=kernel, beta=\n beta, decf=decf, seed=random_seed, simfp=simfp, initial=initial)\n', (8971, 9170), False, 'import routines\n'), ((326, 351), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (341, 351), False, 'import os\n'), ((7584, 7611), 'os.path.join', 'os.path.join', (['CHEMPATH', 'exp'], {}), '(CHEMPATH, exp)\n', (7596, 7611), False, 'import os\n'), ((8131, 8168), 'numpy.vstack', 'np.vstack', (['(x_pos_array, x_neg_array)'], {}), '((x_pos_array, x_neg_array))\n', (8140, 8168), True, 'import numpy as np\n'), ((4035, 4142), 'databases.db_queries.zinc_pull', 'dbq.zinc_pull', (['target_bits', 'a.mass_avg[k]', 'a.mass_std[k]'], {'zinc_tol_l': 'zinc_tol_l', 'zinc_tol_r': 'zinc_tol_r'}), '(target_bits, a.mass_avg[k], a.mass_std[k], zinc_tol_l=\n zinc_tol_l, zinc_tol_r=zinc_tol_r)\n', (4048, 4142), True, 'from databases import db_queries as dbq\n'), ((4281, 4301), 'pybel.Smarts', 'pybel.Smarts', (['smarts'], {}), '(smarts)\n', (4293, 4301), False, 'import pybel\n'), ((5046, 5078), 'structures.fingerprinter.integer_sim', 'fptr.integer_sim', (['centroid', 'x[1]'], {}), '(centroid, x[1])\n', (5062, 5078), True, 'from structures import fingerprinter as fptr\n'), ((6665, 6692), 'os.path.join', 'os.path.join', (['CHEMPATH', 'exp'], {}), '(CHEMPATH, exp)\n', (6677, 6692), False, 'import os\n'), ((8604, 8641), 'numpy.vstack', 'np.vstack', (['(x_pos_array, x_neg_array)'], {}), '((x_pos_array, x_neg_array))\n', (8613, 8641), True, 'import numpy as np\n'), ((931, 1038), 'databases.db_queries.zinc_pull', 'dbq.zinc_pull', (['target_bits', 'a.mass_avg[k]', 
'a.mass_std[k]'], {'zinc_tol_l': 'zinc_tol_l', 'zinc_tol_r': 'zinc_tol_r'}), '(target_bits, a.mass_avg[k], a.mass_std[k], zinc_tol_l=\n zinc_tol_l, zinc_tol_r=zinc_tol_r)\n', (944, 1038), True, 'from databases import db_queries as dbq\n'), ((1177, 1197), 'pybel.Smarts', 'pybel.Smarts', (['smarts'], {}), '(smarts)\n', (1189, 1197), False, 'import pybel\n'), ((1629, 1655), 'databases.db_queries.kegg_pull', 'dbq.kegg_pull', (['target_bits'], {}), '(target_bits)\n', (1642, 1655), True, 'from databases import db_queries as dbq\n'), ((1879, 1893), 'numpy.array', 'np.array', (['x[1]'], {}), '(x[1])\n', (1887, 1893), True, 'import numpy as np\n'), ((4879, 4893), 'numpy.array', 'np.array', (['x[1]'], {}), '(x[1])\n', (4887, 4893), True, 'import numpy as np\n'), ((2292, 2305), 'density_weight.entropy', 'dw.entropy', (['p'], {}), '(p)\n', (2302, 2305), True, 'import density_weight as dw\n'), ((2397, 2417), 'density_weight.hyper_distance', 'dw.hyper_distance', (['d'], {}), '(d)\n', (2414, 2417), True, 'import density_weight as dw\n'), ((2059, 2072), 'density_weight.entropy', 'dw.entropy', (['p'], {}), '(p)\n', (2069, 2072), True, 'import density_weight as dw\n'), ((2168, 2188), 'density_weight.hyper_distance', 'dw.hyper_distance', (['d'], {}), '(d)\n', (2185, 2188), True, 'import density_weight as dw\n'), ((5169, 5201), 'structures.fingerprinter.integer_sim', 'fptr.integer_sim', (['centroid', 'x[1]'], {}), '(centroid, x[1])\n', (5185, 5201), True, 'from structures import fingerprinter as fptr\n'), ((8331, 8370), 'structures.fingerprinter.reconstruct_fp', 'fptr.reconstruct_fp', (['t[0]'], {'fptype': '"""FP2"""'}), "(t[0], fptype='FP2')\n", (8350, 8370), True, 'from structures import fingerprinter as fptr\n'), ((8436, 8475), 'structures.fingerprinter.reconstruct_fp', 'fptr.reconstruct_fp', (['t[0]'], {'fptype': '"""FP2"""'}), "(t[0], fptype='FP2')\n", (8455, 8475), True, 'from structures import fingerprinter as fptr\n')] |
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (PointGenerator, multi_apply, multiclass_nms_kp,
point_target_kp)
from mmdet.ops import DeformConv
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, bias_init_with_prob
class MultiColumnDeformConvBlock(nn.Module):
    """Three parallel deformable convolutions (3x3, 5x5 and 7x7).

    ``forward`` consumes one concatenated offset field of
    ``2 * (9 + 25 + 49)`` channels (y/x pairs for each sampling point of
    each kernel, in 3-5-7 order) and returns the channel-wise
    concatenation of the three deformable-conv outputs
    (``3 * feat_channels`` channels).

    The original implementation repeated the kernel/pad/base-offset/conv
    setup verbatim for each kernel size; it is factored into a loop here.
    Attribute names (``dcn_kernel_3``, ``dcn_base_offset_5``,
    ``dfmconv_7``, ...) are unchanged.
    """

    # Number of sampling points for the 3x3, 5x5 and 7x7 kernels.
    KERNEL_POINTS = (9, 25, 49)

    def __init__(self,
                 in_channels=256,
                 feat_channels=256,
                 gradient_mul=0.1):
        super().__init__()
        self.gradient_mul = gradient_mul
        # Total offset channels: 2 coordinates per sampling point.
        self.deform_offset_dim = 2 * sum(self.KERNEL_POINTS)
        for points in self.KERNEL_POINTS:
            kernel = int(np.sqrt(points))
            pad = (kernel - 1) // 2
            setattr(self, 'dcn_kernel_%d' % kernel, kernel)
            setattr(self, 'dcn_pad_%d' % kernel, pad)
            setattr(self, 'dcn_base_offset_%d' % kernel,
                    self._base_offset(kernel, pad))
            conv = DeformConv(in_channels, feat_channels, kernel, 1, pad)
            normal_init(conv, std=0.01)
            # setattr on an nn.Module registers the conv as a submodule.
            setattr(self, 'dfmconv_%d' % kernel, conv)

    @staticmethod
    def _base_offset(kernel, pad):
        """Return the default sampling grid as a (1, 2*k*k, 1, 1) tensor.

        Offsets are interleaved (y, x) pairs spanning
        [-pad, pad] x [-pad, pad], matching DeformConv's expected layout.
        """
        base = np.arange(-pad, pad + 1).astype(np.float64)
        base_y = np.repeat(base, kernel)
        base_x = np.tile(base, kernel)
        offset = np.stack([base_y, base_x], axis=1).reshape((-1))
        return torch.tensor(offset).view(1, -1, 1, 1)

    def forward(self, feat, deform_offset):
        """Apply the three deformable convs and concatenate their outputs.

        :param feat: input feature map.
        :param deform_offset: predicted absolute sampling positions,
            ``2*(9+25+49)`` channels in 3-5-7 kernel order.
        """
        assert deform_offset.size(1) == self.deform_offset_dim
        outputs = []
        start = 0
        for kernel in (3, 5, 7):
            span = 2 * kernel * kernel
            offset = deform_offset[:, start:start + span, :, :]
            start += span
            base = getattr(self, 'dcn_base_offset_%d' % kernel).type_as(feat)
            # Scale the gradient flowing back into the offset prediction.
            offset = self.gradient_mul * offset \
                + (1 - self.gradient_mul) * offset.detach()
            # DeformConv expects offsets relative to the base grid.
            conv = getattr(self, 'dfmconv_%d' % kernel)
            outputs.append(conv(feat, offset - base))
        return torch.cat(outputs, dim=1)
class KpDetModule(nn.Module):
    """ Sequential Block

    One cascade stage of the keypoint detector. Predicts a classification
    score map, a keypoint-offset field (2 * num_keypts channels) and a
    bbox derived from those keypoints. With ``deform_conv=False`` (stage
    1) plain 3x3 convs are used; with ``deform_conv=True`` the previous
    stage's keypoint offsets drive multi-column deformable convolutions
    and the new offsets are added residually to the previous ones.
    """
    def __init__(self,
                 deform_conv,
                 cls_out_channels,
                 in_channels=256,
                 feat_channels=256,
                 num_reppts=9,
                 num_keypts=17,
                 gradient_mul=0.1,
                 transform_method='minmax',
                 moment_mul=0.01):
        # NOTE(review): num_reppts is accepted but never read in this class.
        super().__init__()
        self.deform_conv = deform_conv
        self.gradient_mul = gradient_mul
        self.transform_method = transform_method
        self.moment_mul = moment_mul
        # 2 channels (y, x) per keypoint.
        keypts_out_dim = 2 * num_keypts
        # Offset channels for the 3x3 + 5x5 + 7x7 deformable columns.
        deform_offset_dim = 2 * (9 + 25 + 49)
        self.relu = nn.ReLU(inplace=False)
        # initiate conv layers
        if deform_conv:
            # Maps the previous stage's keypoint offsets to dcn offsets.
            self.deform_offset_out = nn.Conv2d(
                keypts_out_dim, deform_offset_dim, 1, 1, 0)
            self.cls_dfm_block = MultiColumnDeformConvBlock(
                in_channels, feat_channels, gradient_mul)
            # *3: the multi-column block concatenates three conv outputs.
            self.cls_out = nn.Conv2d(
                feat_channels*3, cls_out_channels, 1, 1, 0)
            self.bbox_param_dfm_block = MultiColumnDeformConvBlock(
                in_channels, feat_channels, gradient_mul)
            # 4 bbox transform parameters (see points2bbox *_param methods).
            self.bbox_param_out = nn.Conv2d(
                feat_channels*3, 4, 1, 1, 0)
            self.kpt_dfm_block = MultiColumnDeformConvBlock(
                in_channels, feat_channels, gradient_mul)
            self.kpt_out = nn.Conv2d(
                feat_channels*3, keypts_out_dim, 1, 1, 0)
        else:
            # Stage 1: plain 3x3 convs, no offsets available yet.
            self.cls_conv = nn.Conv2d(
                in_channels, feat_channels, 3, 1, 1)
            self.cls_out = nn.Conv2d(
                feat_channels, cls_out_channels, 1, 1, 0)
            self.bbox_param_conv = nn.Conv2d(
                in_channels, feat_channels, 3, 1, 1)
            self.bbox_param_out = nn.Conv2d(
                feat_channels, 4, 1, 1, 0)
            self.kpt_conv = nn.Conv2d(
                in_channels, feat_channels, 3, 1, 1)
            self.kpt_out = nn.Conv2d(
                feat_channels, keypts_out_dim, 1, 1, 0)
        # init weights
        # Focal-loss-style bias so initial cls predictions are rare positives.
        bias_cls = bias_init_with_prob(0.01)
        if self.deform_conv:
            normal_init(self.deform_offset_out, std=0.01)
        else:
            normal_init(self.cls_conv, std=0.01)
            normal_init(self.bbox_param_conv, std=0.01)
            normal_init(self.kpt_conv, std=0.01)
        normal_init(self.cls_out, std=0.01, bias=bias_cls)
        normal_init(self.bbox_param_out, std=0.01)
        normal_init(self.kpt_out, std=0.01)

    def forward(self, cls_feat, pts_feat, kpt_offset_prev=None):
        """Run one cascade stage.

        :param cls_feat: classification feature map.
        :param pts_feat: regression/point feature map.
        :param kpt_offset_prev: previous stage's keypoint offsets;
            required when ``deform_conv=True``, ignored otherwise.
        :return: (cls_score_map, bbox_offset, kpt_offset).
        """
        if self.deform_conv:
            deform_offset = self.deform_offset_out(kpt_offset_prev)
            cls_dfm_feat = self.relu(
                self.cls_dfm_block(cls_feat, deform_offset))
            cls_score_map = self.cls_out(cls_dfm_feat)
            kpt_dfm_feat = self.relu(
                self.kpt_dfm_block(pts_feat, deform_offset))
            kpt_offset = self.kpt_out(kpt_dfm_feat)
            # Residual refinement on top of the (detached) previous offsets.
            kpt_offset = kpt_offset + kpt_offset_prev.detach()
            bbox_param_dfm_feat = self.relu(
                self.bbox_param_dfm_block(pts_feat, deform_offset))
            bbox_param = self.bbox_param_out(bbox_param_dfm_feat)
            # Bbox is derived from (detached) keypoints, so bbox gradients
            # flow only through bbox_param.
            bbox_offset = self.points2bbox(
                kpt_offset.detach(), tranfer_param=bbox_param)
        else:
            cls_score_map = self.cls_out(self.relu(self.cls_conv(cls_feat)))
            kpt_offset = self.kpt_out(self.relu(
                self.kpt_conv(pts_feat)))
            bbox_param = self.bbox_param_out(self.relu(
                self.bbox_param_conv(pts_feat)))
            bbox_offset = self.points2bbox(
                kpt_offset.detach(), tranfer_param=bbox_param)
        return cls_score_map, bbox_offset, kpt_offset

    def points2bbox(self, pts, y_first=True, tranfer_param=None):
        """
        Converting the points set into bounding box.
        :param pts: the input points sets (fields), each points
            set (fields) is represented as 2n scalar.
        :param y_first: if y_fisrt=True, the point set is represented as
            [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
            represented as [x1, y1, x2, y2 ... xn, yn].
        :param transfer_param:
            size: [B, 4, H, W]
            Meaning of each channel:
                - translate_x
                - translate_y
                - scale_x
                - scale_y
        :return: each points set is converting to a bbox [x1, y1, x2, y2].
        """
        # NOTE(review): the parameter is spelled "tranfer_param" (sic);
        # keep the spelling, callers in forward() pass it by that keyword.
        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
                                                                      ...]
        pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
                                                                      ...]
        if self.transform_method == 'minmax':
            # Tight axis-aligned box around the predicted points.
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'moment':
            # NOTE(review): self.moment_transfer is never assigned in this
            # class (it exists on CascadeKpDetHead), so this branch would
            # raise AttributeError here — confirm 'moment' is never routed
            # to KpDetModule.
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            moment_transfer = (self.moment_transfer * self.moment_mul) + (
                self.moment_transfer.detach() * (1 - self.moment_mul))
            moment_width_transfer = moment_transfer[0]
            moment_height_transfer = moment_transfer[1]
            half_width = pts_x_std * torch.exp(moment_width_transfer)
            half_height = pts_y_std * torch.exp(moment_height_transfer)
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        elif self.transform_method == 'minmax_param':
            # Min/max box adjusted by the learned per-cell transform
            # (scaled gradient via moment_mul, like the 'moment' branch).
            assert tranfer_param is not None
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_top = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox_center_x = (bbox_left + bbox_right) / 2
            bbox_center_y = (bbox_top + bbox_bottom) / 2
            tranfer_param = (tranfer_param * self.moment_mul) + (
                tranfer_param.detach() * (1 - self.moment_mul))
            half_width = (bbox_center_x - bbox_left) * torch.exp(
                tranfer_param[:, 0:1, :, :])
            half_height = (bbox_center_y - bbox_top) * torch.exp(
                tranfer_param[:, 1:2, :, :])
            bbox_center_x = bbox_center_x + tranfer_param[:, 2:3, :, :]
            bbox_center_y = bbox_center_y + tranfer_param[:, 3:4, :, :]
            bbox = torch.cat([
                bbox_center_x - half_width, bbox_center_y - half_height,
                bbox_center_x + half_width, bbox_center_y + half_height
            ],
                             dim=1)
        elif self.transform_method == 'moment_param':
            # Moment box whose mean/std are adjusted by the learned
            # per-cell transform instead of a global moment_transfer.
            assert tranfer_param is not None
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            tranfer_param = (tranfer_param * self.moment_mul) + (
                tranfer_param.detach() * (1 - self.moment_mul))
            pts_x_mean = pts_x_mean + tranfer_param[:, 0:1, :, :]
            pts_y_mean = pts_y_mean + tranfer_param[:, 1:2, :, :]
            half_width = pts_x_std * torch.exp(tranfer_param[:, 2:3, :, :])
            half_height = pts_y_std * torch.exp(tranfer_param[:, 3:4, :, :])
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        else:
            raise NotImplementedError
        return bbox
@HEADS.register_module
class CascadeKpDetHead(nn.Module):
"""RepPoint head.
Args:
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of channels of the feature map.
point_feat_channels (int): Number of channels of points features.
stacked_convs (int): How many conv layers are used.
gradient_mul (float): The multiplier to gradients from
points refinement and recognition.
point_strides (Iterable): points strides.
transform_method (str): The methods to transform RepPoints to bbox.
""" # noqa: W605
    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 point_feat_channels=256,
                 stacked_convs=3,
                 num_reppts=9,
                 num_keypts=17,
                 gradient_mul=0.1,
                 point_strides=[8, 16, 32, 64, 128],
                 point_base_scale=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 loss_cls_1=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=0.5),
                 loss_cls_2=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=0.5),
                 loss_cls_3=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox_1=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_2=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_3=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_kpt_1=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_kpt_2=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_kpt_3=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                 use_grid_points=False,
                 center_init=True,
                 transform_method='moment',
                 moment_mul=0.01):
        """Build the cascaded keypoint head.

        Per-stage (1..3) classification, bbox and keypoint losses are
        built via ``build_loss``; the mutable dict/list defaults follow
        the surrounding mmdet config style and are only read, never
        mutated, here.

        NOTE(review): the ``num_reppts`` argument is ignored — it is
        overwritten below with 9+25+49 to match the multi-column
        deformable-conv offset layout.
        """
        super().__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.feat_channels = feat_channels
        self.point_feat_channels = point_feat_channels
        self.stacked_convs = stacked_convs
        self.num_keypts = num_keypts
        # Fixed to the 3x3 + 5x5 + 7x7 deformable-column sampling points.
        self.num_reppts = 9+25+49
        self.gradient_mul = gradient_mul
        self.point_base_scale = point_base_scale
        self.point_strides = point_strides
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Classification output width / sampling follow the stage-3 loss cfg.
        self.use_sigmoid_cls = loss_cls_3.get('use_sigmoid', False)
        self.sampling = loss_cls_3['type'] not in ['FocalLoss']
        self.loss_cls_1 = build_loss(loss_cls_1)
        self.loss_cls_2 = build_loss(loss_cls_2)
        self.loss_cls_3 = build_loss(loss_cls_3)
        self.loss_bbox_1 = build_loss(loss_bbox_1)
        self.loss_bbox_2 = build_loss(loss_bbox_2)
        self.loss_bbox_3 = build_loss(loss_bbox_3)
        self.loss_kpt_1 = build_loss(loss_kpt_1)
        self.loss_kpt_2 = build_loss(loss_kpt_2)
        self.loss_kpt_3 = build_loss(loss_kpt_3)
        self.use_grid_points = use_grid_points
        self.center_init = center_init
        self.transform_method = transform_method
        if self.transform_method == 'moment':
            self.moment_transfer = nn.Parameter(
                data=torch.zeros(2), requires_grad=True)
            # NOTE(review): self.moment_mul is only set in this branch but
            # _init_layers reads it unconditionally — any other
            # transform_method would raise AttributeError; confirm intent.
            self.moment_mul = moment_mul
        if self.use_sigmoid_cls:
            self.cls_out_channels = self.num_classes - 1
        else:
            self.cls_out_channels = self.num_classes
        # One anchor-point generator per pyramid stride.
        self.point_generators = [PointGenerator() for _ in self.point_strides]
        self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=False)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
# stage 1
self.kp_det_1 = KpDetModule(
False,
self.cls_out_channels,
self.feat_channels,
self.point_feat_channels,
self.num_reppts,
self.num_keypts,
self.gradient_mul,
self.transform_method,
self.moment_mul)
# stage 2
self.kp_det_2 = KpDetModule(
True,
self.cls_out_channels,
self.feat_channels,
self.point_feat_channels,
self.num_reppts,
self.num_keypts,
self.gradient_mul,
self.transform_method,
self.moment_mul)
# stage 3
self.kp_det_3 = KpDetModule(
True,
self.cls_out_channels,
self.feat_channels,
self.point_feat_channels,
self.num_reppts,
self.num_keypts,
self.gradient_mul,
self.transform_method,
self.moment_mul)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
def points2kpt(self, pts, y_first=True):
"""
Converting the points set into keypoints.
:param pts: the input points sets (fields), each points
set (fields) is represented as 2n scalar.
:param y_first: if y_fisrt=True, the point set is represented as
[y1, x1, y2, x2 ... yn, xn], otherwise the point set is
represented as [x1, y1, x2, y2 ... xn, yn].
:return: each points set is converting to keypoint list
[x1, y1, x2, y2 ... xk, yk].
"""
pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
...]
pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
...]
pts = torch.cat([pts_x, pts_y], dim=2).view(*pts.shape)
return pts
def forward_single(self, x):
cls_feat = x
pts_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
pts_feat = reg_conv(pts_feat)
# stage 1
cls_score_map_1, box_offset_1, kpt_offset_1 = \
self.kp_det_1(cls_feat, pts_feat)
# stage 2
cls_score_map_2, box_offset_2, kpt_offset_2 = \
self.kp_det_2(cls_feat, pts_feat, kpt_offset_1)
# stage 3
cls_score_map_3, box_offset_3, kpt_offset_3 = \
self.kp_det_3(cls_feat, pts_feat, kpt_offset_2)
return (cls_score_map_1, cls_score_map_2, cls_score_map_3,
box_offset_1, box_offset_2, box_offset_3,
kpt_offset_1, kpt_offset_2, kpt_offset_3)
    def forward(self, feats):
        """Apply ``forward_single`` to every FPN level in ``feats``."""
        return multi_apply(self.forward_single, feats)
def get_points(self, featmap_sizes, img_metas):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
Returns:
tuple: points of each image, valid flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# points center for one time
multi_level_points = []
for i in range(num_levels):
points = self.point_generators[i].grid_points(
featmap_sizes[i], self.point_strides[i])
multi_level_points.append(points)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level grids
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
point_stride = self.point_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
flags = self.point_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w))
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def offset_to_pts(self, center_list, pred_list, y_first=True):
"""Change from point offset to point coordinate.
"""
num_points = pred_list[0].size(1) // 2
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, num_points)
pts_shift = pred_list[i_lvl][i_img]
if y_first:
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
else:
xy_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * num_points)
xy_pts_shift = xy_pts_shift.view(*xy_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
def loss_single(self, cls_score_1, cls_score_2, cls_score_3,
kpt_pred_1, kpt_pred_2, kpt_pred_3,
bbox_pred_1, bbox_pred_2, bbox_pred_3,
labels, label_weights,
bbox_gt, bbox_weights,
kpt_gt, kpt_weights,
stride, num_total_samples):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score_1 = cls_score_1.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
cls_score_2 = cls_score_2.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
cls_score_3 = cls_score_3.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
loss_cls_1 = self.loss_cls_1(
cls_score_1,
labels,
label_weights,
avg_factor=num_total_samples)
loss_cls_2 = self.loss_cls_2(
cls_score_2,
labels,
label_weights,
avg_factor=num_total_samples)
loss_cls_3 = self.loss_cls_3(
cls_score_3,
labels,
label_weights,
avg_factor=num_total_samples)
# bbox loss
bbox_gt = bbox_gt.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred_1 = bbox_pred_1.reshape(-1, 4)
bbox_pred_2 = bbox_pred_2.reshape(-1, 4)
bbox_pred_3 = bbox_pred_3.reshape(-1, 4)
normalize_term = self.point_base_scale * stride
loss_bbox_1 = self.loss_bbox_1(
bbox_pred_1 / normalize_term,
bbox_gt / normalize_term,
bbox_weights,
avg_factor=num_total_samples)
loss_bbox_2 = self.loss_bbox_2(
bbox_pred_2 / normalize_term,
bbox_gt / normalize_term,
bbox_weights,
avg_factor=num_total_samples)
loss_bbox_3 = self.loss_bbox_3(
bbox_pred_3 / normalize_term,
bbox_gt / normalize_term,
bbox_weights,
avg_factor=num_total_samples)
# keypoint loss
kpt_gt = kpt_gt.reshape(-1, self.num_keypts * 2)
kpt_weights = kpt_weights.reshape(-1, self.num_keypts * 2)
kpt_pos_num = kpt_weights.sum(1)
kpt_weights[kpt_pos_num > 0] /= kpt_pos_num[
kpt_pos_num > 0].unsqueeze(1)
kpt_weights *= 4
kpt_pred_1 = kpt_pred_1.reshape(-1, self.num_keypts * 2)
kpt_pred_2 = kpt_pred_2.reshape(-1, self.num_keypts * 2)
kpt_pred_3 = kpt_pred_3.reshape(-1, self.num_keypts * 2)
normalize_term = self.point_base_scale * stride
loss_kpt_1 = self.loss_kpt_1(
kpt_pred_1 / normalize_term,
kpt_gt / normalize_term,
kpt_weights,
avg_factor=num_total_samples)
loss_kpt_2 = self.loss_kpt_2(
kpt_pred_2 / normalize_term,
kpt_gt / normalize_term,
kpt_weights,
avg_factor=num_total_samples)
loss_kpt_3 = self.loss_kpt_3(
kpt_pred_3 / normalize_term,
kpt_gt / normalize_term,
kpt_weights,
avg_factor=num_total_samples)
return (loss_cls_1, loss_cls_2, loss_cls_3,
loss_bbox_1, loss_bbox_2, loss_bbox_3,
loss_kpt_1, loss_kpt_2, loss_kpt_3)
    def loss(self,
             cls_scores_1,
             cls_scores_2,
             cls_scores_3,
             box_offset_preds_1,
             box_offset_preds_2,
             box_offset_preds_3,
             kpt_preds_1,
             kpt_preds_2,
             kpt_preds_3,
             gt_bboxes,
             gt_labels,
             gt_keypoints,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        """Compute losses of all three cascade stages over all FPN levels.

        Args:
            cls_scores_1/2/3 (list[Tensor]): per-level class score maps of
                each stage.
            box_offset_preds_1/2/3 (list[Tensor]): per-level bbox offset
                maps of each stage.
            kpt_preds_1/2/3 (list[Tensor]): per-level keypoint offset maps
                of each stage.
            gt_bboxes, gt_labels, gt_keypoints: per-image ground truth.
            img_metas (list[dict]): image meta info.
            cfg: training config; ``cfg.uniform`` holds the point assigner
                settings shared by all three stages.
            gt_bboxes_ignore: optional ignored gt boxes.

        Returns:
            dict: per-stage losses keyed ``loss_{cls,bbox,kpt}_{1,2,3}``.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores_3]
        assert len(featmap_sizes) == len(self.point_generators)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        center_list, valid_flag_list = self.get_points(featmap_sizes,
                                                       img_metas)
        # Decode each stage's offsets into absolute point coordinates
        # (keypoints are stored y-first, box offsets x-first).
        # predictions of the 1st stage
        kpt_coord_preds_1 = self.offset_to_pts(center_list, kpt_preds_1)
        box_coord_preds_1 = self.offset_to_pts(
            center_list, box_offset_preds_1, y_first=False)
        # predictions of the 2nd stage
        kpt_coord_preds_2 = self.offset_to_pts(center_list, kpt_preds_2)
        box_coord_preds_2 = self.offset_to_pts(
            center_list, box_offset_preds_2, y_first=False)
        # predictions of the 3rd stage
        kpt_coord_preds_3 = self.offset_to_pts(center_list, kpt_preds_3)
        box_coord_preds_3 = self.offset_to_pts(
            center_list, box_offset_preds_3, y_first=False)
        # Targets are computed once and shared by all three stages.
        if cfg.uniform.assigner['type'] == 'PointAssigner':
            # Assign target for center list
            candidate_list = center_list
        else:
            raise(NotImplementedError)
        cls_reg_targets = point_target_kp(
            candidate_list,
            valid_flag_list,
            gt_bboxes,
            gt_keypoints,
            img_metas,
            cfg.uniform,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            sampling=self.sampling)
        (labels_list, label_weights_list,
         bbox_gt_list, candidate_list, bbox_weights_list,
         keypoint_gt_list, keypoint_weights_list,
         num_total_pos, num_total_neg) = cls_reg_targets
        # Negatives count toward the normalizer only when sampling is used
        # (i.e. the classification loss is not FocalLoss).
        num_total_samples = (
            num_total_pos +
            num_total_neg if self.sampling else num_total_pos)
        # compute loss
        (losses_cls_1, losses_cls_2, losses_cls_3,
         losses_bbox_1, losses_bbox_2, losses_bbox_3,
         losses_kpt_1, losses_kpt_2, losses_kpt_3) = multi_apply(
            self.loss_single,
            cls_scores_1,
            cls_scores_2,
            cls_scores_3,
            kpt_coord_preds_1,
            kpt_coord_preds_2,
            kpt_coord_preds_3,
            box_coord_preds_1,
            box_coord_preds_2,
            box_coord_preds_3,
            labels_list,
            label_weights_list,
            bbox_gt_list,
            bbox_weights_list,
            keypoint_gt_list,
            keypoint_weights_list,
            self.point_strides,
            num_total_samples=num_total_samples)
        loss_dict_all = {
            'loss_cls_1': losses_cls_1,
            'loss_cls_2': losses_cls_2,
            'loss_cls_3': losses_cls_3,
            'loss_bbox_1': losses_bbox_1,
            'loss_bbox_2': losses_bbox_2,
            'loss_bbox_3': losses_bbox_3,
            'loss_kpt_1': losses_kpt_1,
            'loss_kpt_2': losses_kpt_2,
            'loss_kpt_3': losses_kpt_3
        }
        return loss_dict_all
def get_bboxes(self,
cls_scores_1,
cls_scores_2,
cls_scores_3,
box_offset_preds_1,
box_offset_preds_2,
box_offset_preds_3,
kpt_preds_1,
kpt_preds_2,
kpt_preds_3,
img_metas,
cfg,
rescale=False,
nms=True):
cls_score_final = cls_scores_3
keypts_preds_final = kpt_preds_3
bbox_preds_final = box_offset_preds_3
assert len(cls_score_final) == len(keypts_preds_final) \
== len(bbox_preds_final)
bbox_preds = bbox_preds_final
kpt_preds = [
self.points2kpt(keypts_pred)
for keypts_pred in keypts_preds_final
]
num_levels = len(cls_score_final)
mlvl_points = [
self.point_generators[i].grid_points(cls_score_final[i].size()[-2:],
self.point_strides[i])
for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_score_final[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach()
for i in range(num_levels)
]
kpt_pred_list = [
kpt_preds[i][img_id].detach()
for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
kpt_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale, nms)
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
kpt_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False,
nms=True):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) \
== len(kpt_preds)
mlvl_bboxes = []
mlvl_kpts = []
mlvl_scores = []
num_kpt = self.num_keypts
num_kp_channel = kpt_preds[0].size(0) // num_kpt
assert num_kp_channel == 2 or num_kp_channel == 3
for i_lvl, (cls_score, bbox_pred, kpt_pred, points) in enumerate(
zip(cls_scores, bbox_preds, kpt_preds, mlvl_points)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:] \
== kpt_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
if num_kp_channel == 3:
kpt_pred = kpt_pred.permute(1, 2, 0).reshape(
-1, num_kpt * num_kp_channel)
# if kpt visibility is not predicted, set it to 1
elif num_kp_channel == 2:
kpt_pred = kpt_pred.permute(1, 2, 0).reshape(
-1, num_kpt, num_kp_channel)
pad_ones = kpt_pred.new_full(kpt_pred[:, :, :1].size(), 1)
kpt_pred = torch.cat([kpt_pred, pad_ones], dim=2)
kpt_pred = kpt_pred.reshape(-1, num_kpt * 3)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
kpt_pred = kpt_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
kpt_pos_center = points[:, :2].unsqueeze(dim=1)
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
kpt_pred = kpt_pred.view(-1, num_kpt, 3)
kpt_pred[:, :, :2] = kpt_pred[:, :, :2] \
* self.point_strides[i_lvl] + kpt_pos_center
kpts = kpt_pred
x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
kpts[:, 0::3] = kpts[:, 0::3].clamp(min=0, max=img_shape[1])
kpts[:, 1::3] = kpts[:, 1::3].clamp(min=0, max=img_shape[0])
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_kpts.append(kpts)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_kpts = torch.cat(mlvl_kpts)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_kpts[:, :, 0:2] = mlvl_kpts[:, :, 0:2] \
/ mlvl_kpts.new_tensor(scale_factor)
mlvl_kpts = mlvl_kpts.reshape(-1, num_kpt*3)
mlvl_scores = torch.cat(mlvl_scores)
# kpt mAP increses after multipling bbox score
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
if nms:
det_bboxes, det_labels, det_kpts = multiclass_nms_kp(
mlvl_bboxes,
mlvl_scores,
mlvl_kpts,
cfg.score_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels, det_kpts
else:
return mlvl_bboxes, mlvl_scores, mlvl_kpts
| [
"torch.nn.ReLU",
"mmdet.core.point_target_kp",
"numpy.sqrt",
"mmdet.core.PointGenerator",
"torch.exp",
"mmdet.ops.DeformConv",
"numpy.arange",
"numpy.repeat",
"torch.nn.ModuleList",
"numpy.stack",
"numpy.tile",
"numpy.ceil",
"mmdet.core.multi_apply",
"torch.std",
"torch.cat",
"mmdet.co... | [((982, 1022), 'numpy.repeat', 'np.repeat', (['dcn_base_3', 'self.dcn_kernel_3'], {}), '(dcn_base_3, self.dcn_kernel_3)\n', (991, 1022), True, 'import numpy as np\n'), ((1046, 1084), 'numpy.tile', 'np.tile', (['dcn_base_3', 'self.dcn_kernel_3'], {}), '(dcn_base_3, self.dcn_kernel_3)\n', (1053, 1084), True, 'import numpy as np\n'), ((1551, 1591), 'numpy.repeat', 'np.repeat', (['dcn_base_5', 'self.dcn_kernel_5'], {}), '(dcn_base_5, self.dcn_kernel_5)\n', (1560, 1591), True, 'import numpy as np\n'), ((1615, 1653), 'numpy.tile', 'np.tile', (['dcn_base_5', 'self.dcn_kernel_5'], {}), '(dcn_base_5, self.dcn_kernel_5)\n', (1622, 1653), True, 'import numpy as np\n'), ((2107, 2147), 'numpy.repeat', 'np.repeat', (['dcn_base_7', 'self.dcn_kernel_7'], {}), '(dcn_base_7, self.dcn_kernel_7)\n', (2116, 2147), True, 'import numpy as np\n'), ((2171, 2209), 'numpy.tile', 'np.tile', (['dcn_base_7', 'self.dcn_kernel_7'], {}), '(dcn_base_7, self.dcn_kernel_7)\n', (2178, 2209), True, 'import numpy as np\n'), ((2462, 2538), 'mmdet.ops.DeformConv', 'DeformConv', (['in_channels', 'feat_channels', 'self.dcn_kernel_3', '(1)', 'self.dcn_pad_3'], {}), '(in_channels, feat_channels, self.dcn_kernel_3, 1, self.dcn_pad_3)\n', (2472, 2538), False, 'from mmdet.ops import DeformConv\n'), ((2636, 2712), 'mmdet.ops.DeformConv', 'DeformConv', (['in_channels', 'feat_channels', 'self.dcn_kernel_5', '(1)', 'self.dcn_pad_5'], {}), '(in_channels, feat_channels, self.dcn_kernel_5, 1, self.dcn_pad_5)\n', (2646, 2712), False, 'from mmdet.ops import DeformConv\n'), ((2810, 2886), 'mmdet.ops.DeformConv', 'DeformConv', (['in_channels', 'feat_channels', 'self.dcn_kernel_7', '(1)', 'self.dcn_pad_7'], {}), '(in_channels, feat_channels, self.dcn_kernel_7, 1, self.dcn_pad_7)\n', (2820, 2886), False, 'from mmdet.ops import DeformConv\n'), ((2997, 3034), 'mmcv.cnn.normal_init', 'normal_init', (['self.dfmconv_3'], {'std': '(0.01)'}), '(self.dfmconv_3, std=0.01)\n', (3008, 3034), False, 'from mmcv.cnn import 
normal_init\n'), ((3043, 3080), 'mmcv.cnn.normal_init', 'normal_init', (['self.dfmconv_5'], {'std': '(0.01)'}), '(self.dfmconv_5, std=0.01)\n', (3054, 3080), False, 'from mmcv.cnn import normal_init\n'), ((3089, 3126), 'mmcv.cnn.normal_init', 'normal_init', (['self.dfmconv_7'], {'std': '(0.01)'}), '(self.dfmconv_7, std=0.01)\n', (3100, 3126), False, 'from mmcv.cnn import normal_init\n'), ((4425, 4491), 'torch.cat', 'torch.cat', (['[dfmconv_feat_3, dfmconv_feat_5, dfmconv_feat_7]'], {'dim': '(1)'}), '([dfmconv_feat_3, dfmconv_feat_5, dfmconv_feat_7], dim=1)\n', (4434, 4491), False, 'import torch\n'), ((5245, 5267), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (5252, 5267), True, 'import torch.nn as nn\n'), ((6994, 7044), 'mmcv.cnn.normal_init', 'normal_init', (['self.cls_out'], {'std': '(0.01)', 'bias': 'bias_cls'}), '(self.cls_out, std=0.01, bias=bias_cls)\n', (7005, 7044), False, 'from mmcv.cnn import normal_init\n'), ((7053, 7095), 'mmcv.cnn.normal_init', 'normal_init', (['self.bbox_param_out'], {'std': '(0.01)'}), '(self.bbox_param_out, std=0.01)\n', (7064, 7095), False, 'from mmcv.cnn import normal_init\n'), ((7104, 7139), 'mmcv.cnn.normal_init', 'normal_init', (['self.kpt_out'], {'std': '(0.01)'}), '(self.kpt_out, std=0.01)\n', (7115, 7139), False, 'from mmcv.cnn import normal_init\n'), ((17361, 17383), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (17368, 17383), True, 'import torch.nn as nn\n'), ((17409, 17424), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (17422, 17424), True, 'import torch.nn as nn\n'), ((17450, 17465), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (17463, 17465), True, 'import torch.nn as nn\n'), ((21215, 21254), 'mmdet.core.multi_apply', 'multi_apply', (['self.forward_single', 'feats'], {}), '(self.forward_single, feats)\n', (21226, 21254), False, 'from mmdet.core import PointGenerator, multi_apply, multiclass_nms_kp, point_target_kp\n'), ((29375, 
29606), 'mmdet.core.point_target_kp', 'point_target_kp', (['candidate_list', 'valid_flag_list', 'gt_bboxes', 'gt_keypoints', 'img_metas', 'cfg.uniform'], {'gt_bboxes_ignore_list': 'gt_bboxes_ignore', 'gt_labels_list': 'gt_labels', 'label_channels': 'label_channels', 'sampling': 'self.sampling'}), '(candidate_list, valid_flag_list, gt_bboxes, gt_keypoints,\n img_metas, cfg.uniform, gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.\n sampling)\n', (29390, 29606), False, 'from mmdet.core import PointGenerator, multi_apply, multiclass_nms_kp, point_target_kp\n'), ((30225, 30595), 'mmdet.core.multi_apply', 'multi_apply', (['self.loss_single', 'cls_scores_1', 'cls_scores_2', 'cls_scores_3', 'kpt_coord_preds_1', 'kpt_coord_preds_2', 'kpt_coord_preds_3', 'box_coord_preds_1', 'box_coord_preds_2', 'box_coord_preds_3', 'labels_list', 'label_weights_list', 'bbox_gt_list', 'bbox_weights_list', 'keypoint_gt_list', 'keypoint_weights_list', 'self.point_strides'], {'num_total_samples': 'num_total_samples'}), '(self.loss_single, cls_scores_1, cls_scores_2, cls_scores_3,\n kpt_coord_preds_1, kpt_coord_preds_2, kpt_coord_preds_3,\n box_coord_preds_1, box_coord_preds_2, box_coord_preds_3, labels_list,\n label_weights_list, bbox_gt_list, bbox_weights_list, keypoint_gt_list,\n keypoint_weights_list, self.point_strides, num_total_samples=\n num_total_samples)\n', (30236, 30595), False, 'from mmdet.core import PointGenerator, multi_apply, multiclass_nms_kp, point_target_kp\n'), ((36740, 36762), 'torch.cat', 'torch.cat', (['mlvl_bboxes'], {}), '(mlvl_bboxes)\n', (36749, 36762), False, 'import torch\n'), ((36783, 36803), 'torch.cat', 'torch.cat', (['mlvl_kpts'], {}), '(mlvl_kpts)\n', (36792, 36803), False, 'import torch\n'), ((37078, 37100), 'torch.cat', 'torch.cat', (['mlvl_scores'], {}), '(mlvl_scores)\n', (37087, 37100), False, 'import torch\n'), ((771, 781), 'numpy.sqrt', 'np.sqrt', (['(9)'], {}), '(9)\n', (778, 781), True, 
'import numpy as np\n'), ((1339, 1350), 'numpy.sqrt', 'np.sqrt', (['(25)'], {}), '(25)\n', (1346, 1350), True, 'import numpy as np\n'), ((1895, 1906), 'numpy.sqrt', 'np.sqrt', (['(49)'], {}), '(49)\n', (1902, 1906), True, 'import numpy as np\n'), ((5361, 5414), 'torch.nn.Conv2d', 'nn.Conv2d', (['keypts_out_dim', 'deform_offset_dim', '(1)', '(1)', '(0)'], {}), '(keypts_out_dim, deform_offset_dim, 1, 1, 0)\n', (5370, 5414), True, 'import torch.nn as nn\n'), ((5579, 5634), 'torch.nn.Conv2d', 'nn.Conv2d', (['(feat_channels * 3)', 'cls_out_channels', '(1)', '(1)', '(0)'], {}), '(feat_channels * 3, cls_out_channels, 1, 1, 0)\n', (5588, 5634), True, 'import torch.nn as nn\n'), ((5811, 5851), 'torch.nn.Conv2d', 'nn.Conv2d', (['(feat_channels * 3)', '(4)', '(1)', '(1)', '(0)'], {}), '(feat_channels * 3, 4, 1, 1, 0)\n', (5820, 5851), True, 'import torch.nn as nn\n'), ((6014, 6067), 'torch.nn.Conv2d', 'nn.Conv2d', (['(feat_channels * 3)', 'keypts_out_dim', '(1)', '(1)', '(0)'], {}), '(feat_channels * 3, keypts_out_dim, 1, 1, 0)\n', (6023, 6067), True, 'import torch.nn as nn\n'), ((6126, 6172), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'feat_channels', '(3)', '(1)', '(1)'], {}), '(in_channels, feat_channels, 3, 1, 1)\n', (6135, 6172), True, 'import torch.nn as nn\n'), ((6217, 6268), 'torch.nn.Conv2d', 'nn.Conv2d', (['feat_channels', 'cls_out_channels', '(1)', '(1)', '(0)'], {}), '(feat_channels, cls_out_channels, 1, 1, 0)\n', (6226, 6268), True, 'import torch.nn as nn\n'), ((6322, 6368), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'feat_channels', '(3)', '(1)', '(1)'], {}), '(in_channels, feat_channels, 3, 1, 1)\n', (6331, 6368), True, 'import torch.nn as nn\n'), ((6420, 6456), 'torch.nn.Conv2d', 'nn.Conv2d', (['feat_channels', '(4)', '(1)', '(1)', '(0)'], {}), '(feat_channels, 4, 1, 1, 0)\n', (6429, 6456), True, 'import torch.nn as nn\n'), ((6503, 6549), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'feat_channels', '(3)', '(1)', '(1)'], {}), '(in_channels, 
feat_channels, 3, 1, 1)\n', (6512, 6549), True, 'import torch.nn as nn\n'), ((6594, 6643), 'torch.nn.Conv2d', 'nn.Conv2d', (['feat_channels', 'keypts_out_dim', '(1)', '(1)', '(0)'], {}), '(feat_channels, keypts_out_dim, 1, 1, 0)\n', (6603, 6643), True, 'import torch.nn as nn\n'), ((6772, 6817), 'mmcv.cnn.normal_init', 'normal_init', (['self.deform_offset_out'], {'std': '(0.01)'}), '(self.deform_offset_out, std=0.01)\n', (6783, 6817), False, 'from mmcv.cnn import normal_init\n'), ((6844, 6880), 'mmcv.cnn.normal_init', 'normal_init', (['self.cls_conv'], {'std': '(0.01)'}), '(self.cls_conv, std=0.01)\n', (6855, 6880), False, 'from mmcv.cnn import normal_init\n'), ((6893, 6936), 'mmcv.cnn.normal_init', 'normal_init', (['self.bbox_param_conv'], {'std': '(0.01)'}), '(self.bbox_param_conv, std=0.01)\n', (6904, 6936), False, 'from mmcv.cnn import normal_init\n'), ((6949, 6985), 'mmcv.cnn.normal_init', 'normal_init', (['self.kpt_conv'], {'std': '(0.01)'}), '(self.kpt_conv, std=0.01)\n', (6960, 6985), False, 'from mmcv.cnn import normal_init\n'), ((9830, 9893), 'torch.cat', 'torch.cat', (['[bbox_left, bbox_up, bbox_right, bbox_bottom]'], {'dim': '(1)'}), '([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1)\n', (9839, 9893), False, 'import torch\n'), ((17237, 17253), 'mmdet.core.PointGenerator', 'PointGenerator', ([], {}), '()\n', (17251, 17253), False, 'from mmdet.core import PointGenerator, multi_apply, multiclass_nms_kp, point_target_kp\n'), ((19249, 19278), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (19260, 19278), False, 'from mmcv.cnn import normal_init\n'), ((19324, 19353), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (19335, 19353), False, 'from mmcv.cnn import normal_init\n'), ((24206, 24229), 'torch.stack', 'torch.stack', (['pts_lvl', '(0)'], {}), '(pts_lvl, 0)\n', (24217, 24229), False, 'import torch\n'), ((35774, 35822), 'torch.cat', 'torch.cat', 
(['[points[:, :2], points[:, :2]]'], {'dim': '(1)'}), '([points[:, :2], points[:, :2]], dim=1)\n', (35783, 35822), False, 'import torch\n'), ((36421, 36458), 'torch.stack', 'torch.stack', (['[x1, y1, x2, y2]'], {'dim': '(-1)'}), '([x1, y1, x2, y2], dim=-1)\n', (36432, 36458), False, 'import torch\n'), ((37284, 37324), 'torch.cat', 'torch.cat', (['[padding, mlvl_scores]'], {'dim': '(1)'}), '([padding, mlvl_scores], dim=1)\n', (37293, 37324), False, 'import torch\n'), ((37388, 37488), 'mmdet.core.multiclass_nms_kp', 'multiclass_nms_kp', (['mlvl_bboxes', 'mlvl_scores', 'mlvl_kpts', 'cfg.score_thr', 'cfg.nms', 'cfg.max_per_img'], {}), '(mlvl_bboxes, mlvl_scores, mlvl_kpts, cfg.score_thr, cfg.\n nms, cfg.max_per_img)\n', (37405, 37488), False, 'from mmdet.core import PointGenerator, multi_apply, multiclass_nms_kp, point_target_kp\n'), ((862, 908), 'numpy.arange', 'np.arange', (['(-self.dcn_pad_3)', '(self.dcn_pad_3 + 1)'], {}), '(-self.dcn_pad_3, self.dcn_pad_3 + 1)\n', (871, 908), True, 'import numpy as np\n'), ((1113, 1159), 'numpy.stack', 'np.stack', (['[dcn_base_y_3, dcn_base_x_3]'], {'axis': '(1)'}), '([dcn_base_y_3, dcn_base_x_3], axis=1)\n', (1121, 1159), True, 'import numpy as np\n'), ((1220, 1251), 'torch.tensor', 'torch.tensor', (['dcn_base_offset_3'], {}), '(dcn_base_offset_3)\n', (1232, 1251), False, 'import torch\n'), ((1431, 1477), 'numpy.arange', 'np.arange', (['(-self.dcn_pad_5)', '(self.dcn_pad_5 + 1)'], {}), '(-self.dcn_pad_5, self.dcn_pad_5 + 1)\n', (1440, 1477), True, 'import numpy as np\n'), ((1682, 1728), 'numpy.stack', 'np.stack', (['[dcn_base_y_5, dcn_base_x_5]'], {'axis': '(1)'}), '([dcn_base_y_5, dcn_base_x_5], axis=1)\n', (1690, 1728), True, 'import numpy as np\n'), ((1789, 1820), 'torch.tensor', 'torch.tensor', (['dcn_base_offset_5'], {}), '(dcn_base_offset_5)\n', (1801, 1820), False, 'import torch\n'), ((1987, 2033), 'numpy.arange', 'np.arange', (['(-self.dcn_pad_7)', '(self.dcn_pad_7 + 1)'], {}), '(-self.dcn_pad_7, self.dcn_pad_7 + 1)\n', 
(1996, 2033), True, 'import numpy as np\n'), ((2238, 2284), 'numpy.stack', 'np.stack', (['[dcn_base_y_7, dcn_base_x_7]'], {'axis': '(1)'}), '([dcn_base_y_7, dcn_base_x_7], axis=1)\n', (2246, 2284), True, 'import numpy as np\n'), ((2345, 2376), 'torch.tensor', 'torch.tensor', (['dcn_base_offset_7'], {}), '(dcn_base_offset_7)\n', (2357, 2376), False, 'import torch\n'), ((10109, 10159), 'torch.std', 'torch.std', (['(pts_y - pts_y_mean)'], {'dim': '(1)', 'keepdim': '(True)'}), '(pts_y - pts_y_mean, dim=1, keepdim=True)\n', (10118, 10159), False, 'import torch\n'), ((10184, 10234), 'torch.std', 'torch.std', (['(pts_x - pts_x_mean)'], {'dim': '(1)', 'keepdim': '(True)'}), '(pts_x - pts_x_mean, dim=1, keepdim=True)\n', (10193, 10234), False, 'import torch\n'), ((10653, 10777), 'torch.cat', 'torch.cat', (['[pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean + half_width,\n pts_y_mean + half_height]'], {'dim': '(1)'}), '([pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean +\n half_width, pts_y_mean + half_height], dim=1)\n', (10662, 10777), False, 'import torch\n'), ((20284, 20316), 'torch.cat', 'torch.cat', (['[pts_x, pts_y]'], {'dim': '(2)'}), '([pts_x, pts_y], dim=2)\n', (20293, 20316), False, 'import torch\n'), ((10529, 10561), 'torch.exp', 'torch.exp', (['moment_width_transfer'], {}), '(moment_width_transfer)\n', (10538, 10561), False, 'import torch\n'), ((10600, 10633), 'torch.exp', 'torch.exp', (['moment_height_transfer'], {}), '(moment_height_transfer)\n', (10609, 10633), False, 'import torch\n'), ((11814, 11951), 'torch.cat', 'torch.cat', (['[bbox_center_x - half_width, bbox_center_y - half_height, bbox_center_x +\n half_width, bbox_center_y + half_height]'], {'dim': '(1)'}), '([bbox_center_x - half_width, bbox_center_y - half_height, \n bbox_center_x + half_width, bbox_center_y + half_height], dim=1)\n', (11823, 11951), False, 'import torch\n'), ((16974, 16988), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (16985, 16988), False, 
'import torch\n'), ((23728, 23771), 'torch.stack', 'torch.stack', (['[x_pts_shift, y_pts_shift]', '(-1)'], {}), '([x_pts_shift, y_pts_shift], -1)\n', (23739, 23771), False, 'import torch\n'), ((35113, 35151), 'torch.cat', 'torch.cat', (['[kpt_pred, pad_ones]'], {'dim': '(2)'}), '([kpt_pred, pad_ones], dim=2)\n', (35122, 35151), False, 'import torch\n'), ((11483, 11521), 'torch.exp', 'torch.exp', (['tranfer_param[:, 0:1, :, :]'], {}), '(tranfer_param[:, 0:1, :, :])\n', (11492, 11521), False, 'import torch\n'), ((11594, 11632), 'torch.exp', 'torch.exp', (['tranfer_param[:, 1:2, :, :]'], {}), '(tranfer_param[:, 1:2, :, :])\n', (11603, 11632), False, 'import torch\n'), ((12259, 12309), 'torch.std', 'torch.std', (['(pts_y - pts_y_mean)'], {'dim': '(1)', 'keepdim': '(True)'}), '(pts_y - pts_y_mean, dim=1, keepdim=True)\n', (12268, 12309), False, 'import torch\n'), ((12334, 12384), 'torch.std', 'torch.std', (['(pts_x - pts_x_mean)'], {'dim': '(1)', 'keepdim': '(True)'}), '(pts_x - pts_x_mean, dim=1, keepdim=True)\n', (12343, 12384), False, 'import torch\n'), ((12822, 12946), 'torch.cat', 'torch.cat', (['[pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean + half_width,\n pts_y_mean + half_height]'], {'dim': '(1)'}), '([pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean +\n half_width, pts_y_mean + half_height], dim=1)\n', (12831, 12946), False, 'import torch\n'), ((22558, 22583), 'numpy.ceil', 'np.ceil', (['(h / point_stride)'], {}), '(h / point_stride)\n', (22565, 22583), True, 'import numpy as np\n'), ((22633, 22658), 'numpy.ceil', 'np.ceil', (['(w / point_stride)'], {}), '(w / point_stride)\n', (22640, 22658), True, 'import numpy as np\n'), ((12687, 12725), 'torch.exp', 'torch.exp', (['tranfer_param[:, 2:3, :, :]'], {}), '(tranfer_param[:, 2:3, :, :])\n', (12696, 12725), False, 'import torch\n'), ((12764, 12802), 'torch.exp', 'torch.exp', (['tranfer_param[:, 3:4, :, :]'], {}), '(tranfer_param[:, 3:4, :, :])\n', (12773, 12802), False, 'import 
torch\n')] |
import logging as log
import os, sys, time
import tensorflow as tf
import numpy as np
import reader
flags = tf.app.flags
# Training hyperparameters.
flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')
flags.DEFINE_integer('max_epochs', 10, 'Maximum number of epochs.')
flags.DEFINE_integer('hidden_dim', 128, 'RNN hidden state size.')
flags.DEFINE_integer('max_time_steps', 20, 'Truncated backprop length.')
flags.DEFINE_integer('vocab_size', 30000, 'Vocabulary size.')
# Data and checkpoint locations.
flags.DEFINE_string('vocab_data', 'vocab.pkl', 'Vocabulary file.')
flags.DEFINE_string('train_data', 'train.shuf.txt', 'Training data.')
flags.DEFINE_string('dev_data', 'dev.txt', 'Validation data.')
flags.DEFINE_string('checkpoint_prefix',
                    '/nfs/topaz/lcheung/models/tf-test/model',
                    'Prefix of checkpoint files.')
# Logging / TensorBoard configuration.
flags.DEFINE_string('run_name',
                    'dyn_rnn',
                    'Run name in tensorboard.')
flags.DEFINE_string('output_mode', 'debug', 'verbose | debug | info')
flags.DEFINE_string('tf_log_dir', '/nfs/topaz/lcheung/tensorboard',
                    'Path to store tensorboard log files.')
FLAGS = flags.FLAGS
# Emit INFO-and-above log records to stderr.
log.basicConfig(stream=sys.stderr, level=log.INFO)
def convert_id_tok(samples, id_tok):
    """Map a sequence of token ids to a space-joined token string."""
    return ' '.join(id_tok[sample] for sample in samples)
class RNN(object):
  """Simple recurrent-network language model built as a TF1 static graph.

  The recurrence h_t = tanh(x_t W_mh + h_{t-1} W_hh + b_h) is unrolled with
  tf.scan over the embedded input window; a softmax over the vocabulary
  predicts the next token at every step.  All hyperparameters are read from
  the module-level FLAGS.  The whole graph is built eagerly in __init__;
  train() then opens a session and slides truncated-BPTT windows over a
  flat token-id stream.
  """

  def __init__(self):
    """Build the full graph: placeholders, parameters, inference, loss
    and optimizer ops."""
    with tf.variable_scope('Input'):
      # Inputs are a sequence of token ids; the target is the same
      # sequence shifted one time step forward.
      self.data_input = tf.placeholder(tf.int32, [ FLAGS.max_time_steps ], 'x')
      self.data_target = tf.placeholder(tf.int32, [ FLAGS.max_time_steps ], 'y')
    with tf.variable_scope('Embedding'):
      # Map token ids (conceptually one-hot) to dense hidden-size vectors.
      self.embedding = tf.get_variable(
          'W_xm', [ FLAGS.vocab_size, FLAGS.hidden_dim ], dtype=tf.float32)
      self.embedded_input = tf.nn.embedding_lookup(
          self.embedding, self.data_input, name='x_m')
    with tf.variable_scope('RNN'):
      # Recurrence parameters.  The initial hidden state is a fixed
      # (non-trainable) zero vector fed to tf.scan as its initializer.
      self.initial_hidden_state = tf.get_variable(
          'h_init', [ 1, FLAGS.hidden_dim ], dtype=tf.float32, trainable=False,
          initializer=tf.zeros_initializer())
      self.input_entry = tf.get_variable(
          'W_mh', [ FLAGS.hidden_dim, FLAGS.hidden_dim ], dtype=tf.float32)
      self.recurrence = tf.get_variable(
          'W_hh', [ FLAGS.hidden_dim, FLAGS.hidden_dim ], dtype=tf.float32)
      self.recurrence_bias = tf.get_variable(
          'b_h', [ 1, FLAGS.hidden_dim ], dtype=tf.float32)
    self.build_inference()
    self.build_loss()
    self.build_optimizer()

  def build_recurrence(self, h_prev, x_m):
    """One scan step: h_t = tanh(x_t W_mh + h_{t-1} W_hh + b_h).

    x_m is a 1-D embedded token; expand_dims turns it into a 1 x H row
    vector so it can be matmul'd against the weight matrices.
    """
    return tf.tanh(tf.matmul(tf.expand_dims(x_m, 0), self.input_entry)
                   + tf.matmul(h_prev, self.recurrence)
                   + self.recurrence_bias)

  def build_inference(self):
    """Unroll the RNN with tf.scan and project hidden states to logits."""
    with tf.variable_scope('Inference'):
      self.hidden_output = tf.scan(self.build_recurrence, self.embedded_input,
                                    initializer=self.initial_hidden_state)
      self.output_exit = tf.get_variable(
          'W_hx', [ FLAGS.hidden_dim, FLAGS.vocab_size ], dtype=tf.float32)
      self.output_exit_bias = tf.get_variable(
          'b_x', [ 1, FLAGS.vocab_size ], dtype=tf.float32)
      # Collapse the per-step (1, hidden) scan outputs into a
      # (time, hidden) matrix before the output projection.
      self.outputs_squashed = tf.reshape(self.hidden_output, [-1, FLAGS.hidden_dim])
      self.logits = tf.matmul(self.outputs_squashed, self.output_exit) \
                    + self.output_exit_bias
      self.token_probs = tf.nn.softmax(self.logits, name='p_ts')
      self.predicted_tokens = tf.argmax(self.token_probs, axis=1,
                                        name='predicteds')

  def build_loss(self):
    """Mean sparse-softmax cross entropy between logits and target ids."""
    with tf.variable_scope('Loss'):
      self.losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
          name='losses',
          labels=self.data_target,
          logits=self.logits)
      self.loss = tf.reduce_mean(self.losses, name='loss')
      tf.summary.scalar("loss_smy", self.loss)
      log.debug('Loss shape; %s' % self.loss.shape)

  def build_optimizer(self):
    """Plain SGD minimizer over the mean cross-entropy loss."""
    with tf.variable_scope('Optimizer'):
      self.optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
      self.minimizer = self.optimizer.minimize(self.loss, name='minimizer')
      # _learning_rate is a private attribute of the TF1 optimizer,
      # exposed here only for the TensorBoard summary.
      tf.summary.scalar("learning_rate", self.optimizer._learning_rate)

  def _load_or_create(self, sess):
    """Restore the latest checkpoint if one exists; otherwise initialise
    all variables and save a fresh checkpoint at global step 0."""
    ckpt = tf.train.get_checkpoint_state(os.path.dirname(FLAGS.checkpoint_prefix))
    self.saver = tf.train.Saver()
    if ckpt and ckpt.model_checkpoint_path:
      self.saver.restore(sess, ckpt.model_checkpoint_path)
      log.debug('Model restored from %s.' % ckpt.model_checkpoint_path)
    else:
      sess.run(tf.global_variables_initializer())
      self.saver.save(sess, FLAGS.checkpoint_prefix, global_step=0)
      log.debug('Initialized new model.')

  def train(self, data, id_tok):
    """Run one pass of truncated BPTT over the flat id stream `data`.

    Windows of FLAGS.max_time_steps ids are fed as input, with the same
    window shifted one step forward as the target.  Predictions are
    logged every 5000 windows; checkpoints and summaries are written at
    every step.
    """
    verbose = FLAGS.output_mode == 'verbose'
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=verbose,
          log_device_placement=verbose,
          gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
      self._load_or_create(sess)
      file_writer = tf.summary.FileWriter(
          os.path.join(FLAGS.tf_log_dir, FLAGS.run_name),
          sess.graph)
      summaries = tf.summary.merge_all()
      window = FLAGS.max_time_steps
      log.info('Starting training...')
      log.debug('Training %s' % tf.trainable_variables())
      cum_loss = 0.0
      for i in range(0, len(data) - window + 1):
        source = data[i : i + window]
        target = data[i + 1 : i + window + 1]
        # Pad ragged tail windows with PAD_ID.  np.pad's third argument
        # is the *mode* string (the original code passed a tuple there,
        # which np.pad rejects); the pad value goes in constant_values.
        if len(source) < window:
          source = np.pad(source, (0, window - len(source)),
                          'constant', constant_values=reader.PAD_ID)
        if len(target) < window:
          target = np.pad(target, (0, window - len(target)),
                          'constant', constant_values=reader.PAD_ID)
        _, loss, summary_output, out = sess.run(
            [ self.minimizer, self.loss, summaries,
              self.predicted_tokens ],
            feed_dict={
              self.data_input : source,
              self.data_target: target
            })
        cum_loss = loss + cum_loss
        if i % 5000 == 0:
          log.debug('Loss %s\n\ttarget: %s\n\tpredicted: %s'
                    % (cum_loss,
                       convert_id_tok(target, id_tok),
                       convert_id_tok(out, id_tok)))
          cum_loss = 0
        # NOTE(review): the checkpoint and summary are written on *every*
        # window, not every 5000 -- confirm this is intended; it is slow.
        log.debug('Saved model checkpoint to %s.' % FLAGS.checkpoint_prefix)
        self.saver.save(sess, FLAGS.checkpoint_prefix, global_step=i)
        file_writer.add_summary(summary_output, global_step=i)
def get_data():
    """Load training and validation id streams plus the vocabulary maps."""
    train_data, tok_id, id_tok = reader.prepare_data(
        FLAGS.train_data, FLAGS.vocab_data, FLAGS.vocab_size)
    dev_data, _, _ = reader.prepare_data(
        FLAGS.dev_data, FLAGS.vocab_data, FLAGS.vocab_size)
    for label, sample in (('Train', train_data), ('Dev', dev_data)):
        log.debug('%s data: %s' % (label, sample[:2]))
    return train_data, dev_data, tok_id, id_tok
def resize_data(data):
    """Flatten a list of per-sentence id arrays into one contiguous stream.

    All sentences are squashed together into a single 1-D array so the
    trainer can slide fixed-size windows across sentence boundaries.
    """
    return np.concatenate(data)
def main(_):
    """tf.app.run entry point: load data, build the model, train once."""
    train_data, _dev, _tok_id, id_tok = get_data()
    flat_stream = resize_data(train_data)
    RNN().train(flat_stream, id_tok)
if __name__ == '__main__':
  # Translate the textual output_mode flag into a logging level before
  # handing control to tf.app.run (which parses flags and calls main).
  if FLAGS.output_mode == 'debug' or FLAGS.output_mode == 'verbose':
    log.getLogger().setLevel(log.DEBUG)
  elif FLAGS.output_mode == 'info':
    log.getLogger().setLevel(log.INFO)
  tf.app.run()
| [
"logging.getLogger",
"logging.debug",
"tensorflow.get_variable",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.scan",
"tensorflow.reduce_mean",
"logging.info",
"tensorflow.GPUOptions",
"tensorflow.app.run",
"tenso... | [((1061, 1111), 'logging.basicConfig', 'log.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'log.INFO'}), '(stream=sys.stderr, level=log.INFO)\n', (1076, 1111), True, 'import logging as log\n'), ((7011, 7084), 'reader.prepare_data', 'reader.prepare_data', (['FLAGS.train_data', 'FLAGS.vocab_data', 'FLAGS.vocab_size'], {}), '(FLAGS.train_data, FLAGS.vocab_data, FLAGS.vocab_size)\n', (7030, 7084), False, 'import reader\n'), ((7110, 7181), 'reader.prepare_data', 'reader.prepare_data', (['FLAGS.dev_data', 'FLAGS.vocab_data', 'FLAGS.vocab_size'], {}), '(FLAGS.dev_data, FLAGS.vocab_data, FLAGS.vocab_size)\n', (7129, 7181), False, 'import reader\n'), ((7191, 7235), 'logging.debug', 'log.debug', (["('Train data: %s' % train_data[:2])"], {}), "('Train data: %s' % train_data[:2])\n", (7200, 7235), True, 'import logging as log\n'), ((7238, 7278), 'logging.debug', 'log.debug', (["('Dev data: %s' % dev_data[:2])"], {}), "('Dev data: %s' % dev_data[:2])\n", (7247, 7278), True, 'import logging as log\n'), ((7408, 7428), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (7422, 7428), True, 'import numpy as np\n'), ((7818, 7830), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (7828, 7830), True, 'import tensorflow as tf\n'), ((4856, 4872), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4870, 4872), True, 'import tensorflow as tf\n'), ((1328, 1354), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Input"""'], {}), "('Input')\n", (1345, 1354), True, 'import tensorflow as tf\n'), ((1457, 1510), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[FLAGS.max_time_steps]', '"""x"""'], {}), "(tf.int32, [FLAGS.max_time_steps], 'x')\n", (1471, 1510), True, 'import tensorflow as tf\n'), ((1538, 1591), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[FLAGS.max_time_steps]', '"""y"""'], {}), "(tf.int32, [FLAGS.max_time_steps], 'y')\n", (1552, 1591), True, 'import tensorflow as tf\n'), ((1603, 1633), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""Embedding"""'], {}), "('Embedding')\n", (1620, 1633), True, 'import tensorflow as tf\n'), ((1710, 1789), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_xm"""', '[FLAGS.vocab_size, FLAGS.hidden_dim]'], {'dtype': 'tf.float32'}), "('W_xm', [FLAGS.vocab_size, FLAGS.hidden_dim], dtype=tf.float32)\n", (1725, 1789), True, 'import tensorflow as tf\n'), ((1831, 1898), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.embedding', 'self.data_input'], {'name': '"""x_m"""'}), "(self.embedding, self.data_input, name='x_m')\n", (1853, 1898), True, 'import tensorflow as tf\n'), ((1920, 1944), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""RNN"""'], {}), "('RNN')\n", (1937, 1944), True, 'import tensorflow as tf\n'), ((2258, 2337), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_mh"""', '[FLAGS.hidden_dim, FLAGS.hidden_dim]'], {'dtype': 'tf.float32'}), "('W_mh', [FLAGS.hidden_dim, FLAGS.hidden_dim], dtype=tf.float32)\n", (2273, 2337), True, 'import tensorflow as tf\n'), ((2375, 2454), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_hh"""', '[FLAGS.hidden_dim, FLAGS.hidden_dim]'], {'dtype': 'tf.float32'}), "('W_hh', [FLAGS.hidden_dim, FLAGS.hidden_dim], dtype=tf.float32)\n", (2390, 2454), True, 'import tensorflow as tf\n'), ((2497, 2560), 'tensorflow.get_variable', 'tf.get_variable', (['"""b_h"""', '[1, FLAGS.hidden_dim]'], {'dtype': 'tf.float32'}), "('b_h', [1, FLAGS.hidden_dim], dtype=tf.float32)\n", (2512, 2560), True, 'import tensorflow as tf\n'), ((2957, 2987), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Inference"""'], {}), "('Inference')\n", (2974, 2987), True, 'import tensorflow as tf\n'), ((3016, 3111), 'tensorflow.scan', 'tf.scan', (['self.build_recurrence', 'self.embedded_input'], {'initializer': 'self.initial_hidden_state'}), '(self.build_recurrence, self.embedded_input, initializer=self.\n initial_hidden_state)\n', (3023, 3111), True, 'import tensorflow as 
tf\n'), ((3144, 3223), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_hx"""', '[FLAGS.hidden_dim, FLAGS.vocab_size]'], {'dtype': 'tf.float32'}), "('W_hx', [FLAGS.hidden_dim, FLAGS.vocab_size], dtype=tf.float32)\n", (3159, 3223), True, 'import tensorflow as tf\n'), ((3267, 3330), 'tensorflow.get_variable', 'tf.get_variable', (['"""b_x"""', '[1, FLAGS.vocab_size]'], {'dtype': 'tf.float32'}), "('b_x', [1, FLAGS.vocab_size], dtype=tf.float32)\n", (3282, 3330), True, 'import tensorflow as tf\n'), ((3379, 3433), 'tensorflow.reshape', 'tf.reshape', (['self.hidden_output', '[-1, FLAGS.hidden_dim]'], {}), '(self.hidden_output, [-1, FLAGS.hidden_dim])\n', (3389, 3433), True, 'import tensorflow as tf\n'), ((3575, 3614), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits'], {'name': '"""p_ts"""'}), "(self.logits, name='p_ts')\n", (3588, 3614), True, 'import tensorflow as tf\n'), ((3646, 3700), 'tensorflow.argmax', 'tf.argmax', (['self.token_probs'], {'axis': '(1)', 'name': '"""predicteds"""'}), "(self.token_probs, axis=1, name='predicteds')\n", (3655, 3700), True, 'import tensorflow as tf\n'), ((3745, 3770), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Loss"""'], {}), "('Loss')\n", (3762, 3770), True, 'import tensorflow as tf\n'), ((4008, 4119), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'name': '"""losses"""', 'labels': 'self.data_target', 'logits': 'self.logits'}), "(name='losses', labels=self.\n data_target, logits=self.logits)\n", (4054, 4119), True, 'import tensorflow as tf\n'), ((4164, 4204), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.losses'], {'name': '"""loss"""'}), "(self.losses, name='loss')\n", (4178, 4204), True, 'import tensorflow as tf\n'), ((4211, 4251), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_smy"""', 'self.loss'], {}), "('loss_smy', self.loss)\n", (4228, 4251), True, 'import tensorflow as tf\n'), ((4258, 4303), 'logging.debug', 
'log.debug', (["('Loss shape; %s' % self.loss.shape)"], {}), "('Loss shape; %s' % self.loss.shape)\n", (4267, 4303), True, 'import logging as log\n'), ((4397, 4427), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Optimizer"""'], {}), "('Optimizer')\n", (4414, 4427), True, 'import tensorflow as tf\n'), ((4516, 4570), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['FLAGS.learning_rate'], {}), '(FLAGS.learning_rate)\n', (4549, 4570), True, 'import tensorflow as tf\n'), ((4654, 4719), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.optimizer._learning_rate'], {}), "('learning_rate', self.optimizer._learning_rate)\n", (4671, 4719), True, 'import tensorflow as tf\n'), ((4797, 4837), 'os.path.dirname', 'os.path.dirname', (['FLAGS.checkpoint_prefix'], {}), '(FLAGS.checkpoint_prefix)\n', (4812, 4837), False, 'import os, sys, time\n'), ((4983, 5048), 'logging.debug', 'log.debug', (["('Model restored from %s.' % ckpt.model_checkpoint_path)"], {}), "('Model restored from %s.' 
% ckpt.model_checkpoint_path)\n", (4992, 5048), True, 'import logging as log\n'), ((5183, 5218), 'logging.debug', 'log.debug', (['"""Initialized new model."""'], {}), "('Initialized new model.')\n", (5192, 5218), True, 'import logging as log\n'), ((5671, 5693), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5691, 5693), True, 'import tensorflow as tf\n'), ((5737, 5769), 'logging.info', 'log.info', (['"""Starting training..."""'], {}), "('Starting training...')\n", (5745, 5769), True, 'import logging as log\n'), ((3454, 3504), 'tensorflow.matmul', 'tf.matmul', (['self.outputs_squashed', 'self.output_exit'], {}), '(self.outputs_squashed, self.output_exit)\n', (3463, 3504), True, 'import tensorflow as tf\n'), ((5074, 5107), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5105, 5107), True, 'import tensorflow as tf\n'), ((5583, 5629), 'os.path.join', 'os.path.join', (['FLAGS.tf_log_dir', 'FLAGS.run_name'], {}), '(FLAGS.tf_log_dir, FLAGS.run_name)\n', (5595, 5629), False, 'import os, sys, time\n'), ((7705, 7720), 'logging.getLogger', 'log.getLogger', ([], {}), '()\n', (7718, 7720), True, 'import logging as log\n'), ((2209, 2231), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (2229, 2231), True, 'import tensorflow as tf\n'), ((2842, 2876), 'tensorflow.matmul', 'tf.matmul', (['h_prev', 'self.recurrence'], {}), '(h_prev, self.recurrence)\n', (2851, 2876), True, 'import tensorflow as tf\n'), ((5802, 5826), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5824, 5826), True, 'import tensorflow as tf\n'), ((6757, 6825), 'logging.debug', 'log.debug', (["('Saved model checkpoint to %s.' % FLAGS.checkpoint_prefix)"], {}), "('Saved model checkpoint to %s.' 
% FLAGS.checkpoint_prefix)\n", (6766, 6825), True, 'import logging as log\n'), ((7781, 7796), 'logging.getLogger', 'log.getLogger', ([], {}), '()\n', (7794, 7796), True, 'import logging as log\n'), ((2781, 2803), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_m', '(0)'], {}), '(x_m, 0)\n', (2795, 2803), True, 'import tensorflow as tf\n'), ((5452, 5484), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (5465, 5484), True, 'import tensorflow as tf\n')] |
# Evaluation
## Dataset
# (dataset description to be filled in)
### Stations
#### Target
# '21057060': PAICOL
target = '21057060-WL_CAL_AVG'
#### Predictors
# PAICOL
preds_cod = ['21017060', '21017040' ,'21087080', '21057050', '21057060']
# Removed PTE balseadero (data only up to 2015) 21047010
### Variables
# --NOT-- PR_CAL_ACU -> hourly accumulated precipitation
# WL_CAL_AVG -> hourly average water level
# PR_CAL_ITS -> hourly precipitation intensity
siglas_vars = ["WL_CAL_AVG", "PR_CAL_ITS"]
#### Lags
from emjav.emjav_data.tools import cargar_valores_observados
from datetime import datetime, timedelta
import pandas as pd
from pandas import Series
import numpy as np
# Observation window used when pulling the raw series.
fecha_inicial = datetime(2015, 1, 1, 0, 0)
fecha_final = datetime(2018, 1, 1, 0, 0)
# NOTE(review): range(1, max_lag) below produces lags 1..max_lag-1, i.e.
# only 9 lags for max_lag = 10 -- confirm whether lag 10 was intended.
max_lag = 10
series = {}
base_keys = []
# Fetch the observed series for every (station, variable) pair.
for est_code in preds_cod:
    for sigla in siglas_vars:
        key = "{}-{}".format(est_code, sigla)
        base_keys.append(key)
        series[key] = cargar_valores_observados(est_code, sigla, fecha_inicial, fecha_final, return_type="Series")
# Build lagged copies of every series; the unlagged series is kept only
# for the target, so the predictors enter exclusively through their lags.
for b_key in base_keys:
    for i in range(1,max_lag):
        # First shifts
        series["{}-Lag{}".format(b_key,i)] = series[b_key].shift(i)
    if b_key != target:
        del series[b_key]
# Then dropna: shifting introduced NaNs at the start of every lag column.
predictor_data = pd.DataFrame(series)
cleaned_data = predictor_data.dropna()
# Reorder columns so the target ends up last: overwrite the target's slot
# with the (formerly) last column name, then drop the duplicate tail entry.
ordered_columns = list(cleaned_data.columns)
target_col_index = cleaned_data.columns.get_loc(target)
ordered_columns[target_col_index] = ordered_columns[-1] # Replace by last existing column
ordered_columns.pop() # And removed not used!
# Constant bias column of ones appended next to the predictors.
cleaned_data['Ones'] = Series(np.ones(cleaned_data.shape[0]), index=cleaned_data.index)
ordered_data = cleaned_data[ordered_columns+['Ones',target]].sort_index(ascending=False) # Recent data is most relevant
# Keep only the most recent `data_window` rows (index is sorted descending).
data_window = 300
ordered_data = ordered_data[:data_window]
ordered_data.to_csv("/tmp/pronos_ordered_cleaned.csv", index=False)
| [
"datetime.datetime",
"emjav.emjav_data.tools.cargar_valores_observados",
"numpy.ones",
"pandas.DataFrame"
] | [((663, 689), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(0)', '(0)'], {}), '(2015, 1, 1, 0, 0)\n', (671, 689), False, 'from datetime import datetime, timedelta\n'), ((704, 730), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)', '(0)', '(0)'], {}), '(2018, 1, 1, 0, 0)\n', (712, 730), False, 'from datetime import datetime, timedelta\n'), ((1250, 1270), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (1262, 1270), True, 'import pandas as pd\n'), ((1596, 1626), 'numpy.ones', 'np.ones', (['cleaned_data.shape[0]'], {}), '(cleaned_data.shape[0])\n', (1603, 1626), True, 'import numpy as np\n'), ((926, 1022), 'emjav.emjav_data.tools.cargar_valores_observados', 'cargar_valores_observados', (['est_code', 'sigla', 'fecha_inicial', 'fecha_final'], {'return_type': '"""Series"""'}), "(est_code, sigla, fecha_inicial, fecha_final,\n return_type='Series')\n", (951, 1022), False, 'from emjav.emjav_data.tools import cargar_valores_observados\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.