code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
"""
Multirunner example
<NAME>
Lawrence Berkeley National Lab
March 2019
"""
import sys, os, time
import multiprocessing as mp
import argparse
import glob
import astropy.io.fits as fits
from astropy.table import Table, vstack
import numpy as np
import matplotlib.pyplot as plt
from ci_reduce.common import expid_from_filename
from ci_reduce.io import realtime_raw_read
# Command-line interface for the directory-watching reducer.
parser = argparse.ArgumentParser(usage = "{prog} [options]")
parser.add_argument("-i", "--indir", type=str, help="input directory")
parser.add_argument("-n", "--numworkers", type=int, default=1, help="number of workers")
parser.add_argument("-w", "--waittime", type=int, default=5, help="wait time between directory checks")
args = parser.parse_args()
#- Create communication queue to pass files to workers
q = mp.Queue()
# Accumulated CCD tables from every exposure processed so far.
# NOTE(review): each worker process gets its own copy of this global;
# it is not shared between workers -- confirm that is intended.
all_ccds = None
def _seeing_plot():
    """Plot per-exposure median FWHM for all CCD tables seen so far.

    Reads the module-level ``all_ccds`` table, computes the median
    'fwhm_asec' per EXPID (skipping exposures where no row has at least
    5 sources contributing to the shape fit), and saves a scatter plot
    named after the highest exposure ID processed so far.
    """
    global all_ccds

    # Fix: savefig below raised FileNotFoundError on a fresh checkout if
    # the output directory did not already exist.
    os.makedirs('seeing_plots', exist_ok=True)

    expid_u = np.unique(all_ccds['EXPID'])
    seeing = []
    expids = []
    for expid in expid_u:
        # Require a reasonable number of sources before trusting the shape fit.
        keep = (all_ccds['EXPID'] == expid) & (all_ccds['n_sources_for_shape'] >= 5)
        if np.sum(keep) == 0:
            continue
        expids.append(expid)
        seeing.append(np.median(all_ccds[keep]['fwhm_asec']))
    seeing = np.array(seeing)
    expids = np.array(expids)

    plt.scatter(expids, seeing)
    ymin = 0
    ymax = 2
    # Exposures worse than the y-axis limit are re-drawn as up-triangles
    # pinned just below the top of the plot so they remain visible.
    plt.scatter(expids[seeing > ymax], seeing[seeing > ymax]*0.0 + 1.95, marker='^')
    plt.xticks(rotation='vertical')
    plt.ylim((ymin, ymax))
    plt.xlabel('EXPID')
    plt.ylabel('FWHM (asec)')
    plt.savefig('seeing_plots/seeing-' + str(max(expid_u)).zfill(8) + '.png', dpi=200, bbox_inches='tight')
    plt.cla()
def _read_ccds_1exp(fname):
    """Read the CCDs table for one exposure, append it to the running
    ``all_ccds`` table, and refresh the seeing plot."""
    global all_ccds
    print('READING ' + fname)
    # Project helper -- presumably safe to call while the raw file is
    # still being written; TODO confirm against ci_reduce.io.
    hdul = realtime_raw_read(fname)
    ccds = Table(hdul[1].data)
    # Tag every row with the exposure id parsed from the filename.
    ccds['EXPID'] = expid_from_filename(fname)
    if all_ccds is None:
        all_ccds = ccds
    else:
        all_ccds = vstack([all_ccds, ccds])
    print('ccds length : ', len(all_ccds))
    _seeing_plot()
#- Function to run for each worker.
#- Listens on Queue q for filenames to process.
def run(workerid, q):
    """Worker loop: block on the queue and reduce each filename received.

    Runs forever; the blocking q.get keeps the worker idle until the
    server enqueues a new file.
    """
    print('Worker {} ready to go'.format(workerid))
    while True:
        fname = q.get(block=True)
        print('Worker {} processing {}'.format(workerid, fname))
        sys.stdout.flush()
        #- Reduce this exposure (reads the CCDs table, updates the plot)
        _read_ccds_1exp(fname)
        print('Worker {} done with {}'.format(workerid, fname))
        sys.stdout.flush()
#- Start workers
for i in range(args.numworkers):
    p = mp.Process(target=run, args=(i, q))
    # NOTE(review): workers are not daemonized, so they outlive a
    # Ctrl-C of this server loop -- confirm that is intended.
    p.start()
#- Track what files have already been added to queue.
#- TODO: Upon startup, this could compare against files in output dir
#- and only load input files haven't already been processed.
known_files = set()
#- Periodically check for any new files that may have appeared and add them
#- to the queue for a worker to process.
glob_pattern = os.path.join(args.indir, '*/*_ccds.fits')
while(True):
    flist = glob.glob(glob_pattern)
    # Sort so new files are queued in filename (exposure) order.
    flist.sort()
    for filename in flist:
        if filename not in known_files:
            print('Server putting {} in the queue'.format(filename))
            sys.stdout.flush()
            q.put(filename)
            known_files.add(filename)
    time.sleep(args.waittime)
| [
"numpy.sum",
"argparse.ArgumentParser",
"ci_reduce.common.expid_from_filename",
"sys.stdout.flush",
"multiprocessing.Queue",
"glob.glob",
"os.path.join",
"numpy.unique",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylim",
"numpy.median",
"ci_reduce.io.realtime_raw_... | [((404, 453), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""{prog} [options]"""'}), "(usage='{prog} [options]')\n", (427, 453), False, 'import argparse\n'), ((809, 819), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (817, 819), True, 'import multiprocessing as mp\n'), ((2979, 3020), 'os.path.join', 'os.path.join', (['args.indir', '"""*/*_ccds.fits"""'], {}), "(args.indir, '*/*_ccds.fits')\n", (2991, 3020), False, 'import sys, os, time\n'), ((892, 920), 'numpy.unique', 'np.unique', (["all_ccds['EXPID']"], {}), "(all_ccds['EXPID'])\n", (901, 920), True, 'import numpy as np\n'), ((1223, 1239), 'numpy.array', 'np.array', (['seeing'], {}), '(seeing)\n', (1231, 1239), True, 'import numpy as np\n'), ((1253, 1269), 'numpy.array', 'np.array', (['expids'], {}), '(expids)\n', (1261, 1269), True, 'import numpy as np\n'), ((1279, 1306), 'matplotlib.pyplot.scatter', 'plt.scatter', (['expids', 'seeing'], {}), '(expids, seeing)\n', (1290, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1423), 'matplotlib.pyplot.scatter', 'plt.scatter', (['expids[seeing > ymax]', '(seeing[seeing > ymax] * 0.0 + 1.95)'], {'marker': '"""^"""'}), "(expids[seeing > ymax], seeing[seeing > ymax] * 0.0 + 1.95,\n marker='^')\n", (1348, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1453), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (1432, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1480), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin, ymax)'], {}), '((ymin, ymax))\n', (1466, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1485, 1504), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""EXPID"""'], {}), "('EXPID')\n", (1495, 1504), True, 'import matplotlib.pyplot as plt\n'), ((1509, 1534), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FWHM (asec)"""'], {}), "('FWHM (asec)')\n", (1519, 1534), True, 'import matplotlib.pyplot as plt\n'), 
((1647, 1656), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1654, 1656), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1779), 'ci_reduce.io.realtime_raw_read', 'realtime_raw_read', (['fname'], {}), '(fname)\n', (1772, 1779), False, 'from ci_reduce.io import realtime_raw_read\n'), ((1791, 1810), 'astropy.table.Table', 'Table', (['hdul[1].data'], {}), '(hdul[1].data)\n', (1796, 1810), False, 'from astropy.table import Table, vstack\n'), ((1831, 1857), 'ci_reduce.common.expid_from_filename', 'expid_from_filename', (['fname'], {}), '(fname)\n', (1850, 1857), False, 'from ci_reduce.common import expid_from_filename\n'), ((2590, 2625), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'run', 'args': '(i, q)'}), '(target=run, args=(i, q))\n', (2600, 2625), True, 'import multiprocessing as mp\n'), ((3046, 3069), 'glob.glob', 'glob.glob', (['glob_pattern'], {}), '(glob_pattern)\n', (3055, 3069), False, 'import glob\n'), ((3325, 3350), 'time.sleep', 'time.sleep', (['args.waittime'], {}), '(args.waittime)\n', (3335, 3350), False, 'import sys, os, time\n'), ((1936, 1960), 'astropy.table.vstack', 'vstack', (['[all_ccds, ccds]'], {}), '([all_ccds, ccds])\n', (1942, 1960), False, 'from astropy.table import Table, vstack\n'), ((2316, 2334), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2332, 2334), False, 'import sys, os, time\n'), ((2512, 2530), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2528, 2530), False, 'import sys, os, time\n'), ((1078, 1090), 'numpy.sum', 'np.sum', (['keep'], {}), '(keep)\n', (1084, 1090), True, 'import numpy as np\n'), ((1169, 1207), 'numpy.median', 'np.median', (["all_ccds[keep]['fwhm_asec']"], {}), "(all_ccds[keep]['fwhm_asec'])\n", (1178, 1207), True, 'import numpy as np\n'), ((3235, 3253), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3251, 3253), False, 'import sys, os, time\n')] |
import setuptools
import setuptools.command.build_ext
import ABXpy
class build_ext(setuptools.command.build_ext.build_ext):
    """Custom build_ext that defers the numpy include-path lookup until
    build time, so that ``setup_requires`` can install numpy first."""
    def finalize_options(self):
        setuptools.command.build_ext.build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
setuptools.setup(
    name='ABXpy',
    version=ABXpy.version,
    author='<NAME>',
    description='ABX discrimination task',
    long_description=open('README.rst').read(),
    url='https://github.com/bootphon/ABXpy',
    license='LICENSE.txt',
    packages=setuptools.find_packages(exclude='test'),
    # needed for cython/setuptools, see
    # http://docs.cython.org/en/latest/src/quickstart/build.html
    zip_safe=False,
    setup_requires=[
        'editdistance',
        'cython',
        'setuptools>=18.0',
        'numpy>=1.9.0',
        'pytest-runner'
    ],
    install_requires=[
        'h5py >= 2.2.1',
        'numpy >= 1.8.0',
        'pandas >= 0.13.1',
        'scipy >= 0.13.0',
        'tables',
    ],
    tests_require=[
        'h5features',
        'pytest>=2.6',
        'pytest-cov'
    ],
    # Cython DTW extension, compiled through the numpy-aware build_ext above.
    ext_modules=[setuptools.Extension(
        'ABXpy.distances.metrics.dtw',
        sources=['ABXpy/distances/metrics/dtw/dtw.pyx'],
        extra_compile_args=['-O3'])],
    cmdclass={'build_ext': build_ext},
    entry_points={'console_scripts': [
        'abx-task = ABXpy.task:main',
        'abx-distance = ABXpy.distance:main',
        'abx-analyze = ABXpy.analyze:main',
        'abx-score = ABXpy.score:main',
    ]}
)
| [
"setuptools.Extension",
"numpy.get_include",
"setuptools.command.build_ext.build_ext.finalize_options",
"setuptools.find_packages"
] | [((166, 227), 'setuptools.command.build_ext.build_ext.finalize_options', 'setuptools.command.build_ext.build_ext.finalize_options', (['self'], {}), '(self)\n', (221, 227), False, 'import setuptools\n'), ((683, 723), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': '"""test"""'}), "(exclude='test')\n", (707, 723), False, 'import setuptools\n'), ((399, 418), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (416, 418), False, 'import numpy\n'), ((1265, 1398), 'setuptools.Extension', 'setuptools.Extension', (['"""ABXpy.distances.metrics.dtw"""'], {'sources': "['ABXpy/distances/metrics/dtw/dtw.pyx']", 'extra_compile_args': "['-O3']"}), "('ABXpy.distances.metrics.dtw', sources=[\n 'ABXpy/distances/metrics/dtw/dtw.pyx'], extra_compile_args=['-O3'])\n", (1285, 1398), False, 'import setuptools\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
class Network(object):
def __init__(self, hidden_size, input_size = 256, output_size = 10, std = 1e-4):
self.params = {}
self.params['W1'] = std*np.random.randn(input_size,hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std*np.random.randn(hidden_size,output_size)
self.params['b2'] = np.zeros(output_size)
return
    def forward_pass(self, X, y = None, wd_decay = 0.0):
        """Run the network on a batch X of shape (N, input_size).

        If y is None, returns the argmax class prediction per row;
        otherwise returns the L2-regularized mean cross-entropy loss.
        Side effect: caches hidden_out / class_out / softmax_out on self,
        which back_prop reuses -- call forward_pass before back_prop.
        """
        loss = None
        predict = None
        # ReLU via clip: max(0, X @ W1 + b1)
        self.hidden_out = np.clip(np.dot(X,self.params['W1'])+self.params['b1'],0,np.inf)
        self.class_out = np.dot(self.hidden_out,self.params['W2'])+self.params['b2']
        # NOTE(review): softmax without subtracting the row max, so large
        # logits can overflow np.exp -- confirm inputs/weights stay small.
        self.softmax_out = np.exp(self.class_out)/np.sum(np.exp(self.class_out),axis=-1).reshape(X.shape[0],1)
        if y is None:
            predict = np.argmax(self.softmax_out,axis=-1)
            return predict
        else:
            # Mean negative log-likelihood of the true classes, plus
            # wd_decay/2 * (||W1||^2 + ||W2||^2) weight decay.
            loss = np.mean(-np.log(self.softmax_out[range(len(self.softmax_out)),y])) + wd_decay*(np.sum(self.params['W1']**2)+np.sum(self.params['W2']**2))/2
            return loss
    def back_prop(self, X, y, wd_decay = 0.0):
        """Analytic gradients of the loss w.r.t. all parameters.

        Relies on hidden_out / softmax_out cached by a preceding
        forward_pass call on the same batch X.
        """
        grads = {}
        #grads should contain grads['W1'] grads['b1'] grads['W2'] grads['b2']
        # One-hot encoding of the labels; (softmax_out - delta) is the
        # gradient of the cross-entropy loss w.r.t. the logits.
        delta = np.eye(self.params['b2'].shape[-1])[y]
        # The (X @ W1 + b1 > 0) mask is the ReLU derivative.
        grads['W1'] = (1/X.shape[0])*np.dot(X.T, ((np.dot(X, self.params['W1']) + self.params['b1']) > 0) * np.dot((self.softmax_out - delta), self.params['W2'].T)) + wd_decay * self.params['W1']
        grads['b1'] = np.mean(((np.dot(X, self.params['W1']) + self.params['b1']) > 0) * np.dot((self.softmax_out - delta), self.params['W2'].T), axis=0)
        grads['W2'] = (1/X.shape[0])*np.dot(self.hidden_out.T, (self.softmax_out - delta)) + wd_decay * self.params['W2']
        grads['b2'] = np.mean(self.softmax_out - delta, axis=0)
        return grads
    def numerical_gradient(self, X, y, wd_decay = 0.0, delta = 1e-6):
        """Central-difference numerical gradient of the loss, for checking
        back_prop. Perturbs each parameter entry in place by +/- delta and
        restores it afterwards, so self.params is unchanged on return.
        """
        grads = {}
        for param_name in self.params:
            grads[param_name] = np.zeros(self.params[param_name].shape)
            itx = np.nditer(self.params[param_name], flags=['multi_index'], op_flags=['readwrite'])
            while not itx.finished:
                idx = itx.multi_index
                #This part will iterate for every params
                #You can use self.parmas[param_name][idx] and grads[param_name][idx] to access or modify params and grads
                self.params[param_name][idx]+=delta
                f1=self.forward_pass(X,y,wd_decay)
                self.params[param_name][idx]-=2*delta
                f2=self.forward_pass(X,y,wd_decay)
                # Central difference: (f(p + d) - f(p - d)) / (2 d)
                grads[param_name][idx]=(f1-f2)/2/delta
                # Restore the original parameter value.
                self.params[param_name][idx]+=delta
                itx.iternext()
        return grads
def get_acc(self, X, y):
pred = self.forward_pass(X)
return np.mean(pred == y)
    def train(self, X, y, X_val, y_val,
              learning_rate=0,
              momentum=0, do_early_stopping=False, alpha = 0,
              wd_decay=0, num_iters=10,
              batch_size=4, verbose=False, print_every=10):
        """SGD-with-momentum training loop.

        alpha selects the learning-rate schedule: 0 constant, 1 step decay
        at epochs 30/60/90, 2 linear decay, 3 cosine decay. Returns a dict
        of training/validation loss and accuracy histories.

        NOTE(review): `best_params = self.params` binds the same dict (and
        `+=` mutates the ndarrays in place), so early stopping never
        restores an earlier snapshot -- a deep copy would be needed.
        """
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train / batch_size, 1)
        loss_history = []
        acc_history = []
        val_acc_history = []
        val_loss_history = []
        velocity={}
        for param_name in self.params:
            velocity[param_name]=np.zeros(self.params[param_name].shape)
        best_acc=0
        best_params=self.params
        for it in range(num_iters):
            #Learning rate decay
            if alpha==0:
                learning_rate_a=learning_rate
            if alpha==1:
                if it<=30*iterations_per_epoch:
                    learning_rate_a=learning_rate
                elif it<=60*iterations_per_epoch:
                    learning_rate_a=0.5*learning_rate
                elif it<=90*iterations_per_epoch:
                    learning_rate_a=0.1*learning_rate
                else:
                    learning_rate_a=0.01*learning_rate
            elif alpha==2:
                learning_rate_a=learning_rate*(1-it/num_iters)
            elif alpha==3:
                learning_rate_a=0.5*learning_rate*(1+math.cos(math.pi*it/num_iters))
            # Sample a minibatch (with replacement).
            index=np.random.choice(num_train,batch_size)
            X_batch = X[index]
            y_batch = y[index]
            loss = self.forward_pass(X_batch,y_batch,wd_decay=wd_decay)
            grads = self.back_prop(X_batch,y_batch,wd_decay=wd_decay)
            # Momentum update, applied in place to each parameter array.
            for param_name in self.params:
                velocity[param_name]=momentum*velocity[param_name]-learning_rate_a*grads[param_name]
                self.params[param_name]+=velocity[param_name]
            val_loss = self.forward_pass(X_val,y_val,wd_decay=wd_decay)
            loss_history.append(loss)
            val_loss_history.append(val_loss)
            if verbose and it % print_every == 0:
                print('iteration %d / %d: training loss %f val loss: %f' % (it, num_iters, loss, val_loss))
            if it % iterations_per_epoch == 0:
                train_acc = self.get_acc(X_batch, y_batch)
                val_acc = self.get_acc(X_val, y_val)
                acc_history.append(train_acc)
                val_acc_history.append(val_acc)
            if do_early_stopping:
                if it % iterations_per_epoch == 0:
                    # Stop if validation accuracy dropped well below the best seen.
                    if best_acc-val_acc>0.1:
                        self.params=best_params
                        break
                    else:
                        if val_acc>best_acc:
                            best_acc=val_acc
                            best_params=self.params
        return {
            'loss_history': loss_history,
            'val_loss_history': val_loss_history,
            'acc_history': acc_history,
            'val_acc_history': val_acc_history,
} | [
"numpy.sum",
"numpy.eye",
"numpy.random.randn",
"numpy.argmax",
"numpy.nditer",
"numpy.zeros",
"numpy.mean",
"numpy.exp",
"math.cos",
"numpy.random.choice",
"numpy.dot"
] | [((320, 341), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (328, 341), True, 'import numpy as np\n'), ((443, 464), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (451, 464), True, 'import numpy as np\n'), ((1958, 1999), 'numpy.mean', 'np.mean', (['(self.softmax_out - delta)'], {'axis': '(0)'}), '(self.softmax_out - delta, axis=0)\n', (1965, 1999), True, 'import numpy as np\n'), ((3069, 3087), 'numpy.mean', 'np.mean', (['(pred == y)'], {}), '(pred == y)\n', (3076, 3087), True, 'import numpy as np\n'), ((252, 292), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (267, 292), True, 'import numpy as np\n'), ((374, 415), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (389, 415), True, 'import numpy as np\n'), ((723, 765), 'numpy.dot', 'np.dot', (['self.hidden_out', "self.params['W2']"], {}), "(self.hidden_out, self.params['W2'])\n", (729, 765), True, 'import numpy as np\n'), ((810, 832), 'numpy.exp', 'np.exp', (['self.class_out'], {}), '(self.class_out)\n', (816, 832), True, 'import numpy as np\n'), ((948, 984), 'numpy.argmax', 'np.argmax', (['self.softmax_out'], {'axis': '(-1)'}), '(self.softmax_out, axis=-1)\n', (957, 984), True, 'import numpy as np\n'), ((1416, 1451), 'numpy.eye', 'np.eye', (["self.params['b2'].shape[-1]"], {}), "(self.params['b2'].shape[-1])\n", (1422, 1451), True, 'import numpy as np\n'), ((2205, 2244), 'numpy.zeros', 'np.zeros', (['self.params[param_name].shape'], {}), '(self.params[param_name].shape)\n', (2213, 2244), True, 'import numpy as np\n'), ((2263, 2349), 'numpy.nditer', 'np.nditer', (['self.params[param_name]'], {'flags': "['multi_index']", 'op_flags': "['readwrite']"}), "(self.params[param_name], flags=['multi_index'], op_flags=[\n 'readwrite'])\n", (2272, 2349), True, 'import numpy as np\n'), ((3641, 3680), 'numpy.zeros', 'np.zeros', 
(['self.params[param_name].shape'], {}), '(self.params[param_name].shape)\n', (3649, 3680), True, 'import numpy as np\n'), ((4556, 4595), 'numpy.random.choice', 'np.random.choice', (['num_train', 'batch_size'], {}), '(num_train, batch_size)\n', (4572, 4595), True, 'import numpy as np\n'), ((642, 670), 'numpy.dot', 'np.dot', (['X', "self.params['W1']"], {}), "(X, self.params['W1'])\n", (648, 670), True, 'import numpy as np\n'), ((1749, 1802), 'numpy.dot', 'np.dot', (['(self.softmax_out - delta)', "self.params['W2'].T"], {}), "(self.softmax_out - delta, self.params['W2'].T)\n", (1755, 1802), True, 'import numpy as np\n'), ((1851, 1902), 'numpy.dot', 'np.dot', (['self.hidden_out.T', '(self.softmax_out - delta)'], {}), '(self.hidden_out.T, self.softmax_out - delta)\n', (1857, 1902), True, 'import numpy as np\n'), ((840, 862), 'numpy.exp', 'np.exp', (['self.class_out'], {}), '(self.class_out)\n', (846, 862), True, 'import numpy as np\n'), ((1572, 1625), 'numpy.dot', 'np.dot', (['(self.softmax_out - delta)', "self.params['W2'].T"], {}), "(self.softmax_out - delta, self.params['W2'].T)\n", (1578, 1625), True, 'import numpy as np\n'), ((1692, 1720), 'numpy.dot', 'np.dot', (['X', "self.params['W1']"], {}), "(X, self.params['W1'])\n", (1698, 1720), True, 'import numpy as np\n'), ((1148, 1178), 'numpy.sum', 'np.sum', (["(self.params['W1'] ** 2)"], {}), "(self.params['W1'] ** 2)\n", (1154, 1178), True, 'import numpy as np\n'), ((1177, 1207), 'numpy.sum', 'np.sum', (["(self.params['W2'] ** 2)"], {}), "(self.params['W2'] ** 2)\n", (1183, 1207), True, 'import numpy as np\n'), ((1515, 1543), 'numpy.dot', 'np.dot', (['X', "self.params['W1']"], {}), "(X, self.params['W1'])\n", (1521, 1543), True, 'import numpy as np\n'), ((4473, 4507), 'math.cos', 'math.cos', (['(math.pi * it / num_iters)'], {}), '(math.pi * it / num_iters)\n', (4481, 4507), False, 'import math\n')] |
import numpy as np
"""
This is a set of functions that calculate the EXIT chart for regular and irregular LDPC code
in AWGN channel.
"""
def exit_reg_awgn(dv, dc, ebn0_db):
    """EXIT chart of a regular (dv, dc) LDPC ensemble on the AWGN channel.

    Ref. [1] Channel Codes classical and modern, eqs. (9.42)-(9.43).

    :param dv: variable node degree
    :param dc: check node degree
    :param ebn0_db: Eb/N0 in dB
    :return: (i_ev, i_ec, i_a) -- the VND and CND extrinsic-information
        curves sampled on the a-priori grid i_a in [0, 1]
    """
    rate = dv / dc
    snr_lin = 10 ** (ebn0_db / 10)
    sigma_ch = np.sqrt(8 * rate * snr_lin)
    i_a = np.linspace(0, 1, num=21)
    return iev_iav(i_a, sigma_ch, dv), iec_iac(i_a, dc), i_a
def exit_irreg_awgn(lmbda, rho, ebn0_db):
    """EXIT chart of an irregular LDPC ensemble on the AWGN channel.

    Ref. [1] Channel Codes classical and modern, eqs. (9.44)-(9.45).

    :param lmbda: lmbda[i] is the fraction of edges on degree-(i+1) variable nodes
    :param rho: rho[i] is the fraction of edges on degree-(i+1) check nodes
    :param ebn0_db: Eb/N0 in dB
    :return: (i_ev, i_ec, i_a) degree-averaged VND/CND curves and the grid
    """
    # Design rate: 1 - (sum rho_i / i) / (sum lambda_i / i)
    rho_int = np.divide(rho, np.arange(1, len(rho) + 1)).sum()
    lmbda_int = np.divide(lmbda, np.arange(1, len(lmbda) + 1)).sum()
    code_rate = 1 - rho_int / lmbda_int
    sigma_ch = np.sqrt(8 * code_rate * 10 ** (ebn0_db / 10))
    i_a = np.linspace(0, 1, num=21)
    # Average each degree's VND curve, weighted by its edge fraction.
    i_ev = np.zeros(i_a.size)
    for deg, frac in enumerate(lmbda, start=1):
        i_ev = frac * iev_iav(i_a, sigma_ch, deg) + i_ev
    # Same degree-averaging for the CND curve.
    i_ec = np.zeros(i_a.size)
    for deg, frac in enumerate(rho, start=1):
        i_ec = frac * iec_iac(i_a, deg) + i_ec
    return i_ev, i_ec, i_a
def iev_iav(i_a, sigma_ch, dv):
    """VND EXIT curve I_E,V(I_A) for variable nodes of degree dv.

    Ref. [1] Channel Codes classical and modern, eq. (9.43) counterpart (9.42).

    :param i_a: array of a-priori mutual-information values in [0, 1]
    :param sigma_ch: channel parameter sqrt(8 * R * Eb/N0)
    :param dv: variable node degree
    :return: array of extrinsic mutual-information values
    """
    out = np.zeros(i_a.size)
    for k, ia in enumerate(i_a):
        # Combine (dv - 1) a-priori messages with the channel LLR variance.
        sigma_a = j_sigma_inv(ia)
        out[k] = j_sigma(((dv - 1) * sigma_a ** 2 + sigma_ch ** 2) ** 0.5)
    return out
def iec_iac(i_a, dc):
    """CND EXIT curve I_E,C(I_A) for check nodes of degree dc.

    Ref. [1] Channel Codes classical and modern, eq. (9.43).

    :param i_a: array of a-priori mutual-information values in [0, 1]
    :param dc: check node degree
    :return: array of extrinsic mutual-information values
    """
    out = np.zeros(i_a.size)
    for k, ia in enumerate(i_a):
        # Duality approximation: I_EC = 1 - J(sqrt(dc-1) * J^-1(1 - I_A)).
        sigma_a = j_sigma_inv(1 - ia)
        out[k] = 1 - j_sigma(((dc - 1) * sigma_a ** 2) ** 0.5)
    return out
def j_sigma(sigma):
    """Polynomial/exponential approximation of the J(sigma) mutual-information
    function of a consistent Gaussian LLR with standard deviation sigma.

    Ref. [1] ten Brink et al., "Design of Low-Density Parity-Check Codes
    for Modulation and Detection", appendix.
    """
    sigma_star = 1.6363
    if 0 <= sigma <= sigma_star:
        # Cubic fit on the low-sigma segment (no constant term: J(0) = 0).
        return -0.0421061 * sigma ** 3 + 0.209252 * sigma ** 2 - 0.00640081 * sigma
    if sigma > sigma_star:
        # Exponential fit approaching 1 for large sigma.
        return 1 - np.exp(0.00181491 * sigma ** 3 - 0.142675 * sigma ** 2
                       - 0.0822054 * sigma + 0.0549608)
    # Negative sigma: saturate at full information (matches reference code).
    return 1
def j_sigma_inv(ei):
    """Approximate inverse of j_sigma: the LLR standard deviation whose
    mutual information is ei.

    Ref. [1] ten Brink et al., "Design of Low-Density Parity-Check Codes
    for Modulation and Detection", appendix.
    """
    ei_star = 0.3646
    if 0 <= ei <= ei_star:
        # Low-information segment: quadratic-plus-square-root fit.
        return 1.09542 * ei ** 2 + 0.214217 * ei + 2.33727 * ei ** 0.5
    if ei_star < ei < 1:
        # High-information segment: logarithmic fit.
        return -0.706692 * np.log(0.386013 * (1 - ei)) + 1.75017 * ei
    # Out-of-range input: warn and return a large sigma (matches reference code).
    print('Numerical error in the inverse J_sigma function\n')
    return 10
| [
"numpy.log",
"numpy.power",
"numpy.zeros",
"numpy.linspace",
"numpy.sqrt"
] | [((417, 446), 'numpy.sqrt', 'np.sqrt', (['(8 * code_rate * ebn0)'], {}), '(8 * code_rate * ebn0)\n', (424, 446), True, 'import numpy as np\n'), ((457, 482), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(21)'}), '(0, 1, num=21)\n', (468, 482), True, 'import numpy as np\n'), ((970, 999), 'numpy.sqrt', 'np.sqrt', (['(8 * code_rate * ebn0)'], {}), '(8 * code_rate * ebn0)\n', (977, 999), True, 'import numpy as np\n'), ((1011, 1036), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(21)'}), '(0, 1, num=21)\n', (1022, 1036), True, 'import numpy as np\n'), ((1049, 1067), 'numpy.zeros', 'np.zeros', (['i_a.size'], {}), '(i_a.size)\n', (1057, 1067), True, 'import numpy as np\n'), ((1179, 1197), 'numpy.zeros', 'np.zeros', (['i_a.size'], {}), '(i_a.size)\n', (1187, 1197), True, 'import numpy as np\n'), ((1757, 1775), 'numpy.zeros', 'np.zeros', (['i_a.size'], {}), '(i_a.size)\n', (1765, 1775), True, 'import numpy as np\n'), ((2339, 2357), 'numpy.zeros', 'np.zeros', (['i_a.size'], {}), '(i_a.size)\n', (2347, 2357), True, 'import numpy as np\n'), ((2990, 3016), 'numpy.power', 'np.power', (['sigma', '[3, 2, 1]'], {}), '(sigma, [3, 2, 1])\n', (2998, 3016), True, 'import numpy as np\n'), ((3620, 3645), 'numpy.power', 'np.power', (['ei', '[2, 1, 0.5]'], {}), '(ei, [2, 1, 0.5])\n', (3628, 3645), True, 'import numpy as np\n'), ((3704, 3726), 'numpy.log', 'np.log', (['(bs2 * (1 - ei))'], {}), '(bs2 * (1 - ei))\n', (3710, 3726), True, 'import numpy as np\n'), ((3114, 3143), 'numpy.power', 'np.power', (['sigma', '[3, 2, 1, 0]'], {}), '(sigma, [3, 2, 1, 0])\n', (3122, 3143), True, 'import numpy as np\n')] |
# import the necessary packages
import random
import shutil
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.optimizers import Adagrad
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
# import the necessary packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import SeparableConv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
import matplotlib
# initialize the path to the *original* input directory of images
ORIG_INPUT_DATASET = "/content/drive/MyDrive/classification/datasets/orig"
# initialize the base path to the *new* directory that will contain
# our images after computing the training and testing split
BASE_PATH = "/content/drive/MyDrive/classification/datasets/idc"
# derive the training, validation, and testing directories
TRAIN_PATH = os.path.sep.join([BASE_PATH, "training"])
VAL_PATH = os.path.sep.join([BASE_PATH, "validation"])
TEST_PATH = os.path.sep.join([BASE_PATH, "testing"])
# define the amount of data that will be used training
TRAIN_SPLIT = 0.8
# the amount of validation data will be a percentage of the
# *training* data
VAL_SPLIT = 0.1
# import the necessary packages
# grab the paths to all input images in the original input directory
# and shuffle them
imagePaths = list(paths.list_images(ORIG_INPUT_DATASET))
random.seed(42)
random.shuffle(imagePaths)
# compute the training and testing split
i = int(len(imagePaths) * TRAIN_SPLIT)
trainPaths = imagePaths[:i]
testPaths = imagePaths[i:]
# we'll be using part of the training data for validation
i = int(len(trainPaths) * VAL_SPLIT)
valPaths = trainPaths[:i]
trainPaths = trainPaths[i:]
# define the datasets that we'll be building
datasets = [
("training", trainPaths, TRAIN_PATH),
("validation", valPaths, VAL_PATH),
("testing", testPaths, TEST_PATH)
]
# loop over the datasets
for (dType, imagePaths, baseOutput) in datasets:
# show which data split we are creating
print("[INFO] building '{}' split".format(dType))
# if the output base output directory does not exist, create it
if not os.path.exists(baseOutput):
print("[INFO] 'creating {}' directory".format(baseOutput))
os.makedirs(baseOutput)
# loop over the input image paths
for inputPath in imagePaths:
# extract the filename of the input image and extract the
# class label ("0" for "negative" and "1" for "positive")
filename = inputPath.split(os.path.sep)[-1]
label = filename[-5:-4]
# build the path to the label directory
labelPath = os.path.sep.join([baseOutput, label])
# if the label output directory does not exist, create it
if not os.path.exists(labelPath):
print("[INFO] 'creating {}' directory".format(labelPath))
os.makedirs(labelPath)
# construct the path to the destination image and then copy
# the image itself
p = os.path.sep.join([labelPath, filename])
shutil.copy2(inputPath, p)
matplotlib.use("Agg")
# initialize the path to the *original* input directory of images
ORIG_INPUT_DATASET = "/content/drive/MyDrive/classification/datasets/orig"
# initialize the base path to the *new* directory that will contain
# our images after computing the training and testing split
BASE_PATH = "/content/drive/MyDrive/classification/datasets/idc"
# derive the training, validation, and testing directories
TRAIN_PATH = os.path.sep.join([BASE_PATH, "training"])
VAL_PATH = os.path.sep.join([BASE_PATH, "validation"])
TEST_PATH = os.path.sep.join([BASE_PATH, "testing"])
# define the amount of data that will be used training
TRAIN_SPLIT = 0.8
# the amount of validation data will be a percentage of the
# *training* data
VAL_SPLIT = 0.1
# set the matplotlib backend so figures can be saved in the background
class DetectNet:
    """Small separable-convolution CNN for binary patch classification.

    Three conv stages (32 -> 64 -> 128 filters) each ending in max-pool
    and dropout, followed by a 256-unit dense head and a sigmoid output.
    """

    @staticmethod
    def build(width, height, depth, classes):
        """Assemble and return the (uncompiled) Keras Sequential model."""
        # Default to channels-last; flip if the backend is channels-first.
        shape = (height, width, depth)
        bn_axis = -1
        if K.image_data_format() == "channels_first":
            shape = (depth, height, width)
            bn_axis = 1

        model = Sequential()

        def sep_conv(filters, **kwargs):
            # The repeated unit: SeparableConv2D -> ReLU -> BatchNorm.
            model.add(SeparableConv2D(filters, (3, 3), padding="same", **kwargs))
            model.add(Activation("relu"))
            model.add(BatchNormalization(axis=bn_axis))

        # Stage 1: one conv unit, then downsample.
        sep_conv(32, input_shape=shape)
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Stage 2: two conv units, then downsample.
        sep_conv(64)
        sep_conv(64)
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Stage 3: three conv units, then downsample.
        sep_conv(128)
        sep_conv(128)
        sep_conv(128)
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Fully connected head (BatchNormalization here uses its default axis,
        # matching the reference implementation).
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Sigmoid classifier over `classes` outputs.
        model.add(Dense(classes))
        model.add(Activation("sigmoid"))
        return model
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--plot", type=str, default="plot.png",
help="path to output loss/accuracy plot")
args = vars(ap.parse_args())
# initialize our number of epochs, initial learning rate, and batch
# size
NUM_EPOCHS = 40
INIT_LR = 1e-2
BS = 32
# determine the total number of image paths in training, validation,
# and testing directories
trainPaths = list(paths.list_images(TRAIN_PATH))
totalTrain = len(trainPaths)
totalVal = len(list(paths.list_images(VAL_PATH)))
totalTest = len(list(paths.list_images(TEST_PATH)))
# calculate the total number of training images in each class and
# initialize a dictionary to store the class weights
trainLabels = [int(p.split(os.path.sep)[-2]) for p in trainPaths]
trainLabels = to_categorical(trainLabels)
classTotals = trainLabels.sum(axis=0)
classWeight = dict()
# loop over all classes and calculate the class weight
for i in range(0, len(classTotals)):
classWeight[i] = classTotals.max() / classTotals[i]
# initialize the training data augmentation object
trainAug = ImageDataGenerator(
rescale=1 / 255.0,
rotation_range=20,
zoom_range=0.05,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.05,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest")
# initialize the validation (and testing) data augmentation object
valAug = ImageDataGenerator(rescale=1 / 255.0)
# initialize the training generator
trainGen = trainAug.flow_from_directory(
TRAIN_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=True,
batch_size=BS)
# initialize the validation generator
valGen = valAug.flow_from_directory(
VAL_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=False,
batch_size=BS)
# initialize the testing generator
testGen = valAug.flow_from_directory(
TEST_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=False,
batch_size=BS)
# initialize our DetectNet model and compile it
model = DetectNet.build(width=48, height=48, depth=3,
	classes=2)
# NOTE(review): `lr` is the legacy argument name; recent tf.keras versions
# expect `learning_rate` -- confirm against the pinned TensorFlow version.
opt = Adagrad(lr=INIT_LR, decay=INIT_LR / NUM_EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
	metrics=["accuracy"])
# fit the model
# class_weight re-balances the loss using the per-class totals computed above
H = model.fit(
	x=trainGen,
	steps_per_epoch=totalTrain // BS,
	validation_data=valGen,
	validation_steps=totalVal // BS,
	class_weight=classWeight,
	epochs=NUM_EPOCHS)
model.save("detection-class.model")
# reset the testing generator and then use our trained model to
# make predictions on the data
print("[INFO] evaluating network...")
testGen.reset()
predIdxs = model.predict(x=testGen, steps=(totalTest // BS) + 1)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testGen.classes, predIdxs,
target_names=testGen.class_indices.keys()))
# compute the confusion matrix and and use it to derive the raw
# accuracy, sensitivity, and specificity
cm = confusion_matrix(testGen.classes, predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# show the confusion matrix, accuracy, sensitivity, and specificity
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# plot the training loss and accuracy
N = NUM_EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
| [
"matplotlib.pyplot.title",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.MaxPooling2D",
"argparse.ArgumentParser",
"numpy.argmax",
"tensorflow.keras.layers.Dense",
"random.shuffle",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"... | [((1365, 1406), 'os.path.sep.join', 'os.path.sep.join', (["[BASE_PATH, 'training']"], {}), "([BASE_PATH, 'training'])\n", (1381, 1406), False, 'import os\n'), ((1418, 1461), 'os.path.sep.join', 'os.path.sep.join', (["[BASE_PATH, 'validation']"], {}), "([BASE_PATH, 'validation'])\n", (1434, 1461), False, 'import os\n'), ((1474, 1514), 'os.path.sep.join', 'os.path.sep.join', (["[BASE_PATH, 'testing']"], {}), "([BASE_PATH, 'testing'])\n", (1490, 1514), False, 'import os\n'), ((1861, 1876), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1872, 1876), False, 'import random\n'), ((1877, 1903), 'random.shuffle', 'random.shuffle', (['imagePaths'], {}), '(imagePaths)\n', (1891, 1903), False, 'import random\n'), ((3534, 3555), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (3548, 3555), False, 'import matplotlib\n'), ((3962, 4003), 'os.path.sep.join', 'os.path.sep.join', (["[BASE_PATH, 'training']"], {}), "([BASE_PATH, 'training'])\n", (3978, 4003), False, 'import os\n'), ((4015, 4058), 'os.path.sep.join', 'os.path.sep.join', (["[BASE_PATH, 'validation']"], {}), "([BASE_PATH, 'validation'])\n", (4031, 4058), False, 'import os\n'), ((4071, 4111), 'os.path.sep.join', 'os.path.sep.join', (["[BASE_PATH, 'testing']"], {}), "([BASE_PATH, 'testing'])\n", (4087, 4111), False, 'import os\n'), ((6611, 6636), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6634, 6636), False, 'import argparse\n'), ((7374, 7401), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['trainLabels'], {}), '(trainLabels)\n', (7388, 7401), False, 'from tensorflow.keras.utils import to_categorical\n'), ((7671, 7880), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255.0)', 'rotation_range': '(20)', 'zoom_range': '(0.05)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'shear_range': '(0.05)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'fill_mode': 
'"""nearest"""'}), "(rescale=1 / 255.0, rotation_range=20, zoom_range=0.05,\n width_shift_range=0.1, height_shift_range=0.1, shear_range=0.05,\n horizontal_flip=True, vertical_flip=True, fill_mode='nearest')\n", (7689, 7880), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((8004, 8041), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255.0)'}), '(rescale=1 / 255.0)\n', (8022, 8041), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((8802, 8849), 'tensorflow.keras.optimizers.Adagrad', 'Adagrad', ([], {'lr': 'INIT_LR', 'decay': '(INIT_LR / NUM_EPOCHS)'}), '(lr=INIT_LR, decay=INIT_LR / NUM_EPOCHS)\n', (8809, 8849), False, 'from tensorflow.keras.optimizers import Adagrad\n'), ((9560, 9587), 'numpy.argmax', 'np.argmax', (['predIdxs'], {'axis': '(1)'}), '(predIdxs, axis=1)\n', (9569, 9587), True, 'import numpy as np\n'), ((9873, 9916), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['testGen.classes', 'predIdxs'], {}), '(testGen.classes, predIdxs)\n', (9889, 9916), False, 'from sklearn.metrics import confusion_matrix\n'), ((10330, 10353), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (10343, 10353), True, 'import matplotlib.pyplot as plt\n'), ((10354, 10366), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10364, 10366), True, 'import matplotlib.pyplot as plt\n'), ((10637, 10687), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy on Dataset"""'], {}), "('Training Loss and Accuracy on Dataset')\n", (10646, 10687), True, 'import matplotlib.pyplot as plt\n'), ((10688, 10709), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (10698, 10709), True, 'import matplotlib.pyplot as plt\n'), ((10710, 10737), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (10720, 10737), True, 'import 
matplotlib.pyplot as plt\n'), ((10738, 10766), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (10748, 10766), True, 'import matplotlib.pyplot as plt\n'), ((10767, 10792), 'matplotlib.pyplot.savefig', 'plt.savefig', (["args['plot']"], {}), "(args['plot'])\n", (10778, 10792), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1859), 'imutils.paths.list_images', 'paths.list_images', (['ORIG_INPUT_DATASET'], {}), '(ORIG_INPUT_DATASET)\n', (1839, 1859), False, 'from imutils import paths\n'), ((7013, 7042), 'imutils.paths.list_images', 'paths.list_images', (['TRAIN_PATH'], {}), '(TRAIN_PATH)\n', (7030, 7042), False, 'from imutils import paths\n'), ((10376, 10391), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (10385, 10391), True, 'import numpy as np\n'), ((10441, 10456), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (10450, 10456), True, 'import numpy as np\n'), ((10508, 10523), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (10517, 10523), True, 'import numpy as np\n'), ((10576, 10591), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (10585, 10591), True, 'import numpy as np\n'), ((2619, 2645), 'os.path.exists', 'os.path.exists', (['baseOutput'], {}), '(baseOutput)\n', (2633, 2645), False, 'import os\n'), ((2722, 2745), 'os.makedirs', 'os.makedirs', (['baseOutput'], {}), '(baseOutput)\n', (2733, 2745), False, 'import os\n'), ((3101, 3138), 'os.path.sep.join', 'os.path.sep.join', (['[baseOutput, label]'], {}), '([baseOutput, label])\n', (3117, 3138), False, 'import os\n'), ((3459, 3498), 'os.path.sep.join', 'os.path.sep.join', (['[labelPath, filename]'], {}), '([labelPath, filename])\n', (3475, 3498), False, 'import os\n'), ((3507, 3533), 'shutil.copy2', 'shutil.copy2', (['inputPath', 'p'], {}), '(inputPath, p)\n', (3519, 3533), False, 'import shutil\n'), ((4896, 4908), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4906, 4908), False, 
'from tensorflow.keras.models import Sequential\n'), ((7093, 7120), 'imutils.paths.list_images', 'paths.list_images', (['VAL_PATH'], {}), '(VAL_PATH)\n', (7110, 7120), False, 'from imutils import paths\n'), ((7144, 7172), 'imutils.paths.list_images', 'paths.list_images', (['TEST_PATH'], {}), '(TEST_PATH)\n', (7161, 7172), False, 'from imutils import paths\n'), ((3220, 3245), 'os.path.exists', 'os.path.exists', (['labelPath'], {}), '(labelPath)\n', (3234, 3245), False, 'import os\n'), ((3329, 3351), 'os.makedirs', 'os.makedirs', (['labelPath'], {}), '(labelPath)\n', (3340, 3351), False, 'import os\n'), ((4734, 4755), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4753, 4755), True, 'from tensorflow.keras import backend as K\n'), ((4927, 4994), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'input_shape': 'inputShape'}), "(32, (3, 3), padding='same', input_shape=inputShape)\n", (4942, 4994), False, 'from tensorflow.keras.layers import SeparableConv2D\n'), ((5014, 5032), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5024, 5032), False, 'from tensorflow.keras.layers import Activation\n'), ((5052, 5084), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), '(axis=chanDim)\n', (5070, 5084), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((5104, 5134), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5116, 5134), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), ((5154, 5167), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5161, 5167), False, 'from tensorflow.keras.layers import Dropout\n'), ((5216, 5259), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (5231, 5259), 
False, 'from tensorflow.keras.layers import SeparableConv2D\n'), ((5279, 5297), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5289, 5297), False, 'from tensorflow.keras.layers import Activation\n'), ((5317, 5349), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), '(axis=chanDim)\n', (5335, 5349), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((5369, 5412), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (5384, 5412), False, 'from tensorflow.keras.layers import SeparableConv2D\n'), ((5432, 5450), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5442, 5450), False, 'from tensorflow.keras.layers import Activation\n'), ((5470, 5502), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), '(axis=chanDim)\n', (5488, 5502), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((5522, 5552), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5534, 5552), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), ((5572, 5585), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5579, 5585), False, 'from tensorflow.keras.layers import Dropout\n'), ((5634, 5678), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (5649, 5678), False, 'from tensorflow.keras.layers import SeparableConv2D\n'), ((5698, 5716), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5708, 5716), False, 'from tensorflow.keras.layers import Activation\n'), ((5736, 5768), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), '(axis=chanDim)\n', 
(5754, 5768), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((5788, 5832), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (5803, 5832), False, 'from tensorflow.keras.layers import SeparableConv2D\n'), ((5852, 5870), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5862, 5870), False, 'from tensorflow.keras.layers import Activation\n'), ((5890, 5922), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), '(axis=chanDim)\n', (5908, 5922), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((5942, 5986), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (5957, 5986), False, 'from tensorflow.keras.layers import SeparableConv2D\n'), ((6006, 6024), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6016, 6024), False, 'from tensorflow.keras.layers import Activation\n'), ((6044, 6076), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), '(axis=chanDim)\n', (6062, 6076), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((6096, 6126), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6108, 6126), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), ((6146, 6159), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (6153, 6159), False, 'from tensorflow.keras.layers import Dropout\n'), ((6224, 6233), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6231, 6233), False, 'from tensorflow.keras.layers import Flatten\n'), ((6253, 6263), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (6258, 6263), False, 'from tensorflow.keras.layers import 
Dense\n'), ((6283, 6301), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6293, 6301), False, 'from tensorflow.keras.layers import Activation\n'), ((6321, 6341), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6339, 6341), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((6361, 6373), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (6368, 6373), False, 'from tensorflow.keras.layers import Dropout\n'), ((6416, 6430), 'tensorflow.keras.layers.Dense', 'Dense', (['classes'], {}), '(classes)\n', (6421, 6430), False, 'from tensorflow.keras.layers import Dense\n'), ((6450, 6471), 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6460, 6471), False, 'from tensorflow.keras.layers import Activation\n')] |
import numpy as np
import cv2

# Demo: show one canvas in three colors (OpenCV uses BGR channel order).
canvas = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.imshow('deneme siyah', canvas)      # all zeros -> black
canvas[:] = (255, 255, 255)
cv2.imshow('deneme beyaz', canvas)      # white
canvas[:] = (0, 0, 255)
cv2.imshow('deneme kırmızı', canvas)    # red (B=0, G=0, R=255)
cv2.waitKey(0)                          # block until a key is pressed
cv2.destroyAllWindows()
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imshow",
"numpy.zeros"
] | [((39, 78), 'numpy.zeros', 'np.zeros', (['(200, 200, 3)'], {'dtype': 'np.uint8'}), '((200, 200, 3), dtype=np.uint8)\n', (47, 78), True, 'import numpy as np\n'), ((76, 110), 'cv2.imshow', 'cv2.imshow', (['"""deneme siyah"""', 'deneme'], {}), "('deneme siyah', deneme)\n", (86, 110), False, 'import cv2\n'), ((134, 168), 'cv2.imshow', 'cv2.imshow', (['"""deneme beyaz"""', 'deneme'], {}), "('deneme beyaz', deneme)\n", (144, 168), False, 'import cv2\n'), ((188, 224), 'cv2.imshow', 'cv2.imshow', (['"""deneme kırmızı"""', 'deneme'], {}), "('deneme kırmızı', deneme)\n", (198, 224), False, 'import cv2\n'), ((224, 238), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (235, 238), False, 'import cv2\n'), ((239, 262), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (260, 262), False, 'import cv2\n')] |
## interaction3 / abstract / manipulations.py
# Geometry/configuration helpers operating on membrane, element, channel and
# array dictionaries; every public helper also accepts a list of such dicts
# (see the vectorize decorator below).
# names to export
__all__ = ['move_membrane', 'translate_membrane', 'rotate_membrane', 'move_element', 'translate_element',
           'rotate_element', 'element_position_from_membranes', 'channel_position_from_elements', 'focus_channel',
           'defocus_channel', 'bias_channel', 'activate_channel', 'deactivate_channel', 'move_array',
           'translate_array', 'rotate_array', 'array_position_from_vertices', 'get_channel_positions_from_array',
           'get_element_positions_from_array', 'get_membrane_positions_from_array', 'focus_array', 'reset_focus_array',
           'get_channel_count']
import math
from functools import wraps

import numpy as np
## DECORATORS ##
def vectorize(f):
    """Decorator letting *f* accept a single dict or a list/tuple of them.

    When the first argument is a list or tuple, *f* is applied to each item
    and a list of the results is returned; otherwise *f* is called directly.
    functools.wraps preserves the wrapped function's name and docstring
    (the original decorator returned an anonymous 'decorator' function).
    """
    @wraps(f)
    def decorator(m, *args, **kwargs):
        if isinstance(m, (list, tuple)):
            # apply f element-wise and collect the individual results
            return [f(item, *args, **kwargs) for item in m]
        return f(m, *args, **kwargs)
    return decorator
## HELPER FUNCTIONS ##
def rotation_matrix(vec, angle):
    """Return the 3x3 matrix rotating by *angle* (radians) about axis *vec*.

    Parameters
    ----------
    vec : sequence of 3 floats or str
        Rotation axis. Assumed to be unit length when given as a vector
        (TODO confirm callers never pass non-unit axes); alternatively one
        of the axis names 'x', '-x', 'y', '-y', 'z', '-z' (case-insensitive).
    angle : float
        Rotation angle in radians.

    Raises
    ------
    ValueError
        If *vec* is a string that does not name a principal axis (the
        original fell through and failed on tuple unpacking instead).
    """
    if isinstance(vec, str):
        axes = {
            'x': [1, 0, 0], '-x': [-1, 0, 0],
            'y': [0, 1, 0], '-y': [0, -1, 0],
            'z': [0, 0, 1], '-z': [0, 0, -1],
        }
        key = vec.lower()
        if key not in axes:
            raise ValueError("unrecognized axis name: {}".format(vec))
        vec = axes[key]

    x, y, z = vec
    # hoist the trig calls; the original recomputed cos/sin per matrix entry
    c = np.cos(angle)
    s = np.sin(angle)
    t = 1 - c  # common factor of the Rodrigues rotation formula

    r = np.zeros((3, 3))
    r[0, 0] = c + x * x * t
    r[0, 1] = x * y * t - z * s
    r[0, 2] = x * z * t + y * s
    r[1, 0] = y * x * t + z * s
    r[1, 1] = c + y * y * t
    r[1, 2] = y * z * t - x * s
    r[2, 0] = z * x * t - y * s
    r[2, 1] = z * y * t + x * s
    r[2, 2] = c + z * z * t
    return r
return r
def distance(x, y):
    """Return the Euclidean distance between the points *x* and *y*."""
    squared_diffs = [(a - b) ** 2 for a, b in zip(x, y)]
    return math.sqrt(math.fsum(squared_diffs))
## MEMBRANE MANIPULATIONS ##
@vectorize
def move_membrane(m, pos):
    """Place the membrane at the absolute position *pos*."""
    m.update(position=pos)
@vectorize
def translate_membrane(m, vec):
    """Shift the membrane position by the offset vector *vec*."""
    m['position'] = [p + d for p, d in zip(m['position'], vec)]
@vectorize
def rotate_membrane(m, origin, vec, angle):
    """Rotate the membrane about *origin* by *angle* radians around axis *vec*.

    Every applied rotation is also recorded in the membrane's 'rotations'
    list so the history can be replayed later.
    """
    anchor = np.asarray(origin)
    rotated = rotation_matrix(vec, angle).dot(np.asarray(m['position']) - anchor) + anchor
    m['position'] = rotated.tolist()
    # append to the rotation history, creating the list on first use
    m.setdefault('rotations', []).append([vec, angle])
## ELEMENT MANIPULATIONS ##
@vectorize
def move_element(e, pos):
    """Move the element (and its membranes) to the absolute position *pos*."""
    offset = [target - current for target, current in zip(pos, e['position'])]
    translate_element(e, offset)
@vectorize
def translate_element(e, vec):
    """Shift the element and each of its membranes by *vec*."""
    e['position'] = [p + d for p, d in zip(e['position'], vec)]
    for membrane in e['membranes']:
        translate_membrane(membrane, vec)
@vectorize
def rotate_element(e, origin, vec, angle):
    """Rotate the element about *origin* by *angle* around axis *vec*,
    rotating every membrane it contains as well.
    """
    anchor = np.asarray(origin)
    rotated = rotation_matrix(vec, angle).dot(np.asarray(e['position']) - anchor) + anchor
    e['position'] = rotated.tolist()
    for membrane in e['membranes']:
        rotate_membrane(membrane, origin, vec, angle)
@vectorize
def element_position_from_membranes(e):
    """Set the element position to the centroid of its membrane positions."""
    e['position'] = [np.mean([m['position'][axis] for m in e['membranes']])
                     for axis in range(3)]
## CHANNEL MANIPULATIONS ##
@vectorize
def move_channel(ch, pos):
    """Move the channel (and its elements) to the absolute position *pos*."""
    offset = [target - current for target, current in zip(pos, ch['position'])]
    translate_channel(ch, offset)
@vectorize
def translate_channel(ch, vec):
    """Shift the channel and each of its elements by *vec*."""
    ch['position'] = [p + d for p, d in zip(ch['position'], vec)]
    for element in ch['elements']:
        translate_element(element, vec)
@vectorize
def rotate_channel(ch, origin, vec, angle):
    """Rotate the channel about *origin* by *angle* around axis *vec*,
    rotating each contained element as well.
    """
    anchor = np.asarray(origin)
    rotated = rotation_matrix(vec, angle).dot(np.asarray(ch['position']) - anchor) + anchor
    ch['position'] = rotated.tolist()
    for element in ch['elements']:
        rotate_element(element, origin, vec, angle)
@vectorize
def channel_position_from_elements(ch):
    """Set the channel position to the centroid of its element positions."""
    ch['position'] = [np.mean([e['position'][axis] for e in ch['elements']])
                      for axis in range(3)]
@vectorize
def focus_channel(ch, pos, sound_speed, quantization=None):
    """Set the channel delay so the channel focuses at *pos*.

    The delay is the negated travel time from the channel to *pos*; when
    *quantization* is a non-zero step, the travel time is rounded to the
    nearest multiple of that step first.
    """
    travel_time = distance(ch['position'], pos) / sound_speed
    if quantization:  # None and 0 both mean "no quantization"
        travel_time = round(travel_time / quantization) * quantization
    ch['delay'] = -travel_time
@vectorize
def defocus_channel(ch, pos):
    # Placeholder: defocusing (diverging-wave delays) has not been implemented.
    raise NotImplementedError
@vectorize
def bias_channel(ch, bias):
    """Assign the DC bias value applied to the channel."""
    ch.update(dc_bias=bias)
@vectorize
def activate_channel(ch):
    """Mark the channel as active."""
    ch.update(active=True)
@vectorize
def deactivate_channel(ch):
    """Mark the channel as inactive."""
    ch.update(active=False)
## ARRAY MANIPULATIONS ##
@vectorize
def move_array(a, pos):
    """Move the array (vertices and channels included) to position *pos*."""
    offset = [target - current for target, current in zip(pos, a['position'])]
    translate_array(a, offset)
@vectorize
def translate_array(a, vec):
    """Shift the array, its vertices (if present) and all channels by *vec*."""
    a['position'] = [p + d for p, d in zip(a['position'], vec)]
    if 'vertices' in a:
        a['vertices'] = [[p + d for p, d in zip(vertex, vec)]
                         for vertex in a['vertices']]
    for channel in a['channels']:
        translate_channel(channel, vec)
@vectorize
def rotate_array(a, vec, angle, origin=None):
    """Rotate the array (position, vertices and channels) about *origin*.

    If *origin* is omitted, the array's stored 'rotation_origin' is used.
    """
    if origin is None:
        origin = a['rotation_origin']
    anchor = np.asarray(origin)
    rot = rotation_matrix(vec, angle)  # same matrix for position and vertices
    a['position'] = (rot.dot(np.asarray(a['position']) - anchor) + anchor).tolist()
    if 'vertices' in a:
        a['vertices'] = [(rot.dot(np.asarray(v) - anchor) + anchor).tolist()
                         for v in a['vertices']]
    for channel in a['channels']:
        rotate_channel(channel, origin, vec, angle)
@vectorize
def array_position_from_vertices(a):
    """Set the array position to the centroid of its vertices."""
    vertices = np.array(a['vertices'])
    a['position'] = vertices.mean(axis=0).tolist()
@vectorize
def get_channel_positions_from_array(a):
    """Return an array with one row per channel position."""
    positions = []
    for channel in a['channels']:
        positions.append(channel['position'])
    return np.array(positions)
@vectorize
def get_element_positions_from_array(a):
    """Return an array of element positions gathered across all channels."""
    positions = []
    for channel in a['channels']:
        for element in channel['elements']:
            positions.append(element['position'])
    return np.array(positions)
@vectorize
def get_membrane_positions_from_array(a):
    """Return an array of membrane positions across all channels/elements."""
    positions = []
    for channel in a['channels']:
        for element in channel['elements']:
            for membrane in element['membranes']:
                positions.append(membrane['position'])
    return np.array(positions)
@vectorize
def focus_array(a, pos, sound_speed, quantization=None, kind=None):
    """Focus the array at *pos* by setting per-channel delays.

    Parameters
    ----------
    a : dict
        Array specification containing a 'channels' list.
    pos : sequence of float
        Focal point position.
    sound_speed : float
        Speed of sound used to convert distance into delay.
    quantization : float, optional
        Delay quantization step, passed through to focus_channel.
    kind : str, optional
        Which channels to focus: 'tx'/'transmit', 'rx'/'receive', or
        'txrx'/'both'. None (the default) focuses every channel.
        Bug fix: the original called kind.lower() unconditionally, so the
        documented default kind=None raised AttributeError.

    Raises
    ------
    ValueError
        If *kind* is an unrecognized string (the original left 'channels'
        unbound and failed with a confusing NameError instead).
    """
    if kind is None or kind.lower() in ('txrx', 'both'):
        channels = a['channels']
    elif kind.lower() in ('tx', 'transmit'):
        channels = [ch for ch in a['channels']
                    if ch['kind'].lower() in ('tx', 'transmit', 'both', 'txrx')]
    elif kind.lower() in ('rx', 'receive'):
        channels = [ch for ch in a['channels']
                    if ch['kind'].lower() in ('rx', 'receive', 'both', 'txrx')]
    else:
        raise ValueError("unrecognized channel kind: {}".format(kind))
    for ch in channels:
        focus_channel(ch, pos, sound_speed, quantization)
@vectorize
def reset_focus_array(a):
    """Zero the delay of every channel in the array."""
    for channel in a['channels']:
        channel['delay'] = 0
@vectorize
def get_channel_count(a, kind=None):
    """Return the number of channels in the array, optionally filtered by kind.

    Parameters
    ----------
    a : dict
        Array specification containing a 'channels' list.
    kind : str, optional
        'tx'/'transmit', 'rx'/'receive' or 'txrx'/'both'. None (the default)
        counts every channel.

    Raises
    ------
    ValueError
        If *kind* is an unrecognized string (the original implicitly
        returned None in that case, hiding the caller's typo).
    """
    if kind is None:
        return len(a['channels'])
    key = kind.lower()
    if key in ('tx', 'transmit'):
        accepted = ('tx', 'transmit', 'both', 'txrx')
    elif key in ('rx', 'receive'):
        accepted = ('rx', 'receive', 'both', 'txrx')
    elif key in ('txrx', 'both'):
        accepted = ('both', 'txrx')
    else:
        raise ValueError("unrecognized channel kind: {}".format(kind))
    return len([ch for ch in a['channels'] if ch['kind'].lower() in accepted])
if __name__ == '__main__':
    # module is library-only; nothing to execute from the command line
    pass
| [
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.sin",
"numpy.cos"
] | [((1505, 1521), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1513, 1521), True, 'import numpy as np\n'), ((2370, 2386), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (2378, 2386), True, 'import numpy as np\n'), ((2397, 2420), 'numpy.array', 'np.array', (["m['position']"], {}), "(m['position'])\n", (2405, 2420), True, 'import numpy as np\n'), ((3108, 3124), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (3116, 3124), True, 'import numpy as np\n'), ((3135, 3158), 'numpy.array', 'np.array', (["e['position']"], {}), "(e['position'])\n", (3143, 3158), True, 'import numpy as np\n'), ((4077, 4093), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (4085, 4093), True, 'import numpy as np\n'), ((4104, 4128), 'numpy.array', 'np.array', (["ch['position']"], {}), "(ch['position'])\n", (4112, 4128), True, 'import numpy as np\n'), ((5839, 5855), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (5847, 5855), True, 'import numpy as np\n'), ((5866, 5889), 'numpy.array', 'np.array', (["a['position']"], {}), "(a['position'])\n", (5874, 5889), True, 'import numpy as np\n'), ((6559, 6609), 'numpy.array', 'np.array', (["[ch['position'] for ch in a['channels']]"], {}), "([ch['position'] for ch in a['channels']])\n", (6567, 6609), True, 'import numpy as np\n'), ((6675, 6748), 'numpy.array', 'np.array', (["[e['position'] for ch in a['channels'] for e in ch['elements']]"], {}), "([e['position'] for ch in a['channels'] for e in ch['elements']])\n", (6683, 6748), True, 'import numpy as np\n'), ((6815, 6916), 'numpy.array', 'np.array', (["[m['position'] for ch in a['channels'] for e in ch['elements'] for m in e[\n 'membranes']]"], {}), "([m['position'] for ch in a['channels'] for e in ch['elements'] for\n m in e['membranes']])\n", (6823, 6916), True, 'import numpy as np\n'), ((1536, 1545), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1542, 1545), True, 'import numpy as np\n'), ((1747, 1756), 'numpy.cos', 'np.cos', (['a'], {}), 
'(a)\n', (1753, 1756), True, 'import numpy as np\n'), ((1958, 1967), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1964, 1967), True, 'import numpy as np\n'), ((3641, 3651), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3648, 3651), True, 'import numpy as np\n'), ((3653, 3663), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3660, 3663), True, 'import numpy as np\n'), ((3665, 3675), 'numpy.mean', 'np.mean', (['z'], {}), '(z)\n', (3672, 3675), True, 'import numpy as np\n'), ((4608, 4618), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (4615, 4618), True, 'import numpy as np\n'), ((4620, 4630), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4627, 4630), True, 'import numpy as np\n'), ((4632, 4642), 'numpy.mean', 'np.mean', (['z'], {}), '(z)\n', (4639, 4642), True, 'import numpy as np\n'), ((1615, 1624), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1621, 1624), True, 'import numpy as np\n'), ((1669, 1678), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1675, 1678), True, 'import numpy as np\n'), ((1723, 1732), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1729, 1732), True, 'import numpy as np\n'), ((1826, 1835), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1832, 1835), True, 'import numpy as np\n'), ((1880, 1889), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1886, 1889), True, 'import numpy as np\n'), ((1934, 1943), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1940, 1943), True, 'import numpy as np\n'), ((1560, 1569), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1566, 1569), True, 'import numpy as np\n'), ((1598, 1607), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1604, 1607), True, 'import numpy as np\n'), ((1652, 1661), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1658, 1661), True, 'import numpy as np\n'), ((1706, 1715), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1712, 1715), True, 'import numpy as np\n'), ((1771, 1780), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1777, 1780), True, 'import numpy as np\n'), ((1809, 1818), 'numpy.cos', 'np.cos', 
(['a'], {}), '(a)\n', (1815, 1818), True, 'import numpy as np\n'), ((1863, 1872), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1869, 1872), True, 'import numpy as np\n'), ((1917, 1926), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1923, 1926), True, 'import numpy as np\n'), ((1982, 1991), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1988, 1991), True, 'import numpy as np\n'), ((6452, 6475), 'numpy.array', 'np.array', (["a['vertices']"], {}), "(a['vertices'])\n", (6460, 6475), True, 'import numpy as np\n'), ((6164, 6175), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (6172, 6175), True, 'import numpy as np\n')] |
"""
Wrapper class providing a minimal consistent interface to `AmberTools <http://ambermd.org/AmberTools.php>`_.
"""
__all__ = ("AmberToolsToolkitWrapper",)
# =============================================================================================
# IMPORTS
# =============================================================================================
import subprocess
import tempfile
from collections import defaultdict
from distutils.spawn import find_executable
import numpy as np
try:
from openmm import unit
except ImportError:
from simtk import unit
from openff.toolkit.utils import base_wrapper, rdkit_wrapper
from openff.toolkit.utils.exceptions import (
AntechamberNotFoundError,
ChargeCalculationError,
ChargeMethodUnavailableError,
ToolkitUnavailableException,
)
from openff.toolkit.utils.utils import temporary_cd
# =============================================================================================
# IMPLEMENTATION
# =============================================================================================
class AmberToolsToolkitWrapper(base_wrapper.ToolkitWrapper):
"""
AmberTools toolkit wrapper
.. warning :: This API is experimental and subject to change.
"""
_toolkit_name = "AmberTools"
_toolkit_installation_instructions = (
"The AmberTools toolkit (free and open source) can be found at "
"https://anaconda.org/conda-forge/ambertools"
)
def __init__(self):
super().__init__()
self._toolkit_file_read_formats = []
self._toolkit_file_write_formats = []
if not self.is_available():
raise ToolkitUnavailableException(
f"The required toolkit {self._toolkit_name} is not "
f"available. {self._toolkit_installation_instructions}"
)
# TODO: More reliable way to extract AmberTools version
out = subprocess.check_output(["antechamber", "-L"])
ambertools_version = out.decode("utf-8").split("\n")[1].split()[3].strip(":")
self._toolkit_version = ambertools_version
# TODO: Find AMBERHOME or executable home, checking miniconda if needed
# Store an instance of an RDKitToolkitWrapper for file I/O
self._rdkit_toolkit_wrapper = rdkit_wrapper.RDKitToolkitWrapper()
@staticmethod
def is_available():
"""
Check whether the AmberTools toolkit is installed
Returns
-------
is_installed : bool
True if AmberTools is installed, False otherwise.
"""
# TODO: Check all tools needed
# TODO: How should we implement find_executable?
ANTECHAMBER_PATH = find_executable("antechamber")
if ANTECHAMBER_PATH is None:
return False
# AmberToolsToolkitWrapper needs RDKit to do basically anything, since its interface requires SDF I/O
if not (rdkit_wrapper.RDKitToolkitWrapper.is_available()):
return False
return True
def assign_partial_charges(
self,
molecule,
partial_charge_method=None,
use_conformers=None,
strict_n_conformers=False,
normalize_partial_charges=True,
_cls=None,
):
"""
Compute partial charges with AmberTools using antechamber/sqm, and assign
the new values to the partial_charges attribute.
.. warning :: This API experimental and subject to change.
.. todo ::
* Do we want to also allow ESP/RESP charges?
Parameters
----------
molecule : openff.toolkit.topology.Molecule
Molecule for which partial charges are to be computed
partial_charge_method : str, optional, default=None
The charge model to use. One of ['gasteiger', 'am1bcc', 'am1-mulliken'].
If None, 'am1-mulliken' will be used.
use_conformers : iterable of openmm.unit.Quantity-wrapped numpy arrays, each
with shape (n_atoms, 3) and dimension of distance. Optional, default = None
List of (n_atoms x 3) openmm.unit.Quantities to use for partial charge calculation.
If None, an appropriate number of conformers will be generated.
strict_n_conformers : bool, default=False
Whether to raise an exception if an invalid number of conformers is provided for
the given charge method.
If this is False and an invalid number of conformers is found, a warning will be raised.
normalize_partial_charges : bool, default=True
Whether to offset partial charges so that they sum to the total formal charge of the molecule.
This is used to prevent accumulation of rounding errors when the partial charge generation method has
low precision.
_cls : class
Molecule constructor
Raises
------
ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit
ChargeCalculationError if the charge method is supported by this toolkit, but fails
"""
import os
import subprocess
from openff.toolkit.topology import Molecule
if partial_charge_method is None:
partial_charge_method = "am1-mulliken"
else:
# Standardize method name for string comparisons
partial_charge_method = partial_charge_method.lower()
SUPPORTED_CHARGE_METHODS = {
"am1bcc": {
"antechamber_keyword": "bcc",
"min_confs": 1,
"max_confs": 1,
"rec_confs": 1,
},
"am1-mulliken": {
"antechamber_keyword": "mul",
"min_confs": 1,
"max_confs": 1,
"rec_confs": 1,
},
"gasteiger": {
"antechamber_keyword": "gas",
"min_confs": 0,
"max_confs": 0,
"rec_confs": 0,
},
}
if partial_charge_method not in SUPPORTED_CHARGE_METHODS:
raise ChargeMethodUnavailableError(
f"partial_charge_method '{partial_charge_method}' is not available from AmberToolsToolkitWrapper. "
f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} "
)
charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method]
if _cls is None:
_cls = Molecule
# Make a temporary copy of the molecule, since we'll be messing with its conformers
mol_copy = _cls(molecule)
if use_conformers is None:
if charge_method["rec_confs"] == 0:
mol_copy._conformers = None
else:
mol_copy.generate_conformers(
n_conformers=charge_method["rec_confs"],
rms_cutoff=0.25 * unit.angstrom,
toolkit_registry=rdkit_wrapper.RDKitToolkitWrapper(),
)
# TODO: What's a "best practice" RMS cutoff to use here?
else:
mol_copy._conformers = None
for conformer in use_conformers:
mol_copy._add_conformer(conformer)
self._check_n_conformers(
mol_copy,
partial_charge_method=partial_charge_method,
min_confs=charge_method["min_confs"],
max_confs=charge_method["max_confs"],
strict_n_conformers=strict_n_conformers,
)
# Find the path to antechamber
# TODO: How should we implement find_executable?
ANTECHAMBER_PATH = find_executable("antechamber")
if ANTECHAMBER_PATH is None:
raise AntechamberNotFoundError(
"Antechamber not found, cannot run charge_mol()"
)
# Compute charges
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
net_charge = mol_copy.total_charge.value_in_unit(unit.elementary_charge)
# Write out molecule in SDF format
# TODO: How should we handle multiple conformers?
self._rdkit_toolkit_wrapper.to_file(
mol_copy, "molecule.sdf", file_format="sdf"
)
# Compute desired charges
# TODO: Add error handling if antechamber chokes
short_charge_method = charge_method["antechamber_keyword"]
subprocess.check_output(
[
"antechamber",
"-i",
"molecule.sdf",
"-fi",
"sdf",
"-o",
"charged.mol2",
"-fo",
"mol2",
"-pf",
"yes",
"-dr",
"n",
"-c",
short_charge_method,
"-nc",
str(net_charge),
]
)
# Write out just charges
subprocess.check_output(
[
"antechamber",
"-dr",
"n",
"-i",
"charged.mol2",
"-fi",
"mol2",
"-o",
"charges2.mol2",
"-fo",
"mol2",
"-c",
"wc",
"-cf",
"charges.txt",
"-pf",
"yes",
]
)
# Check to ensure charges were actually produced
if not os.path.exists("charges.txt"):
# TODO: copy files into local directory to aid debugging?
raise ChargeCalculationError(
"Antechamber/sqm partial charge calculation failed on "
"molecule {} (SMILES {})".format(
molecule.name, molecule.to_smiles()
)
)
# Read the charges
with open("charges.txt", "r") as infile:
contents = infile.read()
text_charges = contents.split()
charges = np.zeros([molecule.n_atoms], np.float64)
for index, token in enumerate(text_charges):
charges[index] = float(token)
# TODO: Ensure that the atoms in charged.mol2 are in the same order as in molecule.sdf
charges = unit.Quantity(charges, unit.elementary_charge)
molecule.partial_charges = charges
if normalize_partial_charges:
molecule._normalize_partial_charges()
def compute_partial_charges_am1bcc(
self, molecule, use_conformers=None, strict_n_conformers=False
):
"""
Compute partial charges with AmberTools using antechamber/sqm. This will calculate AM1-BCC charges on the first
conformer only.
.. warning :: This API is experimental and subject to change.
Parameters
----------
molecule : Molecule
Molecule for which partial charges are to be computed
use_conformers : iterable of openmm.unit.Quantity-wrapped numpy arrays,
each with shape (n_atoms, 3) and dimension of distance. Optional, default = None
Coordinates to use for partial charge calculation. If None, an appropriate number
of conformers will be generated.
strict_n_conformers : bool, default=False
Whether to raise an exception if an invalid number of conformers is provided.
If this is False and an invalid number of conformers is found, a warning will
be raised instead of an Exception.
Returns
-------
charges : numpy.array of shape (natoms) of type float
The partial charges
"""
import warnings
warnings.warn(
"compute_partial_charges_am1bcc will be deprecated in an upcoming release. "
"Use assign_partial_charges(partial_charge_method='am1bcc') instead.",
DeprecationWarning,
)
self.assign_partial_charges(
molecule,
partial_charge_method="AM1BCC",
use_conformers=use_conformers,
strict_n_conformers=strict_n_conformers,
)
return molecule.partial_charges
def _modify_sqm_in_to_request_bond_orders(self, file_path):
"""
Modify a sqm.in file produced by antechamber to include the "printbondorders=1" directive
in the header. This method will overwrite the original file.
Parameters
----------
file_path : str
The path to sqm.in
"""
data = open(file_path).read()
# Original sqm.in file headerlooks like:
# Run semi-empirical minimization
# &qmmm
# qm_theory='AM1', grms_tol=0.0005,
# scfconv=1.d-10, ndiis_attempts=700, qmcharge=0,
# /
# ... (atom coordinates in something like XYZ format) ...
# To get WBOs, we need to add "printbondorders=1" to the list of keywords
# First, split the sqm.in text at the "/" mark at the end of the header
datasp = data.split("/")
# Insert the "printbondorders" directive in a new line and re-add the "/"
datasp.insert(1, "printbondorders=1, \n /")
# Reassemble the file text
new_data = "".join(datasp)
# Write the new file contents, overwriting the original file.
with open(file_path, "w") as of:
of.write(new_data)
def _get_fractional_bond_orders_from_sqm_out(
self, file_path, validate_elements=None
):
"""
Process a SQM output file containing bond orders, and return a dict of the form
dict[atom_1_index, atom_2_index] = fractional_bond_order
Parameters
----------
file_path : str
File path for sqm output file
validate_elements : iterable of str
The element symbols expected in molecule index order. A ValueError will be raised
if the elements are not found in this order.
Returns
-------
bond_orders : dict[(int, int)]: float
A dictionary where the keys are tuples of two atom indices and the values are
floating-point bond orders. The keys are sorted in ascending order, such that
the lower atom index is key[0] and the higher is key[1].
"""
# Example sqm.out section with WBOs:
# Bond Orders
#
# QMMM: NUM1 ELEM1 NUM2 ELEM2 BOND_ORDER
# QMMM: 2 C 1 C 1.41107532
# QMMM: 3 C 1 C 1.41047804
# ...
# QMMM: 15 H 13 H 0.00000954
# QMMM: 15 H 14 H 0.00000813
#
# --------- Calculation Completed ----------
data = open(file_path).read()
begin_sep = """ Bond Orders
QMMM: NUM1 ELEM1 NUM2 ELEM2 BOND_ORDER
"""
end_sep = """
--------- Calculation Completed ----------
"""
# Extract the chunk of text between begin_sep and end_sep, and split it by newline
fbo_lines = data.split(begin_sep)[1].split(end_sep)[0].split("\n")
# Iterate over the lines and populate the dict to return
bond_orders = dict()
for line in fbo_lines:
linesp = line.split()
atom_index_1 = int(linesp[1])
atom_element_1 = linesp[2]
atom_index_2 = int(linesp[3])
atom_element_2 = linesp[4]
bond_order = float(linesp[5])
# If validate_elements was provided, ensure that the ordering of element symbols is what we expected
if validate_elements is not None:
if (atom_element_1 != validate_elements[atom_index_1 - 1]) or (
atom_element_2 != validate_elements[atom_index_2 - 1]
):
# raise ValueError('\n'.join(fbo_lines))
raise ValueError(
f"Elements or indexing in sqm output differ from expectation. "
f"Expected {validate_elements[atom_index_1]} with index {atom_index_1} and "
f"{validate_elements[atom_index_2]} with index {atom_index_2}, "
f"but SQM output has {atom_element_1} and {atom_element_2} for the same atoms."
)
# To make lookup easier, we identify bonds as integer tuples with the lowest atom index
# first and the highest second.
index_tuple = tuple(sorted([atom_index_1, atom_index_2]))
bond_orders[index_tuple] = bond_order
return bond_orders
def assign_fractional_bond_orders(
self, molecule, bond_order_model=None, use_conformers=None, _cls=None
):
"""
Update and store list of bond orders this molecule. Bond orders are stored on each
bond, in the `bond.fractional_bond_order` attribute.
.. warning :: This API is experimental and subject to change.
Parameters
----------
molecule : openff.toolkit.topology.molecule Molecule
The molecule to assign wiberg bond orders to
bond_order_model : str, optional, default=None
The charge model to use. Only allowed value is 'am1-wiberg'. If None, 'am1-wiberg' will be used.
use_conformers : iterable of openmm.unit.Quantity(np.array) with shape (n_atoms, 3)
and dimension of distance, optional, default=None
The conformers to use for fractional bond order calculation. If None, an appropriate
number of conformers will be generated by an available ToolkitWrapper.
_cls : class
Molecule constructor
"""
from openff.toolkit.topology import Molecule
# Find the path to antechamber
# TODO: How should we implement find_executable?
ANTECHAMBER_PATH = find_executable("antechamber")
if ANTECHAMBER_PATH is None:
raise AntechamberNotFoundError(
"Antechamber not found, cannot run "
"AmberToolsToolkitWrapper.assign_fractional_bond_orders()"
)
if _cls is None:
_cls = Molecule
# Make a copy since we'll be messing with this molecule's conformers
temp_mol = _cls(molecule)
if use_conformers is None:
temp_mol.generate_conformers(
n_conformers=1,
toolkit_registry=self._rdkit_toolkit_wrapper,
)
else:
temp_mol._conformers = None
for conformer in use_conformers:
temp_mol._add_conformer(conformer)
if len(temp_mol.conformers) == 0:
raise ValueError(
"No conformers present in molecule submitted for fractional bond order calculation. Consider "
"loading the molecule from a file with geometry already present or running "
"molecule.generate_conformers() before calling molecule.assign_fractional_bond_orders"
)
# Compute bond orders
bond_order_model_to_antechamber_keyword = {"am1-wiberg": "mul"}
supported_bond_order_models = list(
bond_order_model_to_antechamber_keyword.keys()
)
if bond_order_model is None:
bond_order_model = "am1-wiberg"
bond_order_model = bond_order_model.lower()
if bond_order_model not in supported_bond_order_models:
raise ValueError(
f"Bond order model '{bond_order_model}' is not supported by AmberToolsToolkitWrapper. "
f"Supported models are {supported_bond_order_models}"
)
ac_charge_keyword = bond_order_model_to_antechamber_keyword[bond_order_model]
bond_orders = defaultdict(list)
for conformer in [*temp_mol.conformers]:
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
net_charge = temp_mol.total_charge
# Write out molecule in SDF format
temp_mol._conformers = [conformer]
self._rdkit_toolkit_wrapper.to_file(
temp_mol, "molecule.sdf", file_format="sdf"
)
# Prepare sqm.in file as if we were going to run charge calc
# TODO: Add error handling if antechamber chokes
subprocess.check_output(
[
"antechamber",
"-i",
"molecule.sdf",
"-fi",
"sdf",
"-o",
"sqm.in",
"-fo",
"sqmcrt",
"-pf",
"yes",
"-c",
ac_charge_keyword,
"-nc",
str(net_charge),
]
)
# Modify sqm.in to request bond order calculation
self._modify_sqm_in_to_request_bond_orders("sqm.in")
# Run sqm to get bond orders
subprocess.check_output(
["sqm", "-i", "sqm.in", "-o", "sqm.out", "-O"]
)
# Ensure that antechamber/sqm did not change the indexing by checking against
# an ordered list of element symbols for this molecule
expected_elements = [at.element.symbol for at in molecule.atoms]
conformer_bond_orders = (
self._get_fractional_bond_orders_from_sqm_out(
"sqm.out", validate_elements=expected_elements
)
)
for bond_indices, value in conformer_bond_orders.items():
bond_orders[bond_indices].append(value)
# Note that sqm calculate WBOs for ALL PAIRS of atoms, not just those that have
# bonds defined in the original molecule. So here we iterate over the bonds in
# the original molecule and only nab the WBOs for those.
for bond in molecule.bonds:
# The atom index tuples that act as bond indices are ordered from lowest to highest by
# _get_fractional_bond_orders_from_sqm_out, so here we make sure that we look them up in
# sorted order as well
sorted_atom_indices = sorted(
tuple([bond.atom1_index + 1, bond.atom2_index + 1])
)
bond.fractional_bond_order = np.mean(
bond_orders[tuple(sorted_atom_indices)]
)
| [
"tempfile.TemporaryDirectory",
"openff.toolkit.utils.exceptions.ToolkitUnavailableException",
"openff.toolkit.utils.exceptions.AntechamberNotFoundError",
"subprocess.check_output",
"numpy.zeros",
"os.path.exists",
"openff.toolkit.utils.rdkit_wrapper.RDKitToolkitWrapper",
"collections.defaultdict",
"... | [((1923, 1969), 'subprocess.check_output', 'subprocess.check_output', (["['antechamber', '-L']"], {}), "(['antechamber', '-L'])\n", (1946, 1969), False, 'import subprocess\n'), ((2293, 2328), 'openff.toolkit.utils.rdkit_wrapper.RDKitToolkitWrapper', 'rdkit_wrapper.RDKitToolkitWrapper', ([], {}), '()\n', (2326, 2328), False, 'from openff.toolkit.utils import base_wrapper, rdkit_wrapper\n'), ((2701, 2731), 'distutils.spawn.find_executable', 'find_executable', (['"""antechamber"""'], {}), "('antechamber')\n", (2716, 2731), False, 'from distutils.spawn import find_executable\n'), ((7684, 7714), 'distutils.spawn.find_executable', 'find_executable', (['"""antechamber"""'], {}), "('antechamber')\n", (7699, 7714), False, 'from distutils.spawn import find_executable\n'), ((10877, 10923), 'simtk.unit.Quantity', 'unit.Quantity', (['charges', 'unit.elementary_charge'], {}), '(charges, unit.elementary_charge)\n', (10890, 10923), False, 'from simtk import unit\n'), ((12297, 12485), 'warnings.warn', 'warnings.warn', (['"""compute_partial_charges_am1bcc will be deprecated in an upcoming release. Use assign_partial_charges(partial_charge_method=\'am1bcc\') instead."""', 'DeprecationWarning'], {}), '(\n "compute_partial_charges_am1bcc will be deprecated in an upcoming release. Use assign_partial_charges(partial_charge_method=\'am1bcc\') instead."\n , DeprecationWarning)\n', (12310, 12485), False, 'import warnings\n'), ((18508, 18538), 'distutils.spawn.find_executable', 'find_executable', (['"""antechamber"""'], {}), "('antechamber')\n", (18523, 18538), False, 'from distutils.spawn import find_executable\n'), ((20400, 20417), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (20411, 20417), False, 'from collections import defaultdict\n'), ((1660, 1803), 'openff.toolkit.utils.exceptions.ToolkitUnavailableException', 'ToolkitUnavailableException', (['f"""The required toolkit {self._toolkit_name} is not available. 
{self._toolkit_installation_instructions}"""'], {}), "(\n f'The required toolkit {self._toolkit_name} is not available. {self._toolkit_installation_instructions}'\n )\n", (1687, 1803), False, 'from openff.toolkit.utils.exceptions import AntechamberNotFoundError, ChargeCalculationError, ChargeMethodUnavailableError, ToolkitUnavailableException\n'), ((2920, 2968), 'openff.toolkit.utils.rdkit_wrapper.RDKitToolkitWrapper.is_available', 'rdkit_wrapper.RDKitToolkitWrapper.is_available', ([], {}), '()\n', (2966, 2968), False, 'from openff.toolkit.utils import base_wrapper, rdkit_wrapper\n'), ((7770, 7844), 'openff.toolkit.utils.exceptions.AntechamberNotFoundError', 'AntechamberNotFoundError', (['"""Antechamber not found, cannot run charge_mol()"""'], {}), "('Antechamber not found, cannot run charge_mol()')\n", (7794, 7844), False, 'from openff.toolkit.utils.exceptions import AntechamberNotFoundError, ChargeCalculationError, ChargeMethodUnavailableError, ToolkitUnavailableException\n'), ((7915, 7944), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7942, 7944), False, 'import tempfile\n'), ((18594, 18722), 'openff.toolkit.utils.exceptions.AntechamberNotFoundError', 'AntechamberNotFoundError', (['"""Antechamber not found, cannot run AmberToolsToolkitWrapper.assign_fractional_bond_orders()"""'], {}), "(\n 'Antechamber not found, cannot run AmberToolsToolkitWrapper.assign_fractional_bond_orders()'\n )\n", (18618, 18722), False, 'from openff.toolkit.utils.exceptions import AntechamberNotFoundError, ChargeCalculationError, ChargeMethodUnavailableError, ToolkitUnavailableException\n'), ((7973, 7993), 'openff.toolkit.utils.utils.temporary_cd', 'temporary_cd', (['tmpdir'], {}), '(tmpdir)\n', (7985, 7993), False, 'from openff.toolkit.utils.utils import temporary_cd\n'), ((9251, 9434), 'subprocess.check_output', 'subprocess.check_output', (["['antechamber', '-dr', 'n', '-i', 'charged.mol2', '-fi', 'mol2', '-o',\n 'charges2.mol2', '-fo', 'mol2', '-c', 
'wc', '-cf', 'charges.txt', '-pf',\n 'yes']"], {}), "(['antechamber', '-dr', 'n', '-i', 'charged.mol2',\n '-fi', 'mol2', '-o', 'charges2.mol2', '-fo', 'mol2', '-c', 'wc', '-cf',\n 'charges.txt', '-pf', 'yes'])\n", (9274, 9434), False, 'import subprocess\n'), ((10604, 10644), 'numpy.zeros', 'np.zeros', (['[molecule.n_atoms]', 'np.float64'], {}), '([molecule.n_atoms], np.float64)\n', (10612, 10644), True, 'import numpy as np\n'), ((20486, 20515), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (20513, 20515), False, 'import tempfile\n'), ((9984, 10013), 'os.path.exists', 'os.path.exists', (['"""charges.txt"""'], {}), "('charges.txt')\n", (9998, 10013), False, 'import os\n'), ((20549, 20569), 'openff.toolkit.utils.utils.temporary_cd', 'temporary_cd', (['tmpdir'], {}), '(tmpdir)\n', (20561, 20569), False, 'from openff.toolkit.utils.utils import temporary_cd\n'), ((21931, 22002), 'subprocess.check_output', 'subprocess.check_output', (["['sqm', '-i', 'sqm.in', '-o', 'sqm.out', '-O']"], {}), "(['sqm', '-i', 'sqm.in', '-o', 'sqm.out', '-O'])\n", (21954, 22002), False, 'import subprocess\n'), ((6982, 7017), 'openff.toolkit.utils.rdkit_wrapper.RDKitToolkitWrapper', 'rdkit_wrapper.RDKitToolkitWrapper', ([], {}), '()\n', (7015, 7017), False, 'from openff.toolkit.utils import base_wrapper, rdkit_wrapper\n')] |
import time
import shutil
import unittest
from os import cpu_count
import numpy as np
import pandas as pd
from numba import set_num_threads
from scipy.sparse import csr_matrix
# from agtool.download.movielen import ml_10m
from agtool.download.movielen import ml_100k
from agtool.cm.sampling import negative_sampling
from agtool.sampling import negative_sampling as numba_negative_sampling
class TestNegativeSampling(unittest.TestCase):
@classmethod
def setUpClass(cls):
# cls.DIR = ml_10m()
cls.DIR = ml_100k()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.DIR)
print(f"Remove {cls.DIR}")
def _load_data(self):
df_indptr = pd.read_csv(f'{self.DIR}/processed/indptr', header=None)
indptr = df_indptr.to_numpy().squeeze().astype(np.int32)
df_indices = pd.read_csv(f'{self.DIR}/processed/indices', header=None)
indices = df_indices.to_numpy().squeeze().astype(np.int32)
num_items = max(indices) + 1
return indptr, indices, num_items
def test01_negative_sampling(self):
indptr, indices, num_items = self._load_data()
print("num_items: ", num_items)
for num_threads in range(1, min(7, cpu_count())):
start = time.time()
uids, negatives = negative_sampling(
indptr, indices, 5, num_items, num_threads
)
print(f"[negative_sampling] takes {time.time() - start:.6f} seconds for [{num_threads}] threads")
print("10 samples: ", uids[:10])
print("10 negatives: ", negatives[:10])
negative_matrix = csr_matrix((np.ones_like(uids), (uids, negatives)), shape=(len(indptr) - 1, num_items))
for i in range(len(indptr) - 1):
positives = indices[indptr[i]: indptr[i + 1]]
negatives = negative_matrix[i].nonzero()[1]
intersect = np.intersect1d(positives, negatives)
is_empty = intersect.size == 0
msg = f"Negative sampling [Invalid] intersect[user {i}]: {intersect.tolist()}"
self.assertTrue(is_empty, msg=msg)
def test02_speed_experiments(self):
indptr, indices, num_items = self._load_data()
start = time.time()
numba_negative_sampling(indptr, indices, 5, num_items)
print(f"[numba_negative_sampling] build time takes {time.time() - start:.6f} seconds")
for num_threads in range(1, min(cpu_count(), 7)):
set_num_threads(num_threads)
start = time.time()
numba_negative_sampling(indptr, indices, 5, num_items)
print(f"[numba_negative_sampling] takes {time.time() - start:.6f} seconds for [{num_threads}] threads")
| [
"numba.set_num_threads",
"numpy.ones_like",
"pandas.read_csv",
"time.time",
"os.cpu_count",
"agtool.cm.sampling.negative_sampling",
"agtool.download.movielen.ml_100k",
"shutil.rmtree",
"numpy.intersect1d",
"agtool.sampling.negative_sampling"
] | [((529, 538), 'agtool.download.movielen.ml_100k', 'ml_100k', ([], {}), '()\n', (536, 538), False, 'from agtool.download.movielen import ml_100k\n'), ((593, 615), 'shutil.rmtree', 'shutil.rmtree', (['cls.DIR'], {}), '(cls.DIR)\n', (606, 615), False, 'import shutil\n'), ((698, 754), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.DIR}/processed/indptr"""'], {'header': 'None'}), "(f'{self.DIR}/processed/indptr', header=None)\n", (709, 754), True, 'import pandas as pd\n'), ((841, 898), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.DIR}/processed/indices"""'], {'header': 'None'}), "(f'{self.DIR}/processed/indices', header=None)\n", (852, 898), True, 'import pandas as pd\n'), ((2215, 2226), 'time.time', 'time.time', ([], {}), '()\n', (2224, 2226), False, 'import time\n'), ((2235, 2289), 'agtool.sampling.negative_sampling', 'numba_negative_sampling', (['indptr', 'indices', '(5)', 'num_items'], {}), '(indptr, indices, 5, num_items)\n', (2258, 2289), True, 'from agtool.sampling import negative_sampling as numba_negative_sampling\n'), ((1259, 1270), 'time.time', 'time.time', ([], {}), '()\n', (1268, 1270), False, 'import time\n'), ((1301, 1362), 'agtool.cm.sampling.negative_sampling', 'negative_sampling', (['indptr', 'indices', '(5)', 'num_items', 'num_threads'], {}), '(indptr, indices, 5, num_items, num_threads)\n', (1318, 1362), False, 'from agtool.cm.sampling import negative_sampling\n'), ((1885, 1921), 'numpy.intersect1d', 'np.intersect1d', (['positives', 'negatives'], {}), '(positives, negatives)\n', (1899, 1921), True, 'import numpy as np\n'), ((2455, 2483), 'numba.set_num_threads', 'set_num_threads', (['num_threads'], {}), '(num_threads)\n', (2470, 2483), False, 'from numba import set_num_threads\n'), ((2504, 2515), 'time.time', 'time.time', ([], {}), '()\n', (2513, 2515), False, 'import time\n'), ((2528, 2582), 'agtool.sampling.negative_sampling', 'numba_negative_sampling', (['indptr', 'indices', '(5)', 'num_items'], {}), '(indptr, indices, 5, num_items)\n', 
(2551, 2582), True, 'from agtool.sampling import negative_sampling as numba_negative_sampling\n'), ((1224, 1235), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (1233, 1235), False, 'from os import cpu_count\n'), ((1630, 1648), 'numpy.ones_like', 'np.ones_like', (['uids'], {}), '(uids)\n', (1642, 1648), True, 'import numpy as np\n'), ((2425, 2436), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (2434, 2436), False, 'from os import cpu_count\n'), ((2350, 2361), 'time.time', 'time.time', ([], {}), '()\n', (2359, 2361), False, 'import time\n'), ((1440, 1451), 'time.time', 'time.time', ([], {}), '()\n', (1449, 1451), False, 'import time\n'), ((2636, 2647), 'time.time', 'time.time', ([], {}), '()\n', (2645, 2647), False, 'import time\n')] |
"""
Copyright (c) 2020, <NAME>.
All rights reserved.
"""
import os
import shutil
import numpy as np
from graphviz import Graph
import matplotlib.pyplot as plt
from typing import Optional, List
import logging
from tqdm import tqdm
from joblib import Parallel, delayed
from .lsmi import lsmi1D
class MISSO:
def __init__(self,
num_centers:Optional[int] = 200,
rbf_sigma: Optional[float] = None,
alpha: Optional[float] = None,
verbose:bool = False,
random_seed:int = 42,
mp:bool = None) -> None:
"""
:param num_centers: Number of centers to use when computing the RBF kernel
:param rbf_sigma: Length-scale for the RBF kernel
:param alpha: L2 regularizer weight for the LSMI
:param verbose: Boolean to display computation progress
:param random_seed: Integer seed for reproducibility (default: 42)
:param mp: Boolean to use multiprocessing. If `None`, will use multprocessing is the
current device has multiple cores.
"""
self.num_centers = num_centers
self.rbf_sigma = rbf_sigma
self.alpha = alpha
if mp is None:
self.use_mp = os.cpu_count() >= 4
else:
self.use_mp = mp
if self.use_mp:
self.folder = "./joblib_memmap"
self.verbose = verbose
if self.verbose:
if self.use_mp:
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
self.info = logging.info
else:
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
self.info = logging.info
np.random.seed(random_seed)
self.random_seed = random_seed
def compute_smi(self, *args):
mim, x, y, i, j = args
if self.verbose:
self.info(f"Computing SMI for [{i}, {j}]")
smi, _ = lsmi1D(x, y,
num_centers=self.num_centers,
rbf_sigma=self.rbf_sigma,
alpha=self.alpha,
random_seed = self.random_seed)
mim[i, j] = smi
mim[j, i] = smi
if self.verbose:
self.info(f"Finished SMI for [{i}, {i}]")
def fit(self,
X:np.ndarray,
Y:Optional[np.ndarray] = None) -> np.ndarray:
"""
Computes the sparse mutual information matrix using the LSMI-LASSO method.
:param X: [M x N] Set of N random variables with M samples each
:param Y: [N x M] Set of N random variables with M samples each. Default: None (Will use X)
:return: [N x N] Sparse Mutual Information Matrix
"""
if Y is None:
Y = X
M, N = X.shape
My, Ny = Y.shape
assert M == My, "Both X & Y must have the same number of samples (dim 1)"
assert N == Ny, "Both X & Y must have the same # of random variables (dim 2)"
self.N = N
process_args = [(X[:, i].reshape(-1, 1), Y[:, j].reshape(-1, 1), i, j)
for i in range(N) for j in range(i + 1)]
if self.use_mp: # Multiprocessing Code
os.makedirs(self.folder, exist_ok=True)
shared_mimfile = os.path.join(self.folder, 'mim_memmap')
shared_mim = np.memmap(shared_mimfile, dtype=np.float, shape=(N,N), mode='w+')
if not self.verbose:
Parallel(n_jobs = os.cpu_count())(
delayed(self.compute_smi)(shared_mim, *p_arg) for p_arg in tqdm(process_args, desc='Computing MIM'))
else:
Parallel(n_jobs=os.cpu_count())(
delayed(self.compute_smi)(shared_mim, *p_arg) for p_arg in process_args)
self.MIM = np.array(shared_mim)
shutil.rmtree(self.folder)
else: # Sequential Processing
self.MIM = np.zeros((N, N))
if self.verbose:
pbar = process_args
else:
pbar = tqdm(process_args, desc = 'Computing MIM')
for args in pbar:
self.compute_smi(self.MIM, *args)
return self.MIM
def show_graph(self,
M:np.ndarray,
threshold:float,
node_labels:List,
title:str) -> Graph:
"""
:param M:
:param threshold:
:param node_labels:
:param title:
:return:
"""
g = Graph('G', filename=title+'.gv', engine='dot')
M = np.round(M, 3)
for i in range(M.shape[0]):
for j in range(i + 1):
if(M[i, j] >= threshold and i!= j):
g.edge(node_labels[i], node_labels[j], label=str(M[i,j]))
return g
def show_matrix(self,
M:np.ndarray,
xlabels: List,
ylabels: List = None):
"""
:param M:
:param xlabels:
:param ylabels:
:return:
"""
if ylabels is None:
ylabels = xlabels
fig, ax = plt.subplots()
im = ax.matshow(M, cmap=plt.cm.summer)
plt.xticks(np.arange(0, M.shape[0]), xlabels)
plt.yticks(np.arange(0, M.shape[1]), ylabels)
plt.colorbar(im)
for i in range(M.shape[0]):
for j in range(M.shape[1]):
c = np.round(M[i, j], 3)
ax.text(i, j, str(c), va='center', ha='center')
plt.grid(False)
plt.show()
| [
"tqdm.tqdm",
"numpy.random.seed",
"matplotlib.pyplot.show",
"os.makedirs",
"os.path.join",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"os.cpu_count",
"graphviz.Graph",
"numpy.array",
"numpy.arange",
"shutil.rmtree",
"joblib.delayed",
"numpy.memmap",
"num... | [((1792, 1819), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1806, 1819), True, 'import numpy as np\n'), ((4609, 4657), 'graphviz.Graph', 'Graph', (['"""G"""'], {'filename': "(title + '.gv')", 'engine': '"""dot"""'}), "('G', filename=title + '.gv', engine='dot')\n", (4614, 4657), False, 'from graphviz import Graph\n'), ((4668, 4682), 'numpy.round', 'np.round', (['M', '(3)'], {}), '(M, 3)\n', (4676, 4682), True, 'import numpy as np\n'), ((5227, 5241), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5239, 5241), True, 'import matplotlib.pyplot as plt\n'), ((5406, 5422), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {}), '(im)\n', (5418, 5422), True, 'import matplotlib.pyplot as plt\n'), ((5612, 5627), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5620, 5627), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5644, 5646), True, 'import matplotlib.pyplot as plt\n'), ((3291, 3330), 'os.makedirs', 'os.makedirs', (['self.folder'], {'exist_ok': '(True)'}), '(self.folder, exist_ok=True)\n', (3302, 3330), False, 'import os\n'), ((3360, 3399), 'os.path.join', 'os.path.join', (['self.folder', '"""mim_memmap"""'], {}), "(self.folder, 'mim_memmap')\n", (3372, 3399), False, 'import os\n'), ((3425, 3491), 'numpy.memmap', 'np.memmap', (['shared_mimfile'], {'dtype': 'np.float', 'shape': '(N, N)', 'mode': '"""w+"""'}), "(shared_mimfile, dtype=np.float, shape=(N, N), mode='w+')\n", (3434, 3491), True, 'import numpy as np\n'), ((3893, 3913), 'numpy.array', 'np.array', (['shared_mim'], {}), '(shared_mim)\n', (3901, 3913), True, 'import numpy as np\n'), ((3926, 3952), 'shutil.rmtree', 'shutil.rmtree', (['self.folder'], {}), '(self.folder)\n', (3939, 3952), False, 'import shutil\n'), ((4015, 4031), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (4023, 4031), True, 'import numpy as np\n'), ((5308, 5332), 
'numpy.arange', 'np.arange', (['(0)', 'M.shape[0]'], {}), '(0, M.shape[0])\n', (5317, 5332), True, 'import numpy as np\n'), ((5362, 5386), 'numpy.arange', 'np.arange', (['(0)', 'M.shape[1]'], {}), '(0, M.shape[1])\n', (5371, 5386), True, 'import numpy as np\n'), ((1262, 1276), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1274, 1276), False, 'import os\n'), ((1510, 1529), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1527, 1529), False, 'import logging\n'), ((1671, 1690), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1688, 1690), False, 'import logging\n'), ((4139, 4179), 'tqdm.tqdm', 'tqdm', (['process_args'], {'desc': '"""Computing MIM"""'}), "(process_args, desc='Computing MIM')\n", (4143, 4179), False, 'from tqdm import tqdm\n'), ((5519, 5539), 'numpy.round', 'np.round', (['M[i, j]', '(3)'], {}), '(M[i, j], 3)\n', (5527, 5539), True, 'import numpy as np\n'), ((3559, 3573), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (3571, 3573), False, 'import os\n'), ((3608, 3633), 'joblib.delayed', 'delayed', (['self.compute_smi'], {}), '(self.compute_smi)\n', (3615, 3633), False, 'from joblib import Parallel, delayed\n'), ((3667, 3707), 'tqdm.tqdm', 'tqdm', (['process_args'], {'desc': '"""Computing MIM"""'}), "(process_args, desc='Computing MIM')\n", (3671, 3707), False, 'from tqdm import tqdm\n'), ((3759, 3773), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (3771, 3773), False, 'import os\n'), ((3796, 3821), 'joblib.delayed', 'delayed', (['self.compute_smi'], {}), '(self.compute_smi)\n', (3803, 3821), False, 'from joblib import Parallel, delayed\n')] |
import sys, json, numpy as np
from numba import jit
def consume():
return json.loads(input())
@jit
def main(inputs):
weights = [
[0.5, -0.91, 0.26, -0.5],
[0.2, 0.8, -0.5, 1.0],
[-0.5, 0.91, -0.26, 0.5],
[-0.26, -0.27, 0.17, 0.87]
]
biases = [2.0, 3.0, 0.5, 0.2]
output = np.dot(weights, inputs) + biases
return output
if __name__ == '__main__':
output = main(consume())
output = json.dumps(output.tolist())
print(output)
| [
"numpy.dot"
] | [((328, 351), 'numpy.dot', 'np.dot', (['weights', 'inputs'], {}), '(weights, inputs)\n', (334, 351), True, 'import sys, json, numpy as np\n')] |
import numpy as np
import pytest
from podium.experimental.models.impl.fc_model import ScikitMLPClassifier
X = np.array([[1, 0, 1], [1, 1, 1], [0, 0, 1]])
Y = np.array([0, 1, 0])
def test_scikit_mlp_model_shape():
pytest.importorskip("sklearn")
model = ScikitMLPClassifier(classes=np.unique(Y))
model.fit(X=X, y=Y)
result = model.predict(X=X)
assert result.get(model.PREDICTION_KEY) is not None
assert result.get(model.PREDICTION_KEY).shape == (3,)
| [
"pytest.importorskip",
"numpy.array",
"numpy.unique"
] | [((113, 156), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 1, 1], [0, 0, 1]]'], {}), '([[1, 0, 1], [1, 1, 1], [0, 0, 1]])\n', (121, 156), True, 'import numpy as np\n'), ((161, 180), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (169, 180), True, 'import numpy as np\n'), ((222, 252), 'pytest.importorskip', 'pytest.importorskip', (['"""sklearn"""'], {}), "('sklearn')\n", (241, 252), False, 'import pytest\n'), ((293, 305), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (302, 305), True, 'import numpy as np\n')] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as scs # sparse matrix construction
import scipy.linalg as scl # linear algebra algorithms
import scipy.optimize as sco # for minimization use
import matplotlib.pylab as plt # for visualization
import time
import sys
from multiprocessing import Pool
from sudoku_solver import solver
# We test the following algoritm on small data set.
if __name__ == "__main__":
if len(sys.argv) < 2:
data = pd.read_csv("./small2.csv")
else:
data = pd.read_csv(sys.argv[1])
corr_cnt = 0
start = time.time()
random_seed = 42
sample_max = 1000
np.random.seed(random_seed)
if len(data) > sample_max:
samples = np.random.choice(len(data), sample_max)
else:
samples = np.arange(len(data))
pool = Pool()
quizzes = data["quizzes"][samples]
solutions = data["solutions"][samples]
res = pool.map(solver, quizzes)
corr_cnt = np.sum(1*(res == solutions))
end = time.time()
# report:
print("Aver Time: {t:6.2f} secs. Success rate: {corr} / {all} ".format(t=(end-start)/(len(samples)), corr=corr_cnt, all=len(samples)) )
| [
"numpy.random.seed",
"numpy.sum",
"pandas.read_csv",
"time.time",
"multiprocessing.Pool"
] | [((619, 630), 'time.time', 'time.time', ([], {}), '()\n', (628, 630), False, 'import time\n'), ((672, 699), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (686, 699), True, 'import numpy as np\n'), ((836, 842), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (840, 842), False, 'from multiprocessing import Pool\n'), ((968, 998), 'numpy.sum', 'np.sum', (['(1 * (res == solutions))'], {}), '(1 * (res == solutions))\n', (974, 998), True, 'import numpy as np\n'), ((1005, 1016), 'time.time', 'time.time', ([], {}), '()\n', (1014, 1016), False, 'import time\n'), ((525, 552), 'pandas.read_csv', 'pd.read_csv', (['"""./small2.csv"""'], {}), "('./small2.csv')\n", (536, 552), True, 'import pandas as pd\n'), ((570, 594), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (581, 594), True, 'import pandas as pd\n')] |
'''
Tests for stoqcompiler.hamiltonian modules.
'''
import pytest
import numpy as np
from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm
from stoqcompiler.compiler import CompilerResult
from stoqcompiler.unitary import Unitary, UnitarySequence
qubit_dimension = 2
class TestHamiltonian:
def test_no_terms(self) -> None:
terms = None
with pytest.raises(Exception):
Hamiltonian(terms)
def test_terms_mismatched_dimension(self) -> None:
term1 = HamiltonianTerm(np.array([
[3, 2 + 1j],
[2 - 1j, 3]]))
term2 = HamiltonianTerm(np.array([
[3, 2 + 1j, 0, 0],
[2 - 1j, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 3]]))
with pytest.raises(Exception):
Hamiltonian([term1, term2])
def test_simple_terms(self) -> None:
term = HamiltonianTerm(np.array([[3, 2 + 1j], [2 - 1j, 3]]))
identity = Unitary.identity(term.get_dimension())
h_2terms = Hamiltonian([term, term])
h_3terms = Hamiltonian([term, term, term])
assert term.get_dimension() == h_2terms.get_dimension()
assert term.get_dimension() == h_3terms.get_dimension()
u = h_2terms.get_time_evolution_operator(0)
assert isinstance(u, Unitary)
assert u.close_to(identity)
time = 1.234
u_2terms = h_2terms.get_time_evolution_operator(3 * time)
u_3terms = h_3terms.get_time_evolution_operator(2 * time)
assert not u_2terms.close_to(identity)
assert u_2terms.close_to(u_3terms)
def test_ideal_sequence(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1j], [1j, 0]])
x_term = HamiltonianTerm(2 * sigmax)
y_term = HamiltonianTerm(-3 * sigmay)
terms = [x_term, y_term]
h = Hamiltonian(terms)
time = 1.234
u = h.get_time_evolution_operator(time)
num_steps = 1000
ideal_sequence = h.get_ideal_sequence(time, num_steps)
assert isinstance(ideal_sequence, UnitarySequence)
assert ideal_sequence.get_length() == num_steps
assert u.close_to(ideal_sequence.product()), \
u.distance_from(ideal_sequence.product())
def test_trotterization(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1j], [1j, 0]])
x_term = HamiltonianTerm(2 * sigmax)
y_term = HamiltonianTerm(-3 * sigmay)
terms = [x_term, y_term]
h = Hamiltonian(terms)
time = 1.234
u = h.get_time_evolution_operator(time)
num_trotter_steps = 20
trotter_sequence = h.get_trotter_sequence(time, num_trotter_steps)
assert isinstance(trotter_sequence, UnitarySequence)
assert trotter_sequence.get_length() == num_trotter_steps * len(terms)
randomized_trotter_sequence = h.get_trotter_sequence(
time, num_trotter_steps, randomize=True)
assert isinstance(randomized_trotter_sequence, UnitarySequence)
assert (randomized_trotter_sequence.get_length()
== num_trotter_steps * len(terms))
assert u.close_to(trotter_sequence.product(), 0.95), \
u.distance_from(trotter_sequence.product())
assert u.close_to(randomized_trotter_sequence.product(), 0.95), \
u.distance_from(randomized_trotter_sequence.product())
assert not trotter_sequence.product().close_to(
randomized_trotter_sequence.product())
def test_qdrift(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1j], [1j, 0]])
x_term = HamiltonianTerm(10 * sigmax)
y_term = HamiltonianTerm(-1 * sigmay)
terms = [x_term, y_term]
h = Hamiltonian(terms)
time = 0.543
u = h.get_time_evolution_operator(time)
num_repetitions = 1000
qdrift_sequence = h.get_qdrift_sequence(time, num_repetitions)
assert isinstance(qdrift_sequence, UnitarySequence)
assert qdrift_sequence.get_length() == num_repetitions
assert u.close_to(qdrift_sequence.product(), 0.95), \
u.distance_from(qdrift_sequence.product())
def test_stoq(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1j], [1j, 0]])
x_term = HamiltonianTerm(2 * sigmax)
y_term = HamiltonianTerm(-3 * sigmay)
terms = [x_term, y_term]
h = Hamiltonian(terms)
time = 0.543
u = h.get_time_evolution_operator(time)
max_t_step = time / 10
threshold = 0.9
stoq_compiler_result = h.compile_stoq_sequence(
time, max_t_step, threshold, allow_simultaneous_terms=False)
assert isinstance(stoq_compiler_result, CompilerResult)
assert u.close_to(
stoq_compiler_result.compiled_sequence.product(), threshold), \
u.distance_from(stoq_compiler_result.compiled_sequence.product())
threshold = 0.9
stoq_compiler_result = h.compile_stoq_sequence(
time, max_t_step, threshold, allow_simultaneous_terms=True)
assert isinstance(stoq_compiler_result, CompilerResult)
assert u.close_to(
stoq_compiler_result.compiled_sequence.product(), threshold), \
u.distance_from(stoq_compiler_result.compiled_sequence.product())
assert stoq_compiler_result.compiled_sequence.get_qasm()
def test_rav(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1j], [1j, 0]])
x_term = HamiltonianTerm(2 * sigmax)
y_term = HamiltonianTerm(-3 * sigmay)
terms = [x_term, y_term]
h = Hamiltonian(terms)
time = 0.543
max_t_step = time / 10
threshold = 0.9
rav_result = h.compile_rav_sequence(
time, max_t_step, threshold, allow_simultaneous_terms=True)
assert isinstance(rav_result, CompilerResult)
product = rav_result.compiled_sequence.product()
assert product.close_to(
Unitary.identity(h.get_dimension()), threshold), \
product.distance_from(Unitary.identity(h.get_dimension()))
assert rav_result.compiled_sequence.get_qasm()
def test_two_qubits(self) -> None:
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1j], [1j, 0]])
xx = np.kron(sigmax, sigmax)
y1 = np.kron(sigmay, np.identity(qubit_dimension))
y2 = np.kron(np.identity(qubit_dimension), sigmay)
terms = [
HamiltonianTerm(2 * xx),
HamiltonianTerm(1.5 * y1),
HamiltonianTerm(1.1 * y2)]
h = Hamiltonian(terms)
u = h.get_time_evolution_operator(0)
assert isinstance(u, Unitary)
assert u.get_dimension() == h.get_dimension()
assert u.close_to(Unitary.identity(h.get_dimension()))
time = 1.234
u = h.get_time_evolution_operator(time)
num_trotter_steps = 20
randomized_trotter_sequence = h.get_trotter_sequence(
time, num_trotter_steps, randomize=True)
assert isinstance(randomized_trotter_sequence, UnitarySequence)
num_repetitions = 1000
qdrift_sequence = h.get_qdrift_sequence(time, num_repetitions)
assert isinstance(qdrift_sequence, UnitarySequence)
assert qdrift_sequence.get_length() == num_repetitions
# should be close
assert u.close_to(randomized_trotter_sequence.product(), 0.95), \
u.distance_from(randomized_trotter_sequence.product())
assert u.close_to(qdrift_sequence.product(), 0.95), \
u.distance_from(qdrift_sequence.product())
# but should not be exactly the same
assert not u.close_to(randomized_trotter_sequence.product())
assert not u.close_to(qdrift_sequence.product())
assert not randomized_trotter_sequence.product().close_to(
qdrift_sequence.product())
class TestHamiltonianTerm:
def test_no_matrix(self) -> None:
matrix = None
with pytest.raises(Exception):
HamiltonianTerm(matrix)
def test_non_hermitian_matrix(self) -> None:
matrix = np.array([[1, 0], [1, 1]])
with pytest.raises(Exception):
HamiltonianTerm(matrix)
def test_simple_hermitian_matrix(self) -> None:
matrix = np.array([[3, 2 + 1j], [2 - 1j, 3]])
term = HamiltonianTerm(matrix)
assert term.get_dimension() == matrix.shape[0]
assert np.allclose(matrix, term.get_matrix())
coefficient = term.get_coefficient()
normalized_matrix = term.get_normalized_matrix()
assert np.isreal(coefficient)
assert coefficient >= 0.0
assert np.allclose(matrix, coefficient * normalized_matrix)
_, s, _ = np.linalg.svd(normalized_matrix)
assert np.isclose(np.max(s), 1.0)
| [
"numpy.isreal",
"stoqcompiler.hamiltonian.Hamiltonian",
"numpy.allclose",
"numpy.identity",
"pytest.raises",
"numpy.linalg.svd",
"numpy.array",
"numpy.kron",
"numpy.max",
"stoqcompiler.hamiltonian.HamiltonianTerm"
] | [((1011, 1036), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['[term, term]'], {}), '([term, term])\n', (1022, 1036), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((1056, 1087), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['[term, term, term]'], {}), '([term, term, term])\n', (1067, 1087), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((1648, 1674), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1656, 1674), True, 'import numpy as np\n'), ((1692, 1725), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (1700, 1725), True, 'import numpy as np\n'), ((1739, 1766), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(2 * sigmax)'], {}), '(2 * sigmax)\n', (1754, 1766), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((1784, 1812), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(-3 * sigmay)'], {}), '(-3 * sigmay)\n', (1799, 1812), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((1858, 1876), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', (1869, 1876), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((2322, 2348), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2330, 2348), True, 'import numpy as np\n'), ((2366, 2399), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (2374, 2399), True, 'import numpy as np\n'), ((2413, 2440), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(2 * sigmax)'], {}), '(2 * sigmax)\n', (2428, 2440), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((2458, 2486), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(-3 * sigmay)'], {}), '(-3 * sigmay)\n', (2473, 2486), False, 'from 
stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((2532, 2550), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', (2543, 2550), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((3585, 3611), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (3593, 3611), True, 'import numpy as np\n'), ((3629, 3662), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (3637, 3662), True, 'import numpy as np\n'), ((3676, 3704), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(10 * sigmax)'], {}), '(10 * sigmax)\n', (3691, 3704), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((3722, 3750), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(-1 * sigmay)'], {}), '(-1 * sigmay)\n', (3737, 3750), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((3796, 3814), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', (3807, 3814), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((4280, 4306), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (4288, 4306), True, 'import numpy as np\n'), ((4324, 4357), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (4332, 4357), True, 'import numpy as np\n'), ((4371, 4398), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(2 * sigmax)'], {}), '(2 * sigmax)\n', (4386, 4398), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((4416, 4444), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(-3 * sigmay)'], {}), '(-3 * sigmay)\n', (4431, 4444), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((4490, 4508), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', 
(4501, 4508), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((5522, 5548), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (5530, 5548), True, 'import numpy as np\n'), ((5566, 5599), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (5574, 5599), True, 'import numpy as np\n'), ((5613, 5640), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(2 * sigmax)'], {}), '(2 * sigmax)\n', (5628, 5640), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((5658, 5686), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(-3 * sigmay)'], {}), '(-3 * sigmay)\n', (5673, 5686), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((5732, 5750), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', (5743, 5750), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((6337, 6363), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (6345, 6363), True, 'import numpy as np\n'), ((6381, 6414), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (6389, 6414), True, 'import numpy as np\n'), ((6424, 6447), 'numpy.kron', 'np.kron', (['sigmax', 'sigmax'], {}), '(sigmax, sigmax)\n', (6431, 6447), True, 'import numpy as np\n'), ((6711, 6729), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', (6722, 6729), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((8241, 8267), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (8249, 8267), True, 'import numpy as np\n'), ((8413, 8453), 'numpy.array', 'np.array', (['[[3, 2 + 1.0j], [2 - 1.0j, 3]]'], {}), '([[3, 2 + 1.0j], [2 - 1.0j, 3]])\n', (8421, 8453), True, 'import numpy as np\n'), ((8465, 8488), 'stoqcompiler.hamiltonian.HamiltonianTerm', 
'HamiltonianTerm', (['matrix'], {}), '(matrix)\n', (8480, 8488), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((8717, 8739), 'numpy.isreal', 'np.isreal', (['coefficient'], {}), '(coefficient)\n', (8726, 8739), True, 'import numpy as np\n'), ((8789, 8841), 'numpy.allclose', 'np.allclose', (['matrix', '(coefficient * normalized_matrix)'], {}), '(matrix, coefficient * normalized_matrix)\n', (8800, 8841), True, 'import numpy as np\n'), ((8860, 8892), 'numpy.linalg.svd', 'np.linalg.svd', (['normalized_matrix'], {}), '(normalized_matrix)\n', (8873, 8892), True, 'import numpy as np\n'), ((376, 400), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (389, 400), False, 'import pytest\n'), ((414, 432), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['terms'], {}), '(terms)\n', (425, 432), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((521, 561), 'numpy.array', 'np.array', (['[[3, 2 + 1.0j], [2 - 1.0j, 3]]'], {}), '([[3, 2 + 1.0j], [2 - 1.0j, 3]])\n', (529, 561), True, 'import numpy as np\n'), ((616, 701), 'numpy.array', 'np.array', (['[[3, 2 + 1.0j, 0, 0], [2 - 1.0j, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 3]]'], {}), '([[3, 2 + 1.0j, 0, 0], [2 - 1.0j, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 3]]\n )\n', (624, 701), True, 'import numpy as np\n'), ((756, 780), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (769, 780), False, 'import pytest\n'), ((794, 821), 'stoqcompiler.hamiltonian.Hamiltonian', 'Hamiltonian', (['[term1, term2]'], {}), '([term1, term2])\n', (805, 821), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((895, 935), 'numpy.array', 'np.array', (['[[3, 2 + 1.0j], [2 - 1.0j, 3]]'], {}), '([[3, 2 + 1.0j], [2 - 1.0j, 3]])\n', (903, 935), True, 'import numpy as np\n'), ((6477, 6505), 'numpy.identity', 'np.identity', (['qubit_dimension'], {}), '(qubit_dimension)\n', (6488, 6505), True, 'import numpy as np\n'), ((6528, 6556), 
'numpy.identity', 'np.identity', (['qubit_dimension'], {}), '(qubit_dimension)\n', (6539, 6556), True, 'import numpy as np\n'), ((6596, 6619), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(2 * xx)'], {}), '(2 * xx)\n', (6611, 6619), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((6633, 6658), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(1.5 * y1)'], {}), '(1.5 * y1)\n', (6648, 6658), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((6672, 6697), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['(1.1 * y2)'], {}), '(1.1 * y2)\n', (6687, 6697), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((8112, 8136), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8125, 8136), False, 'import pytest\n'), ((8150, 8173), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['matrix'], {}), '(matrix)\n', (8165, 8173), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((8281, 8305), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8294, 8305), False, 'import pytest\n'), ((8319, 8342), 'stoqcompiler.hamiltonian.HamiltonianTerm', 'HamiltonianTerm', (['matrix'], {}), '(matrix)\n', (8334, 8342), False, 'from stoqcompiler.hamiltonian import Hamiltonian, HamiltonianTerm\n'), ((8919, 8928), 'numpy.max', 'np.max', (['s'], {}), '(s)\n', (8925, 8928), True, 'import numpy as np\n')] |
import cPickle as pickle
import numpy as np
import os
import argparse
import torch
import h5py
from HICO_DET_utils import calc_ap, obj_range, rare, getSigmoid, hoi_no_inter_all
from HICO_DET_utils import obj_range, getSigmoid, hoi_no_inter_all
def parse_args():
parser = argparse.ArgumentParser(description='Generate detection file')
parser.add_argument('--model', dest='model',
help='Select model to generate',
default='', type=str)
args = parser.parse_args()
return args
args = parse_args()
im_index = np.zeros(10000).astype(np.int32)
mapping = pickle.load(open('key_mapping.pkl', 'rb'))
#with open('key_mapping.pkl', 'rb') as f:
# mapping = pkl.load(f)
#pkl.dump(mapping, open('key_mapping.pkl',"wb"), protocol=2)
for key in mapping.keys():
im_index[key] = mapping[key]
with h5py.File('hico_caffe600.h5', 'r') as f:
score_I = f['w'][:, :]
score_H = pickle.load(open('TIN/score_H.pkl', 'rb'))
score_O = pickle.load(open('TIN/score_O.pkl', 'rb'))
score_sp = pickle.load(open('TIN/score_sp.pkl', 'rb'))
hdet = pickle.load(open('TIN/hdet.pkl', 'rb'))
odet = pickle.load(open('TIN/odet.pkl', 'rb'))
keys = pickle.load(open('TIN/keys.pkl', 'rb'))
pos = pickle.load(open('TIN/pos.pkl', 'rb'))
neg = pickle.load(open('TIN/neg.pkl', 'rb'))
bboxes = pickle.load(open('TIN/bboxes.pkl', 'rb'))
score_P = pickle.load(open(args.model + '/scores_P.pkl', 'rb'))
score_A = pickle.load(open(args.model + '/scores_A.pkl', 'rb'))
score_L = pickle.load(open(args.model + '/scores_L.pkl', 'rb'))
h_fac, o_fac, sp_fac, P_fac, A_fac, L_fac, hthresh, othresh, athresh, bthresh, P_weight, A_weight, L_weight = pickle.load(open('generation_args.pkl', 'rb'))
detection = {}
detection['bboxes'] = []
detection['scores'] = []
detection['index'] = []
detection['keys'] = []
for i in range(600):
detection['index'].append([])
detection['scores'].append([])
detection['bboxes'].append([])
detection['keys'].append([])
for obj_index in range(80):
x, y = obj_range[obj_index]
x -= 1
inter_det_mask = (hdet[obj_index] > hthresh[x]) * (odet[obj_index] > othresh[x])
no_inter_det_mask = (hdet[obj_index] > hthresh[y-1]) * (odet[obj_index] > othresh[y-1])
for hoi_index in range(x, y):
score_H[obj_index][:, hoi_index - x] /= h_fac[hoi_index]
score_O[obj_index][:, hoi_index - x] /= o_fac[hoi_index]
score_sp[obj_index][:, hoi_index - x] /= sp_fac[hoi_index]
score_P[obj_index][:, hoi_index - x] /= P_fac[hoi_index]
score_A[obj_index][:, hoi_index - x] /= A_fac[hoi_index]
score_L[obj_index][:, hoi_index - x] /= L_fac[hoi_index]
hod = getSigmoid(9, 1, 3, 0, hdet[obj_index].reshape(-1, 1)) * getSigmoid(9, 1, 3, 0, odet[obj_index].reshape(-1, 1))
sH = torch.sigmoid(torch.from_numpy(score_H[obj_index]).cuda()).cpu().numpy()
sO = torch.sigmoid(torch.from_numpy(score_O[obj_index]).cuda()).cpu().numpy()
ssp = torch.sigmoid(torch.from_numpy(score_sp[obj_index]).cuda()).cpu().numpy()
sP = torch.sigmoid(torch.from_numpy(score_P[obj_index]).cuda()).cpu().numpy() * P_weight
sA = torch.sigmoid(torch.from_numpy(score_A[obj_index]).cuda()).cpu().numpy() * A_weight
sL = torch.sigmoid(torch.from_numpy(score_L[obj_index]).cuda()).cpu().numpy() * L_weight
sHO = (((sH + sO) * ssp + sP + sA + sL) * score_I[im_index[keys[obj_index]], x:y]) * hod
for hoi_index in range(x, y):
at, bt = athresh[hoi_index], bthresh[hoi_index]
if hoi_index + 1 in hoi_no_inter_all:
nis_mask = 1 - (pos[obj_index] > at) * (neg[obj_index] < bt)
mask = no_inter_det_mask * nis_mask
else:
nis_mask = 1 - (pos[obj_index] < at) * (neg[obj_index] > bt)
mask = inter_det_mask * nis_mask
select = np.where(mask > 0)[0]
detection['scores'][hoi_index] = sHO[select, hoi_index - x]
detection['bboxes'][hoi_index] = bboxes[obj_index][select]
detection['keys'][hoi_index] = keys[obj_index][select]
pickle.dump(detection, open('Detection_' + args.model + '.pkl', 'wb'))
| [
"h5py.File",
"argparse.ArgumentParser",
"numpy.zeros",
"numpy.where",
"torch.from_numpy"
] | [((276, 338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate detection file"""'}), "(description='Generate detection file')\n", (299, 338), False, 'import argparse\n'), ((836, 870), 'h5py.File', 'h5py.File', (['"""hico_caffe600.h5"""', '"""r"""'], {}), "('hico_caffe600.h5', 'r')\n", (845, 870), False, 'import h5py\n'), ((548, 563), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (556, 563), True, 'import numpy as np\n'), ((3854, 3872), 'numpy.where', 'np.where', (['(mask > 0)'], {}), '(mask > 0)\n', (3862, 3872), True, 'import numpy as np\n'), ((2837, 2873), 'torch.from_numpy', 'torch.from_numpy', (['score_H[obj_index]'], {}), '(score_H[obj_index])\n', (2853, 2873), False, 'import torch\n'), ((2920, 2956), 'torch.from_numpy', 'torch.from_numpy', (['score_O[obj_index]'], {}), '(score_O[obj_index])\n', (2936, 2956), False, 'import torch\n'), ((3003, 3040), 'torch.from_numpy', 'torch.from_numpy', (['score_sp[obj_index]'], {}), '(score_sp[obj_index])\n', (3019, 3040), False, 'import torch\n'), ((3087, 3123), 'torch.from_numpy', 'torch.from_numpy', (['score_P[obj_index]'], {}), '(score_P[obj_index])\n', (3103, 3123), False, 'import torch\n'), ((3181, 3217), 'torch.from_numpy', 'torch.from_numpy', (['score_A[obj_index]'], {}), '(score_A[obj_index])\n', (3197, 3217), False, 'import torch\n'), ((3275, 3311), 'torch.from_numpy', 'torch.from_numpy', (['score_L[obj_index]'], {}), '(score_L[obj_index])\n', (3291, 3311), False, 'import torch\n')] |
#!/usr/bin/env python
import pandas
import numpy as np
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score
from sklearn import preprocessing
import itertools
filename = 'Database/pfh_6.csv'
df = pandas.read_csv(filename, delimiter=',')
# Choose how many objects you want to cluster.
df = df[df.Id < 7]
# drop by Name
df = df.drop(['Name'], axis=1)
X = df.loc[:, df.columns != 'Id']
Y = df.loc[:, df.columns == 'Id'].values.ravel()
# Find the number of objects in your dataset.
temp = np.unique(Y, return_counts=1)
n_labels = len(temp[0])
# Preprocess the features.
# Preprocessing Method 1
# axis used to normalize the data along. If 1, independently normalize each sample,
# otherwise (if 0) normalize each feature.
# X = preprocessing.normalize(X, norm='max', axis=0)
# Preprocessing Method 2
# X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# X_scaled = X_std * (max - min) + min
min_max = preprocessing.MinMaxScaler(feature_range=(0, 1))
X = min_max.fit(X).transform(X)
# Preprocessing Method 3
# standard = preprocessing.StandardScaler().fit(X)
# X = standard.transform(X)
# Split the dataset into training and validation sets.
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = \
model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# Spot Check Algorithms
models = [('KNN', KNeighborsClassifier()), ('CART', DecisionTreeClassifier())]
# evaluate each model in turn based on scoring
results = []
names = []
scoring = 'accuracy'
for name, model in models:
k_fold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=k_fold, scoring=scoring, n_jobs=-1)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# Make predictions on validation dataset with k-nearest neighbors algorithm
knn = KNeighborsClassifier(n_neighbors=10, n_jobs=-1)
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print("\nSpecifically for kNN model:")
print("Accuracy for validation set is " + str(accuracy_score(Y_validation, predictions)))
print("Confusion Matrix is \n" + str(confusion_matrix(Y_validation, predictions)))
print("Classification report is\n " + str(classification_report(Y_validation, predictions)))
# Check if silhouette works
max_k = 0
max_silhouette_avg = 0
# TODO check other methods of clustering
k_means = KMeans(n_clusters=n_labels, n_jobs=-1, max_iter=500, n_init=20).fit(X)
cluster_labels = k_means.fit_predict(X)
# Classes to Cluster evaluation
combinations = np.array(list(itertools.permutations(range(n_labels))))
max_f1_score = 0
for comb in combinations:
new_cluster_labels = list(cluster_labels)
for i in range(0, len(cluster_labels)):
new_cluster_labels[i] = int(comb[cluster_labels[i]])
if max_f1_score < f1_score(Y, new_cluster_labels, average="micro"):
max_f1_score = f1_score(Y, new_cluster_labels, average="micro")
saved_cluster_labels = list(new_cluster_labels)
print("\nFor Classes to Cluster evaluation:")
print("F1_score is " + str(f1_score(Y, saved_cluster_labels, average="micro")))
print("Confusion Matrix is \n" + str(confusion_matrix(Y, saved_cluster_labels)))
print("Clustering report is\n " + str(classification_report(Y, saved_cluster_labels)))
# for k in range(2, 50):
# k_means = KMeans(n_clusters=k, n_jobs=-1, max_iter=500, n_init=20).fit(X)
# cluster_labels = k_means.fit_predict(X)
#
# # The silhouette_score gives the average value for all the samples.
# # This gives a perspective into the density and separation of the formed clusters.
# silhouette_avg = silhouette_score(X, cluster_labels)
# # Find the value of k, for which the silhouette is being maximized
# if silhouette_avg > max_silhouette_avg and k > 3:
# max_silhouette_avg = silhouette_avg
# max_k = k
# print("For n_clusters=" + str(k) + ", the average silhouette score is :" + str(silhouette_avg))
# print("\nExcluding the values k = 2 and k = 3 for comparison.")
# print("Best n_clusters is " + str(max_k) + " with average silhouette score: " + str(max_silhouette_avg) +
# " while the real number of classes is " + str(n_labels) + ".\n")
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.cluster.KMeans",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.KFold",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classif... | [((544, 584), 'pandas.read_csv', 'pandas.read_csv', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (559, 584), False, 'import pandas\n'), ((834, 863), 'numpy.unique', 'np.unique', (['Y'], {'return_counts': '(1)'}), '(Y, return_counts=1)\n', (843, 863), True, 'import numpy as np\n'), ((1259, 1307), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1285, 1307), False, 'from sklearn import preprocessing\n'), ((1585, 1673), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['X', 'Y'], {'test_size': 'validation_size', 'random_state': 'seed'}), '(X, Y, test_size=validation_size,\n random_state=seed)\n', (1617, 1673), False, 'from sklearn import model_selection\n'), ((2295, 2342), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(10)', 'n_jobs': '(-1)'}), '(n_neighbors=10, n_jobs=-1)\n', (2315, 2342), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1906, 1959), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': '(10)', 'random_state': 'seed'}), '(n_splits=10, random_state=seed)\n', (1927, 1959), False, 'from sklearn import model_selection\n'), ((1977, 2077), 'sklearn.model_selection.cross_val_score', 'model_selection.cross_val_score', (['model', 'X_train', 'Y_train'], {'cv': 'k_fold', 'scoring': 'scoring', 'n_jobs': '(-1)'}), '(model, X_train, Y_train, cv=k_fold, scoring\n =scoring, n_jobs=-1)\n', (2008, 2077), False, 'from sklearn import model_selection\n'), ((1713, 1735), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1733, 1735), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1747, 1771), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1769, 1771), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2827, 2890), 
'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_labels', 'n_jobs': '(-1)', 'max_iter': '(500)', 'n_init': '(20)'}), '(n_clusters=n_labels, n_jobs=-1, max_iter=500, n_init=20)\n', (2833, 2890), False, 'from sklearn.cluster import KMeans\n'), ((3258, 3306), 'sklearn.metrics.f1_score', 'f1_score', (['Y', 'new_cluster_labels'], {'average': '"""micro"""'}), "(Y, new_cluster_labels, average='micro')\n", (3266, 3306), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3331, 3379), 'sklearn.metrics.f1_score', 'f1_score', (['Y', 'new_cluster_labels'], {'average': '"""micro"""'}), "(Y, new_cluster_labels, average='micro')\n", (3339, 3379), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((2494, 2535), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_validation', 'predictions'], {}), '(Y_validation, predictions)\n', (2508, 2535), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((2575, 2618), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_validation', 'predictions'], {}), '(Y_validation, predictions)\n', (2591, 2618), False, 'from sklearn.metrics import confusion_matrix\n'), ((2663, 2711), 'sklearn.metrics.classification_report', 'classification_report', (['Y_validation', 'predictions'], {}), '(Y_validation, predictions)\n', (2684, 2711), False, 'from sklearn.metrics import classification_report\n'), ((3510, 3560), 'sklearn.metrics.f1_score', 'f1_score', (['Y', 'saved_cluster_labels'], {'average': '"""micro"""'}), "(Y, saved_cluster_labels, average='micro')\n", (3518, 3560), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3600, 3641), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y', 'saved_cluster_labels'], {}), '(Y, saved_cluster_labels)\n', (3616, 3641), False, 'from sklearn.metrics import confusion_matrix\n'), ((3682, 3728), 'sklearn.metrics.classification_report', 'classification_report', (['Y', 'saved_cluster_labels'], {}), '(Y, saved_cluster_labels)\n', 
(3703, 3728), False, 'from sklearn.metrics import classification_report\n')] |
import numpy as np
import random as rand
class AbstractPolicy:
    """Base class for action-selection policies over a vector of Q-values.

    Subclasses must implement ``__call__`` (choose an action index) and
    ``__str__`` (human-readable description); ``update`` is an optional
    per-step hook for annealing internal parameters.
    """

    def __call__(self, q_values: np.ndarray) -> int:
        """Return the index of the chosen action.

        Note: the original annotation used ``np.array`` (a factory
        function, not a type); implementations return an integer index.
        """
        raise NotImplementedError

    def update(self) -> None:
        """Per-step hook; default is a no-op."""
        pass

    def __str__(self) -> str:
        raise NotImplementedError
class Greedy(AbstractPolicy):
    """Deterministic policy: always pick the action with the highest Q-value."""

    def __call__(self, q_values: np.ndarray) -> int:
        # np.argmax returns the index of the first maximal entry.
        # (Annotation fixed: the original declared `-> np.array`, which is a
        # factory function, not a type.)
        return np.argmax(q_values)

    def __str__(self) -> str:
        return "\n" \
               "Greedy policy.\n\n"
class EpsGreedy(AbstractPolicy):
    """Epsilon-greedy policy: explore with probability eps, exploit otherwise.

    Required keyword arguments:
        initial_eps: starting exploration probability.
        eps_update:  multiplicative decay factor applied per ``update()``.
        eps_min:     lower bound on the exploration probability.
    """

    def __init__(self, **kwargs):
        self.initial_eps = kwargs['initial_eps']  # starting exploration rate
        self.eps = kwargs['initial_eps']          # current exploration rate
        self.eps_update = kwargs['eps_update']    # multiplicative decay factor
        self.eps_min = kwargs['eps_min']          # floor for eps

    def __call__(self, q_values: np.ndarray) -> int:
        # Explore (uniform random action) with probability eps.
        if self.eps > rand.random():
            return np.random.randint(0, q_values.shape[0])
        else:
            return np.argmax(q_values)

    def __str__(self) -> str:
        # "vaulue" typo in the original message fixed.
        return "\n" \
               "Eps-greedy policy. \n" \
               "Initial epsilon value: " + str(self.initial_eps) + "\n" + \
               "Epsilon update value: " + str(self.eps_update) + "\n" + \
               "Minimal epsilon value: " + str(self.eps_min) + "\n\n"

    def update(self) -> None:
        """Decay eps multiplicatively, clamped at eps_min."""
        if self.eps > self.eps_min:
            # Clamp so a single decay step cannot overshoot below the floor
            # (the original could dip below eps_min for one step).
            self.eps = max(self.eps * self.eps_update, self.eps_min)
        else:
            self.eps = self.eps_min
| [
"random.random",
"numpy.random.randint",
"numpy.argmax"
] | [((399, 418), 'numpy.argmax', 'np.argmax', (['q_values'], {}), '(q_values)\n', (408, 418), True, 'import numpy as np\n'), ((830, 843), 'random.random', 'rand.random', ([], {}), '()\n', (841, 843), True, 'import random as rand\n'), ((864, 903), 'numpy.random.randint', 'np.random.randint', (['(0)', 'q_values.shape[0]'], {}), '(0, q_values.shape[0])\n', (881, 903), True, 'import numpy as np\n'), ((937, 956), 'numpy.argmax', 'np.argmax', (['q_values'], {}), '(q_values)\n', (946, 956), True, 'import numpy as np\n')] |
# Training params.
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.contrib.keras.python.keras.datasets import cifar10
from tensorflow.contrib.keras.python.keras.engine import Input, Model
from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, \
    AveragePooling2D, Flatten, Dense
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras import backend as K
import os
import numpy as np
from tensorflow.contrib.keras.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.contrib.keras.python.keras.regularizers import l2
# Hyper-parameters for a small ResNet-style network trained on CIFAR-10.
batch_size = 32
epochs = 100
data_augmentation = False  # NOTE(review): flag (and ImageDataGenerator import) unused below
num_classes = 10
num_filters = 64   # filters in the first stage; doubled after each block
num_blocks = 4     # number of stages in the convolutional base
num_sub_blocks = 2  # residual units per stage
use_max_pool = False
# Load CIFAR-10 and arrange axes according to the backend's data format.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
channels = x_train.shape[3]
if K.image_data_format() == 'channels_first':
    img_rows = x_train.shape[2]
    img_cols = x_train.shape[3]
    channels = x_train.shape[1]
    x_train = x_train.reshape(x_train.shape[0], channels, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], channels, img_rows, img_cols)
    input_shape = (channels, img_rows, img_cols)
else:
    img_rows = x_train.shape[1]
    img_cols = x_train.shape[2]
    channels = x_train.shape[3]
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
    input_shape = (img_rows, img_cols, channels)
# Scale pixel values to [0, 1] and one-hot encode the labels.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Stem: 7x7 stride-2 convolution, batch norm, ReLU (optional 3x3/2 max-pool).
inputs = Input(shape=input_shape)
x = Conv2D(num_filters,
           kernel_size=7,
           padding='same',
           strides=2,
           kernel_initializer='he_normal',
           kernel_regularizer=l2(1e-4))(x := inputs) if False else Conv2D(num_filters,
           kernel_size=7,
           padding='same',
           strides=2,
           kernel_initializer='he_normal',
           kernel_regularizer=l2(1e-4))(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)
if use_max_pool:
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    num_blocks = 3
# convolutional base (stack of blocks).
for i in range(num_blocks):
    for j in range(num_sub_blocks):
        strides = 1
        # The first unit of every stage after the first downsamples by 2.
        is_first_layer_but_not_first_block = j == 0 and i > 0
        if is_first_layer_but_not_first_block:
            strides = 2
        # Residual branch: conv-BN-ReLU-conv-BN.
        y = Conv2D(num_filters,
                   kernel_size=3,
                   padding='same',
                   strides=strides,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4))(x)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv2D(num_filters,
                   kernel_size=3,
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4))(y)
        y = BatchNormalization()(y)
        if is_first_layer_but_not_first_block:
            # 1x1 stride-2 projection so the shortcut matches the new shape.
            x = Conv2D(num_filters,
                       kernel_size=1,
                       padding='same',
                       strides=2,
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(1e-4))(x)
        x = keras.layers.add([x, y])
        x = Activation('relu')(x)
    num_filters = 2 * num_filters
# Head: global average pool, flatten, softmax classifier.
x = AveragePooling2D()(x)
y = Flatten()(x)
outputs = Dense(num_classes,
                activation='softmax',
                kernel_initializer='he_normal')(y)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(),
              metrics=['accuracy'])
model.summary()
# Checkpoint the best model (by validation loss) under ./saved_models.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_resnet_model.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
checkpoint = ModelCheckpoint(filepath=filepath,
                             verbose=1,
                             save_best_only=True)
# Learning rate decaying: shrink LR by sqrt(0.1) when validation plateaus.
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                              cooldown=0,
                              patience=5,
                              min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer]
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks)
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1]) | [
"tensorflow.contrib.keras.python.keras.datasets.cifar10.load_data",
"os.path.join",
"tensorflow.contrib.keras.python.keras.layers.BatchNormalization",
"tensorflow.contrib.keras.python.keras.layers.Flatten",
"tensorflow.contrib.keras.python.keras.backend.image_data_format",
"tensorflow.contrib.keras.python... | [((980, 999), 'tensorflow.contrib.keras.python.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (997, 999), False, 'from tensorflow.contrib.keras.python.keras.datasets import cifar10\n'), ((1860, 1908), 'tensorflow.contrib.keras.python.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (1886, 1908), False, 'from tensorflow.contrib.keras.python import keras\n'), ((1919, 1966), 'tensorflow.contrib.keras.python.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (1945, 1966), False, 'from tensorflow.contrib.keras.python import keras\n'), ((1979, 2003), 'tensorflow.contrib.keras.python.keras.engine.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1984, 2003), False, 'from tensorflow.contrib.keras.python.keras.engine import Input, Model\n'), ((3797, 3834), 'tensorflow.contrib.keras.python.keras.engine.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (3802, 3834), False, 'from tensorflow.contrib.keras.python.keras.engine import Input, Model\n'), ((4140, 4174), 'os.path.join', 'os.path.join', (['save_dir', 'model_name'], {}), '(save_dir, model_name)\n', (4152, 4174), False, 'import os\n'), ((4193, 4259), 'tensorflow.contrib.keras.python.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'filepath', 'verbose': '(1)', 'save_best_only': '(True)'}), '(filepath=filepath, verbose=1, save_best_only=True)\n', (4208, 4259), False, 'from tensorflow.contrib.keras.python.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((1097, 1118), 'tensorflow.contrib.keras.python.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1116, 1118), True, 'from tensorflow.contrib.keras.python.keras import backend as K\n'), ((2205, 2225), 
'tensorflow.contrib.keras.python.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2223, 2225), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((2234, 2252), 'tensorflow.contrib.keras.python.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2244, 2252), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3623, 3641), 'tensorflow.contrib.keras.python.keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {}), '()\n', (3639, 3641), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3650, 3659), 'tensorflow.contrib.keras.python.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3657, 3659), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3674, 3746), 'tensorflow.contrib.keras.python.keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': '"""he_normal"""'}), "(num_classes, activation='softmax', kernel_initializer='he_normal')\n", (3679, 3746), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3999, 4010), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4008, 4010), False, 'import os\n'), ((4076, 4099), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (4089, 4099), False, 'import os\n'), ((4106, 4127), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4117, 4127), False, 'import os\n'), ((2287, 2339), 'tensorflow.contrib.keras.python.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 
'(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=3, strides=2, padding='same')\n", (2299, 2339), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3519, 3543), 'tensorflow.contrib.keras.python.keras.layers.add', 'keras.layers.add', (['[x, y]'], {}), '([x, y])\n', (3535, 3543), False, 'from tensorflow.contrib.keras.python import keras\n'), ((3908, 3914), 'tensorflow.contrib.keras.python.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (3912, 3914), False, 'from tensorflow.contrib.keras.python.keras.optimizers import Adam\n'), ((4386, 4398), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {}), '(0.1)\n', (4393, 4398), True, 'import numpy as np\n'), ((2182, 2192), 'tensorflow.contrib.keras.python.keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2184, 2192), False, 'from tensorflow.contrib.keras.python.keras.regularizers import l2\n'), ((2891, 2911), 'tensorflow.contrib.keras.python.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2909, 2911), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((2928, 2946), 'tensorflow.contrib.keras.python.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2938, 2946), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3171, 3191), 'tensorflow.contrib.keras.python.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3189, 3191), False, 'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((3557, 3575), 'tensorflow.contrib.keras.python.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3567, 3575), False, 
'from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D, Flatten, Dense\n'), ((2865, 2875), 'tensorflow.contrib.keras.python.keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2867, 2875), False, 'from tensorflow.contrib.keras.python.keras.regularizers import l2\n'), ((3145, 3155), 'tensorflow.contrib.keras.python.keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (3147, 3155), False, 'from tensorflow.contrib.keras.python.keras.regularizers import l2\n'), ((3493, 3503), 'tensorflow.contrib.keras.python.keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (3495, 3503), False, 'from tensorflow.contrib.keras.python.keras.regularizers import l2\n')] |
#!/usr/bin/env python3
"""Data preprocessing of the Gas Sensor Array Drift Dataset Data Set:
https://archive.ics.uci.edu/ml/datasets/gas+sensor+array+drift+dataset
This file may be executed to preprocess the data and save the result to files.
"""
import numpy as np
import os
import re
import pickle
ODOR_DATA_DIR = "Dataset"
PREPROCESSED_DATA_PATH = os.path.join(ODOR_DATA_DIR, "dataset_preprocessed.pkl")
PREPROCESSED_DATA_PATH_NO_6 = os.path.join(ODOR_DATA_DIR, "dataset_preprocessed_no_6.pkl")
def _load_raw_data(include_gas_6):
    """Load every batch*.dat file found in ODOR_DATA_DIR.

    Each line of a batch file is "<label> <i1>:<v1> <i2>:<v2> ...", with
    labels and feature indices 1-based in the file (converted to 0-based
    here).

    Args:
        include_gas_6: when False, samples of gas class 5 (the sixth gas)
            are dropped.

    Returns:
        (features, labels, batch_ind) where features has shape
        (n_samples, n_features), labels has shape (n_samples,), and
        batch_ind is a list of (start, end) slice indices, one per batch,
        in batch order.

    Raises:
        ValueError: if feature columns appear out of order in a file.
    """
    # Load each batch, one by one, into a dictionary keyed by batch index.
    batch_pattern = re.compile(r"batch(\d+)\.dat")
    batches_d = {}
    for entry in os.scandir(ODOR_DATA_DIR):
        match = batch_pattern.match(entry.name)
        if match is None:
            continue
        batch_num = int(match.group(1)) - 1
        # Read the file line by line (iterate the handle; no readlines()).
        feats = []
        labels = []
        with open(entry.path, "r") as fp:
            for line in fp:
                parts = line.split(" ")
                label = int(parts[0]) - 1
                if not include_gas_6 and label == 5:
                    continue
                labels.append(label)
                feat = []
                feats.append(feat)
                for i, part in enumerate(parts[1:]):
                    feat_label, value = part.split(":")
                    feat_num = int(feat_label) - 1
                    # Validate file format explicitly (assert is stripped
                    # under python -O, so raise instead).
                    if feat_num != i:
                        raise ValueError(
                            "Feature index %d out of order (expected %d) in %s"
                            % (feat_num, i, entry.path))
                    val = float(value)
                    feat.append(val)
        batches_d[batch_num] = (feats, labels)
    # Combine all batches into a single pair of arrays and create an index.
    batch_ind = []
    last_batch_i = max(batches_d.keys())
    all_features = []
    all_labels = []
    k = 0
    for batch_i in range(0, last_batch_i + 1):
        feats, labels = batches_d[batch_i]
        all_features.append(np.array(feats))
        all_labels.append(np.array(labels))
        batch_ind.append((k, k + len(feats)))
        k += len(feats)
    features = np.concatenate(all_features)
    labels = np.concatenate(all_labels)
    return features, labels, batch_ind
def preprocess_and_save(include_gas_6=True):
    """Read all data in the gas odor sensor dataset, then:
    1. Format all data into numpy arrays
    2. z-score the features
    3. Pickle (features, labels, batch_ind) to the appropriate path.
    """
    features, labels, batch_ind = _load_raw_data(include_gas_6)
    # z-score all features along the sample axis, so that each feature has the same weight.
    import scipy.stats as stats
    features_z = stats.zscore(features, axis=0)
    out_path = PREPROCESSED_DATA_PATH if include_gas_6 else PREPROCESSED_DATA_PATH_NO_6
    # Context manager ensures the file handle is closed even on error
    # (the original passed a bare open() to pickle.dump and leaked it).
    with open(out_path, "wb") as fp:
        pickle.dump((features_z, labels, batch_ind), fp)
def load_features_labels(include_gas_6=True):
    """
    returns a tuple (features, labels, batch_ind), where:
        features - an ndarray of shape (n_samples, n_features)
        labels - an ndarray of shape (n_samples,) and values ranging from 0 to n_labels-1
        batch_ind - a list of tuples (batch_start, batch_end) which are valid slice indices
    """
    path = PREPROCESSED_DATA_PATH if include_gas_6 else PREPROCESSED_DATA_PATH_NO_6
    # Context manager closes the handle (the original leaked an open file).
    with open(path, "rb") as fp:
        return pickle.load(fp)
if __name__ == "__main__":
    # Preprocess both dataset variants, then sanity-check the round trip.
    preprocess_and_save()
    preprocess_and_save(include_gas_6=False)
    features, labels, batch_ind = load_features_labels()
    assert(features.shape[0]==labels.shape[0])
    assert(batch_ind[-1][1]==features.shape[0])
    # NOTE(review): the trailing `import ipdb; ipdb.set_trace()` after this
    # block is a leftover debugger breakpoint (and a hard third-party
    # dependency) and should be removed before release.
import ipdb; ipdb.set_trace() | [
"scipy.stats.zscore",
"ipdb.set_trace",
"numpy.array",
"os.path.join",
"os.scandir",
"numpy.concatenate",
"re.compile"
] | [((357, 412), 'os.path.join', 'os.path.join', (['ODOR_DATA_DIR', '"""dataset_preprocessed.pkl"""'], {}), "(ODOR_DATA_DIR, 'dataset_preprocessed.pkl')\n", (369, 412), False, 'import os\n'), ((443, 503), 'os.path.join', 'os.path.join', (['ODOR_DATA_DIR', '"""dataset_preprocessed_no_6.pkl"""'], {}), "(ODOR_DATA_DIR, 'dataset_preprocessed_no_6.pkl')\n", (455, 503), False, 'import os\n'), ((613, 644), 're.compile', 're.compile', (['"""batch(\\\\d+)\\\\.dat"""'], {}), "('batch(\\\\d+)\\\\.dat')\n", (623, 644), False, 'import re\n'), ((679, 704), 'os.scandir', 'os.scandir', (['ODOR_DATA_DIR'], {}), '(ODOR_DATA_DIR)\n', (689, 704), False, 'import os\n'), ((2027, 2055), 'numpy.concatenate', 'np.concatenate', (['all_features'], {}), '(all_features)\n', (2041, 2055), True, 'import numpy as np\n'), ((2069, 2095), 'numpy.concatenate', 'np.concatenate', (['all_labels'], {}), '(all_labels)\n', (2083, 2095), True, 'import numpy as np\n'), ((2524, 2554), 'scipy.stats.zscore', 'stats.zscore', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (2536, 2554), True, 'import scipy.stats as stats\n'), ((3569, 3585), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (3583, 3585), False, 'import ipdb\n'), ((1884, 1899), 'numpy.array', 'np.array', (['feats'], {}), '(feats)\n', (1892, 1899), True, 'import numpy as np\n'), ((1927, 1943), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1935, 1943), True, 'import numpy as np\n')] |
### Author: <NAME>
### Description: Traffic Coordinator for handling autonomous vehicle reservations
### as they entire an intersection.
import numpy as np
import queue as queue
import copy as copy
from shapely.geometry import Point, box
from shapely.affinity import rotate
import kinematics as kn
import AutonomousVehicle as AV
class MatrixTrafficCoordinator:
    ''' Generic traffic coordinator that uses a reservation matrix of the intersection
    and collects requests for reservations. The main role of the coordinator is to ensure
    that there are no collisions through the intersection and provide reservation windows
    to incoming vehicles. Based on Dresner, Kurt, and Peter Stone AAMAS 2004.
    '''
    def __init__(self):
        # Count of reservation requests received (logging/diagnostics only).
        self.reservation_counter = 0
        self.agents_with_reservations = {}
        # FIFO of agent ids awaiting a reservation (FCFS ordering).
        self.queue_cars = queue.Queue()
        self.car_ids_in_queue = set()
        # Agent id whose turn it is to reserve next; -1 means queue empty.
        self.next_car_id = -1
        self.current_lane = "11"
        # Per-lane priority ticketing -- only used by the legacy branch at the
        # bottom of request_intersection_lane, which is unreachable (see note
        # there).
        self.priority_tickets_given_out = {"1": -1, "-1": -1, "2": -1, "-2": -1}
        self.next_customer_get_reservation = {"1": 0, "-1": 0, "2": 0, "-2": 0}
        self.priority_dict = {}
        self.next_available = -1
        self.time_matrix = {} #starts in top left corner of intersection
        self.time = 0
        # Spatial grid resolution; matrix_index_from_xy implies the
        # intersection spans roughly [-1, 1] x [-1, 1] -- TODO confirm units.
        self.dx = 0.1
        # Temporal padding added on both sides of every reserved interval.
        self.time_buffer_interval = 0.02
        # self.time_start_matrix = np.ones((int(2/self.dx)+1,int(2/self.dx)+1))*99999999
        self.m = int(2/self.dx)+1
        self.n = int(2/self.dx)+1
        # Extra grid tiles reserved around each tile a trajectory touches.
        self.number_tiles_buffer = 2
        # time_available_matrix[i][j]: earliest time tile (i, j) is free again.
        self.time_available_matrix = {i: {j: 0 for j in range(self.m)} for i in range(self.n)}
        # time_matrix_intervals[i][j]: list of (start, end, agent_id) tuples.
        self.time_matrix_intervals = {i: {j: [] for j in range(self.m)} for i in range(self.n)}
        self.bid_queue = []
        # Precomputed grid sample points used for car-footprint intersection.
        self.discretized_grid_points = [Point(x,y) for x in np.arange(-1,1+self.dx,self.dx) for y in np.arange(-1,1+self.dx,self.dx)]
        self.k_number_iterations_between_cleaning_time_matrix = 50
    def add_interval(self, interval, i, j, agent_id, time_matrix_intervals, consolidate_intervals=False):
        ''' Insert an interval (start_time, end_time) into time_matrix_intervals.

        When consolidate_intervals is True, the per-tile list is kept sorted
        by start time and adjacent same-agent intervals are merged; otherwise
        the interval is simply appended.  Mutates and returns
        time_matrix_intervals.
        '''
        if type(interval) != tuple:
            raise Exception("Not a tuple")
        elif interval[1] < interval[0]:
            raise Exception("Error: Interval End Time < Start Time ")
        elif consolidate_intervals:
            interval_with_agent = (interval[0], interval[1], agent_id)
            current_intervals_ij = time_matrix_intervals[i][j]
            if len(time_matrix_intervals[i][j]) == 0:
                time_matrix_intervals[i][j] = [interval_with_agent]
                return time_matrix_intervals
            else:
                for i_interval in range(len(current_intervals_ij)): #THIS DOESN"T WORK PERFECTLY
                    (other_start, other_end, other_agent_id) = current_intervals_ij[i_interval]
                    if interval[0] < other_start: #
                        if (agent_id == other_agent_id) and interval[1] > other_start:
                            # Same agent, overlapping: merge into one interval.
                            current_intervals_ij = current_intervals_ij[:i_interval] + [(interval[0], other_end, agent_id)] + current_intervals_ij[i_interval+1:]
                        else:
                            # Insert before the first interval starting later.
                            current_intervals_ij = current_intervals_ij[:i_interval] + [interval_with_agent] + current_intervals_ij[i_interval:]
                        time_matrix_intervals[i][j] = current_intervals_ij
                        return time_matrix_intervals
                # Starts after every stored interval: append at the end.
                current_intervals_ij += [interval_with_agent]
                time_matrix_intervals[i][j] = current_intervals_ij
                return time_matrix_intervals
        else:
            # Default path: plain (unsorted) append.
            interval_with_agent = (interval[0], interval[1], agent_id)
            time_matrix_intervals[i][j].append(interval_with_agent)
            return time_matrix_intervals
    def conflicts_in_interval_bool(self, interval, i, j, time_matrix_intervals):
        ''' Checks if an interval of time (start_time,end_time) conflicts at location (i,j) in time_matrix_intervals dictionary '''
        new_start, new_end, new_agent_id = interval
        epsilon = 0.001  # tolerance so near-touching intervals still conflict
        for (start, end, agent_id) in time_matrix_intervals[i][j]:
            # Intervals belonging to the same agent never conflict with themselves.
            if new_agent_id != agent_id:
                if (start-epsilon < new_start < end+epsilon) or (start-epsilon < new_end < end+epsilon):
                    return True
        return False
    def add_points_to_reservation_matrix(self, intersection_points_to_reserve, agent_id, reservation_interval_matrix, reservation_available_matrix):
        ''' Add list of points with start and end times to the interval matrix'''
        #TODO: Do some pre-processing to consolidate the number of points to add since some intervals will overlap. Iterate over each window
        for t_start, t_end, i, j in intersection_points_to_reserve:
            # Silently skip points that fall outside the grid.
            if (0 <= i <= self.m-1) and (0 <= j <= self.n-1):
                reservation_interval_matrix = self.add_interval((t_start,t_end),i,j,agent_id,reservation_interval_matrix)
                reservation_available_matrix[i][j] = max(t_end + self.time_buffer_interval,reservation_available_matrix[i][j]) #I'm adding a fixed buffer size
        return reservation_interval_matrix, reservation_available_matrix
    def remove_expired_intervals_from_matrix(self, current_time=None):
        ''' Helper function to frequently cleanup the matrix interval dictionary with expired intervals'''
        if current_time == None:
            current_time = self.time
        epsilon = 0.001
        for i in self.time_matrix_intervals.keys():
            for j in self.time_matrix_intervals[i].keys():
                # Keep only intervals whose end time is still in the future.
                cleaned_list_of_intervals = []
                for (start, end, agent_id) in self.time_matrix_intervals[i][j]:
                    if current_time < (end + epsilon):
                        cleaned_list_of_intervals += [(start, end, agent_id)]
                # self.time_matrix_intervals[i][j].remove((start,end))
                self.time_matrix_intervals[i][j] = cleaned_list_of_intervals
    def forward_simulate(self, start_time, intersection_lane, car_object):
        """Simulate car_object driving through the intersection at max speed.

        Returns a list of (x, y, phi, t) samples from start_time until the
        vehicle reaches the intersection exit.  For a non-communicating
        driver (lane code ending in "?") all three possible turns are
        simulated and their samples concatenated.
        """
        start_pose = car_object.pose # TODO CURRENT POSE NEEDS TO NOT BE HARD CODED
        # Hard-coded entrance pose for each incoming lane (lane width of 1
        # assumed -- TODO confirm against the map).
        if intersection_lane == "22" or intersection_lane == "2-1" or intersection_lane == "21" or intersection_lane == "2?":
            start_pose = kn.Pose(0.5,-1,np.pi/2.0)
        elif intersection_lane == "11" or intersection_lane == "1-2" or intersection_lane == "12" or intersection_lane == "1?":
            start_pose = kn.Pose(-1, -0.5, 0)
        elif intersection_lane == "-1-1" or intersection_lane == "-12" or intersection_lane == "-1-2" or intersection_lane == "-1?":
            start_pose = kn.Pose(1, 0.5, np.pi)
        elif intersection_lane == "-2-2" or intersection_lane == "-2-1" or intersection_lane == "-21" or intersection_lane == "-2?":
            start_pose = kn.Pose(-0.5,1, -np.pi/2.0)
        else:
            print("Weird current lane,",intersection_lane)
        list_of_positions_time = []
        all_intersection_lanes = []
        if intersection_lane[-1] == "?": #notcommunicating DRIVER
            # print("notcommunicating DRIVER!")
            # Unknown intent: reserve all three possible turn trajectories.
            all_intersection_lanes = [
                car_object.turn_direction_to_intersection_lane(car_object.current_lane,AV.TurnDirection.LEFT),
                car_object.turn_direction_to_intersection_lane(car_object.current_lane,AV.TurnDirection.STRAIGHT),
                car_object.turn_direction_to_intersection_lane(car_object.current_lane,AV.TurnDirection.RIGHT),
            ]
        else:
            all_intersection_lanes = [intersection_lane]
        for intersection_lane in all_intersection_lanes:
            current_time = start_time + 0.0
            current_pose = copy.deepcopy(start_pose)
            while not car_object.check_in_intersection_exit(current_pose):
                list_of_positions_time += [(current_pose.x,current_pose.y,current_pose.phi,current_time)]
                dt_simulation = car_object.time_increment/2.0
                distance = car_object._maxSpeed * dt_simulation #Using max speed
                current_pose = car_object.get_next_step_pose_lane(intersection_lane, distance, current_pose)
                current_time += dt_simulation
                # print('%0.03f'%current_time,'%0.03f'%current_pose.x,",",'%0.03f'%current_pose.y)
            if len(list_of_positions_time)>200*self.n: raise Exception("Vehicle",car_object.agent_id,"Never reaches intersection exit") #TODO: Why 20?
        return list_of_positions_time
    def matrix_index_from_xy(self, x, y):
        """Map a world (x, y) position to (row, col) grid indices.

        Row 0 / col 0 correspond to the top-left corner of the intersection.
        """
        x0 = -1 - self.dx/2 #this sould be +self.lane_width, slash come from the map
        y0 = 1 + self.dx/2 #this should be -self.lane_width
        i = int((y0-y)/self.dx)
        j = int((x-x0)/self.dx)
        if i<0 or j<0:
            print("Weird,ij",i,",",j,"xy",x,",",y)
        return (i,j)
    def request_intersection_lane(self, start_time, end_time,intersection_lane, agent_id, car_object, bid=-1):
        """FCFS reservation request handler.

        Forward-simulates the requested trajectory and, if it conflicts
        with no existing reservation, books every (buffered) tile/time
        interval it touches.  Returns the granted (start, end) window, or
        (-1, -1) when the request is rejected or the agent is not at the
        front of the queue.
        """
        self.reservation_counter += 1
        # if agent_id not in self.priority_dict.keys():
        #     next_ticket = self.priority_tickets_given_out[intersection_lane]+1
        #     self.priority_tickets_given_out[intersection_lane] = next_ticket
        #     self.priority_dict[agent_id] = next_ticket
        #     # print("ID",agent_id,"T",next_ticket)
        #     # print("NextCustomer",self.next_customer_get_reservation[intersection_lane])
        if agent_id not in self.car_ids_in_queue:
            self.car_ids_in_queue.add(agent_id)
            self.queue_cars.put(agent_id)
            if self.next_car_id == -1:
                self.next_car_id = self.queue_cars.get_nowait()
        ## FCFS
        if self.next_car_id == agent_id or self.next_car_id == -1: #Preserving priority
            list_of_positions_time = self.forward_simulate(start_time,intersection_lane,car_object)
            conflicting_trajectory = False
            # Pass 1: check every buffered tile for conflicts before booking.
            for (x, y, phi, t) in list_of_positions_time:
                (i,j) = self.matrix_index_from_xy(x,y)
                for i_tiles_buffer in range(-self.number_tiles_buffer,self.number_tiles_buffer):
                    for j_tiles_buffer in range(-self.number_tiles_buffer,self.number_tiles_buffer):
                        i_buff = i + i_tiles_buffer
                        j_buff = j + j_tiles_buffer
                        if (0 <= i_buff <= self.m-1) and (0 <= j_buff <= self.n-1):
                            if self.conflicts_in_interval_bool((t-self.time_buffer_interval,t+self.time_buffer_interval,agent_id),i_buff,j_buff, self.time_matrix_intervals):
                                # print("Conflicts!",(t-self.time_buffer_interval,t+self.time_buffer_interval),i_buff,j_buff)
                                return (-1,-1)
            if conflicting_trajectory: #Redudent
                return (-1,-1)
            else:
                # Pass 2: no conflicts found -- book every buffered tile.
                for (x,y,phi,t) in list_of_positions_time:
                    (i,j) = self.matrix_index_from_xy(x,y)
                    for i_tiles_buffer in range(-self.number_tiles_buffer,self.number_tiles_buffer):
                        for j_tiles_buffer in range(-self.number_tiles_buffer,self.number_tiles_buffer):
                            i_buff = i + i_tiles_buffer
                            j_buff = j + j_tiles_buffer
                            # for (i_buff,j_buff) in [(i,j),(i+1,j),(i-1,j),(i,j+1),(i,j-1),(i+1,j+1),(i-1,j-1),(i+1,j-1),(i-1,j+1),(i+2,j),(i-2,j),(i,j+2),(i,j-2),(i+2,j+2),(i-2,j-2),(i+2,j-2),(i-2,j+2)]:
                            if (0 <= i_buff <= self.m-1) and (0 <= j_buff <= self.n-1):
                                self.time_matrix_intervals = self.add_interval((t-self.time_buffer_interval,t+self.time_buffer_interval),i_buff,j_buff,agent_id,self.time_matrix_intervals)
                                self.time_available_matrix[i_buff][j_buff] = t + self.time_buffer_interval #I'm adding a fixed buffer size
                                # self.time_start_matrix[i_buff,j_buff] = t-0.01
                # Advance the FCFS queue to the next waiting agent.
                if self.queue_cars.empty():
                    self.next_car_id = -1
                else:
                    self.next_car_id = self.queue_cars.get_nowait()
                # `t` here is the last simulated sample time (loop variable).
                return (start_time, t+self.time_buffer_interval*10) #WHAT DOES THIS DO?
        else:
            return (-1,-1)
        # NOTE(review): everything below is unreachable -- both branches of
        # the if/else above return.  Looks like a legacy ticket-based scheme.
        if self.next_customer_get_reservation[intersection_lane] == self.priority_dict[agent_id]:
            if self.current_lane == intersection_lane and (start_time-self.next_available < 0.05):
                self.next_available = end_time
                print("Received Reservation Request #",self.reservation_counter, start_time,end_time)
                self.next_customer_get_reservation[intersection_lane]+=1
                return (start_time,end_time)
            elif start_time > self.next_available:
                self.next_available = end_time
                print("Received Reservation Request #",self.reservation_counter, start_time,end_time, "Same Lane")
                self.next_customer_get_reservation[intersection_lane]+=1
                self.current_lane = intersection_lane #Update the current lane being used
                return (start_time,end_time)
            else:
                print('Rejected Reservation Request # %.2f %.2f %.2f Next Avail: %.2f' % (self.reservation_counter, start_time,end_time, self.next_available))
                return (-1,-1)
        else:
            print('Rejected Reservation Request # %.2f %.2f %.2f Waiting for Preceding Agents to Get Reservation' % (self.reservation_counter, start_time,end_time))
            return (-1,-1)
    def return_reservation(self):
        # Placeholder: reservations are never returned/cancelled.
        return (-1,-1)
    def get_reservation_matrix(self):
        """Return the per-tile interval dictionary (not a copy)."""
        return self.time_matrix_intervals
    def increment(self, dt):
        """Simulator hook called every timestep; only records the step size."""
        self.dt = dt
        return None
    def get_intersection_entrance_free_time(self, incoming_lane, time_available_matrix):
        """Return the earliest time the entrance tile of incoming_lane is free."""
        lane_width = 1.0 #TODO: Do not hardcode the lane_width
        if incoming_lane == "1":
            x_entrance = -lane_width
            y_entrance = -lane_width/2.0
        elif incoming_lane == "-1":
            x_entrance = lane_width
            y_entrance = lane_width/2.0
        elif incoming_lane == "2":
            x_entrance = lane_width/2.0
            y_entrance = -lane_width
        elif incoming_lane == "-2":
            x_entrance = -lane_width/2.0
            y_entrance = lane_width
        i_entrance, j_entrance = self.matrix_index_from_xy(x_entrance, y_entrance)
        return time_available_matrix[i_entrance][j_entrance]
    def get_points_covered_by_car(self,x,y,phi,car_width,car_length, notcommunicating=False):
        ''' Returns a list of points from the intersection grid that intersect with the car's body'''
        # TODO: THIS NEEDS TO BE FIXED
        k_number_buffer_tiles = 1.5
        # if notcommunicating: k_number_buffer_tiles = 4
        # Axis-aligned box with (x, y) at the front of the car, padded by
        # k_number_buffer_tiles grid cells, then rotated by phi about (x, y).
        minx = x-car_length-k_number_buffer_tiles*self.dx
        maxx = x+k_number_buffer_tiles*self.dx
        miny = y-car_width/2.0-k_number_buffer_tiles*self.dx
        maxy = y+car_width/2.0+k_number_buffer_tiles*self.dx
        buffered_car = box(minx,miny,maxx,maxy)
        rotate_about_pt = Point(x,y)
        rotated_buffered_car = rotate(buffered_car,phi,origin=rotate_about_pt,use_radians=True)
        conflicted_points = []
        for point in self.discretized_grid_points:
            if rotated_buffered_car.contains(point):
                conflicted_points += [point]
        return conflicted_points
######################################################################################
#
class BatchTrafficCoordinator(MatrixTrafficCoordinator):
''' Our SVO coordinator which considers the SVO's of incoming agents
and allows for swapping positions based on SVO bids.
Coordinator batches requests so that it can compare multiple bids at a time.
'''
    def __init__(self):
        """Initialize batch/SVO coordinator state on top of the matrix base."""
        MatrixTrafficCoordinator.__init__(self)
        self.reservation_counter = 0
        # Per-incoming-lane queues of waiting requests.
        self.lane_queues = {"1": [], "-1": [], "2": [], "-2": []}
        self.agents_with_reservations = {}
        self.agents_last_attempted_reservation = {}
        self.car_ids_with_reservation = set()
        self.dx = 0.1
        self.time_buffer_interval = 0.02
        # self.time_start_matrix = np.ones((int(2/self.dx)+1,int(2/self.dx)+1))*99999999
        self.number_tiles_buffer = 2
        # self.bid_queue: List[Tuple[float, float, float, AV.TurnDirection, int, AV.AutonomousVehicle]] = []
        # Requests accumulated since the last batch assignment; the last
        # element of each bid tuple is the vehicle object (see increment()).
        self.bid_queue = []
        self.sorted_queue = []
        self.batch_counter = 0
        self.time = 0
        self.last_batch_assignment = 0.0
        self.max_bid_per_agent = {}
        self.returned_reservations = []
        self.list_of_swapped_reservations = []
        # Tax Information
        self.bid_tax = 0.0 #seconds
        self.svo_squared = False
        self.strict_priority = False
        self.swaps_allowed = True
        # Parameters for the batch assignment
        self.k_replan_increment_sec = 0.5
        self.k_max_retries = 60 #
        self.k_max_cars_planning_lane_FCFS = 1 #TODO: This is an important variable
        self.k_batch_intervals_time = 2.0 # seconds between batch planning
        self.k_min_number_cars_without_reservations = 8
        self.k_max_number_cars_assigned = 8
        self.k_epsilon_bid = 0.001
        self.k_max_start_time_horizon = 45.0
        self.prob_swap_back_comm = 1.001
        self.prob_swap_back_notcomm = 1.001 #
        self.nocomm_no_swap = False # If True, notcommunicatings are not allowed to swapped
        self.nocollab_no_swap = False
        # How bids are ordered before swap search -- see sort_queue().
        self.bid_queue_sort_type = "FCFSlane"
    def increment(self, dt):
        ''' Simulator calls this method at each timestep.

        Triggers a batch assignment when either (a) some queued car without
        a reservation is close to the intersection, or (b) enough cars have
        queued up and the batch interval has elapsed.  After a batch, the
        bid queue and per-agent max bids are cleared.
        '''
        self.dt = dt
        self.number_cars_without_reservations = len(self.bid_queue)
        #TODO: Add something to ensure single agents can be allocated
        batch_assign_flag = False
        for bid in self.bid_queue:
            # The vehicle object is the last element of each bid tuple.
            car = bid[-1]
            # Trigger when an unreserved car is within ~sqrt(2*lane_width) of
            # the intersection center -- TODO confirm the distance threshold.
            if car.intersection_window[0]==-1 and (car.pose.x)**2 + car.pose.y**2 <= 2.0*car.lane_width:
                batch_assign_flag = True
        if ((self.time - self.last_batch_assignment) > self.k_batch_intervals_time) and self.number_cars_without_reservations >= self.k_min_number_cars_without_reservations:
            batch_assign_flag = True
        if batch_assign_flag:
            # Deduplicate to one (highest) bid per agent before assigning.
            self.bid_queue = self.one_bid_per_agent_queue(self.bid_queue)
            # self.new_batch_intersection_FCFS()
            if len(self.bid_queue) > 0:
                reservations = self.one_swap_sort(self.bid_queue,verbose=False)
            else:
                reservations = []
            self.returned_reservations += [reservations]
            self.last_batch_assignment = self.time
            self.bid_queue = []
            self.max_bid_per_agent = {}
def one_swap_sort(self, bid_queue,verbose=False):
''' Looks for single swap improvements for switching the queue. Iterate through a queue.
Each agent (first request) can consider swapping with the next agent. This will only occur if the utility
for both agents improve (its a consentual swap)
Used in IROS2019
'''
self.lane_queues = self.update_lane_queues(self.lane_queues) # Make sure lane queue is up-to-date
sorted_queue = self.sort_queue(bid_queue,self.bid_queue_sort_type)
final_reservation = []
print("Received bids from %d agents"%len(sorted_queue),":",[r[-1].agent_id for r in sorted_queue])
if len(sorted_queue) == 1:
first_request = sorted_queue[0]
requested_start_time1, fake_interval_matrix, fake_available_matrix, fake_lane_queues = self.attempt_one_reservation(first_request)
if requested_start_time1 > 0:
self.time_matrix_intervals, self.time_available_matrix, self.lane_queues = fake_interval_matrix, fake_available_matrix, fake_lane_queues
final_reservation += [(first_request[-1], requested_start_time1)] # Reserve the first request and increment the request
for car, reservation_start_time in final_reservation:
car.intersection_window = (reservation_start_time, 9999999)
print("Reserving intersection for vehicles...",[car_reservation[0].agent_id for car_reservation in final_reservation])
return final_reservation
else:
first_request = sorted_queue[0]
for queue_index in range(min(self.k_max_number_cars_assigned,len(sorted_queue)-1)):
second_request = sorted_queue[queue_index+1]
#
u1, u2, requested_start_time1, requested_start_time2, fake_interval_matrix, fake_available_matrix, fake_lane_queues = self.attempt_two_reservations(first_request, second_request)
if verbose: print("Original Attempted: Agent %d then %d. t_s1t= %.02f, t_s1= %.02f u1= %0.02f u2=%0.02f"%(first_request[-2],second_request[-2], requested_start_time1, requested_start_time2, u1, u2))
if requested_start_time1 < 0 or requested_start_time2 < 0:
print("Did not return solution for %dth car in queue"%(queue_index+1))
break
if first_request[-1].current_lane == second_request[-1].current_lane or (self.nocomm_no_swap and (first_request[-1].notcommunicating or second_request[-1].notcommunicating)) or (self.nocollab_no_swap and (first_request[-1].notcollaborating or second_request[-1].notcollaborating)) :
'''Second request in same lane as first request. Swap is not possible'''
self.time_matrix_intervals, self.time_available_matrix, self.lane_queues = fake_interval_matrix, fake_available_matrix, fake_lane_queues
final_reservation += [(first_request[-1], requested_start_time1)] # Reserve the first request and increment the request
first_request = second_request
continue
u1_swap, u2_swap, requested_start_time1_swap, requested_start_time2_swap, fake_interval_matrix_swap, fake_available_matrix_swap, fake_lane_queues_swap = self.attempt_two_reservations(second_request, first_request)
if requested_start_time1_swap < 0 or requested_start_time2_swap < 0:
print("Did not return solution for %dth car in queue in swap"%(queue_index+1))
break
if verbose:
print("\n First Agent %d. t_start_original = %.02f t_start_swap = %.02f. Original U= %.02f Swap U= %.02f"%(first_request[-2], requested_start_time1, requested_start_time2_swap, u1, u2_swap))
print("Second Agent %d. t_start_original = %.02f t_start_swap = %.02f. Original U= %.02f Swap U= %.02f"%(second_request[-2], requested_start_time2, requested_start_time1_swap, u2, u1_swap))
u_epsilon = 0.001
if second_request[-1].notcommunicating:
p_swap_back = first_request[-1].prob_swap_back_notcomm # Non-communicating swap back 100%
else:
p_swap_back = first_request[-1].prob_swap_back_comm # communicating
if (u1_swap - u2 > u_epsilon) and (u2_swap - u1 > u_epsilon) and self.swaps_allowed and np.random.uniform() < p_swap_back:
print("SWAP! Agent %d H%r before Agent %d H%r"%(second_request[-1].agent_id, second_request[-1].notcommunicating, first_request[-1].agent_id, first_request[-1].notcommunicating))
self.time_matrix_intervals, self.time_available_matrix, self.lane_queues = fake_interval_matrix_swap, fake_available_matrix_swap, fake_lane_queues_swap
final_reservation += [(second_request[-1], requested_start_time1_swap)] # Reserve the second request due to swap
n_notcommunicatings = second_request[-1].notcommunicating + first_request[-1].notcommunicating
self.list_of_swapped_reservations += [(u1, u2, u1_swap, u2_swap, first_request[-1].agent_id, second_request[-1].agent_id, n_notcommunicatings)]
else:
self.time_matrix_intervals, self.time_available_matrix, self.lane_queues = fake_interval_matrix, fake_available_matrix, fake_lane_queues
final_reservation += [(first_request[-1], requested_start_time1)] # Reserve the first request and increment the request
first_request = second_request
print("Reserving intersection for vehicles...",[car_reservation[0].agent_id for car_reservation in final_reservation])
for car, reservation_start_time in final_reservation:
car.intersection_window = (reservation_start_time, 9999999)
return final_reservation
def new_batch_intersection_FCFS(self):
"""Takes the queue of reservation requests and returns a list of return
reservations.
1. Ensures that lane queues are up-to-date with cars and existing reservations
2. For each request, forward simulate car trajectory and check for conflicts in reservation-matrix
3. If no conflict, add reservation. Else, repeat 10 times with offset.
4. Set reservation for each agent from final list of reservations
"""
self.lane_queues = self.update_lane_queues(self.lane_queues) # Make sure lane queue is up-to-date (subroutine?)
# #3/4 edition
# new_queue = []
# for bid, requested_start_time, end_time, intersection_lane, agentID, current_car in self.bid_queue:
# bid = self.time - current_car.time_entered_control
# new_queue += [(bid, requested_start_time, end_time, intersection_lane, agentID, current_car)]
# self.bid_queue = new_queue
# self.sorted_queue = self.sort_queue(self.bid_queue,"FCFS")
# ### 3/4 Edition
final_reservations = []
self.car_ids_with_reservation = set() #TODO: Is this the right thing to have?
# previous_priority_car_time_entered_control = 0.0 #TODO THIS NEEDS TO GET INFO
for bid, requested_start_time, end_time, intersection_lane, agent_id, current_car in self.sorted_queue:
if agent_id in self.car_ids_with_reservation: continue # Agent already received reservation, maybe we don't need this
if len(self.lane_queues[current_car.current_lane]) > self.k_max_cars_planning_lane_FCFS: break
time_car_can_enter_intersection = self.get_intersection_available_for_car(current_car.current_lane, current_car, self.lane_queues)
reservation_window_start_time, self.time_matrix_intervals, self.time_available_matrix = self.attempt_reservation(time_car_can_enter_intersection, intersection_lane, current_car, agent_id, self.time_matrix_intervals, self.time_available_matrix)
if reservation_window_start_time < 0:
break
else:
final_reservations += [(reservation_window_start_time,100000)]
time_intersection_entrance_free = self.get_intersection_entrance_free_time(current_car.current_lane, self.time_available_matrix)
self.lane_queues[current_car.current_lane] += [(current_car, time_intersection_entrance_free)] #TODO: This needs to be when I'm out of the control region
current_car.intersection_window = (reservation_window_start_time, 100000)
self.car_ids_with_reservation.add(agent_id)
self.bid_queue = []
return final_reservations
def get_intersection_available_for_car(self, incoming_lane, current_car, lane_queues, strict_priority=False):
''' Calculates the time at which point car will be at the intersection entrance'''
if len(lane_queues[incoming_lane]) > 0:
previous_car, previous_car_clears_entrance = lane_queues[incoming_lane][-1]
distance_to_previous_car = current_car.pose.dist(previous_car.pose)
car_intersection_start_time = previous_car_clears_entrance + distance_to_previous_car/current_car._maxSpeed
else:
'''No preceeding cars (i.e. start time = time to interesection)'''
car_intersection_start_time = current_car.time + current_car.get_time_to_intersection()
if strict_priority:
all_other_lane_availabilities = [last_time[1] for lane in lane_queues.keys() for last_time in lane_queues[lane] if lane != incoming_lane]
start_time_to_preserve_order = max(all_other_lane_availabilities+[0])-current_car.car_length/current_car._maxSpeed
car_intersection_start_time = max([car_intersection_start_time, start_time_to_preserve_order])
if current_car.agent_id in self.agents_last_attempted_reservation.keys():
car_intersection_start_time = max(car_intersection_start_time, self.agents_last_attempted_reservation[current_car.agent_id])
return car_intersection_start_time
def attempt_reservation(self, requested_start_time, intersection_lane, current_car, agent_id, time_matrix_intervals, time_available_matrix, last_entry_time=-1):
''' Attempts to reserve the interesection for entering at car_intersection_start_time. Returns True/False depending on whether it is possible.'''
new_time_matrix_intervals = copy.deepcopy(time_matrix_intervals)
new_time_available_matrix = copy.deepcopy(time_available_matrix)
current_reservation_attempt = 0
reservation_start_time = requested_start_time
# if last_entry_time > 0:
# reservation_start_time = last_entry_time # You need to start after the last entry
while current_reservation_attempt < self.k_max_retries:
intersection_points_to_reserve = self.calculate_reservation(reservation_start_time, intersection_lane, current_car, agent_id, new_time_matrix_intervals)
if len(intersection_points_to_reserve) > 0:
new_time_matrix_intervals, new_time_available_matrix = self.add_points_to_reservation_matrix(intersection_points_to_reserve, agent_id, new_time_matrix_intervals, new_time_available_matrix) #I'm adding a fixed buffer size
# time_matrix_intervals, time_available_matrix = self.set_reservation(intersection_points_to_reserve, agent_id, current_car, reservation_start_time, time_matrix_intervals, time_available_matrix)
return reservation_start_time, new_time_matrix_intervals, new_time_available_matrix
else:
current_reservation_attempt += 1
reservation_start_time += self.k_replan_increment_sec #TODO: THIS SHOULD PROBABLY HAPPEN ELSEWHERE?
self.agents_last_attempted_reservation[agent_id] = reservation_start_time
print("RetryMaxOut: Car %d, Tried up to starting @ t %.03f"%(agent_id,reservation_start_time))
return -1, new_time_matrix_intervals, new_time_available_matrix
def calculate_reservation(self, car_intersection_start_time, intersection_lane, current_car, agent_id, time_matrix_intervals):
''' Calculate and returns the positions/intervals required by the vehicle in the intersection. If there is a conflict, no points are returned
An additional time_buffer_interval is added when reserving an interval
'''
list_of_positions_time = self.forward_simulate(car_intersection_start_time, intersection_lane, current_car)
intersection_points_to_reserve = []
for x, y, phi, t in list_of_positions_time:
points_covered_by_car = self.get_points_covered_by_car(x, y, phi, current_car.car_width, current_car.car_length, current_car.notcommunicating)
for point in points_covered_by_car:
i,j = self.matrix_index_from_xy(point.x, point.y)
if self.conflicts_in_interval_bool((t-self.time_buffer_interval,t+self.time_buffer_interval,agent_id), i, j, time_matrix_intervals):
return []
else:
intersection_points_to_reserve += [(t - self.time_buffer_interval, t + self.time_buffer_interval, i, j)]
return intersection_points_to_reserve
def request_intersection_lane(self,start_time,end_time,intersection_lane,agent_id,car_object,bid=-1):
''' Public: Cars request a reservation by specifying the intersection lane'''
if bid != -1:
self.insert_bid_to_queue(start_time,end_time,intersection_lane,agent_id,car_object,bid)
return (-1,-1)
def insert_bid_to_queue(self,start_time,end_time,intersection_lane,agent_id,car_object,bid):
''' Insert bid into the queue but first only keep one bid per agent'''
if car_object.agent_id not in self.max_bid_per_agent.keys():
if car_object.agent_id >= 0:
self.max_bid_per_agent[car_object.agent_id] = (bid,start_time,end_time,intersection_lane,agent_id,car_object)
elif bid > self.max_bid_per_agent[car_object.agent_id][0]:
self.max_bid_per_agent[car_object.agent_id] = (bid,start_time,end_time,intersection_lane,agent_id,car_object)
self.bid_queue = [self.max_bid_per_agent[agent_id] for agent_id in self.max_bid_per_agent.keys()]
def sort_queue(self, bid_queue, sorting_string):
''' Sorts the queue by predefined sorting. Assumes queue only has one bid per agent
sorting_string == "FCFS": highest bid is first in queue
sorting_string == "FCFSagent_id": highest bid is first, ties broken by agent_id
'''
sorted_queue = []
if sorting_string == "FCFS":
sorted_queue = sorted(bid_queue,key=lambda b: -b[0])
elif sorting_string == "FCFSagent_id":
sorted_queue = self.sort_and_break_ties_id(bid_queue)
elif sorting_string == "FCFSlane":
sorted_queue = self.sort_and_break_ties_lane(bid_queue)
else:
raise Exception("Unknown Sorting String")
return sorted_queue
def sort_and_break_ties_id(self, bid_queue, epsilon_bid = -1):
''' Sort of list of bids, find the ties, and sort by agent_id'''
if epsilon_bid == -1:
epsilon_bid = self.k_epsilon_bid
sorted_queue_by_bids = sorted(bid_queue, key=lambda b: -b[0])
final_sorted_queue_id_tiebreaker = []
previous_bid_value = -999999
previous_tied_bids = []
for current_bid in sorted_queue_by_bids:
if abs(current_bid[0]-previous_bid_value) <= epsilon_bid:
previous_tied_bids += [current_bid]
else:
''' Sort the previously tied bids and use the agent id as the sorting key'''
if len(previous_tied_bids) > 0:
sorted_bids_by_agent_id = sorted(previous_tied_bids, key=lambda b: b[-1].agent_id)
final_sorted_queue_id_tiebreaker += sorted_bids_by_agent_id
previous_tied_bids = [current_bid]
previous_bid_value = current_bid[0]
if len(previous_tied_bids) > 0:
sorted_bids_by_agent_id = sorted(previous_tied_bids, key=lambda b: b[-1].agent_id)
final_sorted_queue_id_tiebreaker += sorted_bids_by_agent_id
return final_sorted_queue_id_tiebreaker
def sort_and_break_ties_lane(self, bid_queue, epsilon_bid = -1):
''' Sort of list of bids, find the ties, and sort by agent_id'''
if epsilon_bid == -1:
epsilon_bid = self.k_epsilon_bid
sorted_queue_by_bids = sorted(bid_queue, key=lambda b: -b[0])
final_sorted_queue_id_tiebreaker = []
previous_bid_value = -999999
previous_tied_bids = []
for current_bid in sorted_queue_by_bids:
if abs(current_bid[0]-previous_bid_value) <= epsilon_bid:
previous_tied_bids += [current_bid]
else:
''' Sort the previously tied bids and use the lane number as the sorting key'''
if len(previous_tied_bids) > 0:
sorted_bids_by_agent_id = sorted(previous_tied_bids, key=lambda b: int(b[-1].initial_lane))
final_sorted_queue_id_tiebreaker += sorted_bids_by_agent_id
previous_tied_bids = [current_bid]
previous_bid_value = current_bid[0]
if len(previous_tied_bids) > 0:
sorted_bids_by_agent_id = sorted(previous_tied_bids, key=lambda b: int(b[-1].initial_lane))
final_sorted_queue_id_tiebreaker += sorted_bids_by_agent_id
return final_sorted_queue_id_tiebreaker
def attempt_one_reservation(self, request):
''' Try to assign a single vehicle to the intersection. First checks time intersection would be available for vehicle.
Then if it is available, reserves the matrix and returns the updated reservation matrices
'''
fake_lane_queues = self.copy_queues(self.lane_queues)
(bid, requested_start_time, end_time, intersection_lane, agent_id, current_car) = request
time_car_can_enter_intersection = self.get_intersection_available_for_car(current_car.current_lane,
current_car, fake_lane_queues, self.strict_priority)
start_time, t_interval_matrix_after_reservation, t_available_matrix_after_reservation = self.attempt_reservation(time_car_can_enter_intersection, intersection_lane, current_car, agent_id, self.time_matrix_intervals, self.time_available_matrix)
if start_time < 0 or (start_time > self.time + self.k_max_start_time_horizon): #NOTE THIS IS RISKY BUSINESS
return -1, self.time_matrix_intervals, self.time_available_matrix, self.lane_queues
else:
time_intersection_entrance_free = self.get_intersection_entrance_free_time(current_car.current_lane, t_available_matrix_after_reservation)
fake_lane_queues[current_car.current_lane] += [(current_car, time_intersection_entrance_free)] #TODO: This needs to be when I'm out of the control region
return start_time, t_interval_matrix_after_reservation, t_available_matrix_after_reservation, fake_lane_queues
def attempt_two_reservations(self, first_request, second_request):
'''
First request, find when the intersection is available for car and then reserve the intersection for that time.
Repeat w/ second request, using the (updated) reservation matrices which now include the first request.
If either request fails, return -1, -1, ... -1
If both request succeed, return the utilities for both reservations, both reservations, and the updated queues/matrices
'''
lane_queues = self.copy_queues(self.lane_queues)
(bid1, requested_start_time1, end_time1, intersection_lane1, agent_id1, current_car1) = first_request
(bid2, requested_start_time2, end_time2, intersection_lane2, agent_id2, current_car2) = second_request
t_enter_intersection_car1 = self.get_intersection_available_for_car(current_car1.current_lane, current_car1, lane_queues, self.strict_priority)
start_time_1, t_interval_matrix_after_reservation1, t_available_matrix_after_reservation1 = self.attempt_reservation(t_enter_intersection_car1, intersection_lane1, current_car1, agent_id1, self.time_matrix_intervals, self.time_available_matrix)
if start_time_1 < 0 or start_time_1 > self.time + 45.0: #NOTE THIS IS RISKY BUSINESS
# print("Couldn't find reservation for 1")
return -1, -1, -1, -1, -1, -1, -1
pass
else:
time_intersection_entrance_free = self.get_intersection_entrance_free_time(current_car1.current_lane, t_available_matrix_after_reservation1)
lane_queues[current_car1.current_lane] += [(current_car1, time_intersection_entrance_free)] #TODO: This needs to be when I'm out of the control region
time_car2_can_enter_intersection = self.get_intersection_available_for_car(current_car2.current_lane, current_car2, lane_queues, self.strict_priority)
start_time_2, fake_interval_matrix_after2, fake_available_matrixafter2 = self.attempt_reservation(time_car2_can_enter_intersection, intersection_lane2, current_car2, agent_id2, t_interval_matrix_after_reservation1, t_available_matrix_after_reservation1)
if start_time_2 < 0:
# print("Couldn't find reservation for 2")
return -1, -1, -1, -1, -1, -1, -1
else:
time_intersection_entrance_free = self.get_intersection_entrance_free_time(current_car2.current_lane, fake_available_matrixafter2)
# fake_lane_queues[current_car2.current_lane] += [(current_car2, time_intersection_entrance_free)] #TODO: This needs to be when I'm out of the control region
if self.svo_squared:
utility1 = self.get_svo_utility_squared(start_time_1, start_time_2, current_car1, current_car2)
utility2 = self.get_svo_utility_squared(start_time_2, start_time_1, current_car2, current_car1)
else:
utility1 = self.get_svo_utility(start_time_1, start_time_2, current_car1, current_car2)
utility2 = self.get_svo_utility(start_time_2, start_time_1, current_car2, current_car1)
return utility1, utility2, start_time_1, start_time_2, t_interval_matrix_after_reservation1, t_available_matrix_after_reservation1, lane_queues
def get_svo_utility(self, start_time_ego, start_time_2, car_ego, car_2):
''' SVO for an ego vehicle, calculated using time in the control regions'''
time_in_control_ego = start_time_ego - car_ego.time_entered_control
time_in_control_2 = start_time_2 - car_2.time_entered_control
utility_ego = time_in_control_ego*np.cos(car_ego.svo_theta) + time_in_control_2*np.sin(car_ego.svo_theta)
return -utility_ego
def get_svo_utility_squared(self, start_time_ego, start_time_2, car_ego, car_2):
time_in_control_ego = start_time_ego - car_ego.time_entered_control
time_in_control_2 = start_time_2 - car_2.time_entered_control
utility_ego = (time_in_control_ego**2)*np.cos(car_ego.svo_theta) + (time_in_control_2**2)*np.sin(car_ego.svo_theta)
return -utility_ego
def one_bid_per_agent_queue(self, bid_queue):
''' Returns a queue that only has one bid/request per agent'''
agent_max_requests = {}
cleaned_queue = []
for bid, requested_start_time, end_time, intersection_lane, agent_id, current_car in bid_queue:
if agent_id in agent_max_requests.keys():
if bid > agent_max_requests[agent_id][0]:
agent_max_requests[agent_id] = (bid, requested_start_time, end_time, intersection_lane, agent_id, current_car)
else:
agent_max_requests[agent_id] = (bid, requested_start_time, end_time, intersection_lane, agent_id, current_car)
for agent_id in agent_max_requests.keys():
cleaned_queue += [agent_max_requests[agent_id]]
return cleaned_queue
def update_lane_queues(self, lane_queues):
""" Remove cars in self.lane_queues whose start time window already began. """
for lane in lane_queues.keys():
updated_lane_queue = []
for (car, start_time) in lane_queues[lane]:
if start_time >= car.time:
updated_lane_queue += [(car,start_time)] #Only keep cars that haven't entered intersection
lane_queues[lane] = updated_lane_queue
return lane_queues
def copy_queues(self, lane_queues):
new_queues = {}
for lane in lane_queues.keys():
new_queues[lane] = []
for vehicle_time in lane_queues[lane]:
new_queues[lane] += [vehicle_time]
return new_queues
| [
"numpy.random.uniform",
"shapely.geometry.Point",
"copy.deepcopy",
"kinematics.Pose",
"shapely.affinity.rotate",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"queue.Queue",
"shapely.geometry.box"
] | [((873, 886), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (884, 886), True, 'import queue as queue\n'), ((15522, 15549), 'shapely.geometry.box', 'box', (['minx', 'miny', 'maxx', 'maxy'], {}), '(minx, miny, maxx, maxy)\n', (15525, 15549), False, 'from shapely.geometry import Point, box\n'), ((15573, 15584), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (15578, 15584), False, 'from shapely.geometry import Point, box\n'), ((15615, 15682), 'shapely.affinity.rotate', 'rotate', (['buffered_car', 'phi'], {'origin': 'rotate_about_pt', 'use_radians': '(True)'}), '(buffered_car, phi, origin=rotate_about_pt, use_radians=True)\n', (15621, 15682), False, 'from shapely.affinity import rotate\n'), ((30173, 30209), 'copy.deepcopy', 'copy.deepcopy', (['time_matrix_intervals'], {}), '(time_matrix_intervals)\n', (30186, 30209), True, 'import copy as copy\n'), ((30246, 30282), 'copy.deepcopy', 'copy.deepcopy', (['time_available_matrix'], {}), '(time_available_matrix)\n', (30259, 30282), True, 'import copy as copy\n'), ((1837, 1848), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (1842, 1848), False, 'from shapely.geometry import Point, box\n'), ((6499, 6528), 'kinematics.Pose', 'kn.Pose', (['(0.5)', '(-1)', '(np.pi / 2.0)'], {}), '(0.5, -1, np.pi / 2.0)\n', (6506, 6528), True, 'import kinematics as kn\n'), ((7953, 7978), 'copy.deepcopy', 'copy.deepcopy', (['start_pose'], {}), '(start_pose)\n', (7966, 7978), True, 'import copy as copy\n'), ((1857, 1892), 'numpy.arange', 'np.arange', (['(-1)', '(1 + self.dx)', 'self.dx'], {}), '(-1, 1 + self.dx, self.dx)\n', (1866, 1892), True, 'import numpy as np\n'), ((1898, 1933), 'numpy.arange', 'np.arange', (['(-1)', '(1 + self.dx)', 'self.dx'], {}), '(-1, 1 + self.dx, self.dx)\n', (1907, 1933), True, 'import numpy as np\n'), ((6678, 6698), 'kinematics.Pose', 'kn.Pose', (['(-1)', '(-0.5)', '(0)'], {}), '(-1, -0.5, 0)\n', (6685, 6698), True, 'import kinematics as kn\n'), ((42604, 42629), 'numpy.cos', 
'np.cos', (['car_ego.svo_theta'], {}), '(car_ego.svo_theta)\n', (42610, 42629), True, 'import numpy as np\n'), ((42650, 42675), 'numpy.sin', 'np.sin', (['car_ego.svo_theta'], {}), '(car_ego.svo_theta)\n', (42656, 42675), True, 'import numpy as np\n'), ((42995, 43020), 'numpy.cos', 'np.cos', (['car_ego.svo_theta'], {}), '(car_ego.svo_theta)\n', (43001, 43020), True, 'import numpy as np\n'), ((43046, 43071), 'numpy.sin', 'np.sin', (['car_ego.svo_theta'], {}), '(car_ego.svo_theta)\n', (43052, 43071), True, 'import numpy as np\n'), ((6857, 6879), 'kinematics.Pose', 'kn.Pose', (['(1)', '(0.5)', 'np.pi'], {}), '(1, 0.5, np.pi)\n', (6864, 6879), True, 'import kinematics as kn\n'), ((7038, 7068), 'kinematics.Pose', 'kn.Pose', (['(-0.5)', '(1)', '(-np.pi / 2.0)'], {}), '(-0.5, 1, -np.pi / 2.0)\n', (7045, 7068), True, 'import kinematics as kn\n'), ((23954, 23973), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (23971, 23973), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
PI = np.pi
class Arrow:
    """Draw an arrow of length L from (x, y) pointing along angle theta.

    Renders the shaft plus two head barbs with pyplot, in color c.
    """

    def __init__(self, x, y, theta, L, c):
        head_angle = np.deg2rad(30)   # half-opening angle of the arrow head
        head_len = 0.5 * L               # barb length
        width = 2                        # line width for every segment
        tip_x = x + L * np.cos(theta)
        tip_y = y + L * np.sin(theta)
        # Shaft from tail to tip.
        plt.plot([x, tip_x], [y, tip_y], color=c, linewidth=width)
        # Two barbs, angled back from the tip (left first, then right).
        for barb_theta in (theta + PI - head_angle, theta + PI + head_angle):
            plt.plot([tip_x, tip_x + head_len * np.cos(barb_theta)],
                     [tip_y, tip_y + head_len * np.sin(barb_theta)],
                     color=c, linewidth=width)
class Car:
    """Draw a rectangular car of width w and length L, centered so that the
    reference point (x, y) sits a quarter length ahead of the rear axle,
    with a heading arrow along yaw."""

    def __init__(self, x, y, yaw, w, L):
        rear_dir = PI + yaw
        # Rear-axle midpoint, L/4 behind the reference point.
        cx = x + L / 4 * np.cos(rear_dir)
        cy = y + L / 4 * np.sin(rear_dir)
        left_dir = rear_dir + PI / 2
        right_dir = rear_dir - PI / 2
        # Rear corners, half a width to each side of the rear midpoint.
        x_bl = cx + w / 2 * np.cos(left_dir)
        y_bl = cy + w / 2 * np.sin(left_dir)
        x_br = cx + w / 2 * np.cos(right_dir)
        y_br = cy + w / 2 * np.sin(right_dir)
        # Front corners: rear corners shifted one full length along the heading.
        dx = L * np.cos(yaw)
        dy = L * np.sin(yaw)
        plt.plot([x_bl, x_br, x_br + dx, x_bl + dx, x_bl],
                 [y_bl, y_br, y_br + dy, y_bl + dy, y_bl],
                 linewidth=1, color='black')
        Arrow(x, y, yaw, L / 2, 'black')
    # plt.axis("equal")
    # plt.show()
# Demo entry point: draw one car (plotting only; nothing is shown unless
# plt.show() is called elsewhere).
if __name__ == '__main__':
    # Arrow(-1, 2, 60)
    Car(0, 0, 1, 2, 60)
| [
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.plot",
"numpy.deg2rad"
] | [((136, 150), 'numpy.deg2rad', 'np.deg2rad', (['(30)'], {}), '(30)\n', (146, 150), True, 'import numpy as np\n'), ((692, 758), 'matplotlib.pyplot.plot', 'plt.plot', (['[x_start, x_end]', '[y_start, y_end]'], {'color': 'c', 'linewidth': 'w'}), '([x_start, x_end], [y_start, y_end], color=c, linewidth=w)\n', (700, 758), True, 'import matplotlib.pyplot as plt\n'), ((767, 857), 'matplotlib.pyplot.plot', 'plt.plot', (['[x_hat_start, x_hat_end_L]', '[y_hat_start, y_hat_end_L]'], {'color': 'c', 'linewidth': 'w'}), '([x_hat_start, x_hat_end_L], [y_hat_start, y_hat_end_L], color=c,\n linewidth=w)\n', (775, 857), True, 'import matplotlib.pyplot as plt\n'), ((879, 969), 'matplotlib.pyplot.plot', 'plt.plot', (['[x_hat_start, x_hat_end_R]', '[y_hat_start, y_hat_end_R]'], {'color': 'c', 'linewidth': 'w'}), '([x_hat_start, x_hat_end_R], [y_hat_start, y_hat_end_R], color=c,\n linewidth=w)\n', (887, 969), True, 'import matplotlib.pyplot as plt\n'), ((1689, 1793), 'matplotlib.pyplot.plot', 'plt.plot', (['[x_BL, x_BR, x_FR, x_FL, x_BL]', '[y_BL, y_BR, y_FR, y_FL, y_BL]'], {'linewidth': '(1)', 'color': '"""black"""'}), "([x_BL, x_BR, x_FR, x_FL, x_BL], [y_BL, y_BR, y_FR, y_FL, y_BL],\n linewidth=1, color='black')\n", (1697, 1793), True, 'import matplotlib.pyplot as plt\n'), ((250, 263), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (256, 263), True, 'import numpy as np\n'), ((288, 301), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (294, 301), True, 'import numpy as np\n'), ((454, 473), 'numpy.cos', 'np.cos', (['theta_hat_L'], {}), '(theta_hat_L)\n', (460, 473), True, 'import numpy as np\n'), ((514, 533), 'numpy.cos', 'np.cos', (['theta_hat_R'], {}), '(theta_hat_R)\n', (520, 533), True, 'import numpy as np\n'), ((603, 622), 'numpy.sin', 'np.sin', (['theta_hat_L'], {}), '(theta_hat_L)\n', (609, 622), True, 'import numpy as np\n'), ((663, 682), 'numpy.sin', 'np.sin', (['theta_hat_R'], {}), '(theta_hat_R)\n', (669, 682), True, 'import numpy as np\n'), ((1090, 1105), 
'numpy.cos', 'np.cos', (['theta_B'], {}), '(theta_B)\n', (1096, 1105), True, 'import numpy as np\n'), ((1131, 1146), 'numpy.sin', 'np.sin', (['theta_B'], {}), '(theta_B)\n', (1137, 1146), True, 'import numpy as np\n'), ((1249, 1265), 'numpy.cos', 'np.cos', (['theta_BL'], {}), '(theta_BL)\n', (1255, 1265), True, 'import numpy as np\n'), ((1322, 1338), 'numpy.sin', 'np.sin', (['theta_BL'], {}), '(theta_BL)\n', (1328, 1338), True, 'import numpy as np\n'), ((1367, 1383), 'numpy.cos', 'np.cos', (['theta_BR'], {}), '(theta_BR)\n', (1373, 1383), True, 'import numpy as np\n'), ((1441, 1457), 'numpy.sin', 'np.sin', (['theta_BR'], {}), '(theta_BR)\n', (1447, 1457), True, 'import numpy as np\n'), ((1485, 1496), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (1491, 1496), True, 'import numpy as np\n'), ((1557, 1568), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (1563, 1568), True, 'import numpy as np\n'), ((1595, 1606), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (1601, 1606), True, 'import numpy as np\n'), ((1668, 1679), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (1674, 1679), True, 'import numpy as np\n')] |
import asyncio
from datetime import datetime
from typing import Collection, Dict, Optional, Sequence, Tuple
import aiomcache
import morcilla
import numpy as np
import pandas as pd
from sqlalchemy import and_, func, select
from sqlalchemy.orm.attributes import InstrumentedAttribute
from athenian.api.controllers.jira import JIRAConfig
from athenian.api.controllers.miners.filters import LabelFilter
from athenian.api.controllers.miners.jira.issue import fetch_jira_issues
from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseSettings
from athenian.api.models.metadata.jira import Issue
from athenian.api.tracing import sentry_span
@sentry_span
async def filter_epics(jira_ids: JIRAConfig,
                       time_from: Optional[datetime],
                       time_to: Optional[datetime],
                       exclude_inactive: bool,
                       labels: LabelFilter,
                       priorities: Collection[str],
                       reporters: Collection[str],
                       assignees: Collection[Optional[str]],
                       commenters: Collection[str],
                       default_branches: Dict[str, str],
                       release_settings: ReleaseSettings,
                       logical_settings: LogicalRepositorySettings,
                       account: int,
                       meta_ids: Tuple[int, ...],
                       mdb: morcilla.Database,
                       pdb: morcilla.Database,
                       cache: Optional[aiomcache.Client],
                       extra_columns: Collection[InstrumentedAttribute] = (),
                       ) -> Tuple[pd.DataFrame,
                                  pd.DataFrame,
                                  asyncio.Task,  # -> List[Mapping[str, Union[str, int]]]
                                  Dict[str, Sequence[int]]]:
    """
    Fetch JIRA epics and their children issues according to the given filters.

    :return: 1. epics \
             2. children \
             3. asyncio Task to fetch the subtask counts \
             4. map from epic_id to the indexes of the corresponding children in (2)
    """
    # filter the epics according to the passed filters
    epics = await fetch_jira_issues(
        jira_ids, time_from, time_to, exclude_inactive, labels,
        priorities, ["epic"], [], reporters, assignees, commenters, True,
        default_branches, release_settings, logical_settings, account, meta_ids, mdb, pdb, cache,
        extra_columns=extra_columns)
    if epics.empty:
        # Short-circuit: no epics matched; return empty frames and a no-op task
        # so callers can still await the subtask-count result uniformly.
        async def noop():
            return []
        return (epics,
                pd.DataFrame(columns=[
                    Issue.priority_id.name, Issue.status_id.name, Issue.project_id.name]),
                asyncio.create_task(noop()),
                {})
    # discover the issues belonging to those epics
    extra_columns = list(extra_columns)
    if Issue.parent_id not in extra_columns:
        # parent_id is required below for the epic-id fallback.
        extra_columns.append(Issue.parent_id)
    children = await fetch_jira_issues(
        jira_ids, None, None, False, LabelFilter.empty(),
        [], [], epics[Issue.key.name].values, [], [], [], False,
        default_branches, release_settings, logical_settings, account, meta_ids, mdb, pdb, cache,
        extra_columns=extra_columns)
    # plan to fetch the subtask counts, but not await it now
    subtasks = asyncio.create_task(
        mdb.fetch_all(select([Issue.parent_id, func.count(Issue.id).label("subtasks")])
                      .where(and_(Issue.acc_id == jira_ids[0],
                                 Issue.project_id.in_(jira_ids[1]),
                                 Issue.is_deleted.is_(False),
                                 Issue.parent_id.in_(children.index)))
                      .group_by(Issue.parent_id)),
        name="fetch JIRA subtask counts",
    )
    # Yield once so the subtasks task is scheduled before the CPU-bound work below.
    await asyncio.sleep(0)
    # Children without an explicit epic link fall back to their direct parent id.
    empty_epic_ids_mask = children[Issue.epic_id.name].isnull()
    children.loc[empty_epic_ids_mask, Issue.epic_id.name] = \
        children[Issue.parent_id.name][empty_epic_ids_mask]
    children_epic_ids = children[Issue.epic_id.name].values
    # Group the children row indexes by epic id with a single argsort + split
    # instead of scanning the frame once per epic.
    order = np.argsort(children_epic_ids)
    children_epic_ids = children_epic_ids[order]
    unique_children_epic_ids, counts = np.unique(children_epic_ids, return_counts=True)
    children_indexes = np.split(np.arange(len(order))[order], np.cumsum(counts)[:-1])
    epic_id_to_children_indexes = dict(zip(unique_children_epic_ids, children_indexes))
    return epics, children, subtasks, epic_id_to_children_indexes
| [
"pandas.DataFrame",
"athenian.api.models.metadata.jira.Issue.project_id.in_",
"athenian.api.controllers.miners.jira.issue.fetch_jira_issues",
"asyncio.sleep",
"numpy.argsort",
"numpy.cumsum",
"athenian.api.controllers.miners.filters.LabelFilter.empty",
"sqlalchemy.func.count",
"athenian.api.models.m... | [((4139, 4168), 'numpy.argsort', 'np.argsort', (['children_epic_ids'], {}), '(children_epic_ids)\n', (4149, 4168), True, 'import numpy as np\n'), ((4257, 4305), 'numpy.unique', 'np.unique', (['children_epic_ids'], {'return_counts': '(True)'}), '(children_epic_ids, return_counts=True)\n', (4266, 4305), True, 'import numpy as np\n'), ((2244, 2514), 'athenian.api.controllers.miners.jira.issue.fetch_jira_issues', 'fetch_jira_issues', (['jira_ids', 'time_from', 'time_to', 'exclude_inactive', 'labels', 'priorities', "['epic']", '[]', 'reporters', 'assignees', 'commenters', '(True)', 'default_branches', 'release_settings', 'logical_settings', 'account', 'meta_ids', 'mdb', 'pdb', 'cache'], {'extra_columns': 'extra_columns'}), "(jira_ids, time_from, time_to, exclude_inactive, labels,\n priorities, ['epic'], [], reporters, assignees, commenters, True,\n default_branches, release_settings, logical_settings, account, meta_ids,\n mdb, pdb, cache, extra_columns=extra_columns)\n", (2261, 2514), False, 'from athenian.api.controllers.miners.jira.issue import fetch_jira_issues\n'), ((3864, 3880), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (3877, 3880), False, 'import asyncio\n'), ((2644, 2740), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[Issue.priority_id.name, Issue.status_id.name, Issue.project_id.name]'}), '(columns=[Issue.priority_id.name, Issue.status_id.name, Issue.\n project_id.name])\n', (2656, 2740), True, 'import pandas as pd\n'), ((3082, 3101), 'athenian.api.controllers.miners.filters.LabelFilter.empty', 'LabelFilter.empty', ([], {}), '()\n', (3099, 3101), False, 'from athenian.api.controllers.miners.filters import LabelFilter\n'), ((4368, 4385), 'numpy.cumsum', 'np.cumsum', (['counts'], {}), '(counts)\n', (4377, 4385), True, 'import numpy as np\n'), ((3585, 3618), 'athenian.api.models.metadata.jira.Issue.project_id.in_', 'Issue.project_id.in_', (['jira_ids[1]'], {}), '(jira_ids[1])\n', (3605, 3618), False, 'from 
athenian.api.models.metadata.jira import Issue\n'), ((3654, 3681), 'athenian.api.models.metadata.jira.Issue.is_deleted.is_', 'Issue.is_deleted.is_', (['(False)'], {}), '(False)\n', (3674, 3681), False, 'from athenian.api.models.metadata.jira import Issue\n'), ((3717, 3752), 'athenian.api.models.metadata.jira.Issue.parent_id.in_', 'Issue.parent_id.in_', (['children.index'], {}), '(children.index)\n', (3736, 3752), False, 'from athenian.api.models.metadata.jira import Issue\n'), ((3447, 3467), 'sqlalchemy.func.count', 'func.count', (['Issue.id'], {}), '(Issue.id)\n', (3457, 3467), False, 'from sqlalchemy import and_, func, select\n')] |
from cellprofiler_core.constants.measurement import COLTYPE_INTEGER
from cellprofiler_core.constants.measurement import FF_CHILDREN_COUNT
from cellprofiler_core.constants.measurement import FF_PARENT
from cellprofiler_core.image import ObjectsImage
from cellprofiler_core.module import Identify
from cellprofiler_core.setting.choice import Choice
from cellprofiler_core.setting.subscriber import ImageSubscriber
from cellprofiler_core.setting.subscriber import LabelSubscriber
from cellprofiler_core.setting.text import LabelName
from cellprofiler_core.utilities.core.module.identify import (
add_object_count_measurements,
)
from cellprofiler_core.utilities.core.module.identify import (
add_object_location_measurements_ijv,
)
from cellprofiler_core.utilities.core.module.identify import (
get_object_measurement_columns,
)
from cellprofiler.modules import _help
__doc__ = """\
EditObjectsManually
===================
**EditObjectsManually** allows you create, remove and edit objects
previously defined.
The interface will show the image that you selected as the guiding
image, overlaid with colored outlines of the selected objects (or filled
objects if you choose). This module allows you to remove or edit
specific objects by pointing and clicking to select objects for removal
or editing. Once editing is complete, the module displays the objects as
originally identified (left) and the objects that remain after this
module (right). More detailed Help is provided in the editing window via
the ‘?’ button. The pipeline pauses once per processed image when it
reaches this module. You must press the *Done* button to accept the
selected objects and continue the pipeline.
|
============ ============ ===============
Supports 2D? Supports 3D? Respects masks?
============ ============ ===============
YES NO YES
============ ============ ===============
Measurements made by this module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Image measurements:**
- *Count:* The number of edited objects in the image.
**Object measurements:**
- *Location\_X, Location\_Y:* The pixel (X,Y) coordinates of the center
of mass of the edited objects.
See also
^^^^^^^^
See also **FilterObjects**, **MaskObject**, **OverlayOutlines**,
**ConvertToImage**.
{HELP_ON_SAVING_OBJECTS}
""".format(
**{"HELP_ON_SAVING_OBJECTS": _help.HELP_ON_SAVING_OBJECTS}
)
import os
import numpy
from cellprofiler_core.object import Objects
from cellprofiler_core.setting import Binary
from cellprofiler_core.utilities.pathname import pathname2url
###########################################
#
# Choices for the "do you want to renumber your objects" setting
#
###########################################
R_RENUMBER = "Renumber"  # number the surviving objects consecutively from 1
R_RETAIN = "Retain"  # keep each surviving object's original label number
class EditObjectsManually(Identify):
    """Pipeline module that lets the user interactively edit labeled objects.

    ``run`` hands the original labels to the UI through
    ``handle_interaction``, rebuilds an overlap-capable (ijv) object set from
    the edited labels and records count, location and parent/child
    measurements relating the edited objects back to the originals.
    """

    category = "Object Processing"
    variable_revision_number = 4
    module_name = "EditObjectsManually"

    def create_settings(self):
        """Declare the module's settings.

        Defines the input object name, the name of the edited output objects,
        whether objects may overlap, how surviving objects are numbered, and
        an optional guide image displayed while editing.
        """
        self.object_name = LabelSubscriber(
            "Select the objects to be edited",
            "None",
            doc="""\
Choose a set of previously identified objects
for editing, such as those produced by one of the
**Identify** modules (e.g., "*IdentifyPrimaryObjects*", "*IdentifySecondaryObjects*" etc.).""",
        )

        self.filtered_objects = LabelName(
            "Name the edited objects",
            "EditedObjects",
            doc="""\
Enter the name for the objects that remain
after editing. These objects will be available for use by
subsequent modules.""",
        )

        self.allow_overlap = Binary(
            "Allow overlapping objects?",
            False,
            doc="""\
**EditObjectsManually** can allow you to edit an object so that it
overlaps another or it can prevent you from overlapping one object with
another. Objects such as worms or the neurites of neurons may cross each
other and might need to be edited with overlapping allowed, whereas a
monolayer of cells might be best edited with overlapping off.
Select "*Yes*" to allow overlaps or select "*No*" to prevent them.
"""
            % globals(),
        )

        self.renumber_choice = Choice(
            "Numbering of the edited objects",
            [R_RENUMBER, R_RETAIN],
            doc="""\
Choose how to number the objects that remain after editing, which
controls how edited objects are associated with their predecessors:

-  *%(R_RENUMBER)s:* The module will number the objects that remain
   using consecutive numbers. This is a good choice if you do not plan
   to use measurements from the original objects and you only want to
   use the edited objects in downstream modules; the objects that remain
   after editing will not have gaps in numbering where removed objects
   are missing.
-  *%(R_RETAIN)s:* This option will retain each object’s original
   number so that the edited object’s number matches its original
   number. This allows any measurements you make from the edited objects
   to be directly aligned with measurements you might have made of the
   original, unedited objects (or objects directly associated with
   them).
"""
            % globals(),
        )

        self.wants_image_display = Binary(
            "Display a guiding image?",
            True,
            doc="""\
Select "*Yes*" to display an image and outlines of the objects.

Select "*No*" if you do not want a guide image while editing.
"""
            % globals(),
        )

        self.image_name = ImageSubscriber(
            "Select the guiding image",
            "None",
            doc="""\
*(Used only if a guiding image is desired)*

This is the image that will appear when editing objects. Choose an image
supplied by a previous module.
""",
        )

    def settings(self):
        """Return the settings to be loaded or saved to/from the pipeline

        These are the settings (from cellprofiler_core.settings) that are
        either read from the strings in the pipeline or written out
        to the pipeline. The settings should appear in a consistent
        order so they can be matched to the strings in the pipeline.
        """
        return [
            self.object_name,
            self.filtered_objects,
            self.renumber_choice,
            self.wants_image_display,
            self.image_name,
            self.allow_overlap,
        ]

    def visible_settings(self):
        """Return the settings shown in the GUI, in display order.

        The guide image chooser is only shown when a guiding image was
        requested.
        """
        result = [
            self.object_name,
            self.filtered_objects,
            self.allow_overlap,
            self.renumber_choice,
            self.wants_image_display,
        ]
        if self.wants_image_display:
            result += [self.image_name]
        return result

    def run(self, workspace):
        """Run the module on one image set.

        workspace - The workspace contains
            pipeline - instance of cpp for this run
            image_set - the images in the image set being processed
            object_set - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame - the parent frame to whatever frame is created. None means don't draw.
        """
        orig_objects_name = self.object_name.value
        filtered_objects_name = self.filtered_objects.value

        orig_objects = workspace.object_set.get_objects(orig_objects_name)
        assert isinstance(orig_objects, Objects)
        orig_labels = [l for l, c in orig_objects.get_labels()]

        if self.wants_image_display:
            guide_image = workspace.image_set.get_image(self.image_name.value)
            guide_image = guide_image.pixel_data
            # Stretch the guide image to span [0, 1] unless it is constant
            # (a constant image would cause division by zero).
            if numpy.any(guide_image != numpy.min(guide_image)):
                guide_image = (guide_image - numpy.min(guide_image)) / (
                    numpy.max(guide_image) - numpy.min(guide_image)
                )
        else:
            guide_image = None
        filtered_labels = workspace.interaction_request(
            self, orig_labels, guide_image, workspace.measurements.image_set_number
        )
        if filtered_labels is None:
            # Ask whoever is listening to stop doing stuff
            workspace.cancel_request()
            # Have to soldier on until the cancel takes effect...
            filtered_labels = orig_labels
        #
        # Renumber objects consecutively if asked to do so
        #
        unique_labels = numpy.unique(numpy.array(filtered_labels))
        unique_labels = unique_labels[unique_labels != 0]
        object_count = len(unique_labels)
        if self.renumber_choice == R_RENUMBER:
            mapping = numpy.zeros(
                1 if len(unique_labels) == 0 else numpy.max(unique_labels) + 1, int
            )
            mapping[unique_labels] = numpy.arange(1, object_count + 1)
            filtered_labels = [mapping[l] for l in filtered_labels]
        #
        # Make the objects out of the labels
        #
        filtered_objects = Objects()
        i, j = numpy.mgrid[
            0 : filtered_labels[0].shape[0], 0 : filtered_labels[0].shape[1]
        ]
        # Accumulate an ijv (row, column, label) representation, which can
        # express overlapping objects.
        ijv = numpy.zeros((0, 3), filtered_labels[0].dtype)
        for l in filtered_labels:
            ijv = numpy.vstack(
                (ijv, numpy.column_stack((i[l != 0], j[l != 0], l[l != 0])))
            )
        filtered_objects.set_ijv(ijv, orig_labels[0].shape)
        if orig_objects.has_unedited_segmented():
            filtered_objects.unedited_segmented = orig_objects.unedited_segmented
        if orig_objects.parent_image is not None:
            filtered_objects.parent_image = orig_objects.parent_image
        workspace.object_set.add_objects(filtered_objects, filtered_objects_name)
        #
        # Add parent/child & other measurements
        #
        m = workspace.measurements
        child_count, parents = orig_objects.relate_children(filtered_objects)
        m.add_measurement(
            filtered_objects_name, FF_PARENT % orig_objects_name, parents,
        )
        m.add_measurement(
            orig_objects_name, FF_CHILDREN_COUNT % filtered_objects_name, child_count,
        )
        #
        # The object count
        #
        add_object_count_measurements(m, filtered_objects_name, object_count)
        #
        # The object locations
        #
        add_object_location_measurements_ijv(m, filtered_objects_name, ijv)

        workspace.display_data.orig_ijv = orig_objects.ijv
        workspace.display_data.filtered_ijv = filtered_objects.ijv
        workspace.display_data.shape = orig_labels[0].shape

    def display(self, workspace, figure):
        """Show the original (left) and edited (right) objects side by side."""
        orig_ijv = workspace.display_data.orig_ijv
        filtered_ijv = workspace.display_data.filtered_ijv
        shape = workspace.display_data.shape
        figure.set_subplots((2, 1))
        ax0 = figure.subplot_imshow_ijv(
            0, 0, orig_ijv, shape=shape, title=self.object_name.value
        )
        # Share axes so pan/zoom stays synchronized between the two panels.
        figure.subplot_imshow_ijv(
            1,
            0,
            filtered_ijv,
            shape=shape,
            title=self.filtered_objects.value,
            sharex=ax0,
            sharey=ax0,
        )

    def run_as_data_tool(self):
        """Run interactively outside a pipeline.

        Prompts for an objects file (new or existing) and a guide image,
        opens the editing dialog, then saves the edited labels either as an
        image stack or into an Ilastik project file.
        """
        from cellprofiler.gui.editobjectsdlg import EditObjectsDialog
        import wx
        from wx.lib.filebrowsebutton import FileBrowseButton
        from bioformats import load_image

        with wx.Dialog(None) as dlg:
            dlg.Title = "Choose files for editing"
            dlg.Sizer = wx.BoxSizer(wx.VERTICAL)
            sub_sizer = wx.BoxSizer(wx.HORIZONTAL)
            dlg.Sizer.Add(sub_sizer, 0, wx.EXPAND | wx.ALL, 5)
            new_or_existing_rb = wx.RadioBox(
                dlg, style=wx.RA_VERTICAL, choices=("New", "Existing")
            )
            sub_sizer.Add(new_or_existing_rb, 0, wx.EXPAND)
            objects_file_fbb = FileBrowseButton(
                dlg,
                size=(300, -1),
                fileMask="Objects file (*.tif, *.tiff, *.png, *.bmp, *.jpg)|*.tif;*.tiff;*.png;*.bmp;*.jpg",
                dialogTitle="Select objects file",
                labelText="Objects file:",
            )
            # The objects file is only relevant for "Existing" mode.
            objects_file_fbb.Enable(False)
            sub_sizer.AddSpacer(5)
            sub_sizer.Add(objects_file_fbb, 0, wx.ALIGN_TOP | wx.ALIGN_RIGHT)

            def on_radiobox(event):
                objects_file_fbb.Enable(new_or_existing_rb.GetSelection() == 1)

            new_or_existing_rb.Bind(wx.EVT_RADIOBOX, on_radiobox)

            image_file_fbb = FileBrowseButton(
                dlg,
                size=(300, -1),
                fileMask="Objects file (*.tif, *.tiff, *.png, *.bmp, *.jpg)|*.tif;*.tiff;*.png;*.bmp;*.jpg",
                dialogTitle="Select guide image file",
                labelText="Guide image:",
            )
            dlg.Sizer.Add(image_file_fbb, 0, wx.EXPAND | wx.ALL, 5)

            allow_overlap_checkbox = wx.CheckBox(dlg, -1, "Allow objects to overlap")
            allow_overlap_checkbox.Value = True
            dlg.Sizer.Add(allow_overlap_checkbox, 0, wx.EXPAND | wx.ALL, 5)

            buttons = wx.StdDialogButtonSizer()
            dlg.Sizer.Add(
                buttons, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.ALL, 5
            )
            buttons.Add(wx.Button(dlg, wx.ID_OK))
            buttons.Add(wx.Button(dlg, wx.ID_CANCEL))
            buttons.Realize()
            dlg.Fit()
            result = dlg.ShowModal()
            if result != wx.ID_OK:
                return
            self.allow_overlap.value = allow_overlap_checkbox.Value
            fullname = objects_file_fbb.GetValue()
            guidename = image_file_fbb.GetValue()
        if new_or_existing_rb.GetSelection() == 1:
            # "Existing": load the objects image; each plane is one label matrix
            provider = ObjectsImage("InputObjects", pathname2url(fullname), None, None)
            image = provider.provide_image(None)
            pixel_data = image.pixel_data
            shape = pixel_data.shape[:2]
            labels = [pixel_data[:, :, i] for i in range(pixel_data.shape[2])]
        else:
            labels = None
        #
        # Load the guide image
        #
        guide_image = load_image(guidename)
        if numpy.min(guide_image) != numpy.max(guide_image):
            # stretch to [0, 1] (skip constant images to avoid divide-by-zero)
            guide_image = (guide_image - numpy.min(guide_image)) / (
                numpy.max(guide_image) - numpy.min(guide_image)
            )
        if labels is None:
            # "New": start with a single empty label matrix the size of the guide
            shape = guide_image.shape[:2]
            labels = [numpy.zeros(shape, int)]
        with EditObjectsDialog(
            guide_image, labels, self.allow_overlap, self.object_name.value
        ) as dialog_box:
            result = dialog_box.ShowModal()
            if result != wx.OK:
                return
            labels = dialog_box.labels
        with wx.FileDialog(None, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as dlg:
            dlg.Path = fullname
            dlg.Wildcard = (
                "Object image file (*.tif,*.tiff)|*.tif;*.tiff|"
                "Ilastik project file (*.ilp)|*.ilp"
            )
            result = dlg.ShowModal()
            fullname = dlg.Path
            if result == wx.ID_OK:
                if fullname.endswith(".ilp"):
                    self.save_into_ilp(fullname, labels, guidename)
                else:
                    from bioformats.formatwriter import write_image
                    from bioformats.omexml import PT_UINT16

                    if os.path.exists(fullname):
                        os.unlink(fullname)
                    for i, l in enumerate(labels):
                        write_image(fullname, l, PT_UINT16, t=i, size_t=len(labels))

    def save_into_ilp(self, project_name, labels, guidename):
        """Write the edited labels into an Ilastik .ilp project file.

        Looks up the dataset whose ``fileName`` attribute equals *guidename*
        and stores a (possibly sub-sampled) background/foreground mask into
        its ``labels`` array: 1 = background, 2 = object.

        project_name - path to the .ilp (HDF5) project file
        labels       - list of label matrices from the editing dialog
        guidename    - file name of the guide image to look up in the project
        """
        import h5py
        import wx

        # Open explicitly in "a" (read/write) mode: the legacy h5py default;
        # required because project_labels is written below.
        with h5py.File(project_name, "a") as f:
            g = f["DataSets"]
            for k in g:
                data_item = g[k]
                if data_item.attrs.get("fileName") == guidename:
                    break
            else:
                wx.MessageBox(
                    "Sorry, could not find the file, %s, in the project, %s"
                    % (guidename, project_name)
                )
                # Bug fix: previously execution fell through here and wrote
                # into whichever dataset happened to be iterated last.
                return
            project_labels = data_item["labels"]["data"]
            mask = numpy.ones(project_labels.shape[2:4], project_labels.dtype)
            for label in labels:
                mask[label != 0] = 2
            #
            # "only" use the first 100,000 points in the image
            #
            subsample = 100000
            npts = numpy.prod(mask.shape)
            if npts > subsample:
                r = numpy.random.RandomState()
                # deterministic seed derived from the mask contents
                r.seed(numpy.sum(mask) % (2 ** 16))
                i, j = numpy.mgrid[0 : mask.shape[0], 0 : mask.shape[1]]
                i0 = i[mask == 1]
                j0 = j[mask == 1]
                i1 = i[mask == 2]
                j1 = j[mask == 2]
                if len(i1) < subsample / 2:
                    p0 = r.permutation(len(i0))[: (subsample - len(i1))]
                    p1 = numpy.arange(len(i1))
                elif len(i0) < subsample / 2:
                    p0 = numpy.arange(len(i0))
                    p1 = r.permutation(len(i1))[: (subsample - len(i0))]
                else:
                    # // not /: slice indices must be integers; the old
                    # float division raised TypeError under Python 3 here.
                    p0 = r.permutation(len(i0))[: (subsample // 2)]
                    p1 = r.permutation(len(i1))[: (subsample // 2)]
                mask_copy = numpy.zeros(mask.shape, mask.dtype)
                mask_copy[i0[p0], j0[p0]] = 1
                mask_copy[i1[p1], j1[p1]] = 2
                if "prediction" in data_item:
                    prediction = data_item["prediction"]
                    if numpy.max(prediction[0, 0, :, :, 0]) > 0.5:
                        # Only do if prediction was done (otherwise all == 0)
                        for n in range(2):
                            p = prediction[0, 0, :, :, n]
                            bad = (p < 0.5) & (mask == n + 1)
                            mask_copy[i[bad], j[bad]] = n + 1
                mask = mask_copy
            project_labels[0, 0, :, :, 0] = mask

    def handle_interaction(self, orig_labels, guide_image, image_set_number):
        """Open the editing dialog in the UI process and return the edited labels.

        Returns None if the user cancels, which makes ``run`` cancel the
        analysis request.
        """
        from cellprofiler.gui.editobjectsdlg import EditObjectsDialog
        from wx import OK

        title = "%s #%d, image cycle #%d: " % (
            self.module_name,
            self.module_num,
            image_set_number,
        )
        title += (
            "Create, remove and edit %s. \n  Click Help for full instructions"
            % self.object_name.value
        )
        with EditObjectsDialog(
            guide_image, orig_labels, self.allow_overlap, title
        ) as dialog_box:
            result = dialog_box.ShowModal()
            if result != OK:
                return None
            return dialog_box.labels

    def get_measurement_columns(self, pipeline):
        """Return information to use when creating database columns"""
        orig_image_name = self.object_name.value
        filtered_image_name = self.filtered_objects.value
        columns = get_object_measurement_columns(filtered_image_name)
        columns += [
            (
                orig_image_name,
                FF_CHILDREN_COUNT % filtered_image_name,
                COLTYPE_INTEGER,
            ),
            (filtered_image_name, FF_PARENT % orig_image_name, COLTYPE_INTEGER,),
        ]
        return columns

    def get_object_dictionary(self):
        """Return the dictionary that's used by identify.get_object_*"""
        return {self.filtered_objects.value: [self.object_name.value]}

    def get_categories(self, pipeline, object_name):
        """Get the measurement categories produced by this module

        pipeline - pipeline being run
        object_name - fetch categories for this object
        """
        categories = self.get_object_categories(
            pipeline, object_name, self.get_object_dictionary()
        )
        return categories

    def get_measurements(self, pipeline, object_name, category):
        """Get the measurement features produced by this module

        pipeline - pipeline being run
        object_name - fetch features for this object
        category - fetch features for this category
        """
        measurements = self.get_object_measurements(
            pipeline, object_name, category, self.get_object_dictionary()
        )
        return measurements

    def upgrade_settings(self, setting_values, variable_revision_number, module_name):
        """Migrate settings saved by older module revisions to revision 4."""
        if variable_revision_number == 1:
            # Added wants image + image
            setting_values = setting_values + ["No", "None"]
            variable_revision_number = 2

        if variable_revision_number == 2:
            # Added allow overlap, default = False
            setting_values = setting_values + ["No"]
            variable_revision_number = 3

        if variable_revision_number == 3:
            # Remove wants_outlines, outlines_name
            setting_values = setting_values[:2] + setting_values[4:]
            variable_revision_number = 4

        return setting_values, variable_revision_number
| [
"numpy.sum",
"os.unlink",
"cellprofiler_core.setting.text.LabelName",
"wx.CheckBox",
"numpy.ones",
"numpy.arange",
"wx.RadioBox",
"numpy.prod",
"wx.lib.filebrowsebutton.FileBrowseButton",
"cellprofiler.gui.editobjectsdlg.EditObjectsDialog",
"os.path.exists",
"numpy.random.RandomState",
"cell... | [((3479, 3746), 'cellprofiler_core.setting.subscriber.LabelSubscriber', 'LabelSubscriber', (['"""Select the objects to be edited"""', '"""None"""'], {'doc': '"""Choose a set of previously identified objects\nfor editing, such as those produced by one of the\n**Identify** modules (e.g., "*IdentifyPrimaryObjects*", "*IdentifySecondaryObjects*" etc.)."""'}), '(\'Select the objects to be edited\', \'None\', doc=\n """Choose a set of previously identified objects\nfor editing, such as those produced by one of the\n**Identify** modules (e.g., "*IdentifyPrimaryObjects*", "*IdentifySecondaryObjects*" etc.)."""\n )\n', (3494, 3746), False, 'from cellprofiler_core.setting.subscriber import LabelSubscriber\n'), ((3819, 4014), 'cellprofiler_core.setting.text.LabelName', 'LabelName', (['"""Name the edited objects"""', '"""EditedObjects"""'], {'doc': '"""Enter the name for the objects that remain\nafter editing. These objects will be available for use by\nsubsequent modules."""'}), '(\'Name the edited objects\', \'EditedObjects\', doc=\n """Enter the name for the objects that remain\nafter editing. These objects will be available for use by\nsubsequent modules."""\n )\n', (3828, 4014), False, 'from cellprofiler_core.setting.text import LabelName\n'), ((5980, 6202), 'cellprofiler_core.setting.subscriber.ImageSubscriber', 'ImageSubscriber', (['"""Select the guiding image"""', '"""None"""'], {'doc': '"""*(Used only if a guiding image is desired)*\n\nThis is the image that will appear when editing objects. Choose an image\nsupplied by a previous module.\n"""'}), '(\'Select the guiding image\', \'None\', doc=\n """*(Used only if a guiding image is desired)*\n\nThis is the image that will appear when editing objects. 
Choose an image\nsupplied by a previous module.\n"""\n )\n', (5995, 6202), False, 'from cellprofiler_core.setting.subscriber import ImageSubscriber\n'), ((9444, 9453), 'cellprofiler_core.object.Objects', 'Objects', ([], {}), '()\n', (9451, 9453), False, 'from cellprofiler_core.object import Objects\n'), ((9583, 9628), 'numpy.zeros', 'numpy.zeros', (['(0, 3)', 'filtered_labels[0].dtype'], {}), '((0, 3), filtered_labels[0].dtype)\n', (9594, 9628), False, 'import numpy\n'), ((10652, 10721), 'cellprofiler_core.utilities.core.module.identify.add_object_count_measurements', 'add_object_count_measurements', (['m', 'filtered_objects_name', 'object_count'], {}), '(m, filtered_objects_name, object_count)\n', (10681, 10721), False, 'from cellprofiler_core.utilities.core.module.identify import add_object_count_measurements\n'), ((10781, 10848), 'cellprofiler_core.utilities.core.module.identify.add_object_location_measurements_ijv', 'add_object_location_measurements_ijv', (['m', 'filtered_objects_name', 'ijv'], {}), '(m, filtered_objects_name, ijv)\n', (10817, 10848), False, 'from cellprofiler_core.utilities.core.module.identify import add_object_location_measurements_ijv\n'), ((14594, 14615), 'bioformats.load_image', 'load_image', (['guidename'], {}), '(guidename)\n', (14604, 14615), False, 'from bioformats import load_image\n'), ((19510, 19561), 'cellprofiler_core.utilities.core.module.identify.get_object_measurement_columns', 'get_object_measurement_columns', (['filtered_image_name'], {}), '(filtered_image_name)\n', (19540, 19561), False, 'from cellprofiler_core.utilities.core.module.identify import get_object_measurement_columns\n'), ((8903, 8931), 'numpy.array', 'numpy.array', (['filtered_labels'], {}), '(filtered_labels)\n', (8914, 8931), False, 'import numpy\n'), ((9250, 9283), 'numpy.arange', 'numpy.arange', (['(1)', '(object_count + 1)'], {}), '(1, object_count + 1)\n', (9262, 9283), False, 'import numpy\n'), ((11850, 11865), 'wx.Dialog', 'wx.Dialog', (['None'], {}), 
'(None)\n', (11859, 11865), False, 'import wx\n'), ((11949, 11973), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (11960, 11973), False, 'import wx\n'), ((11998, 12024), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (12009, 12024), False, 'import wx\n'), ((12121, 12188), 'wx.RadioBox', 'wx.RadioBox', (['dlg'], {'style': 'wx.RA_VERTICAL', 'choices': "('New', 'Existing')"}), "(dlg, style=wx.RA_VERTICAL, choices=('New', 'Existing'))\n", (12132, 12188), False, 'import wx\n'), ((12310, 12512), 'wx.lib.filebrowsebutton.FileBrowseButton', 'FileBrowseButton', (['dlg'], {'size': '(300, -1)', 'fileMask': '"""Objects file (*.tif, *.tiff, *.png, *.bmp, *.jpg)|*.tif;*.tiff;*.png;*.bmp;*.jpg"""', 'dialogTitle': '"""Select objects file"""', 'labelText': '"""Objects file:"""'}), "(dlg, size=(300, -1), fileMask=\n 'Objects file (*.tif, *.tiff, *.png, *.bmp, *.jpg)|*.tif;*.tiff;*.png;*.bmp;*.jpg'\n , dialogTitle='Select objects file', labelText='Objects file:')\n", (12326, 12512), False, 'from wx.lib.filebrowsebutton import FileBrowseButton\n'), ((12968, 13173), 'wx.lib.filebrowsebutton.FileBrowseButton', 'FileBrowseButton', (['dlg'], {'size': '(300, -1)', 'fileMask': '"""Objects file (*.tif, *.tiff, *.png, *.bmp, *.jpg)|*.tif;*.tiff;*.png;*.bmp;*.jpg"""', 'dialogTitle': '"""Select guide image file"""', 'labelText': '"""Guide image:"""'}), "(dlg, size=(300, -1), fileMask=\n 'Objects file (*.tif, *.tiff, *.png, *.bmp, *.jpg)|*.tif;*.tiff;*.png;*.bmp;*.jpg'\n , dialogTitle='Select guide image file', labelText='Guide image:')\n", (12984, 13173), False, 'from wx.lib.filebrowsebutton import FileBrowseButton\n'), ((13365, 13413), 'wx.CheckBox', 'wx.CheckBox', (['dlg', '(-1)', '"""Allow objects to overlap"""'], {}), "(dlg, -1, 'Allow objects to overlap')\n", (13376, 13413), False, 'import wx\n'), ((13561, 13586), 'wx.StdDialogButtonSizer', 'wx.StdDialogButtonSizer', ([], {}), '()\n', (13584, 13586), False, 'import wx\n'), ((14627, 
14649), 'numpy.min', 'numpy.min', (['guide_image'], {}), '(guide_image)\n', (14636, 14649), False, 'import numpy\n'), ((14653, 14675), 'numpy.max', 'numpy.max', (['guide_image'], {}), '(guide_image)\n', (14662, 14675), False, 'import numpy\n'), ((14953, 15040), 'cellprofiler.gui.editobjectsdlg.EditObjectsDialog', 'EditObjectsDialog', (['guide_image', 'labels', 'self.allow_overlap', 'self.object_name.value'], {}), '(guide_image, labels, self.allow_overlap, self.object_name\n .value)\n', (14970, 15040), False, 'from cellprofiler.gui.editobjectsdlg import EditObjectsDialog\n'), ((15255, 15317), 'wx.FileDialog', 'wx.FileDialog', (['None'], {'style': '(wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)'}), '(None, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n', (15268, 15317), False, 'import wx\n'), ((16233, 16256), 'h5py.File', 'h5py.File', (['project_name'], {}), '(project_name)\n', (16242, 16256), False, 'import h5py\n'), ((16709, 16768), 'numpy.ones', 'numpy.ones', (['project_labels.shape[2:4]', 'project_labels.dtype'], {}), '(project_labels.shape[2:4], project_labels.dtype)\n', (16719, 16768), False, 'import numpy\n'), ((16980, 17002), 'numpy.prod', 'numpy.prod', (['mask.shape'], {}), '(mask.shape)\n', (16990, 17002), False, 'import numpy\n'), ((19018, 19088), 'cellprofiler.gui.editobjectsdlg.EditObjectsDialog', 'EditObjectsDialog', (['guide_image', 'orig_labels', 'self.allow_overlap', 'title'], {}), '(guide_image, orig_labels, self.allow_overlap, title)\n', (19035, 19088), False, 'from cellprofiler.gui.editobjectsdlg import EditObjectsDialog\n'), ((13734, 13758), 'wx.Button', 'wx.Button', (['dlg', 'wx.ID_OK'], {}), '(dlg, wx.ID_OK)\n', (13743, 13758), False, 'import wx\n'), ((13784, 13812), 'wx.Button', 'wx.Button', (['dlg', 'wx.ID_CANCEL'], {}), '(dlg, wx.ID_CANCEL)\n', (13793, 13812), False, 'import wx\n'), ((14234, 14256), 'cellprofiler_core.utilities.pathname.pathname2url', 'pathname2url', (['fullname'], {}), '(fullname)\n', (14246, 14256), False, 'from 
cellprofiler_core.utilities.pathname import pathname2url\n'), ((14915, 14938), 'numpy.zeros', 'numpy.zeros', (['shape', 'int'], {}), '(shape, int)\n', (14926, 14938), False, 'import numpy\n'), ((16475, 16579), 'wx.MessageBox', 'wx.MessageBox', (["('Sorry, could not find the file, %s, in the project, %s' % (guidename,\n project_name))"], {}), "('Sorry, could not find the file, %s, in the project, %s' % (\n guidename, project_name))\n", (16488, 16579), False, 'import wx\n'), ((17056, 17082), 'numpy.random.RandomState', 'numpy.random.RandomState', ([], {}), '()\n', (17080, 17082), False, 'import numpy\n'), ((17858, 17893), 'numpy.zeros', 'numpy.zeros', (['mask.shape', 'mask.dtype'], {}), '(mask.shape, mask.dtype)\n', (17869, 17893), False, 'import numpy\n'), ((8165, 8187), 'numpy.min', 'numpy.min', (['guide_image'], {}), '(guide_image)\n', (8174, 8187), False, 'import numpy\n'), ((9717, 9770), 'numpy.column_stack', 'numpy.column_stack', (['(i[l != 0], j[l != 0], l[l != 0])'], {}), '((i[l != 0], j[l != 0], l[l != 0]))\n', (9735, 9770), False, 'import numpy\n'), ((14718, 14740), 'numpy.min', 'numpy.min', (['guide_image'], {}), '(guide_image)\n', (14727, 14740), False, 'import numpy\n'), ((14762, 14784), 'numpy.max', 'numpy.max', (['guide_image'], {}), '(guide_image)\n', (14771, 14784), False, 'import numpy\n'), ((14787, 14809), 'numpy.min', 'numpy.min', (['guide_image'], {}), '(guide_image)\n', (14796, 14809), False, 'import numpy\n'), ((15912, 15936), 'os.path.exists', 'os.path.exists', (['fullname'], {}), '(fullname)\n', (15926, 15936), False, 'import os\n'), ((8235, 8257), 'numpy.min', 'numpy.min', (['guide_image'], {}), '(guide_image)\n', (8244, 8257), False, 'import numpy\n'), ((8283, 8305), 'numpy.max', 'numpy.max', (['guide_image'], {}), '(guide_image)\n', (8292, 8305), False, 'import numpy\n'), ((8308, 8330), 'numpy.min', 'numpy.min', (['guide_image'], {}), '(guide_image)\n', (8317, 8330), False, 'import numpy\n'), ((9165, 9189), 'numpy.max', 'numpy.max', 
(['unique_labels'], {}), '(unique_labels)\n', (9174, 9189), False, 'import numpy\n'), ((15962, 15981), 'os.unlink', 'os.unlink', (['fullname'], {}), '(fullname)\n', (15971, 15981), False, 'import os\n'), ((17106, 17121), 'numpy.sum', 'numpy.sum', (['mask'], {}), '(mask)\n', (17115, 17121), False, 'import numpy\n'), ((18112, 18148), 'numpy.max', 'numpy.max', (['prediction[0, 0, :, :, 0]'], {}), '(prediction[0, 0, :, :, 0])\n', (18121, 18148), False, 'import numpy\n')] |
import subprocess
import numpy as np
import time
# Public names exported by `from <module> import *`.
__all__ = [
    "all_gpus_currently_idle",
    "all_gpus_idle",
    "shutdown_when_all_gpus_idle",
    "shutdown_when_all_gpus_idle_call",
    "shutdown_timed",
    "shutdown_timed_call",
]
def as_int(x, default=0):
    """Convert a thing to an integer.

    Args:
        x (object): Thing to convert.
        default (int, optional): Default value to return in case the conversion fails.

    Returns:
        int: `x` as an integer, or `default` if `x` cannot be converted.
    """
    try:
        return int(x)
    except (TypeError, ValueError):
        # `int()` raises ValueError for malformed strings and TypeError for
        # unconvertible objects (e.g. None); treat both as a failed parse.
        return default
def all_gpus_currently_idle():
    """Check if all GPUs are now idle.

    Queries `nvidia-smi` for the per-GPU utilisation and considers the
    machine idle when every GPU reports 0%.

    Returns:
        bool: `True` if all GPUs are idle, `False` if not.
    """
    query = subprocess.Popen(
        [
            "nvidia-smi",
            "--query-gpu=utilization.gpu",
            "--format=csv,noheader,nounits",
        ],
        stdout=subprocess.PIPE,
    )
    raw_output, _ = query.communicate()
    per_gpu_utilisation = np.array(
        [as_int(line.decode()) for line in raw_output.splitlines()]
    )
    return np.all(per_gpu_utilisation == 0)
def all_gpus_idle(duration=120):
    """Check if all GPUs are idle for a while.

    Polls the GPUs every 0.1 s until `duration` seconds have elapsed and
    reports whether they stayed idle for the whole period.

    Args:
        duration (int, optional): Number of seconds to check. Defaults to two minutes.

    Returns:
        bool: `True` if all GPUs are idle for a while, `False` if not.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if not all_gpus_currently_idle():
            return False
        time.sleep(0.1)
    return True
def shutdown():
    """Shutdown the machine now via the system `shutdown -h now` command."""
    # NOTE(review): requires sufficient privileges to halt the machine.
    subprocess.call(["shutdown", "-h", "now"])
def shutdown_when_all_gpus_idle(duration=120):
    """Shutdown when all GPUs are idle for a while.

    Args:
        duration (int, optional): Number of seconds to check the GPUs. Defaults to two
            minutes.
    """
    # Loops forever: checks idleness, attempts shutdown when idle, and waits a
    # minute between checks (also retries if the shutdown command fails).
    while True:
        if all_gpus_idle(duration=duration):
            shutdown()
        time.sleep(60)
def shutdown_when_all_gpus_idle_call(duration=120):
    """Like :func:`.shutdown_when_all_gpus_idle`, but returns the call as a string.

    Returns:
        str: Call.
    """
    return "shutdown_when_all_gpus_idle(duration={})".format(duration)
def shutdown_timed(duration=120):
    """Shutdown after a while.

    Args:
        duration (int, optional): Number of seconds to wait before shutting down.
    """
    # Block for the grace period, then power off.
    time.sleep(duration)
    shutdown()
def shutdown_timed_call(duration=120):
    """Like :func:`.shutdown_timed`, but returns the call as a string.

    Returns:
        str: Call.
    """
    return "shutdown_timed(duration={})".format(duration)
| [
"subprocess.Popen",
"time.sleep",
"time.time",
"subprocess.call",
"numpy.all"
] | [((734, 858), 'subprocess.Popen', 'subprocess.Popen', (["['nvidia-smi', '--query-gpu=utilization.gpu', '--format=csv,noheader,nounits']"], {'stdout': 'subprocess.PIPE'}), "(['nvidia-smi', '--query-gpu=utilization.gpu',\n '--format=csv,noheader,nounits'], stdout=subprocess.PIPE)\n", (750, 858), False, 'import subprocess\n'), ((1041, 1066), 'numpy.all', 'np.all', (['(utilisations == 0)'], {}), '(utilisations == 0)\n', (1047, 1066), True, 'import numpy as np\n'), ((1368, 1379), 'time.time', 'time.time', ([], {}), '()\n', (1377, 1379), False, 'import time\n'), ((1571, 1613), 'subprocess.call', 'subprocess.call', (["['shutdown', '-h', 'now']"], {}), "(['shutdown', '-h', 'now'])\n", (1586, 1613), False, 'import subprocess\n'), ((2363, 2383), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (2373, 2383), False, 'import time\n'), ((1390, 1401), 'time.time', 'time.time', ([], {}), '()\n', (1399, 1401), False, 'import time\n'), ((1497, 1512), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1507, 1512), False, 'import time\n'), ((1934, 1948), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (1944, 1948), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
MuLES client example: data_two_intervals
This example shows the utilization of MuLES to:
- Start acquisition of data from one device
- Get data from MuLES during two periods of 15 and 10 seconds
- Send triggers that will be reflected in the saved data
The script is divided as follows
1. Connection with MuLES, and retrieve EEG data parameters
2. A trigger is sent (trigger = 10) + beep
3. Request 15 seconds of EEG data
4. A trigger is sent (trigger = 20) + beep
5. Request 10 seconds of EEG data
6. A trigger is sent (trigger = 30) + beep
7. Acquisition is finished
8. Plot acquired signals
Instructions:
(MuLES and the Client are expected to be in the same computer, if that is not
the case, modify ip address, in Section 1 of this script)
1 Run MuLES
2 Select your device
(Alternatively you can select FILE and the example recording:
log20141210_195303.csv)
    3 Select Streaming and Logging
      (In case of reading from a File, you cannot change these options)
4 Click on PLAY
5 Run this script
"""
import mules # The signal acquisition toolbox we'll use (MuLES)
import numpy as np # Module that simplifies computations on matrices
import matplotlib.pyplot as plt # Module used for plotting
if __name__ == "__main__":
    plt.close('all')  # close any figure left over from a previous run
    # 1. Acquisition is started
    # creates mules_client object and:
    mules_client = mules.MulesClient('127.0.0.1', 30000) # connects with MuLES at 127.0.0.1 : 30000
    device_name = mules_client.getdevicename() # get device name
    channel_names = mules_client.getnames() # get channel names
    fs = 1.0 * mules_client.getfs() # get sampling frequency (forced to float for division below)
    # 2. Sending trigger 10 (audible beep marks the trigger;
    #    tone() args presumably (frequency, duration_ms) — confirm with MuLES docs)
    mules_client.sendtrigger(10)
    mules_client.tone(600,250)
    # 3. Request 15 seconds of EEG data
    eeg_data_1 = mules_client.getdata(15)
    # 4. Sending trigger 20
    mules_client.sendtrigger(20)
    mules_client.tone(600,250)
    # 5. Request 10 seconds of EEG data
    eeg_data_2 = mules_client.getdata(10)
    # 6. Sending trigger 30 (higher-pitched beep marks the end)
    mules_client.sendtrigger(30)
    mules_client.tone(900,250)
    # 7. Close connection with MuLES
    mules_client.disconnect()
    # 8. Plot results: build time axes in seconds from the sample counts
    time_vector_1 = np.arange(0,eeg_data_1.shape[0]) / fs
    time_vector_2 = np.arange(0,eeg_data_2.shape[0]) / fs
    channel = 4;  # 1-based channel to display
    h, axarr = plt.subplots(2, sharex=True)
    h.canvas.set_window_title('EEG data from: ' + device_name + '. Electrode: ' + channel_names[channel-1] )
    axarr[0].plot(time_vector_1, eeg_data_1[:,channel - 1])
    axarr[1].plot(time_vector_2, eeg_data_2[:,channel - 1])
| [
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.arange",
"mules.MulesClient"
] | [((1302, 1318), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1311, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1418, 1455), 'mules.MulesClient', 'mules.MulesClient', (['"""127.0.0.1"""', '(30000)'], {}), "('127.0.0.1', 30000)\n", (1435, 1455), False, 'import mules\n'), ((2436, 2464), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (2448, 2464), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2340), 'numpy.arange', 'np.arange', (['(0)', 'eeg_data_1.shape[0]'], {}), '(0, eeg_data_1.shape[0])\n', (2316, 2340), True, 'import numpy as np\n'), ((2366, 2399), 'numpy.arange', 'np.arange', (['(0)', 'eeg_data_2.shape[0]'], {}), '(0, eeg_data_2.shape[0])\n', (2375, 2399), True, 'import numpy as np\n')] |
import os
import numpy as np
from collections import namedtuple
from pathlib import Path
from typing import Tuple, List, Dict, Optional
from algorithms.actor_critic import ActorCriticParams
from algorithms.discriminator import DiscrimParams
class Count:
    """A persistent scalar accumulator stored on disk as ``<name>.npy``.

    On construction, a previously saved count (if any) is loaded so a
    restarted run continues where it left off; otherwise the count starts at 0.
    """

    def __init__(self, name: str, datatype: type, save_path: Path):
        """
        Args:
            name: Name of the count; also the save filename stem.
            datatype: Type that recorded data must have (e.g. ``int``).
            save_path: Directory in which ``<name>.npy`` lives.
        """
        self.name = name
        self.datatype = datatype
        self.count_save_path = save_path / f"{name}.npy"
        if self.count_save_path.exists():
            print(f"Loading count: {name}")
            try:
                self.value = np.load(f"{self.count_save_path}")
            except ValueError:
                # Older saves may contain pickled objects, which require
                # allow_pickle=True with modern numpy.
                print(self.count_save_path)
                self.value = np.load(f"{self.count_save_path}", allow_pickle=True)
        else:
            self.value = 0  # Count value

    def record_count(self, counted_data):
        """
        Adds `counted_data` to the current count. The value is only held in
        memory; call `save_count` to persist it to disk.

        Args:
            counted_data: Data to be added to the current count.
        """
        assert type(counted_data) == self.datatype
        self.value += counted_data

    def save_count(self, verbose: bool):
        """Persist the current count value to ``<name>.npy``.

        Args:
            verbose: If True, print a message describing the save.
        """
        if verbose:
            print(f"Saving count: {self.name} at {self.count_save_path}")
        np.save(f"{self.count_save_path}", self.value)
class Plot:
    """A chunked, persistent series of data points saved as numpy files.

    Entries accumulate in memory and are flushed to
    ``<save_path>/<name>/<name>_<file_num>.npy`` once ``max_plot_size``
    entries have been recorded. On construction an existing series is
    resumed from its newest chunk so interrupted runs can continue.
    """

    def __init__(self, name: str, datatype: type, save_path: Path, max_plot_size: int):
        self.name = name
        self.datatype = datatype
        self.plot_save_path = save_path / self.name
        self.max_plot_size = max_plot_size
        # Resume from disk if chunk 1 exists, otherwise start a new series.
        if (self.plot_save_path / self._get_filename(1)).exists():
            print(f"Loading plot: {name}")
            self.file_num = self._find_newest_plot()
            self.plot = list(
                np.load(
                    f"{self.plot_save_path}/{self._get_filename(self.file_num)}",
                    allow_pickle=True,
                )
            )
            # If the newest chunk is already full, begin a fresh one.
            if len(self.plot) == self.max_plot_size:
                del self.plot[:]
                self.file_num += 1
        else:
            self.plot_save_path.mkdir(parents=True, exist_ok=True)
            self.plot = []
            self.file_num = 1

    def _get_filename(self, number: int):
        """Return the filename of chunk `number`, e.g. ``loss_3.npy``."""
        return f"{self.name}_{number}.npy"

    def _find_newest_plot(self):
        """Return the highest chunk number currently present on disk."""
        newest_plot_number = 1
        while (self.plot_save_path / self._get_filename(newest_plot_number)).exists():
            newest_plot_number += 1
        return newest_plot_number - 1

    def record_plot_data(self, data_entry, verbose: bool):
        """Append `data_entry`; flush to disk when the chunk is full.

        Returns:
            The plot name if a chunk was saved by this call, otherwise None.
        """
        # Coerce to the expected datatype; for ndarray entries, `astype`
        # converts the element dtype (assumes data_entry has .astype — numpy).
        if not type(data_entry) == self.datatype:
            data_entry = data_entry.astype(self.datatype)
        if type(data_entry) == np.ndarray:
            assert data_entry.dtype == self.datatype
        else:
            assert type(data_entry) == self.datatype
        self.plot.append(data_entry)
        if len(self.plot) >= self.max_plot_size:
            self.save_plot(verbose)
            del self.plot[:]
            self.file_num += 1
            return self.name
        else:
            return None

    def save_plot(self, verbose: bool):
        """Write the in-memory entries to the current chunk file."""
        if verbose:
            print(
                f"Saving plot: {self.name} at {self.plot_save_path}/{self._get_filename(self.file_num)}"
            )
        np.save(
            f"{self.plot_save_path}/{self._get_filename(self.file_num)}",
            np.array(self.plot),
        )
class Plotter:
    """
    Object that handles Plots and Counts (all data that is saved from training runs
    except the neural network params) and neural network params to plot over time.
    """

    def __init__(
        self,
        network_params: namedtuple,
        save_path: Path,
        plots: List[Tuple],
        counts: List[Tuple],
        max_plot_size: int,
        param_plot_num: int,
        state_dim: Tuple,
        action_space: Optional[int] = None,
        discrim_params: Optional[DiscrimParams] = None,
        verbose: bool = False,
    ):
        """
        Args:
            network_params: Namedtuple of network layer sizes; an
                ActorCriticParams instance enables the critic-parameter plots.
            save_path: Directory under which all plots/counts/params are saved.
            plots: List of (name, datatype) tuples, one per plot to track.
            counts: List of (name, datatype) tuples, one per count to track.
            max_plot_size: Number of entries per saved plot chunk.
            param_plot_num: Number of network parameters to plot per layer.
            state_dim: Tuple specifying the dimension of the state space.
            action_space: Size of the action space (used for discriminator plots).
            discrim_params: Discriminator layer sizes, if a discriminator is used.
            verbose: If True, print a message on every save.
        """
        self.verbose = verbose
        self.using_value = type(network_params) == ActorCriticParams
        self.using_discrim = discrim_params is not None
        self.save_path = save_path
        self.param_save = self.save_path / "params"
        self.param_names_array = []
        self.param_x_array = []
        self.param_y_array = []
        # Resume a previously saved parameter selection if present; otherwise
        # sample a fresh selection and persist it for future restarts.
        if self.param_save.exists():
            self._load_params()
        else:
            self._determine_plotted_params(
                network_params, param_plot_num, state_dim, action_space, discrim_params
            )
            self._save_params()
        # NOTE: `+=` extends the caller's `plots` list in place.
        plots += [(name, np.ndarray) for name in self.param_names_array]
        self.plots = [
            Plot(name, datatype, save_path, max_plot_size) for name, datatype in plots
        ]
        self.counts = [Count(name, datatype, save_path) for name, datatype in counts]

    def _load_params(self):
        """Load the saved parameter-selection arrays from a previous run."""
        self.param_names_array = np.load(f"{self.param_save}/param_names_array.npy")
        self.param_x_array = np.load(f"{self.param_save}/param_x_array.npy")
        self.param_y_array = np.load(f"{self.param_save}/param_y_array.npy")

    def _save_params(self):
        """
        Save params (NOT PLOTS) so param plots can pick up where they left off when
        restarting an interrupted training run.
        """
        self.param_save.mkdir(parents=True)
        np.save(f"{self.param_save}/param_names_array.npy", self.param_names_array)
        np.save(f"{self.param_save}/param_x_array.npy", self.param_x_array)
        np.save(f"{self.param_save}/param_y_array.npy", self.param_y_array)

    def _determine_plotted_params(
        self,
        network_params: namedtuple,
        param_plot_num: int,
        state_dim: Tuple,
        action_space: Optional[int],
        discrim_params: Optional[DiscrimParams],
    ):
        """
        Randomly chooses neural network parameters to be plotted. The number from each
        layer which are plotted is `param_plot_num`.

        Args:
            network_params: Network params namedtuple. Specifies number of layers etc.
            param_plot_num: Number of params to plot per layer.
            state_dim: Tuple specifying the dimension of the state space.
            action_space: Size of the action space (discriminator input only).
            discrim_params: Discriminator layer sizes, if a discriminator is used.
        """
        # Layers below num_shared belong to the shared trunk (actor/critic).
        num_shared = (
            network_params.num_shared_layers
            if (self.using_value and network_params.num_shared_layers is not None)
            else 0
        )
        prev_layer_size = np.prod(state_dim)
        for count, layer in enumerate(network_params.actor_layers):
            layer_type = "shared" if count < num_shared else "actor"
            # The factor of 2 presumably skips activation modules between
            # weight layers in the state_dict naming — TODO confirm.
            layer_num = (
                int(2 * (count))
                if count < num_shared
                else int(2 * (count - num_shared))
            )
            layer_name = f"{layer_type}_layers.{layer_num}.weight"
            self._sample_params(layer_name, layer, prev_layer_size, param_plot_num)
            prev_layer_size = layer
        if self.using_value:
            prev_layer_size = (
                np.prod(state_dim)
                if num_shared == 0
                else network_params.critic_layers[num_shared - 1]
            )
            for count, layer in enumerate(network_params.critic_layers[num_shared:]):
                layer_name = f"critic_layers.{int(2 * count)}.weight"
                self._sample_params(layer_name, layer, prev_layer_size, param_plot_num)
                prev_layer_size = layer
        elif self.using_discrim:
            # Discriminator consumes state and action concatenated.
            prev_layer_size = np.prod(state_dim) + action_space
            for count, layer in enumerate(discrim_params.hidden_layers):
                layer_name = f"discrim_layers.{int(2 * count)}.weight"
                self._sample_params(layer_name, layer, prev_layer_size, param_plot_num)
                prev_layer_size = layer
        self.param_names_array = np.array(self.param_names_array)
        self.param_x_array = np.array(self.param_x_array)
        self.param_y_array = np.array(self.param_y_array)

    def determine_demo_nums(self, demo_path: Path, num_demos) -> np.ndarray:
        """
        Only used for imitation learning to pick demo numbers to use.

        Args:
            demo_path: The path to the demo files.
            num_demos: The number of demos to use.
        """
        demo_nums_save = self.param_save / "demo_nums.npy"
        # Reuse the previous random demo selection if one was saved.
        if demo_nums_save.exists():
            demo_nums = np.load(f"{demo_nums_save}")
        else:
            demo_nums = np.random.choice(
                os.listdir(f"{demo_path}"), num_demos, replace=False
            )
            np.save(f"{demo_nums_save}", demo_nums)
        print(f"Number of demos: {len(demo_nums)}")
        print(f"Demos used: {demo_nums}")
        return demo_nums

    def _sample_params(self, layer_name, layer, prev_layer_size, param_plot_num):
        """Record `param_plot_num` random (x, y) weight indices for one layer."""
        self.param_names_array.append(layer_name)
        self.param_x_array.append(
            np.random.randint(low=0, high=layer, size=param_plot_num)
        )
        self.param_y_array.append(
            np.random.randint(low=0, high=prev_layer_size, size=param_plot_num)
        )

    def get_param_plot_nums(self):
        """Return (names, x indices, y indices) of the tracked parameters."""
        return self.param_names_array, self.param_x_array, self.param_y_array

    def record_data(self, data_dict: Dict):
        """
        Records data - either Plots or Counts.

        Args:
            data_dict: dictionary of name: data for each plot/count to be updated.

        Raises:
            ValueError: If a name matches no registered plot or count.
        """
        saved_plots = []
        for name, data in data_dict.items():
            recorded = False
            # Plots take priority; fall back to counts on no match.
            for plot in self.plots:
                if plot.name == name:
                    saved_plot = plot.record_plot_data(data, self.verbose)
                    recorded = True
                    if saved_plot is not None:
                        saved_plots.append(saved_plot)
            if not recorded:
                for count in self.counts:
                    if count.name == name:
                        count.record_count(data)
                        recorded = True
            if not recorded:
                raise ValueError("Name doesn't match any registered plots or counts")
        if not saved_plots == []:
            print(f"Saving plots: {saved_plots}")

    def save_plots(self):
        """Saves data - both plots and counts."""
        for plot in self.plots:
            plot.save_plot(self.verbose)
        for count in self.counts:
            count.save_count(self.verbose)

    def get_count(self, count_name: str):
        """
        Returns value of requested count.

        Args:
            count_name: Name of the count to be retrieved.

        Returns:
            The value of the count with `count_name`.

        Raises:
            FileNotFoundError: If no count with that name is registered.
        """
        for count in self.counts:
            if count.name == count_name:
                return count.value
        raise FileNotFoundError("Count with name `count_name` has not been found.")
| [
"numpy.load",
"numpy.save",
"numpy.random.randint",
"numpy.array",
"os.listdir",
"numpy.prod"
] | [((1316, 1362), 'numpy.save', 'np.save', (['f"""{self.count_save_path}"""', 'self.value'], {}), "(f'{self.count_save_path}', self.value)\n", (1323, 1362), True, 'import numpy as np\n'), ((5111, 5162), 'numpy.load', 'np.load', (['f"""{self.param_save}/param_names_array.npy"""'], {}), "(f'{self.param_save}/param_names_array.npy')\n", (5118, 5162), True, 'import numpy as np\n'), ((5193, 5240), 'numpy.load', 'np.load', (['f"""{self.param_save}/param_x_array.npy"""'], {}), "(f'{self.param_save}/param_x_array.npy')\n", (5200, 5240), True, 'import numpy as np\n'), ((5271, 5318), 'numpy.load', 'np.load', (['f"""{self.param_save}/param_y_array.npy"""'], {}), "(f'{self.param_save}/param_y_array.npy')\n", (5278, 5318), True, 'import numpy as np\n'), ((5564, 5639), 'numpy.save', 'np.save', (['f"""{self.param_save}/param_names_array.npy"""', 'self.param_names_array'], {}), "(f'{self.param_save}/param_names_array.npy', self.param_names_array)\n", (5571, 5639), True, 'import numpy as np\n'), ((5649, 5716), 'numpy.save', 'np.save', (['f"""{self.param_save}/param_x_array.npy"""', 'self.param_x_array'], {}), "(f'{self.param_save}/param_x_array.npy', self.param_x_array)\n", (5656, 5716), True, 'import numpy as np\n'), ((5726, 5793), 'numpy.save', 'np.save', (['f"""{self.param_save}/param_y_array.npy"""', 'self.param_y_array'], {}), "(f'{self.param_save}/param_y_array.npy', self.param_y_array)\n", (5733, 5793), True, 'import numpy as np\n'), ((6662, 6680), 'numpy.prod', 'np.prod', (['state_dim'], {}), '(state_dim)\n', (6669, 6680), True, 'import numpy as np\n'), ((8097, 8129), 'numpy.array', 'np.array', (['self.param_names_array'], {}), '(self.param_names_array)\n', (8105, 8129), True, 'import numpy as np\n'), ((8160, 8188), 'numpy.array', 'np.array', (['self.param_x_array'], {}), '(self.param_x_array)\n', (8168, 8188), True, 'import numpy as np\n'), ((8219, 8247), 'numpy.array', 'np.array', (['self.param_y_array'], {}), '(self.param_y_array)\n', (8227, 8247), True, 'import numpy 
as np\n'), ((3516, 3535), 'numpy.array', 'np.array', (['self.plot'], {}), '(self.plot)\n', (3524, 3535), True, 'import numpy as np\n'), ((8668, 8696), 'numpy.load', 'np.load', (['f"""{demo_nums_save}"""'], {}), "(f'{demo_nums_save}')\n", (8675, 8696), True, 'import numpy as np\n'), ((8853, 8892), 'numpy.save', 'np.save', (['f"""{demo_nums_save}"""', 'demo_nums'], {}), "(f'{demo_nums_save}', demo_nums)\n", (8860, 8892), True, 'import numpy as np\n'), ((9200, 9257), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'layer', 'size': 'param_plot_num'}), '(low=0, high=layer, size=param_plot_num)\n', (9217, 9257), True, 'import numpy as np\n'), ((9318, 9385), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'prev_layer_size', 'size': 'param_plot_num'}), '(low=0, high=prev_layer_size, size=param_plot_num)\n', (9335, 9385), True, 'import numpy as np\n'), ((590, 624), 'numpy.load', 'np.load', (['f"""{self.count_save_path}"""'], {}), "(f'{self.count_save_path}')\n", (597, 624), True, 'import numpy as np\n'), ((7261, 7279), 'numpy.prod', 'np.prod', (['state_dim'], {}), '(state_dim)\n', (7268, 7279), True, 'import numpy as np\n'), ((8772, 8798), 'os.listdir', 'os.listdir', (['f"""{demo_path}"""'], {}), "(f'{demo_path}')\n", (8782, 8798), False, 'import os\n'), ((732, 785), 'numpy.load', 'np.load', (['f"""{self.count_save_path}"""'], {'allow_pickle': '(True)'}), "(f'{self.count_save_path}', allow_pickle=True)\n", (739, 785), True, 'import numpy as np\n'), ((7751, 7769), 'numpy.prod', 'np.prod', (['state_dim'], {}), '(state_dim)\n', (7758, 7769), True, 'import numpy as np\n')] |
"""Data loader"""
import random
import numpy as np
import os
import sys
from tqdm import tqdm
import torch
from bert import BertTokenizer
import json
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
# torch.set_printoptions(threshold=sys.maxsize)
# WHITESPACE_PLACEHOLDER = ' □ '
qmark_placeholder = '[unused21]'  # reserved BERT token standing in for the SPARQL variable ?x
uri_placeholder = '[unused22]'  # reserved BERT token standing in for ?uri
def spo_list2triplets_dict(spo_list):
    """Group each subject's (predicate, object) pairs into a dict.

    Subjects and objects are stripped and lower-cased; predicates are kept
    verbatim. Returns ``{subject: [(predicate, object), ...]}``.
    """
    grouped = {}
    for triple in spo_list:
        subject = triple['subject'].strip().lower()
        predicate = triple['predicate']
        obj = triple['object'].strip().lower()
        grouped.setdefault(subject, []).append((predicate, obj))
    return grouped
def spo_list2triplets(spo_list):
    """Return the set of normalised (subject, predicate, object) triplets.

    Subjects and objects are stripped and lower-cased; predicates are kept
    verbatim. Duplicates collapse because the result is a set.
    """
    return {
        (spo['subject'].strip().lower(), spo['predicate'], spo['object'].strip().lower())
        for spo in spo_list
    }
def replace_placeholder(text):
    """Swap the SPARQL variable markers for reserved BERT vocab tokens.

    ``?x`` becomes the [unused21] token and ``?uri`` the [unused22] token.
    (Original note: uri, for the first 3 datasets uri, ?y ---> uri.)
    """
    with_qmark = text.replace('?x', qmark_placeholder)
    return with_qmark.replace('?uri', uri_placeholder)
def replace_placeholder_rnn(text):
    """Swap the SPARQL variable markers for plain word placeholders (RNN vocab)."""
    for marker, stand_in in (("?x", "xxxxx"), ("?uri", "yyyyy")):
        text = text.replace(marker, stand_in)
    return text
import unicodedata
from nltk.tokenize.treebank import TreebankWordTokenizer
class NLTKTokenizer(object):
    """Treebank-based tokenizer with a pluggable lookup vocabulary.

    Mirrors the parts of the BERT tokenizer interface the data loader uses
    (``tokenize`` and ``convert_tokens_to_ids``). The vocabulary is built
    externally and installed via ``set_vocab``.
    """

    def __init__(self):
        self.tokenizer = TreebankWordTokenizer()
        self.vocab_size = 0  # populated later by set_vocab()
        self.word2idx = {}
        self.idx2word = {}

    def tokenize(self, text):
        """Lower-case and tokenize `text`, returning a list of tokens."""
        return self.processed_text(text).split()

    def set_vocab(self, vocab_size, word2idx, idx2word):
        """Install an externally built vocabulary (see DataLoader.build_vocab)."""
        self.vocab_size = vocab_size
        self.word2idx = word2idx
        self.idx2word = idx2word

    def convert_tokens_to_ids(self, tokens):
        """Map each token to its vocabulary id, using the ``<unk>`` id as fallback."""
        ids = []
        for tok in tokens:
            if tok in self.word2idx:
                ids.append(self.word2idx[tok])
            else:
                ids.append(self.word2idx["<unk>"])
        return ids

    def processed_text(self, text, to_strip_accents=False):
        """Normalise `text` and return the space-joined Treebank tokens.

        Args:
            text: Raw input string.
            to_strip_accents: If True, also strip combining accent marks.
        """
        text = text.replace('\\\\', '')
        if to_strip_accents:
            # BUG FIX: previously called the bare name `strip_accents`, which
            # does not exist at module level (NameError at runtime); the
            # method lives on this class, so call it via `self`.
            stripped = self.strip_accents(text.lower())
        else:
            stripped = text.lower()
        toks = self.tokenizer.tokenize(stripped)
        return " ".join(toks)

    def strip_accents(self, text):
        """Remove combining accent marks, e.g. ``café`` -> ``cafe``."""
        return ''.join(c for c in unicodedata.normalize('NFKD', text)
                       if unicodedata.category(c) != 'Mn')
class DataLoader(object):
    """Reads relation-extraction data and produces batched training tensors.

    Samples are read from ``<data_dir>/<split>_data.json`` (one JSON object
    per line) and converted into subject-extraction and object/predicate-
    extraction examples (head/tail pointer targets) for training.
    """

    def __init__(self, args):
        """
        Args:
            args: Namespace-like object with `data_dir`, `encoder_type`
                ("bert" or "rnn"), `bert_model_dir`, `max_len`, `device` and
                `batch_size`. `args.idx2pre` is set as a side effect.
        """
        self.data_dir = args.data_dir
        self.pre2idx, self.idx2pre = self.load_predicates()
        args.idx2pre = self.idx2pre
        self.encoder_type = args.encoder_type
        if self.encoder_type == "bert":
            self.tokenizer = BertTokenizer.from_pretrained(args.bert_model_dir, do_lower_case=True)
        elif self.encoder_type == "rnn":
            self.tokenizer = NLTKTokenizer()
        self.max_len = args.max_len
        self.device = args.device
        self.batch_size = args.batch_size
        self.vocab_size = 0
        self.word2idx = {}
        self.idx2word = {}
        self.build_vocab(["train", "test"])

    def load_predicates(self):
        """Read the predicate inventory from ``<data_dir>/schemas``.

        Returns:
            (pre2idx, idx2pre) dicts; index 0 is the 'Nan' placeholder.
        """
        pres = ['Nan']
        with open(os.path.join(self.data_dir, 'schemas'), 'r', encoding='utf-8') as f:
            for i, line in enumerate(f):
                pre = json.loads(line)['predicate']
                if pre not in pres:
                    pres.append(pre)
        pre2idx = {pre:idx for idx, pre in enumerate(pres)}
        idx2pre = {idx:pre for idx, pre in enumerate(pres)}
        return pre2idx, idx2pre

    def build_vocab(self, data_types):
        """Build the word vocabulary (only used by the RNN tokenizer).

        Args:
            data_types: Splits whose texts contribute to the vocabulary.
        """
        print("building vocabulary ...")
        vocab_size = 0
        word2idx = {}
        idx2word = {}
        # add special tokens first so they get stable, low ids
        special_tokens = ["<pad>", "<unk>", "[CLS]", "[SEP]", "xxxxx", "yyyyy"]
        for word in special_tokens:
            if word not in word2idx:
                word2idx[word] = vocab_size
                idx2word[vocab_size] = word
                vocab_size += 1
        for data_type in data_types:
            with open(os.path.join(self.data_dir, "{}_data.json".format(data_type)), 'r', encoding='utf-8') as f:
                for line in tqdm(f):
                    sample = json.loads(line)
                    for word in self.tokenizer.tokenize(sample['text'].lower()):
                        if word not in word2idx:
                            word2idx[word] = vocab_size
                            idx2word[vocab_size] = word
                            vocab_size += 1
        self.vocab_size = vocab_size
        self.word2idx = word2idx
        self.idx2word = idx2word
        self.tokenizer.set_vocab(vocab_size, word2idx, idx2word)
        print("Done.")

    def load_data(self, data_type, max_len=300, repeat_multi_sub=False, encoder_type="bert"):
        """Load one split and build span-annotated samples.

        BUG FIX: removed leftover debug statements in the "bert" branch —
        notably ``print(hello)``, which raised NameError and made that code
        path unusable.

        Args:
            data_type: Split name, e.g. "train"; test-like splits carry no
                spo_list and are loaded without gold spans.
            max_len: Maximum number of characters of each text to keep.
            repeat_multi_sub: If True, emit one sample per subject instead
                of one sample per sentence.
            encoder_type: "bert" or "rnn" tokenization.

        Returns:
            List of sample dicts with subject/object span annotations.
        """
        data = []
        with open(os.path.join(self.data_dir, '%s_data.json'%data_type), 'r', encoding='utf-8') as f:
            for line in tqdm(f):
                data_sample = {'sub': {'tokens': [], 'spans': ([], []), 'weight': 0}, 'obj': {}, 'gold_triplets': set(), 'sent': None, 'id': None}
                sample = json.loads(line)
                data_sample['sent'] = sample['text']
                data_sample['id'] = sample['id']
                text = sample['text'].lower()[:max_len]
                # Test-like splits have no annotations.
                spo_list = sample['spo_list'] if data_type not in ['test', 'test_debug', 'test_final', 'corrected_questions-verb-copy', 'corrected_questions-corrected-copy'] else []
                triplets_dict = spo_list2triplets_dict(spo_list)
                data_sample['gold_triplets'] = spo_list2triplets(spo_list)
                if encoder_type == "bert":
                    tokens = ['[CLS]'] + self.tokenizer.tokenize(replace_placeholder(text), inference=True) + ['[SEP]']
                elif encoder_type == "rnn":
                    tokens = ['[CLS]'] + self.tokenizer.tokenize(replace_placeholder_rnn(text)) + ['[SEP]']
                data_sample['sub']['tokens'] = tokens
                data_sample['sub']['weight'] = len(spo_list)
                used_spans = set()
                # Skip unannotated training sentences; keep test sentences.
                if len(spo_list) == 0 and data_type not in ['test', 'test_debug', 'test_final','corrected_questions-verb-copy', 'corrected_questions-corrected-copy']:
                    continue
                else:
                    # Mark the head/tail token indices of every subject.
                    for sub in triplets_dict:
                        if encoder_type == "bert":
                            sub_tokens = self.tokenizer.tokenize(replace_placeholder(sub), inference=True)
                        elif encoder_type == "rnn":
                            sub_tokens = self.tokenizer.tokenize(replace_placeholder_rnn(sub))
                        used_spans, span = self._find_span(used_spans, tokens, sub_tokens)
                        if span:
                            assert span[0] < len(tokens) and span[1] > 0
                            data_sample['sub']['spans'][0].append(span[0])
                            data_sample['sub']['spans'][1].append(span[1]-1)
                    # Build the object/predicate queries, one per subject.
                    for sub in triplets_dict:
                        if repeat_multi_sub:
                            data_sample['obj'] = {}
                        data_sample['obj'][sub] = {'query_tokens': [], 'token_types': [], 'spans': ([], []), 'weight': 0}
                        if encoder_type == "bert":
                            query_tokens_sub = self.tokenizer.tokenize(replace_placeholder(sub), inference=True)
                        elif encoder_type == "rnn":
                            query_tokens_sub = self.tokenizer.tokenize(replace_placeholder_rnn(sub))
                        # Query layout: [CLS] subject [SEP] sentence tokens.
                        query_tokens = ['[CLS]'] + query_tokens_sub + ['[SEP]'] + tokens[1:]
                        token_types = [0] + [0]*len(query_tokens_sub) + [0] + [1]*len(tokens[1:])
                        assert len(query_tokens) == len(token_types)
                        data_sample['obj'][sub]['query_tokens'] = query_tokens
                        data_sample['obj'][sub]['weight'] = len(triplets_dict[sub])
                        data_sample['obj'][sub]['token_types'] = token_types
                        used_spans = set()  # reset span bookkeeping per query
                        for pre, obj in triplets_dict[sub]:
                            if encoder_type == "bert":
                                obj_tokens = self.tokenizer.tokenize(replace_placeholder(obj), inference=True)
                            elif encoder_type == "rnn":
                                obj_tokens = self.tokenizer.tokenize(replace_placeholder_rnn(obj))
                            used_spans, span = self._find_span(used_spans, query_tokens, obj_tokens)
                            if span:
                                assert span[0] < len(query_tokens) and span[1] > 0
                                # Head index is paired with the predicate id.
                                data_sample['obj'][sub]['spans'][0].append((span[0], self.pre2idx[pre]))
                                data_sample['obj'][sub]['spans'][1].append(span[1]-1)
                        if repeat_multi_sub:
                            data.append(data_sample)
                if not repeat_multi_sub:
                    data.append(data_sample)
        return data

    def data_iterator(self, data, batch_size, seed=None, is_train=False, shuffle=False):
        """Yield padded tensor batches for the subject and object tasks.

        BUG FIX: removed leftover ``print``/``input()`` debug statements that
        blocked on user input for every batch.

        Args:
            data: List of samples from `load_data`.
            batch_size: Number of samples per batch (remainder is dropped).
            seed: Shuffle seed (only used when `shuffle` is True).
            is_train: If True, yield (sub-task, obj-task) label tensors too.
            shuffle: If True, iterate the data in a seeded random order.

        Yields:
            If `is_train`: ((tokens, sub_heads, sub_tails, sub_weights),
            (query_tokens, token_types, obj_heads, obj_tails, obj_weights)).
            Otherwise just the padded token tensor.
        """
        # make a list that decides the order in which we go over the data -
        # this avoids explicit shuffling of data
        data_size = len(data)
        order = list(range(data_size))
        if shuffle:
            random.seed(seed)
            random.shuffle(order)
        for i in range(data_size // batch_size):
            batch_data = [data[idx] for idx in order[i*batch_size: (i+1)*batch_size]]
            # subject task: pad to the longest sentence in the batch
            batch_sub_lengths = [len(data_sample['sub']['tokens']) for data_sample in batch_data]
            max_len_sub_tokens = max(batch_sub_lengths)
            batch_tokens = np.zeros((batch_size, max_len_sub_tokens))
            if is_train:
                batch_sub_heads = np.zeros((batch_size, max_len_sub_tokens))
                batch_sub_tails = np.zeros((batch_size, max_len_sub_tokens))
                batch_sub_weights = np.zeros(batch_size)
            # object task: sample one subject per sentence for its query
            batch_subs = [random.choice(list(data_sample['obj'].keys())) for data_sample in batch_data]
            batch_obj_lengths = [len(data_sample['obj'][batch_subs[i]]['query_tokens']) for i, data_sample in enumerate(batch_data)]
            max_len_obj_tokens = max(batch_obj_lengths)
            batch_query_tokens = np.zeros((batch_size, max_len_obj_tokens))
            batch_token_types = np.zeros((batch_size, max_len_obj_tokens))
            batch_obj_heads = np.zeros((batch_size, max_len_obj_tokens, len(self.pre2idx)))
            batch_obj_tails = np.zeros((batch_size, max_len_obj_tokens))
            batch_obj_weights = np.zeros(batch_size)
            for i, data_sample in enumerate(batch_data):
                batch_tokens[i, :batch_sub_lengths[i]] = self.tokenizer.convert_tokens_to_ids(data_sample['sub']['tokens'])
                if is_train:
                    batch_sub_heads[i, data_sample['sub']['spans'][0]] = 1
                    batch_sub_tails[i, data_sample['sub']['spans'][1]] = 1
                    batch_sub_weights[i] = data_sample['sub']['weight']
                    # object/predicate task labels for the sampled subject
                    sub = batch_subs[i]
                    batch_query_tokens[i, :batch_obj_lengths[i]] = self.tokenizer.convert_tokens_to_ids(data_sample['obj'][sub]['query_tokens'])
                    batch_token_types[i, :batch_obj_lengths[i]] = data_sample['obj'][sub]['token_types']
                    batch_obj_heads[i, [tup[0] for tup in data_sample['obj'][sub]['spans'][0]], [tup[1] for tup in data_sample['obj'][sub]['spans'][0]]] = 1
                    batch_obj_tails[i, data_sample['obj'][sub]['spans'][1]] = 1
                    batch_obj_weights[i] = data_sample['obj'][sub]['weight']
            # to tensor
            batch_tokens = torch.tensor(batch_tokens, dtype=torch.long).to(self.device)
            if is_train:
                batch_sub_heads = torch.tensor(batch_sub_heads, dtype=torch.float).to(self.device)
                batch_sub_tails = torch.tensor(batch_sub_tails, dtype=torch.float).to(self.device)
                batch_sub_weights = torch.tensor(batch_sub_weights, dtype=torch.float).to(self.device)
                batch_query_tokens = torch.tensor(batch_query_tokens, dtype=torch.long).to(self.device)
                batch_token_types = torch.tensor(batch_token_types, dtype=torch.long).to(self.device)
                batch_obj_heads = torch.tensor(batch_obj_heads, dtype=torch.float).to(self.device)
                batch_obj_tails = torch.tensor(batch_obj_tails, dtype=torch.float).to(self.device)
                batch_obj_weights = torch.tensor(batch_obj_weights, dtype=torch.float).to(self.device)
                yield (batch_tokens, batch_sub_heads, batch_sub_tails, batch_sub_weights), \
                      (batch_query_tokens, batch_token_types, batch_obj_heads, batch_obj_tails, batch_obj_weights)
            else:
                yield batch_tokens

    def _find_span(self, used_spans, tokens, entity_tokens):
        """Find the first occurrence of `entity_tokens` not yet claimed.

        Returns:
            (updated used_spans, span) where span is a (start, end) tuple
            (end exclusive) or None when no unclaimed occurrence exists.
        """
        spans = self._find_all_spans(tokens, entity_tokens)
        for span in spans:
            if not self._has_intersection(used_spans, span):
                used_spans.add(span)
                return used_spans, span
        return used_spans, None

    def _has_intersection(self, used_spans, span):
        """Return True if `span` partially overlaps any span in `used_spans`.

        An exact match (full overlap) does NOT count as an intersection.
        """
        for used_span in used_spans:
            used_span_set = set(range(used_span[0], used_span[1]))
            span_set = set(range(span[0], span[1]))
            if 0 < len(used_span_set.intersection(span_set)) < max(len(span_set), len(used_span_set)):
                return True
        return False

    def _find_all_spans(self, tokens, entity_tokens):
        """Return every (start, end) position of `entity_tokens` in `tokens`."""
        res = []
        for i in range(len(tokens)-len(entity_tokens)+1):
            if tokens[i:i+len(entity_tokens)] == entity_tokens:
                res.append((i, i+len(entity_tokens)))
        return res
def data_loader_test():
    """Manual smoke test: load a split and step through batches interactively.

    Requires the data directory and (for "bert") the BERT model files on
    disk; each batch waits for an Enter keypress so samples can be
    inspected by hand.
    """
    class ARGS:
        # Minimal stand-in for the argparse namespace DataLoader expects.
        data_dir = 'lic-corrected-70-semantic-embedd-v4' # lic-corrected-70-semantic-embedd-v4; WebQSP_0824_67
        # data_dir = 'data'
        max_len = 100
        device = 'cpu'
        bert_model_dir = 'uncased_L-12_H-768_A-12'
        batch_size = 10
        encoder_type = 'rnn'
    dl = DataLoader(ARGS())
    dev_data = dl.load_data('test_re_pp', repeat_multi_sub=False, encoder_type='rnn')
    # dev_data = dl.load_data('dev')
    dg = dl.data_iterator(dev_data, 1, 5, is_train=True, shuffle=False)
    for i, (batch_sub, batch_obj) in enumerate(dg):
        input()  # pause so each sample can be inspected
        print(i, dev_data[1*i])
        # for tmp in batch_sub:
        #     print(tmp[0], tmp.size())
        # for tmp in batch_obj:
        #     print(tmp[0], tmp.size())
    # dg = dl.data_iterator(dev_data, 1, 6, is_train=True, shuffle=False)
    # for i, (batch_sub, batch_obj) in enumerate(dg):
    #     input()
    #     print(dev_data[1 * i])
if __name__ == "__main__":
    data_loader_test()  # manual smoke test; needs the data files on disk
# tokens = self.tokenizer.tokenize(replace_placeholder(text), inference=True) | [
"unicodedata.normalize",
"tqdm.tqdm",
"bert.BertTokenizer.from_pretrained",
"json.loads",
"random.shuffle",
"unicodedata.category",
"numpy.zeros",
"random.seed",
"nltk.tokenize.treebank.TreebankWordTokenizer",
"os.path.join",
"torch.tensor"
] | [((1464, 1487), 'nltk.tokenize.treebank.TreebankWordTokenizer', 'TreebankWordTokenizer', ([], {}), '()\n', (1485, 1487), False, 'from nltk.tokenize.treebank import TreebankWordTokenizer\n'), ((2868, 2938), 'bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_model_dir'], {'do_lower_case': '(True)'}), '(args.bert_model_dir, do_lower_case=True)\n', (2897, 2938), False, 'from bert import BertTokenizer\n'), ((5140, 5147), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (5144, 5147), False, 'from tqdm import tqdm\n'), ((10332, 10349), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10343, 10349), False, 'import random\n'), ((10362, 10383), 'random.shuffle', 'random.shuffle', (['order'], {}), '(order)\n', (10376, 10383), False, 'import random\n'), ((10830, 10872), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len_sub_tokens)'], {}), '((batch_size, max_len_sub_tokens))\n', (10838, 10872), True, 'import numpy as np\n'), ((3351, 3389), 'os.path.join', 'os.path.join', (['self.data_dir', '"""schemas"""'], {}), "(self.data_dir, 'schemas')\n", (3363, 3389), False, 'import os\n'), ((4369, 4376), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (4373, 4376), False, 'from tqdm import tqdm\n'), ((5032, 5087), 'os.path.join', 'os.path.join', (['self.data_dir', "('%s_data.json' % data_type)"], {}), "(self.data_dir, '%s_data.json' % data_type)\n", (5044, 5087), False, 'import os\n'), ((5321, 5337), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5331, 5337), False, 'import json\n'), ((10932, 10974), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len_sub_tokens)'], {}), '((batch_size, max_len_sub_tokens))\n', (10940, 10974), True, 'import numpy as np\n'), ((11009, 11051), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len_sub_tokens)'], {}), '((batch_size, max_len_sub_tokens))\n', (11017, 11051), True, 'import numpy as np\n'), ((11088, 11108), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (11096, 11108), True, 'import 
numpy as np\n'), ((11628, 11670), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len_obj_tokens)'], {}), '((batch_size, max_len_obj_tokens))\n', (11636, 11670), True, 'import numpy as np\n'), ((11707, 11749), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len_obj_tokens)'], {}), '((batch_size, max_len_obj_tokens))\n', (11715, 11749), True, 'import numpy as np\n'), ((11880, 11922), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len_obj_tokens)'], {}), '((batch_size, max_len_obj_tokens))\n', (11888, 11922), True, 'import numpy as np\n'), ((11959, 11979), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (11967, 11979), True, 'import numpy as np\n'), ((2466, 2501), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'text'], {}), "('NFKD', text)\n", (2487, 2501), False, 'import unicodedata\n'), ((3483, 3499), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3493, 3499), False, 'import json\n'), ((4407, 4423), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4417, 4423), False, 'import json\n'), ((13647, 13691), 'torch.tensor', 'torch.tensor', (['batch_tokens'], {'dtype': 'torch.long'}), '(batch_tokens, dtype=torch.long)\n', (13659, 13691), False, 'import torch\n'), ((2528, 2551), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (2548, 2551), False, 'import unicodedata\n'), ((13767, 13815), 'torch.tensor', 'torch.tensor', (['batch_sub_heads'], {'dtype': 'torch.float'}), '(batch_sub_heads, dtype=torch.float)\n', (13779, 13815), False, 'import torch\n'), ((13866, 13914), 'torch.tensor', 'torch.tensor', (['batch_sub_tails'], {'dtype': 'torch.float'}), '(batch_sub_tails, dtype=torch.float)\n', (13878, 13914), False, 'import torch\n'), ((13967, 14017), 'torch.tensor', 'torch.tensor', (['batch_sub_weights'], {'dtype': 'torch.float'}), '(batch_sub_weights, dtype=torch.float)\n', (13979, 14017), False, 'import torch\n'), ((14099, 14149), 'torch.tensor', 'torch.tensor', (['batch_query_tokens'], {'dtype': 
'torch.long'}), '(batch_query_tokens, dtype=torch.long)\n', (14111, 14149), False, 'import torch\n'), ((14202, 14251), 'torch.tensor', 'torch.tensor', (['batch_token_types'], {'dtype': 'torch.long'}), '(batch_token_types, dtype=torch.long)\n', (14214, 14251), False, 'import torch\n'), ((14302, 14350), 'torch.tensor', 'torch.tensor', (['batch_obj_heads'], {'dtype': 'torch.float'}), '(batch_obj_heads, dtype=torch.float)\n', (14314, 14350), False, 'import torch\n'), ((14401, 14449), 'torch.tensor', 'torch.tensor', (['batch_obj_tails'], {'dtype': 'torch.float'}), '(batch_obj_tails, dtype=torch.float)\n', (14413, 14449), False, 'import torch\n'), ((14502, 14552), 'torch.tensor', 'torch.tensor', (['batch_obj_weights'], {'dtype': 'torch.float'}), '(batch_obj_weights, dtype=torch.float)\n', (14514, 14552), False, 'import torch\n')] |
from typing import Tuple
import networkx as nx
import numpy as np
import torch
from torch import LongTensor, Tensor
import torch_sparse as tsparse
import torch_scatter as tscatter
import torch_geometric
from torch_geometric.data.data import Data
import tqdm
from . import eigenpairs
def check_data(pos:torch.Tensor=None, edges:torch.Tensor=None, faces:torch.Tensor=None, float_type:type=None):
  """Validate mesh tensors; raise ValueError on any shape/dtype mismatch.

  :param pos: vertex positions, floating tensor of shape [n, 3]
  :param edges: edge index pairs, long tensor of shape [m, 2]
  :param faces: triangle vertex indices, long tensor of shape [#faces, 3]
  :param float_type: expected floating dtype of pos (defaults to pos.dtype)
  """
  if pos is not None:
    if not torch.is_floating_point(pos):
      raise ValueError("The vertices matrix must have floating point type!")
    if float_type is None: float_type = pos.dtype
    # BUG FIX: this condition used `and`, so the shape check was skipped
    # whenever the dtype matched -- which is always the case when float_type
    # defaults to pos.dtype above.
    if len(pos.shape) != 2 or pos.shape[1] != 3 or pos.dtype != float_type:
      raise ValueError("The vertices matrix must have shape [n,3] and type {}!".format(float_type))
  if edges is not None and (len(edges.shape) != 2 or edges.shape[1] != 2 or edges.dtype != torch.long):
    raise ValueError("The edge index matrix must have shape [m,2] and type long!")
  if faces is not None and (len(faces.shape) != 2 or faces.shape[1] != 3 or faces.dtype != torch.long):
    # message fixed: it previously said "edge index matrix" (copy-paste slip)
    raise ValueError("The faces matrix must have shape [#faces,3] and type long!")
def prediction(classifier:torch.nn.Module, x:torch.Tensor):
  """Run *classifier* on *x* and return the index of the largest logit."""
  return classifier(x).argmax()
def kNN(
    pos:torch.Tensor,
    edges:torch.LongTensor,
    neighbors_num:int=256,
    cutoff:int=3):
  """Collect `neighbors_num` graph neighbours per vertex via hop-limited
  shortest paths (limit `cutoff`) on the mesh edge graph.

  Returns an [n, neighbors_num] long tensor of vertex indices; raises
  RuntimeError when any vertex reaches fewer than `neighbors_num` vertices.
  """
  device = pos.device
  if len(pos.shape) != 2 or pos.shape[1] != 3:
    raise ValueError("The vertices matrix must have shape [n,3] and type float!")
  if len(edges.shape) != 2 or edges.shape[1] != 2 or edges.dtype != torch.long:
    raise ValueError("The edge index matrix must have shape [m,2] and type long!")
  n = pos.shape[0]
  k = neighbors_num
  edge_index = edges.cpu().clone().detach().numpy()  # networkx wants host-side data
  graph = nx.Graph()
  graph.add_nodes_from(range(n))
  graph.add_edges_from(edge_index)
  N = np.zeros([n, k], dtype=float)
  for node_idx, neighborhood in nx.all_pairs_shortest_path(graph, cutoff=cutoff):
    if len(neighborhood) < k:
      raise RuntimeError("Node {} has only {} neighbours, increase cutoff value!".format(node_idx, len(neighborhood)))
    # keep only the first k reachable vertices for this node
    for i, neighbour_idx in zip(range(k), neighborhood.keys()):
      N[node_idx, i] = neighbour_idx
  return torch.tensor(N, device=device, dtype=torch.long)
#-------------------------------------------------------------------------------------------------
def heat_kernel(eigvals:torch.Tensor, eigvecs:torch.Tensor, t:float) -> torch.Tensor:
  """Dense heat kernel Phi diag(exp(-t*lambda)) Phi^T from eigenpairs."""
  decay = torch.exp(-t*eigvals).view(1, -1)
  # scaling the columns first avoids materialising the diagonal matrix
  return (decay*eigvecs) @ eigvecs.t()
def diffusion_distance(eigvals:torch.Tensor, eigvecs:torch.Tensor, t:float):
  """Pairwise squared diffusion distances at diffusion time t.

  Uses d_t(i,j)^2 = k_2t(i,i) + k_2t(j,j) - 2*k_2t(i,j), where k is the heat
  kernel built from the Laplacian eigenpairs. Returns an [n, n] tensor.
  """
  # cleanup: the original also unpacked k and read device/dtype, all unused
  n = eigvecs.shape[0]
  hk = heat_kernel(eigvals, eigvecs, 2*t)
  diag = torch.diag(hk).repeat(n, 1)  # row i repeats the kernel diagonal
  return diag + diag.t() - 2*hk
def compute_dd_mse(pos, perturbed_pos, faces, K, t):
  """MSE between the diffusion-distance matrices of a mesh and a perturbed
  copy, both built from their first K Laplacian eigenpairs."""
  evals_ref, evecs_ref = eigenpairs(pos, faces, K)
  evals_per, evecs_per = eigenpairs(perturbed_pos, faces, K)
  dist_ref = diffusion_distance(evals_ref, evecs_ref, t)
  dist_per = diffusion_distance(evals_per, evecs_per, t)
  return torch.nn.functional.mse_loss(dist_ref, dist_per)
#----------------------------------------------------------------------------------
def tri_areas(pos, faces):
  """Area of every triangle: half the cross-product norm of two edges."""
  check_data(pos=pos, faces=faces)
  edge_a = pos[faces[:, 0], :] - pos[faces[:, 2], :]
  edge_b = pos[faces[:, 1], :] - pos[faces[:, 2], :]
  return 0.5 * torch.norm(torch.cross(edge_a, edge_b, dim=1), dim=1)
def pos_areas(pos, faces):
  """Barycentric vertex areas: every triangle donates a third of its area
  to each of its three corners."""
  check_data(pos=pos, faces=faces)
  n = pos.shape[0]
  third = tri_areas(pos, faces) / 3
  areas = torch.zeros(size=[n], device=third.device, dtype=third.dtype)
  for corner in range(3):
    areas = areas + tscatter.scatter_add(third, faces[:, corner], dim_size=n)
  return areas
#------------------------------------------------------------------------------
def tri_normals(pos, faces):
  """Unit normal of every triangle (edge cross product, normalised)."""
  check_data(pos=pos, faces=faces)
  edge_a = pos[faces[:, 0], :] - pos[faces[:, 2], :]
  edge_b = pos[faces[:, 1], :] - pos[faces[:, 2], :]
  raw = torch.cross(edge_a, edge_b, dim=1)
  return raw / raw.norm(p=2, dim=1, keepdim=True)
def pos_normals(pos, faces):
  """Unit vertex normals: sum of incident triangle normals, re-normalised."""
  check_data(pos=pos, faces=faces)
  n = pos.shape[0]
  fnormals = tri_normals(pos, faces)
  acc = torch.zeros(size=[n, 3], device=fnormals.device, dtype=fnormals.dtype)
  for corner in range(3):      # each face corner scatters into its vertex
    for axis in range(3):      # accumulate x/y/z components separately
      acc[:, axis] += tscatter.scatter_add(fnormals[:, axis], faces[:, corner], dim_size=n)
  return acc / acc.norm(p=2, dim=1, keepdim=True)
#-----------------------------------------------------------------------------
def l2_distance(pos, ppos, faces, normalize=False):
  """Area-weighted L2 distance between two embeddings of the same mesh.

  When *normalize* is True the result is divided by sqrt of the total area.
  """
  check_data(pos=pos, faces=faces)
  check_data(pos=ppos)
  areas = pos_areas(pos, faces)
  weighted = (pos - ppos) * torch.sqrt(areas.view(-1, 1))
  result = weighted.norm(p="fro")
  if normalize:
    result = result / areas.sum().sqrt()
  return result
#------------------------------------------------------------------------------
def least_square_meshes(pos:Tensor, edges:LongTensor) -> Tensor:
  """Least-squares-meshes smoothness energy ||L @ pos||^2 using the
  random-walk-normalised graph Laplacian of the edge set."""
  check_data(pos=pos, edges=edges)
  laplacian = torch_geometric.utils.get_laplacian(edges.t(), normalization="rw")
  # BUG FIX: was `pos.shape[2]`, which raises IndexError on the [n,3] vertex
  # matrix; the Laplacian is n x n with n = number of vertices.
  n = pos.shape[0]
  tmp = tsparse.spmm(*laplacian, n, n, pos)  # Least square Meshes problem
  return (tmp**2).sum()
#--------------------------------------------------------------------------------
def write_obj(pos:Tensor, faces:Tensor, file:str):
  """Write the mesh to a Wavefront OBJ file (face indices are 1-based)."""
  check_data(pos=pos, faces=faces)
  if file.split(".")[-1] != "obj":
    file = file + ".obj"  # add suffix if necessary
  vertices = pos.detach().cpu().clone().numpy()
  triangles = faces.detach().cpu().clone().numpy()
  with open(file, 'w') as f:
    f.write("# OBJ file\n")
    for v in vertices:
      f.write("v {} {} {}\n".format(v[0], v[1], v[2]))
    for tri in triangles:
      f.write("f")
      for vertex_idx in tri:
        f.write(" %d" % (vertex_idx + 1))
      f.write("\n")
def write_off(pos:Tensor, faces:Tensor, file:str):
  """Write the mesh to an OFF file: 'OFF', counts line, vertices, faces."""
  check_data(pos=pos, faces=faces)
  n, m = pos.shape[0], faces.shape[0]
  vertices = pos.detach().cpu().clone().numpy()
  triangles = faces.detach().cpu().clone().numpy()
  if file.split(".")[-1] != "off":
    file = file + ".off"  # add suffix if necessary
  with open(file, 'w') as f:
    f.write("OFF\n")
    f.write("{} {} 0\n".format(n, m))
    for v in vertices:
      f.write("{} {} {}\n".format(v[0], v[1], v[2]))
    for tri in triangles:
      f.write("3 {} {} {}\n".format(tri[0], tri[1], tri[2]))
#---------------------------------------
try:
  from knn_cuda import KNN
  def knn_grad(ref, query, n, k) -> torch.Tensor: #NOTE output tensor shape [n,k,3]
    """Differentiable kNN: ([n,k,3] query-to-neighbour offsets, indices).

    NOTE(review): `KNN(ref=ref, query=query)` constructs the KNN module with
    keyword args instead of calling an instance (compare `knn` below) --
    verify against the knn_cuda API before relying on this path.
    """
    ref = ref.view(1,n,3)
    query = query.view(1,n,3)
    d, I = KNN(ref=ref, query=query)
    diff = query.view(n,1,3) - ref[0, I.view(-1),:].view(n,k,3) #shape [n,k,3]
    return diff.view(n,k,3), I
  def knn(ref, query, n, k) -> torch.Tensor:
    """Plain kNN: returns ([n,k] distances, flattened [n*k] indices)."""
    ref = ref.view(1,n,3)
    query = query.view(1,n,3)
    d, I = KNN(k, transpose_mode=True)(ref=ref, query=query)
    return d.view(n,k), I.view(n*k)
  def chamfer(ref, query):
    """Symmetric chamfer distance: mean squared 1-NN offsets, both ways."""
    check_data(pos=ref)
    check_data(pos=query)
    n = ref.shape[0]
    nn_d1, nn_idx1 = knn_grad(ref=ref,query=query,n=n,k=1)
    nn_d2, nn_idx2 = knn_grad(ref=query,query=ref,n=n,k=1)
    chamfer1 = torch.bmm(nn_d1.view(n,1,3), nn_d1.view(n,3,1)).mean()
    chamfer2 = torch.bmm(nn_d2.view(n,1,3), nn_d2.view(n,3,1)).mean()
    # BUG FIX: returned `chamfer1 + chamfer1`, discarding the query->ref term
    return chamfer1+chamfer2
except ImportError as e:
  pass
"knn_cuda.KNN",
"torch_sparse.spmm",
"torch.is_floating_point",
"torch.nn.functional.mse_loss",
"numpy.zeros",
"torch.diag",
"torch.exp",
"networkx.Graph",
"torch_scatter.scatter_add",
"torch.zeros",
"torch.cross",
"torch.tensor",
"networkx.all_pairs_shortest_path"
] | [((1915, 1925), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1923, 1925), True, 'import networkx as nx\n'), ((2001, 2030), 'numpy.zeros', 'np.zeros', (['[n, k]'], {'dtype': 'float'}), '([n, k], dtype=float)\n', (2009, 2030), True, 'import numpy as np\n'), ((2041, 2089), 'networkx.all_pairs_shortest_path', 'nx.all_pairs_shortest_path', (['graph'], {'cutoff': 'cutoff'}), '(graph, cutoff=cutoff)\n', (2067, 2089), True, 'import networkx as nx\n'), ((2439, 2487), 'torch.tensor', 'torch.tensor', (['N'], {'device': 'device', 'dtype': 'torch.long'}), '(N, device=device, dtype=torch.long)\n', (2451, 2487), False, 'import torch\n'), ((3436, 3472), 'torch.nn.functional.mse_loss', 'torch.nn.functional.mse_loss', (['d1', 'd2'], {}), '(d1, d2)\n', (3464, 3472), False, 'import torch\n'), ((3958, 4025), 'torch.zeros', 'torch.zeros', ([], {'size': '[n]', 'device': 'triareas.device', 'dtype': 'triareas.dtype'}), '(size=[n], device=triareas.device, dtype=triareas.dtype)\n', (3969, 4025), False, 'import torch\n'), ((4433, 4459), 'torch.cross', 'torch.cross', (['v1', 'v2'], {'dim': '(1)'}), '(v1, v2, dim=1)\n', (4444, 4459), False, 'import torch\n'), ((4674, 4748), 'torch.zeros', 'torch.zeros', ([], {'size': '[n, 3]', 'device': 'trinormals.device', 'dtype': 'trinormals.dtype'}), '(size=[n, 3], device=trinormals.device, dtype=trinormals.dtype)\n', (4685, 4748), False, 'import torch\n'), ((5634, 5669), 'torch_sparse.spmm', 'tsparse.spmm', (['*laplacian', 'n', 'n', 'pos'], {}), '(*laplacian, n, n, pos)\n', (5646, 5669), True, 'import torch_sparse as tsparse\n'), ((4057, 4112), 'torch_scatter.scatter_add', 'tscatter.scatter_add', (['triareas', 'faces[:, i]'], {'dim_size': 'n'}), '(triareas, faces[:, i], dim_size=n)\n', (4077, 4112), True, 'import torch_scatter as tscatter\n'), ((7229, 7254), 'knn_cuda.KNN', 'KNN', ([], {'ref': 'ref', 'query': 'query'}), '(ref=ref, query=query)\n', (7232, 7254), False, 'from knn_cuda import KNN\n'), ((465, 493), 'torch.is_floating_point', 
'torch.is_floating_point', (['pos'], {}), '(pos)\n', (488, 493), False, 'import torch\n'), ((2797, 2820), 'torch.exp', 'torch.exp', (['(-t * eigvals)'], {}), '(-t * eigvals)\n', (2806, 2820), False, 'import torch\n'), ((3102, 3116), 'torch.diag', 'torch.diag', (['hk'], {}), '(hk)\n', (3112, 3116), False, 'import torch\n'), ((3765, 3791), 'torch.cross', 'torch.cross', (['v1', 'v2'], {'dim': '(1)'}), '(v1, v2, dim=1)\n', (3776, 3791), False, 'import torch\n'), ((4819, 4882), 'torch_scatter.scatter_add', 'tscatter.scatter_add', (['trinormals[:, j]', 'faces[:, i]'], {'dim_size': 'n'}), '(trinormals[:, j], faces[:, i], dim_size=n)\n', (4839, 4882), True, 'import torch_scatter as tscatter\n'), ((7494, 7521), 'knn_cuda.KNN', 'KNN', (['k'], {'transpose_mode': '(True)'}), '(k, transpose_mode=True)\n', (7497, 7521), False, 'from knn_cuda import KNN\n')] |
# coding:utf-8
import os
from numpy import *
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy.linalg import orth
from pylab import mpl
from dataloader import load_PIE, load_FRD
mpl.rcParams['font.sans-serif'] = ['SimHei']
datasets = 'FRD' # PIE or FRD
# 参数
feature_n = 101 # 需要的特征(最多)
min_feature = 10 # 需要的特征(最少)
gap = 10 # 最少到最多的间隔
min_train = 5 # 用于训练的最少样本个数
max_train = 15 # 用于训练的最多样本个数
# 测试=总数-训练
# Eigenface-style PCA via the small (rows x rows) Gram matrix trick.
def PCA(data, r):
    """Project *data* onto its top *r* principal components.

    Args:
        data: 2D array-like, one sample per row.
        r: number of principal components to keep.

    Returns:
        (final_data, data_mean, V_r): projected samples, the column-wise mean
        of the training data, and the orthonormal projection basis.
    """
    data = np.float32(np.mat(data))  # np.mat makes `*` below a matrix product
    rows, cols = np.shape(data)
    data_mean = np.mean(data, 0)  # column-wise mean
    A = data - np.tile(data_mean, (rows, 1))  # centre every sample
    # A^T A is the covariance matrix. A^T A and A A^T share the same NON-ZERO
    # eigenvalues, and A A^T (rows x rows) is much smaller, so diagonalise it
    # instead: if A A^T a = l a, then A^T A (A^T a) = l (A^T a).
    C = A * A.T
    D, V = np.linalg.eig(C)
    # eig does not sort its eigenvalues -- take the r largest explicitly
    indices = np.argsort(D)[::-1]
    V_r = V[:, indices[:r]]
    # explained-variance ratio of the kept components
    # (renamed from `sum`, which shadowed the builtin)
    explained = 0
    for i in range(r):
        explained += D[indices[i]]
    print('当选择%d个主成分时,其贡献率为%.3f' % (r, explained / D.sum()))
    V_r = A.T * V_r  # A^T * V_r are eigenvectors of A^T A
    V_r = orth(V_r)  # SVD-backed orthonormalisation, faster than eig on A^T A
    final_data = A * V_r
    return final_data, data_mean, V_r
# Face recognition experiment: sweep the number of principal components and
# report nearest-neighbour classification accuracy on held-out faces.
def face_rec(datasets='PIE'):
    """Train eigenface PCA + 1-NN on the chosen dataset ('PIE' or 'FRD'),
    print accuracy for a range of component counts, and finally show the
    mean face and the first ten eigenfaces of the last run."""
    for r in range(min_feature, feature_n, gap): # accuracy as a function of the number of components
        print("当选择%d个主成分时" % r)
        x_value = []
        y_value = []
        # loop over how many images per person go into the training set
        # for k in range(min_train, max_train + 1):
        for k in range(15,16):
            # load the dataset: train_size=k, test_size = IMG_PER_PEOPLE - k
            if datasets=='FRD':
                train_face, train_label, test_face, test_label, width, height = load_FRD(k=k) # pick k of the 20 images per person for training
            elif datasets=='PIE':
                train_face, train_label, test_face, test_label, width, height = load_PIE(ratio=0.5)
            # fit PCA on the training faces; data_mean is the mean face,
            # V_r the projection basis
            data_train_new, data_mean, V_r = PCA(train_face, r)
            num_train = data_train_new.shape[0] # number of training faces
            num_test = test_face.shape[0] # number of test faces
            temp_face = test_face - np.tile(data_mean, (num_test, 1)) # centre with the TRAINING mean, matching what PCA did
            data_test_new = temp_face * V_r # project test faces onto the same basis (num, features)
            # mat to array
            data_test_new = np.array(data_test_new)
            data_train_new = np.array(data_train_new)
            # evaluate accuracy with a 1-nearest-neighbour classifier
            true_num = 0
            for i in range(num_test):
                test_sample = data_test_new[i, :] # (features)
                diffMat = data_train_new - np.tile(test_sample, (num_train, 1)) # offset of every training face from this test face
                sqDiffMat = diffMat ** 2 # squared per-feature differences
                sqDistances = sqDiffMat.sum(axis=1) # row-wise sum -> squared Euclidean distances
                sortedDistIndices = sqDistances.argsort() # indices by ascending distance
                indexMin = sortedDistIndices[0] # index of the closest training face
                if train_label[indexMin] == test_label[i]:
                    true_num += 1
                else:
                    pass
            accuracy = float(true_num) / num_test
            x_value.append(k)
            y_value.append(round(accuracy, 2))
            print('当每个人选择%d张照片进行训练时,准确率为: %.2f%%' % (train_face.shape[0], accuracy * 100))
            print('训练集为%d, 测试集为%d,准确率为: %.2f%%' % (train_face.shape[0], test_face.shape[0], accuracy * 100))
    # the mean face / eigenfaces shown below come from the LAST iteration
    # show the mean face
    plt.imshow(np.array(data_mean.reshape(height, width)), cmap ='gray')
    plt.show()
    # show the first ten eigenfaces
    plt.figure()
    for i in range(10):
        plt.subplot(2, 5, i+1)
        title="Eigenface"+str(i+1)
        # rows, columns, index
        plt.imshow(np.real(V_r[:, i].reshape(height, width)), cmap ='gray')
        plt.title(title, fontsize=8)
        plt.xticks([])
        plt.yticks([])
    plt.show()
if __name__ == '__main__':
    # run with the dataset selected by the `datasets` flag at the top of the file
    face_rec(datasets)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yticks",
"numpy.linalg.eig",
"numpy.shape",
"dataloader.load_PIE",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"scipy.linalg.orth",
"numpy.tile",
"dataloader.load_FRD",
"matplo... | [((598, 612), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (606, 612), True, 'import numpy as np\n'), ((629, 645), 'numpy.mean', 'np.mean', (['data', '(0)'], {}), '(data, 0)\n', (636, 645), True, 'import numpy as np\n'), ((896, 912), 'numpy.linalg.eig', 'np.linalg.eig', (['C'], {}), '(C)\n', (909, 912), True, 'import numpy as np\n'), ((1230, 1239), 'scipy.linalg.orth', 'orth', (['V_r'], {}), '(V_r)\n', (1234, 1239), False, 'from scipy.linalg import orth\n'), ((496, 508), 'numpy.mat', 'np.mat', (['data'], {}), '(data)\n', (502, 508), True, 'import numpy as np\n'), ((671, 700), 'numpy.tile', 'np.tile', (['data_mean', '(rows, 1)'], {}), '(data_mean, (rows, 1))\n', (678, 700), True, 'import numpy as np\n'), ((3783, 3793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3791, 3793), True, 'import matplotlib.pyplot as plt\n'), ((3819, 3831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3829, 3831), True, 'import matplotlib.pyplot as plt\n'), ((4137, 4147), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4145, 4147), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2581), 'numpy.array', 'np.array', (['data_test_new'], {}), '(data_test_new)\n', (2566, 2581), True, 'import numpy as np\n'), ((2611, 2635), 'numpy.array', 'np.array', (['data_train_new'], {}), '(data_train_new)\n', (2619, 2635), True, 'import numpy as np\n'), ((3872, 3896), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(5)', '(i + 1)'], {}), '(2, 5, i + 1)\n', (3883, 3896), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4074), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(8)'}), '(title, fontsize=8)\n', (4055, 4074), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4101), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4097, 4101), True, 'import matplotlib.pyplot as plt\n'), ((4114, 4128), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4124, 4128), True, 'import 
matplotlib.pyplot as plt\n'), ((1860, 1873), 'dataloader.load_FRD', 'load_FRD', ([], {'k': 'k'}), '(k=k)\n', (1868, 1873), False, 'from dataloader import load_PIE, load_FRD\n'), ((2330, 2363), 'numpy.tile', 'np.tile', (['data_mean', '(num_test, 1)'], {}), '(data_mean, (num_test, 1))\n', (2337, 2363), True, 'import numpy as np\n'), ((2014, 2033), 'dataloader.load_PIE', 'load_PIE', ([], {'ratio': '(0.5)'}), '(ratio=0.5)\n', (2022, 2033), False, 'from dataloader import load_PIE, load_FRD\n'), ((2836, 2872), 'numpy.tile', 'np.tile', (['test_sample', '(num_train, 1)'], {}), '(test_sample, (num_train, 1))\n', (2843, 2872), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
from sklearn.metrics import confusion_matrix
import glob
import sys
from alsNet.dataset import Dataset
# Class-id -> display-name mappings for the confusion-matrix axis labels.
# NOTE(review): `class_names` is assigned three times; only the LAST dict
# takes effect. The earlier ones look like leftovers for switching between
# labelling schemes by hand -- confirm before deleting.
class_names={
    0: 'Power',
    1: 'Low Veg.',
    2: 'Imp. Surf.',
    3: 'Car',
    4: 'Fence/Hedge',
    5: 'Roof',
    6: 'Facade',
    7: 'Shrub',
    8: 'Tree',
}
# overwritten below
class_names={
    2: 'Ground',
    3: 'Low Veg.',
    4: 'Med. Veg.',
    5: 'High Veg.',
}
# effective mapping (-1 labels the collector bucket added by get_cm_compressed)
class_names={
    2: 'Ground',
    3: 'Low Veg.',
    4: 'Med. Veg.',
    5: 'High Veg.',
    6: 'Building',
    9: 'Water',
    -1: 'Other'
}
}
def get_cm_compressed(cm, keep_classes=(2, 3, 4, 5, 6, 9), delete=False):
    """Append a collector row/column to a confusion matrix.

    Every row/column whose index is NOT in *keep_classes* is summed into a
    new last row/column. Careful: the kept entries are not reordered, but
    their indices shift when *delete* is True and the summed rows/columns
    are removed afterwards.

    :param cm: a square 2D confusion matrix
    :param keep_classes: indices of the classes to keep
    :param delete: drop the summed rows/columns after collecting (default: False)
    :return: the augmented (and optionally pruned) matrix
    """
    size = cm.shape[0]
    padded = np.append(cm, np.zeros((1, size)), axis=0)
    padded = np.append(padded, np.zeros((size + 1, 1)), axis=1)
    rest = [c for c in range(size) if c not in keep_classes]
    padded[:, size] = padded[:, rest].sum(axis=1)
    padded[size, :] = padded[rest, :].sum(axis=0)
    padded[size, size] = padded[rest, -1].sum()
    if delete:
        padded = np.delete(np.delete(padded, rest, axis=0), rest, axis=1)
    return padded
def over_gt(cm):
    """Normalise each row of *cm* by its row sum (fraction of ground truth)."""
    row_totals = cm.sum(axis=1)
    return (cm.T / row_totals).T
def main(tile_id):
    """Accumulate reference-vs-estimated confusion counts over all point
    clouds matching *tile_id* and render a coloured confusion-matrix figure
    (green diagonal, red off-diagonal; cell alpha encodes the row fraction)."""
    input_files = r"D:\91_classes\10_MSc\04_results\VSC\4\test20\2011_%s_c*.laz"% tile_id
    #input_files = r"D:\91_classes\10_MSc\04_results\VSC\28\test36\area1_aoi_c*_test.laz"
    #input_files = r"D:\91_classes\10_MSc\04_results\VSC\32\test33\merge.las"
    filelist = glob.glob(input_files)
    # 30x30 holds every raw class id passed to confusion_matrix below
    cm_sum = np.zeros((30,30), dtype=np.int64)
    pt_cnt = 0
    for idx, file in enumerate(filelist):
        print("Loading dataset '%s' (%s/%s)" % (file, idx+1, len(filelist)))
        ds = Dataset(file)
        ref = list(ds.labels)
        gt_col = ds.names.index('estim_class')
        # NOTE(review): the +3 offset presumably skips coordinate columns
        # before the attribute columns -- confirm against Dataset's layout
        gt = list(ds.points_and_features[:, gt_col+3])
        labels = ref #[item+2 for item in ref]
        classes = gt #[item+2 for item in gt]
        pt_cnt += len(ref)
        print("Creating confusion matrix")
        eval_cm = confusion_matrix(labels, classes, range(30))
        cm_sum += eval_cm
    keep_classes = (2,3,4,5,6,9)#(2,3,4,5) #(0, 1, 2, 3, 4, 5, 6, 7, 8)
    # confusion matrix plot
    print("Plotting")
    fig = plt.figure(figsize=(10, 10))
    num_classes = len(keep_classes) + 1
    keep_classes_e = keep_classes + (-1,)  # extra slot for the collector bucket
    gs = gridspec.GridSpec(num_classes, num_classes)
    cm_sum = get_cm_compressed(cm_sum, keep_classes, delete=True)
    conf_all = over_gt(cm_sum)  # row-normalised fractions
    row = -1
    for ref_idx, ref_class in enumerate(keep_classes_e):
        curr_ref_axis = None
        row += 1
        col = -1
        for eval_idx, eval_class in enumerate(keep_classes_e):
            col += 1
            conf = conf_all[ref_idx, eval_idx]
            # share the y axis across each row of subplots
            if curr_ref_axis:
                plt.subplot(gs[row, col], sharey=curr_ref_axis)
            else:
                curr_ref_axis = plt.subplot(gs[row, col])
            plt.plot([0], [0])
            plt.xlim([0, 1])
            plt.ylim([0, 1])
            #plt.plot(points_seen, conf_timeline)
            # colour the cell: grey for the bottom-right collector/collector
            # cell, green on the diagonal, red off it; alpha = `conf`
            if col == row:
                if col == num_classes-1:
                    plt.gca().set_facecolor('gray')
                    highcolor = 'k'
                    lowcolor = 'k'
                else:
                    plt.gca().set_facecolor(([30/255, 180/255, 60/255, conf]))
                    highcolor = 'xkcd:forest green'
                    lowcolor = 'xkcd:grass green'
            else:
                plt.gca().set_facecolor(([220/255, 60/255, 30/255, conf]))
                highcolor = 'xkcd:orange red'
                lowcolor = 'xkcd:dirty pink'
            # percentage label; N/A when the row fraction is NaN
            plt.text(0.5,
                     0.5,
                     "%.1f%%" % (conf * 100) if not np.isnan(conf) else "N/A", ha='center',
                     )#color=highcolor if conf > 0.5 else lowcolor)
            cm = cm_sum
            ref_sum = np.sum(cm, axis=1)[ref_idx]
            eval_sum = np.sum(cm, axis=0)[eval_idx]
            # absolute point count for this cell
            plt.text(0.5,
                     0.3,
                     "%d" % (cm[ref_idx, eval_idx]), ha='center')
            if col == 0:
                plt.ylabel('%s\n%d\n(%.0f%%)' % (class_names[ref_class],
                                                 ref_sum,
                                                 ref_sum / (pt_cnt) * 100))
            if row == 0:
                plt.gca().xaxis.set_label_position('top')
                plt.xlabel('%s\n%d\n(%.0f%%)' % (class_names[eval_class],
                                                 eval_sum,
                                                 eval_sum / (pt_cnt) * 100))
            plt.gca().get_yaxis().set_ticks([])
            plt.gca().get_xaxis().set_ticks([])
            plt.ylim([0, 1])
    print("saving plot")
    fig.text(0.5, 0.94, 'Estimated', ha='center', va='center', fontweight='bold')
    fig.text(0.06, 0.5, 'Ground truth', ha='center', va='center', rotation='vertical', fontweight='bold')
    plt.subplots_adjust(hspace=.0, wspace=.0)
    plt.savefig((r"D:\91_classes\10_MSc\04_results\VSC\4\test20\2011_%s_cm3.png" % tile_id).replace("*", "all"))
    #plt.savefig((r"D:\91_classes\10_MSc\04_results\VSC\28\test36\conf.png"))
    #plt.savefig(r"D:\91_classes\10_MSc\04_results\VSC\32\test33\merge.png")
# Render per-tile confusion figures, then one aggregated over all tiles
# ('*' matches every file the input pattern can find).
main('13235203')
main('13245200')
main('13205000')
main('11275100')
main('*')
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.gca",
"numpy.zeros",
"numpy.isnan",
"alsNet.dataset.Dataset",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"matplotlib.use",
"glob.glob",
"mat... | [((75, 96), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (89, 96), False, 'import matplotlib\n'), ((1404, 1439), 'numpy.sum', 'np.sum', (['cm_buf[:, sum_idxs]'], {'axis': '(1)'}), '(cm_buf[:, sum_idxs], axis=1)\n', (1410, 1439), True, 'import numpy as np\n'), ((1466, 1501), 'numpy.sum', 'np.sum', (['cm_buf[sum_idxs, :]'], {'axis': '(0)'}), '(cm_buf[sum_idxs, :], axis=0)\n', (1472, 1501), True, 'import numpy as np\n'), ((1535, 1563), 'numpy.sum', 'np.sum', (['cm_buf[sum_idxs, -1]'], {}), '(cm_buf[sum_idxs, -1])\n', (1541, 1563), True, 'import numpy as np\n'), ((2055, 2077), 'glob.glob', 'glob.glob', (['input_files'], {}), '(input_files)\n', (2064, 2077), False, 'import glob\n'), ((2091, 2125), 'numpy.zeros', 'np.zeros', (['(30, 30)'], {'dtype': 'np.int64'}), '((30, 30), dtype=np.int64)\n', (2099, 2125), True, 'import numpy as np\n'), ((2803, 2831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2813, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2966), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['num_classes', 'num_classes'], {}), '(num_classes, num_classes)\n', (2940, 2966), False, 'from matplotlib import gridspec\n'), ((5548, 5591), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.0)', 'wspace': '(0.0)'}), '(hspace=0.0, wspace=0.0)\n', (5567, 5591), True, 'import matplotlib.pyplot as plt\n'), ((1208, 1231), 'numpy.zeros', 'np.zeros', (['(1, coll_idx)'], {}), '((1, coll_idx))\n', (1216, 1231), True, 'import numpy as np\n'), ((1272, 1299), 'numpy.zeros', 'np.zeros', (['(coll_idx + 1, 1)'], {}), '((coll_idx + 1, 1))\n', (1280, 1299), True, 'import numpy as np\n'), ((1596, 1631), 'numpy.delete', 'np.delete', (['cm_buf', 'sum_idxs'], {'axis': '(0)'}), '(cm_buf, sum_idxs, axis=0)\n', (1605, 1631), True, 'import numpy as np\n'), ((1649, 1684), 'numpy.delete', 'np.delete', (['cm_buf', 'sum_idxs'], {'axis': '(1)'}), '(cm_buf, 
sum_idxs, axis=1)\n', (1658, 1684), True, 'import numpy as np\n'), ((2272, 2285), 'alsNet.dataset.Dataset', 'Dataset', (['file'], {}), '(file)\n', (2279, 2285), False, 'from alsNet.dataset import Dataset\n'), ((1739, 1757), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(1)'}), '(cm, axis=1)\n', (1745, 1757), True, 'import numpy as np\n'), ((3512, 3530), 'matplotlib.pyplot.plot', 'plt.plot', (['[0]', '[0]'], {}), '([0], [0])\n', (3520, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3543, 3559), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (3551, 3559), True, 'import matplotlib.pyplot as plt\n'), ((3572, 3588), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (3580, 3588), True, 'import matplotlib.pyplot as plt\n'), ((4569, 4630), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.3)', "('%d' % cm[ref_idx, eval_idx])"], {'ha': '"""center"""'}), "(0.5, 0.3, '%d' % cm[ref_idx, eval_idx], ha='center')\n", (4577, 4630), True, 'import matplotlib.pyplot as plt\n'), ((5310, 5326), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (5318, 5326), True, 'import matplotlib.pyplot as plt\n'), ((3375, 3422), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[row, col]'], {'sharey': 'curr_ref_axis'}), '(gs[row, col], sharey=curr_ref_axis)\n', (3386, 3422), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3498), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[row, col]'], {}), '(gs[row, col])\n', (3484, 3498), True, 'import matplotlib.pyplot as plt\n'), ((4477, 4495), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(1)'}), '(cm, axis=1)\n', (4483, 4495), True, 'import numpy as np\n'), ((4528, 4546), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(0)'}), '(cm, axis=0)\n', (4534, 4546), True, 'import numpy as np\n'), ((4716, 4810), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('%s\\n%d\\n(%.0f%%)' % (class_names[ref_class], ref_sum, ref_sum / pt_cnt * 100)\n )"], {}), "('%s\\n%d\\n(%.0f%%)' % (class_names[ref_class], 
ref_sum, ref_sum /\n pt_cnt * 100))\n", (4726, 4810), True, 'import matplotlib.pyplot as plt\n'), ((5006, 5104), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('%s\\n%d\\n(%.0f%%)' % (class_names[eval_class], eval_sum, eval_sum / pt_cnt *\n 100))"], {}), "('%s\\n%d\\n(%.0f%%)' % (class_names[eval_class], eval_sum, \n eval_sum / pt_cnt * 100))\n", (5016, 5104), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4077), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4075, 4077), True, 'import matplotlib.pyplot as plt\n'), ((4323, 4337), 'numpy.isnan', 'np.isnan', (['conf'], {}), '(conf)\n', (4331, 4337), True, 'import numpy as np\n'), ((3728, 3737), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3735, 3737), True, 'import matplotlib.pyplot as plt\n'), ((3873, 3882), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3880, 3882), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4957), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4955, 4957), True, 'import matplotlib.pyplot as plt\n'), ((5213, 5222), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5220, 5222), True, 'import matplotlib.pyplot as plt\n'), ((5261, 5270), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5268, 5270), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
# read the puzzle input
with open('input.txt') as f:
    lines = f.readlines()
# part 1: each boarding pass encodes a 10-bit number (F/L = 0, B/R = 1);
# since seat_id = row * 8 + column, all ten bits share one weight table.
bit = {'F': 0, 'B': 1, 'L': 0, 'R': 1}
weights = (512, 256, 128, 64, 32, 16, 8, 4, 2, 1)
seat_ids = [sum(w * bit[c] for c, w in zip(line, weights)) for line in lines]
seats = np.array(seat_ids)
ans1 = seats.max()
# part 2: our seat is the lone gap in the sorted occupied ids
seats = np.sort(seats)
gaps = seats[1:] - seats[:-1]
ans2 = seats[1:][gaps == 2][0] - 1
# write both answers
answer = ['Part 1: {}'.format(ans1), 'Part 2: {}'.format(ans2)]
with open('solution.txt', 'w') as f:
    f.writelines('\n'.join(answer) + '\n')
| [
"numpy.sort",
"numpy.array"
] | [((368, 383), 'numpy.array', 'np.array', (['seats'], {}), '(seats)\n', (376, 383), True, 'import numpy as np\n'), ((421, 435), 'numpy.sort', 'np.sort', (['seats'], {}), '(seats)\n', (428, 435), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import bcolz
import numpy as np
import math
import warnings
from typing import Optional
import torch.multiprocessing as mp
"""
ASR model parts
"""
class LayerNorm(nn.Module):
    """Layer normalisation over the feature axis of a
    (batch, channel, feature, time) tensor: the last two axes are swapped so
    nn.LayerNorm sees the features last, then swapped back."""

    def __init__(self, n_features):
        super(LayerNorm, self).__init__()
        self.layer_norm = nn.LayerNorm(n_features)

    def forward(self, x):
        normed = self.layer_norm(x.transpose(2, 3))
        return normed.transpose(2, 3)
class ResidualCNN(nn.Module):
    """Pre-activation residual CNN block.

    Residual Networks: https://arxiv.org/pdf/1512.03385.pdf
    Structure:
        input -> LayerNorm -> ReLU -> Dropout -> Conv2d
              -> LayerNorm -> ReLU -> Dropout -> Conv2d -> (+ input) -> output
    The residual addition requires in_channels == out_channels.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout_p, n_features):
        super(ResidualCNN, self).__init__()
        self.conv_block = nn.Sequential(
            LayerNorm(n_features=n_features),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
                      padding=int(kernel_size / 2)),
            LayerNorm(n_features=n_features),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),
            # BUG FIX: the second conv consumes the first conv's output, so
            # its input channel count is out_channels (was in_channels, which
            # only worked because callers pass in_channels == out_channels).
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
                      padding=int(kernel_size / 2))
        )
    def forward(self, x):
        identity = x            # keep the input for the skip connection
        x = self.conv_block(x)
        x += identity           # residual addition
        return x
class BidirectionalGRU(nn.Module):
    """
    Bidirectional GRU block.

    Structure:
        input -> LayerNorm -> ActivationFunction -> BidirectionalGRU -> Dropout -> output

    The output feature dim is 2 * hidden_size (forward + backward states).
    """
    def __init__(self, input_size, hidden_size, dropout_p, batch_first):
        super(BidirectionalGRU, self).__init__()
        self.preprocessing_block = nn.Sequential(
            nn.LayerNorm(normalized_shape=input_size),
            nn.ReLU(),
        )
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=1,
                          batch_first=batch_first, bidirectional=True)
        self.dropout = nn.Dropout(p=dropout_p)

    def forward(self, x):
        normed = self.preprocessing_block(x)
        gru_out, _ = self.gru(normed)  # hidden state is discarded
        return self.dropout(gru_out)
class SpeechModel(nn.Module):
    """
    Pretty similar to 'Deep Speech 2': https://arxiv.org/pdf/1512.02595.pdf

    Structure:
        spectrogram -> InitialConv2d -> ResConv2d - Layers -> Linear - (Transition - / Connection -) Layer -> BiGRU - Layers -> Classifier

    Args:
        n_res_cnn_layers (int): total ResidualCNN blocks, split roughly in half
            between the "general" and "specific" stacks.
        n_bi_gru_layers (int): number of BidirectionalGRU blocks.
        bi_gru_dim (int): GRU hidden size per direction.
        n_classes (int): classifier output size.
        n_features (int): spectrogram feature bins of the raw input.
        dropout_p (float): dropout probability used throughout.
        device: torch device handle, stored on the instance.
        dataset: dataset handle, stored on the instance (not used in forward).
        d_audio_embedding (int): output dim of the auxiliary AudioEmbedding head.
    """
    def __init__(self, n_res_cnn_layers, n_bi_gru_layers, bi_gru_dim, n_classes, n_features, dropout_p, device, dataset,
                 d_audio_embedding):
        super(SpeechModel, self).__init__()
        self.dataset = dataset
        self.device = device
        # InitialConv2d: stride 2 halves both the feature and time resolution
        self.init_conv2d = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=2, padding=1)
        n_features = int(n_features / 2)  # feature dimension decreased by first Conv2d - Layer
        # General ResidualConv2d - Layers
        self.residual_cnn_general = nn.Sequential(
            *[ResidualCNN(in_channels=32, out_channels=32, kernel_size=3, stride=1, dropout_p=dropout_p,
                          n_features=n_features) for i in range(int(n_res_cnn_layers / 2))]
        )
        # Specific ResidualConv2d - Layers
        self.residual_cnn_specific = nn.Sequential(
            *[ResidualCNN(in_channels=32, out_channels=32, kernel_size=3, stride=1, dropout_p=dropout_p,
                          n_features=n_features) for i in range((n_res_cnn_layers - int(n_res_cnn_layers / 2)))]
        )
        # Linear - (Transition - / Connection -) Layer
        self.linear_layer_connection = nn.Linear(in_features=n_features * 32, out_features=bi_gru_dim)
        # BidirectionalGRU - Layers: later layers consume the bidirectional (2x) output
        self.bi_gru_nn = nn.Sequential(
            *[BidirectionalGRU(input_size=bi_gru_dim if i == 0 else bi_gru_dim * 2, hidden_size=bi_gru_dim,
                               dropout_p=dropout_p, batch_first=(i == 0)) for i in range(n_bi_gru_layers)]
        )
        # Classifier
        self.classifier = nn.Sequential(
            nn.Linear(in_features=bi_gru_dim * 2 if n_bi_gru_layers > 1 else bi_gru_dim, out_features=bi_gru_dim),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),
            nn.Linear(in_features=bi_gru_dim, out_features=n_classes)
        )
        # Audio Embedding model for irony classificaton model [NOT USED due to lack of audio data]
        self.audio_embedding = AudioEmbedding(n_features=n_features, dropout_p=dropout_p,
                                              d_audio_embedding=d_audio_embedding)

    def do_audio_embedding(self, x):
        # Thin wrapper around the (unfinished) AudioEmbedding sub-module.
        x = self.audio_embedding(x)
        return x

    def forward(self, x, this_model_train=True):
        """Run the ASR network.

        Args:
            x: spectrogram batch; assumes shape (batch, 1, n_features, time) — TODO confirm.
            this_model_train (bool): when True, only the ASR branch runs and its
                logits are returned. The audio-embedding branch is an unfinished
                sketch, explicitly guarded by `raise NotImplementedError`.

        Returns:
            Classifier logits (when this_model_train is True).
        """
        x = self.init_conv2d(x)
        x = self.residual_cnn_general(x)
        # Start parallel audio processing for following irony classification
        if not self.training and not this_model_train:
            # Audio Embedding has not been finished implementing and has not been tested
            # due to a lack of respective audio data.
            raise NotImplementedError
            # --- unreachable sketch below, kept deliberately for future work ---
            mp.set_start_method('spawn')
            audio_embedding_return = mp.Queue()
            audio_embedding_process = mp.Process(
                target=self.audio_embedding,
                args=(x.clone(), audio_embedding_return)
            )
            audio_embedding_process.start()
        else:
            conv_output_for_audio_embedding = x.clone()
        x = self.residual_cnn_specific(x)
        sizes = x.size()
        x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]).transpose(1, 2)  # reshape for linear layer
        x = self.linear_layer_connection(x)
        x = self.bi_gru_nn(x)
        x = self.classifier(x)
        if this_model_train is True:  # for seperatly training the ASR model
            return x
        # Audio Embedding has not been finished implementing and has not been tested
        # due to a lack of respective audio data.
        raise NotImplementedError
        # --- unreachable sketch below, kept deliberately for future work ---
        if not self.training:
            # Adjust audio embeddings with ASR - classifier outputs
            audio_embedding_process.join()
            adjusted_audio_embedding_return = mp.Queue()
            adjusted_audio_embedding_process = mp.Process(
                target=self.audio_embedding.audio_embedding_adjustments,
                args=(audio_embedding_return.get(), x, adjusted_audio_embedding_return)
            )
            adjusted_audio_embedding_process.start()
        else:
            audio_embedding_return = self.audio_embedding(conv_output_for_audio_embedding)
            adjusted_audio_embedding = self.audio_embedding.audio_embedding_adjustments(
                audio_embeddings=audio_embedding_return, asr_classifications=x)
        if not self.training:
            # Get adjusted audio embeddings
            adjusted_audio_embedding = adjusted_audio_embedding_return.get()
        return adjusted_audio_embedding
"""
Irony classifier model parts
"""
class AudioEmbedding(nn.Module):
    """Projects ASR conv features into a fixed-size audio embedding.

    Unfinished auxiliary head: `audio_embedding_adjustments` is a stub that
    currently returns its input unchanged.
    """
    def __init__(self, n_features, dropout_p, d_audio_embedding):
        super(AudioEmbedding, self).__init__()
        self.conv_block = nn.Sequential(
            LayerNorm(n_features=n_features),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1,
                      padding=int(3 / 2)),
        )
        self.fc_0 = nn.Linear(in_features=(32 * n_features), out_features=d_audio_embedding)
        self.fc_1 = nn.Linear(in_features=d_audio_embedding, out_features=d_audio_embedding)
        self.activation_fn = nn.GELU()
        self.dropout = nn.Dropout(p=dropout_p)

    def forward(self, x: torch.Tensor, audio_embedding_return=None):
        out = self.conv_block(x)
        batch, channels, feats, frames = out.size()
        # flatten (channels, features) and put the time axis second for the linear layers
        out = out.view(batch, channels * feats, frames).transpose(1, 2)
        for fc in (self.fc_0, self.fc_1):
            out = self.dropout(self.activation_fn(fc(out)))
        # when called as a multiprocessing target, results go through the queue
        if audio_embedding_return is None:
            return out
        else:
            audio_embedding_return.put(out)

    def audio_embedding_adjustments(self, audio_embeddings: torch.Tensor, asr_classifications: torch.Tensor):
        # TODO: adjust embeddings using the ASR classifier outputs
        return audio_embeddings
class PositionalEncoding(nn.Module):
    def __init__(self, d_model: int = 512, max_timescale: float = 1.0e4, max_seq_len: int = 200, start_index: int = 0):
        """
        Position Encoder for transformer network. Adds position encodings to word embeddings.

        Args:
            d_model (int): dim of word embedding vector.
            max_timescale (float): choose depending on d_model (increase d_model -> decrease max_timescale).
                (Annotation fixed: the default 1.0e4 is a float, not an int.)
            max_seq_len (int): maximum sequence length.
            start_index (int): start position index.
        """
        super(PositionalEncoding, self).__init__()
        position_encoding = torch.empty(max_seq_len, d_model)
        # column vector of positions, shape (max_seq_len, 1)
        position = torch.arange(start_index, max_seq_len, dtype=torch.float).unsqueeze(1)
        # One frequency per even embedding index: exp(-2i * log(max_timescale) / d_model),
        # a geometric progression of wavelengths (OpenAI's position encoding based on BERT).
        # TODO: Adapt 'div_term' (https://datascience.stackexchange.com/questions/51065/what-is-the-positional-encoding-in-the-transformer-model)
        div_term = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float) *
                              (- math.log(max_timescale) / d_model))
        # using position encoding as in the paper, not as in the code
        position_encoding[:, 0::2] = torch.sin(div_term * position)  # for every even embedding vector index
        position_encoding[:, 1::2] = torch.cos(div_term * position)  # for every odd embedding vector index
        # shape (max_seq_len, 1, d_model) so it broadcasts over the batch dimension
        position_encoding = position_encoding.unsqueeze(1)
        self.register_buffer('position_encoding', position_encoding)  # position encoding is not trainable

    def forward(self, x):
        """Add positional encodings; assumes x is (seq_len, batch, d_model) — TODO confirm."""
        x = x + self.position_encoding[:x.shape[0]]
        return x
class SegmentEncoding(nn.Module):
    """BERT-style segment embedding: segment 0 for the two leading special
    positions, segment 1 for every token of the current utterance."""
    def __init__(self, d_model):
        super(SegmentEncoding, self).__init__()
        self.segment_embedding = nn.Embedding(num_embeddings=2, embedding_dim=d_model)

    def forward(self, utterance_lens: tuple, last_utterance_lens: tuple) -> torch.Tensor:
        device = next(self.parameters()).device
        longest = max(utterance_lens)
        # ids: [0, 0] for the leading special positions, then 1 for each token
        segment_ids = torch.Tensor(([0.0] + [0.0] + ([1.0] * (longest)))).to(device)
        return self.segment_embedding(segment_ids.long())
class CustomTransformerDecoderLayer(nn.Module):
    """Transformer decoder layer (self-attention -> cross-attention -> FFN),
    with the encoder memory added into the cross-attention residual."""
    def __init__(self, d_model: int = 512, n_heads: int = 12, d_feedforward: int = 2048, dropout_p: float = 0.1, activation: str = 'ReLU'):
        super(CustomTransformerDecoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(embed_dim=d_model, num_heads=n_heads, dropout=dropout_p)
        self.dropout_0 = nn.Dropout(p=dropout_p)
        self.layer_norm_0 = nn.LayerNorm(normalized_shape=d_model)
        self.multihead_attn = nn.MultiheadAttention(embed_dim=d_model, num_heads=n_heads, dropout=dropout_p)
        self.dropout_1 = nn.Dropout(p=dropout_p)
        self.layer_norm_1 = nn.LayerNorm(normalized_shape=d_model)
        self.fc_0 = nn.Linear(in_features=d_model, out_features=d_feedforward)
        self.dropout_2 = nn.Dropout(p=dropout_p)
        self.fc_1 = nn.Linear(in_features=d_feedforward, out_features=d_model)
        self.dropout_3 = nn.Dropout(p=dropout_p)
        self.layer_norm_2 = nn.LayerNorm(normalized_shape=d_model)
        # BUG FIX: getattr(nn, activation) returns the module *class* (e.g. nn.ReLU),
        # so calling self.activation_fn(tensor) in forward() would construct a module
        # (interpreting the tensor as the `inplace` arg) instead of applying the
        # activation, and the subsequent dropout call would fail. Instantiate it once.
        self.activation_fn = getattr(nn, activation)()

    def __setstate__(self, state):
        # Backwards-compatible unpickling: older checkpoints lack 'activation'.
        if 'activation' not in state:
            warnings.warn(message='"state" does not contain "activation". "nn.ReLU()" is used as default.')
            state['activation'] = nn.ReLU()
        super(CustomTransformerDecoderLayer, self).__setstate__(state)

    def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,
                memory_mask: Optional[torch.Tensor] = None, tgt_key_padding_mask: Optional[torch.Tensor] = None,
                memory_key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Decode one layer; tensors follow the (seq_len, batch, d_model) layout of nn.MultiheadAttention."""
        # self-attention + residual
        tgt2 = self.self_attn(query=tgt, key=tgt, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout_0(tgt2)
        tgt = self.layer_norm_0(tgt)
        # cross-attention over the encoder memory; note: memory itself is added
        # into the residual here (non-standard, preserved from the original design)
        tgt2 = self.multihead_attn(query=tgt, key=memory, value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + memory + self.dropout_1(tgt2)
        tgt = self.layer_norm_1(tgt)
        # position-wise feed-forward + residual
        tgt2 = self.dropout_2(self.activation_fn(self.fc_0(tgt)))
        tgt2 = self.fc_1(tgt2)
        tgt = tgt + self.dropout_3(tgt2)
        tgt = self.layer_norm_2(tgt)
        return tgt
class CustomTransformerEncoder(nn.Module):
    """Stack of cloned encoder layers that also collects each layer's
    attention weights (layers must return (output, attn_weights))."""
    __constants__ = ['norm']

    def __init__(self, encoder_layer, n_layers, norm=None):
        super(CustomTransformerEncoder, self).__init__()
        # deep-copy the prototype layer n_layers times
        self.layers = nn.modules.transformer._get_clones(module=encoder_layer, N=n_layers)
        self.n_layers = n_layers
        self.norm = norm

    def forward(self, src: torch.Tensor, mask: Optional[torch.Tensor] = None,
                src_key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        output = src
        attn_weights_list = []
        for layer in self.layers:
            output, weights = layer(src=output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
            attn_weights_list.append(weights)
        # optional final normalisation
        output = output if self.norm is None else self.norm(output)
        return output, attn_weights_list
class CustomTransformerEncoderLayer(nn.Module):
    """Post-norm transformer encoder layer that additionally returns the
    self-attention weights from its forward pass."""
    def __init__(self, d_model, n_heads, d_feedforward=2048, dropout_p=0.1, activation='gelu'):
        super(CustomTransformerEncoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(embed_dim=d_model, num_heads=n_heads)
        self.dropout_attn = nn.Dropout(p=dropout_p)
        self.norm_0 = nn.LayerNorm(d_model)
        self.fc_0 = nn.Linear(in_features=d_model, out_features=d_feedforward)
        self.dropout_0 = nn.Dropout(p=dropout_p)
        self.fc_1 = nn.Linear(in_features=d_feedforward, out_features=d_model)
        self.dropout_1 = nn.Dropout(p=dropout_p)
        self.norm_1 = nn.LayerNorm(d_model)
        # resolve 'gelu'/'relu' to the functional activation via torch's helper
        self.activation_fn = nn.modules.transformer._get_activation_fn(activation=activation)

    def __setstate__(self, state):
        # Backwards-compatible unpickling: older checkpoints lack 'activation'.
        if 'activation' not in state:
            warnings.warn(message="'state' does not contain 'activation'. 'nn.GELU()' is used as default.")
            state['activation'] = nn.GELU()
        super(CustomTransformerEncoderLayer, self).__setstate__(state)

    def forward(self, src: torch.Tensor, src_mask: Optional[torch.Tensor] = None,
                src_key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # self-attention sublayer (weights requested so they can be returned)
        attn_out, attn_weights = self.self_attn(query=src, key=src, value=src,
                                                key_padding_mask=src_key_padding_mask,
                                                need_weights=True, attn_mask=src_mask)
        src = self.norm_0(src + self.dropout_attn(attn_out))
        # position-wise feed-forward sublayer
        ff_out = self.fc_1(self.dropout_0(self.activation_fn(self.fc_0(src))))
        src = self.norm_1(src + self.dropout_1(ff_out))
        return src, attn_weights
class ContextModel(nn.Module):
    """Summarises an utterance's word embeddings into one context vector.

    Currently the GRU outputs are pooled with fixed, uniform (all-ones)
    weights; the weight_fc_* / sigmoid / softmax modules are constructed but
    not used in forward (kept so checkpoints keep their parameter layout).
    """
    def __init__(self, d_model, d_context):
        super(ContextModel, self).__init__()
        self.d_context = d_context
        self.word_wise_fc_0 = nn.Linear(in_features=d_model, out_features=d_context)
        self.dropout_0 = nn.Dropout(p=0.25)
        # self.sigmoid_0 = nn.Sigmoid()
        self.weight_fc_1 = nn.Linear(in_features=d_model, out_features=int(d_model / 2))
        self.relu_0 = nn.ReLU()
        self.weight_fc_2 = nn.Linear(in_features=int(d_model / 2), out_features=1)
        self.dropout_2 = nn.Dropout(p=0.25)
        self.bi_gru = nn.GRU(input_size=500, hidden_size=250, num_layers=1, bias=True,
                             batch_first=False, dropout=0.5, bidirectional=True)
        self.weight_sigmoid = nn.Sigmoid()
        self.weight_softmax = nn.Softmax()

    def forward(self, word_embedding, utterance_lengths):
        gru_out, _ = self.bi_gru(word_embedding)
        # (seq_len, batch, 2*hidden) -> (batch, 2*hidden, seq_len)
        gru_out = gru_out.permute(1, 2, 0)
        device = next(self.parameters()).device
        # uniform pooling weights, shape (batch, seq_len, 1)
        uniform_weights = torch.ones(gru_out.shape[0], gru_out.shape[2], 1).to(device)
        # (batch, 2*hidden, seq_len) @ (batch, seq_len, 1) -> (batch, 2*hidden, 1)
        context = (gru_out @ uniform_weights).squeeze(2)
        return context
class IronyClassifier(nn.Module):
    """
    Transformer-encoder irony classifier over word embeddings, with a context
    vector computed from the previous utterance prepended to the sequence.

    Args:
        batch_size (int): fixed batch size, used when tiling the segment encoding.
        n_tokens: vocabulary size (not used directly; the embedding is hard-coded
            to 100004 entries — 100000 words + 4 special tokens).
        d_model (int): embedding / model dimension.
        d_context (int): dimension of the utterance context vector.
        n_heads (int): attention heads per encoder layer.
        n_hid (int): feed-forward dimension inside each encoder layer.
        n_layers (int): number of encoder layers.
        dropout_p (float): dropout probability.
    """
    def __init__(self, batch_size, n_tokens, d_model, d_context, n_heads, n_hid, n_layers, dropout_p=0.5):
        super(IronyClassifier, self).__init__()
        self.batch_size = batch_size
        self.d_model = d_model
        self.d_context = d_context
        # 100000 vocabulary entries + 4 special tokens; see load_word_embedding
        self.word_embedding = nn.Embedding(num_embeddings=100004, embedding_dim=500)
        print('word_embedding loaded')
        # NOTE(review): PositionalEncoding's second positional parameter is
        # max_timescale, not dropout — passing dropout_p here sets
        # max_timescale=dropout_p (e.g. 0.5). Verify this is intended.
        self.positional_encoder = PositionalEncoding(d_model, dropout_p)
        self.segment_encoding = SegmentEncoding(d_model=d_model)
        # encoder definition
        encoder_layer = CustomTransformerEncoderLayer(d_model=(d_model), n_heads=n_heads, d_feedforward=n_hid, dropout_p=dropout_p, activation='gelu')
        self.transformer_encoder = CustomTransformerEncoder(encoder_layer=encoder_layer, n_layers=n_layers)
        # two-layer classification head applied to the first sequence position
        self.classifier_0 = nn.Linear(in_features=(d_model), out_features=int(d_model / 2))
        self.gelu_0 = nn.GELU()
        self.dropout_classifier_0 = nn.Dropout(p=0.5)
        self.classifier_1 = nn.Linear(in_features=int(d_model / 2), out_features=1)
        self.sigmoid = nn.Sigmoid()  # not applied in forward; presumably used by callers — TODO confirm
        self.context_embedding = ContextModel(d_model=d_model, d_context=d_context)

    def load_word_embedding(self, trainable=True):
        """Build an nn.Embedding initialised from pre-trained 300-d vectors on disk.

        Rows 0..99999 come from the bcolz store; rows 100000..100003 hold the
        'ukw' (unknown), 'cls', padding and 'sep' special tokens.

        Args:
            trainable (bool): if False, freeze the embedding weights.

        Returns:
            nn.Embedding: the initialised embedding layer.
        """
        # create word embedding state dict
        vectors = bcolz.open('../../data/irony_data/SARC_2.0/6B.300.dat')
        # vectors = bcolz.open('data/irony_data/FastText/300d-1M-SARC_2_0_Adjusted.dat')
        weights_matrix = np.zeros((100004, 300))
        for i in range(100000):
            weights_matrix[i] = vectors[i]
        weights_matrix[100000] = nn.init.xavier_uniform(torch.empty((1, 1, 300)))  # 'ukw' (unknown word) weights
        weights_matrix[100001] = nn.init.xavier_uniform(torch.empty((1, 1, 300)))  # 'cls' (class token) weights
        weights_matrix[100002] = torch.zeros((1, 1, 300))  # padding token
        weights_matrix[100003] = nn.init.xavier_uniform(torch.empty(1, 1, 300))  # 'sep' (seperating token)
        word_embedding = nn.Embedding(num_embeddings=100004, embedding_dim=300)
        word_embedding.load_state_dict({'weight': torch.from_numpy(weights_matrix)})
        if not trainable:
            word_embedding.weight.requires_grad = False
        return word_embedding

    def generate_src_mask(self, utterance_lens: tuple, last_utterance_lens) -> torch.Tensor:
        """Build a key-padding mask: True marks padded positions past each utterance."""
        max_len = max(utterance_lens)
        src_mask = []
        # (['cls'] A) (['sep'] B ['sep'])
        for current_len, last_current_len in zip(utterance_lens, last_utterance_lens):
            # leading special positions stay unmasked; padding beyond current_len
            # is masked (True) — exact token layout TODO confirm against the caller
            src_mask.append([False] + [False] + [False] + ([False] * ((current_len - 2))) + [False] + ([True] * ((max_len) - ((current_len)))))
        src_mask = torch.BoolTensor(src_mask).to(next(self.parameters()).device)
        return src_mask

    def generate_context(self) -> torch.Tensor:
        """Zero context vector, used for the first utterance of a dialogue."""
        context_tensor = torch.zeros((self.batch_size, self.d_context))
        return context_tensor

    def generate_word_embedding(self) -> torch.Tensor:
        # not implemented yet
        pass

    def forward(self, src: torch.Tensor, utterance_lens: tuple, first: bool, last_word_embedding: Optional[torch.Tensor] = torch.zeros((10, 20, 200)).to(torch.device('cuda')), last_utterance_lens: Optional[tuple] = None, chain_training: Optional[bool] = True):
        # NOTE(review): the default for last_word_embedding is evaluated once at
        # import time and allocates on CUDA — it fails on CPU-only machines and is
        # a shared mutable default. Consider defaulting to None instead.
        if not self.training:
            # inference path handles a single (unbatched) utterance
            utterance_lens = [utterance_lens]
            last_utterance_lens = [last_utterance_lens]
        src = self.word_embedding(src.long())
        if self.training:
            word_embedding = src
        else:
            word_embedding = src[1:-1]
        if first and chain_training:
            # first utterance of a chain: nothing to classify yet; just return
            # the word embeddings for the next step
            if self.training:
                return None, word_embedding
            else:
                # return None, word_embedding[1:-1], None # Cut of 'sep' tokens (only for inference).
                return None, word_embedding, None
        if not first:
            if self.training:
                cls = last_word_embedding[0]
                last_word_embedding = last_word_embedding[1:]
            else:
                # token id 100001 is the 'cls' embedding
                cls = self.word_embedding(torch.LongTensor([100001]).to(next(self.parameters()).device))
            context_tensor = self.context_embedding(word_embedding=last_word_embedding, utterance_lengths=last_utterance_lens)
        else:
            context_tensor = self.generate_context().to(next(self.parameters()).device)
        if not self.training:
            src = src.unsqueeze(1)
        # prepend the 'cls' embedding and the context vector to the sequence
        src = torch.cat((cls.unsqueeze(0), context_tensor.unsqueeze(0), src), dim=0)
        src = self.positional_encoder(src)
        src += self.segment_encoding(utterance_lens=utterance_lens, last_utterance_lens=last_utterance_lens).unsqueeze(1).repeat(1, self.batch_size, 1)
        # torch.autograd.set_detect_anomaly = True
        src_mask = self.generate_src_mask(utterance_lens=utterance_lens, last_utterance_lens=last_utterance_lens)
        out, attn_weights_list = self.transformer_encoder(src, src_key_padding_mask=src_mask)
        # classify from the first position ('cls'); raw logit, no sigmoid applied here
        out = self.classifier_1(self.dropout_classifier_0(self.gelu_0(self.classifier_0(out[0]))))
        if self.training:
            return out, word_embedding
        else:
            return out, word_embedding, attn_weights_list
| [
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.empty",
"torch.cos",
"torch.nn.Softmax",
"torch.arange",
"torch.device",
"torch.ones",
"torch.nn.modules.transformer._get_activation_fn",
"torch.nn.LayerNorm",
"torch.nn.modules.transformer._get_clones",
"torch.Tensor",
"torch.nn.Linear",
"t... | [((321, 345), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['n_features'], {}), '(n_features)\n', (333, 345), True, 'import torch.nn as nn\n'), ((2013, 2130), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': '(1)', 'batch_first': 'batch_first', 'bidirectional': '(True)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=1,\n batch_first=batch_first, bidirectional=True)\n', (2019, 2130), True, 'import torch.nn as nn\n'), ((2176, 2199), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (2186, 2199), True, 'import torch.nn as nn\n'), ((2932, 3009), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=1, out_channels=32, kernel_size=3, stride=2, padding=1)\n', (2941, 3009), True, 'import torch.nn as nn\n'), ((3832, 3895), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(n_features * 32)', 'out_features': 'bi_gru_dim'}), '(in_features=n_features * 32, out_features=bi_gru_dim)\n', (3841, 3895), True, 'import torch.nn as nn\n'), ((7687, 7757), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(32 * n_features)', 'out_features': 'd_audio_embedding'}), '(in_features=32 * n_features, out_features=d_audio_embedding)\n', (7696, 7757), True, 'import torch.nn as nn\n'), ((7780, 7852), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'd_audio_embedding', 'out_features': 'd_audio_embedding'}), '(in_features=d_audio_embedding, out_features=d_audio_embedding)\n', (7789, 7852), True, 'import torch.nn as nn\n'), ((7882, 7891), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (7889, 7891), True, 'import torch.nn as nn\n'), ((7915, 7938), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (7925, 7938), True, 'import torch.nn as nn\n'), ((9246, 9279), 'torch.empty', 'torch.empty', (['max_seq_len', 'd_model'], {}), '(max_seq_len, d_model)\n', (9257, 
9279), False, 'import torch\n'), ((9926, 9956), 'torch.sin', 'torch.sin', (['(div_term * position)'], {}), '(div_term * position)\n', (9935, 9956), False, 'import torch\n'), ((10038, 10068), 'torch.cos', 'torch.cos', (['(div_term * position)'], {}), '(div_term * position)\n', (10047, 10068), False, 'import torch\n'), ((10533, 10586), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': '(2)', 'embedding_dim': 'd_model'}), '(num_embeddings=2, embedding_dim=d_model)\n', (10545, 10586), True, 'import torch.nn as nn\n'), ((11302, 11380), 'torch.nn.MultiheadAttention', 'nn.MultiheadAttention', ([], {'embed_dim': 'd_model', 'num_heads': 'n_heads', 'dropout': 'dropout_p'}), '(embed_dim=d_model, num_heads=n_heads, dropout=dropout_p)\n', (11323, 11380), True, 'import torch.nn as nn\n'), ((11406, 11429), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (11416, 11429), True, 'import torch.nn as nn\n'), ((11459, 11497), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'd_model'}), '(normalized_shape=d_model)\n', (11471, 11497), True, 'import torch.nn as nn\n'), ((11529, 11607), 'torch.nn.MultiheadAttention', 'nn.MultiheadAttention', ([], {'embed_dim': 'd_model', 'num_heads': 'n_heads', 'dropout': 'dropout_p'}), '(embed_dim=d_model, num_heads=n_heads, dropout=dropout_p)\n', (11550, 11607), True, 'import torch.nn as nn\n'), ((11633, 11656), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (11643, 11656), True, 'import torch.nn as nn\n'), ((11686, 11724), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'd_model'}), '(normalized_shape=d_model)\n', (11698, 11724), True, 'import torch.nn as nn\n'), ((11746, 11804), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'd_model', 'out_features': 'd_feedforward'}), '(in_features=d_model, out_features=d_feedforward)\n', (11755, 11804), True, 'import torch.nn as nn\n'), ((11830, 11853), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 
'dropout_p'}), '(p=dropout_p)\n', (11840, 11853), True, 'import torch.nn as nn\n'), ((11875, 11933), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'd_feedforward', 'out_features': 'd_model'}), '(in_features=d_feedforward, out_features=d_model)\n', (11884, 11933), True, 'import torch.nn as nn\n'), ((11959, 11982), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (11969, 11982), True, 'import torch.nn as nn\n'), ((12012, 12050), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'd_model'}), '(normalized_shape=d_model)\n', (12024, 12050), True, 'import torch.nn as nn\n'), ((13614, 13682), 'torch.nn.modules.transformer._get_clones', 'nn.modules.transformer._get_clones', ([], {'module': 'encoder_layer', 'N': 'n_layers'}), '(module=encoder_layer, N=n_layers)\n', (13648, 13682), True, 'import torch.nn as nn\n'), ((14501, 14560), 'torch.nn.MultiheadAttention', 'nn.MultiheadAttention', ([], {'embed_dim': 'd_model', 'num_heads': 'n_heads'}), '(embed_dim=d_model, num_heads=n_heads)\n', (14522, 14560), True, 'import torch.nn as nn\n'), ((14589, 14612), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (14599, 14612), True, 'import torch.nn as nn\n'), ((14636, 14657), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (14648, 14657), True, 'import torch.nn as nn\n'), ((14679, 14737), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'd_model', 'out_features': 'd_feedforward'}), '(in_features=d_model, out_features=d_feedforward)\n', (14688, 14737), True, 'import torch.nn as nn\n'), ((14763, 14786), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (14773, 14786), True, 'import torch.nn as nn\n'), ((14807, 14865), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'd_feedforward', 'out_features': 'd_model'}), '(in_features=d_feedforward, out_features=d_model)\n', (14816, 14865), True, 'import torch.nn as nn\n'), ((14891, 14914), 
'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (14901, 14914), True, 'import torch.nn as nn\n'), ((14938, 14959), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (14950, 14959), True, 'import torch.nn as nn\n'), ((14990, 15054), 'torch.nn.modules.transformer._get_activation_fn', 'nn.modules.transformer._get_activation_fn', ([], {'activation': 'activation'}), '(activation=activation)\n', (15031, 15054), True, 'import torch.nn as nn\n'), ((16165, 16219), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'd_model', 'out_features': 'd_context'}), '(in_features=d_model, out_features=d_context)\n', (16174, 16219), True, 'import torch.nn as nn\n'), ((16245, 16263), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.25)'}), '(p=0.25)\n', (16255, 16263), True, 'import torch.nn as nn\n'), ((16416, 16425), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16423, 16425), True, 'import torch.nn as nn\n'), ((16534, 16552), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.25)'}), '(p=0.25)\n', (16544, 16552), True, 'import torch.nn as nn\n'), ((16576, 16696), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(500)', 'hidden_size': '(250)', 'num_layers': '(1)', 'bias': '(True)', 'batch_first': '(False)', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=500, hidden_size=250, num_layers=1, bias=True,\n batch_first=False, dropout=0.5, bidirectional=True)\n', (16582, 16696), True, 'import torch.nn as nn\n'), ((16724, 16736), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (16734, 16736), True, 'import torch.nn as nn\n'), ((16767, 16779), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (16777, 16779), True, 'import torch.nn as nn\n'), ((17800, 17854), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': '(100004)', 'embedding_dim': '(500)'}), '(num_embeddings=100004, embedding_dim=500)\n', (17812, 17854), True, 'import torch.nn as nn\n'), ((18436, 18445), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', 
(18443, 18445), True, 'import torch.nn as nn\n'), ((18482, 18499), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (18492, 18499), True, 'import torch.nn as nn\n'), ((18607, 18619), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (18617, 18619), True, 'import torch.nn as nn\n'), ((18818, 18873), 'bcolz.open', 'bcolz.open', (['"""../../data/irony_data/SARC_2.0/6B.300.dat"""'], {}), "('../../data/irony_data/SARC_2.0/6B.300.dat')\n", (18828, 18873), False, 'import bcolz\n'), ((18989, 19012), 'numpy.zeros', 'np.zeros', (['(100004, 300)'], {}), '((100004, 300))\n', (18997, 19012), True, 'import numpy as np\n'), ((19358, 19382), 'torch.zeros', 'torch.zeros', (['(1, 1, 300)'], {}), '((1, 1, 300))\n', (19369, 19382), False, 'import torch\n'), ((19542, 19596), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': '(100004)', 'embedding_dim': '(300)'}), '(num_embeddings=100004, embedding_dim=300)\n', (19554, 19596), True, 'import torch.nn as nn\n'), ((20406, 20452), 'torch.zeros', 'torch.zeros', (['(self.batch_size, self.d_context)'], {}), '((self.batch_size, self.d_context))\n', (20417, 20452), False, 'import torch\n'), ((933, 942), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (940, 942), True, 'import torch.nn as nn\n'), ((956, 979), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (966, 979), True, 'import torch.nn as nn\n'), ((1206, 1215), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1213, 1215), True, 'import torch.nn as nn\n'), ((1229, 1252), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (1239, 1252), True, 'import torch.nn as nn\n'), ((1919, 1960), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': 'input_size'}), '(normalized_shape=input_size)\n', (1931, 1960), True, 'import torch.nn as nn\n'), ((1974, 1983), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1981, 1983), True, 'import torch.nn as nn\n'), ((4273, 4378), 'torch.nn.Linear', 'nn.Linear', ([], 
{'in_features': '(bi_gru_dim * 2 if n_bi_gru_layers > 1 else bi_gru_dim)', 'out_features': 'bi_gru_dim'}), '(in_features=bi_gru_dim * 2 if n_bi_gru_layers > 1 else bi_gru_dim,\n out_features=bi_gru_dim)\n', (4282, 4378), True, 'import torch.nn as nn\n'), ((4388, 4397), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4395, 4397), True, 'import torch.nn as nn\n'), ((4411, 4434), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (4421, 4434), True, 'import torch.nn as nn\n'), ((4448, 4505), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'bi_gru_dim', 'out_features': 'n_classes'}), '(in_features=bi_gru_dim, out_features=n_classes)\n', (4457, 4505), True, 'import torch.nn as nn\n'), ((5332, 5360), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (5351, 5360), True, 'import torch.multiprocessing as mp\n'), ((5399, 5409), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (5407, 5409), True, 'import torch.multiprocessing as mp\n'), ((6434, 6444), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (6442, 6444), True, 'import torch.multiprocessing as mp\n'), ((7486, 7495), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7493, 7495), True, 'import torch.nn as nn\n'), ((7509, 7532), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_p'}), '(p=dropout_p)\n', (7519, 7532), True, 'import torch.nn as nn\n'), ((12191, 12291), 'warnings.warn', 'warnings.warn', ([], {'message': '""""state" does not contain "activation". "nn.ReLU()" is used as default."""'}), '(message=\n \'"state" does not contain "activation". "nn.ReLU()" is used as default.\')\n', (12204, 12291), False, 'import warnings\n'), ((12321, 12330), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12328, 12330), True, 'import torch.nn as nn\n'), ((15141, 15241), 'warnings.warn', 'warnings.warn', ([], {'message': '"""\'state\' does not contain \'activation\'. 
\'nn.GELU()\' is used as default."""'}), '(message=\n "\'state\' does not contain \'activation\'. \'nn.GELU()\' is used as default.")\n', (15154, 15241), False, 'import warnings\n'), ((15271, 15280), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (15278, 15280), True, 'import torch.nn as nn\n'), ((19144, 19168), 'torch.empty', 'torch.empty', (['(1, 1, 300)'], {}), '((1, 1, 300))\n', (19155, 19168), False, 'import torch\n'), ((19263, 19287), 'torch.empty', 'torch.empty', (['(1, 1, 300)'], {}), '((1, 1, 300))\n', (19274, 19287), False, 'import torch\n'), ((19461, 19483), 'torch.empty', 'torch.empty', (['(1)', '(1)', '(300)'], {}), '(1, 1, 300)\n', (19472, 19483), False, 'import torch\n'), ((20707, 20727), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (20719, 20727), False, 'import torch\n'), ((9300, 9357), 'torch.arange', 'torch.arange', (['start_index', 'max_seq_len'], {'dtype': 'torch.float'}), '(start_index, max_seq_len, dtype=torch.float)\n', (9312, 9357), False, 'import torch\n'), ((9546, 9592), 'torch.arange', 'torch.arange', (['(0)', 'd_model', '(2)'], {'dtype': 'torch.float'}), '(0, d_model, 2, dtype=torch.float)\n', (9558, 9592), False, 'import torch\n'), ((10789, 10834), 'torch.Tensor', 'torch.Tensor', (['([0.0] + [0.0] + [1.0] * max_len)'], {}), '([0.0] + [0.0] + [1.0] * max_len)\n', (10801, 10834), False, 'import torch\n'), ((17138, 17201), 'torch.ones', 'torch.ones', (['word_embedding.shape[0]', 'word_embedding.shape[2]', '(1)'], {}), '(word_embedding.shape[0], word_embedding.shape[2], 1)\n', (17148, 17201), False, 'import torch\n'), ((19647, 19679), 'torch.from_numpy', 'torch.from_numpy', (['weights_matrix'], {}), '(weights_matrix)\n', (19663, 19679), False, 'import torch\n'), ((20245, 20271), 'torch.BoolTensor', 'torch.BoolTensor', (['src_mask'], {}), '(src_mask)\n', (20261, 20271), False, 'import torch\n'), ((20677, 20703), 'torch.zeros', 'torch.zeros', (['(10, 20, 200)'], {}), '((10, 20, 200))\n', (20688, 20703), False, 'import 
torch\n'), ((9627, 9650), 'math.log', 'math.log', (['max_timescale'], {}), '(max_timescale)\n', (9635, 9650), False, 'import math\n'), ((21615, 21641), 'torch.LongTensor', 'torch.LongTensor', (['[100001]'], {}), '([100001])\n', (21631, 21641), False, 'import torch\n')] |
import re
import warnings
import numpy as np
import pandas as pd
import scipy
from pandas import DataFrame
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import BallTree, KDTree, NearestNeighbors
from sklearn.preprocessing import MultiLabelBinarizer, Normalizer
from tqdm import tqdm
class BaseRecommender(object):
    def __init__(self, items_path: str, train_path: str, test_path: str, val_path: str) -> None:
        """Base recommender class
        Args:
            items_path (str): Path to pickle file containing the items
            train_path (str): Path to train data parquet file
            test_path (str): Path to test data parquet file
            val_path (str): Path to validation data parquet file
        """
        items = self._preprocess_items(pd.read_pickle(items_path))
        self.items, self.metadata = self._generate_item_features(items)
        self.train = self._preprocess_train(pd.read_parquet(train_path))
        self.test = pd.read_parquet(test_path) if test_path else None
        self.val = pd.read_parquet(val_path) if val_path else None
        self.recommendations = DataFrame()
    def _preprocess_items(self, items: DataFrame) -> DataFrame:
        """Applies preprocessing to the items
        Args:
            items (DataFrame): Dataframe containing all items with their metadata
        Returns:
            DataFrame: Sanitised item metadata
        """
        ### borrowed from data processing script
        # Maps the Steam sentiment string to (rating, review-volume) pairs.
        sentiment_map = {
            'Overwhelmingly Negative' : (0.1, 1.0),
            'Very Negative' : (0.1, 0.6),
            'Negative' : (0.1, 0.1),
            'Mostly Negative' : (0.3, 0.5),
            '1 user reviews' : (0.5, 0.002),
            '2 user reviews' : (0.5, 0.004),
            '3 user reviews' : (0.5, 0.006),
            '4 user reviews' : (0.5, 0.008),
            '5 user reviews' : (0.5, 0.010),
            '6 user reviews' : (0.5, 0.012),
            '7 user reviews' : (0.5, 0.014),
            '8 user reviews' : (0.5, 0.016),
            '9 user reviews' : (0.5, 0.018),
            'Mixed' : (0.55, 0.5),
            'Mostly Positive' : (0.75, 0.5),
            'Positive' : (0.9, 0.1),
            'Very Positive' : (0.9, 0.6),
            'Overwhelmingly Positive' : (1.0, 1.0),
        }
        # fill nan with '1 user reviews'
        sentiment = items['sentiment'].apply(lambda x: x if isinstance(x, str) else '1 user reviews')
        # create new columns based on the sentiment
        items['sentiment_rating'] = sentiment.apply(lambda x: sentiment_map[x][0])
        items['sentiment_n_reviews'] = sentiment.apply(lambda x: sentiment_map[x][1])
        ### stop borrow
        # FIX: use fullmatch with an escaped dot. The old pattern
        # `re.match(r"\d+(?:.\d{2})?", ...)` only anchored the prefix and
        # treated "." as a wildcard, so strings such as "5.99 USD" passed the
        # check and then crashed np.float32(); they now map to 0 instead.
        items["price"] = items["price"].apply(lambda p: np.float32(p) if re.fullmatch(r"\d+(?:\.\d{2})?", str(p)) else 0)
        items["metascore"] = items["metascore"].apply(lambda m: m if m != "NA" else np.nan)
        # FIX: assign the fillna result instead of `inplace=True` on a column
        # view (fragile under pandas copy-on-write; also matches the style
        # used below for specs/tags/genres).
        items["developer"] = items["developer"].fillna(value='')
        items["developer"] = items["developer"].apply(lambda my_str: my_str.lower().split(','))
        items["publisher"] = items["publisher"].fillna(value='')
        items["publisher"] = items["publisher"].apply(lambda my_str: my_str.lower().split(','))
        # Represent the boolean early-access flag as a (possibly empty) tag list.
        items["early_access"] = items["early_access"].apply(lambda x: ["earlyaccess"] if x else [])
        # Normalise the list-valued columns to lowercase alphanumeric tokens.
        items["specs"] = items["specs"].fillna("")
        items["specs"] = items["specs"].apply(lambda l: [re.subn(r"[^a-z0-9]", "", my_str.lower())[0] for my_str in l])
        items["tags"] = items["tags"].fillna("")
        items["tags"] = items["tags"].apply(lambda l: [re.subn(r"[^a-z0-9]", "", my_str.lower())[0] for my_str in l])
        items["genres"] = items["genres"].fillna("")
        items["genres"] = items["genres"].apply(lambda l: [re.subn(r"[^a-z0-9]", "", my_str.lower())[0] for my_str in l])
        return items
    def _preprocess_train(self, train: DataFrame) -> DataFrame:
        """Applies preprocessing to the training set
        Computes per-user playtime weights: log(total playtime + 2) for each
        item, normalised by the sum (resp. max) over the user's inventory.
        Args:
            train (DataFrame): Dataframe containing all training data
        Returns:
            DataFrame: Sanitised training data
        """
        train["normalized_playtime_forever_sum"] = train.apply(lambda x: (np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2))/np.sum(np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2)), axis=1)
        train["normalized_playtime_forever_max"] = train.apply(lambda x: (np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2))/np.max(np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2)), axis=1)
        return train
    def set_user_data(self, train_path: str, test_path: str, val_path: str) -> None:
        """Read new train, test and val data
        Args:
            train_path (str): Path to train parquet file
            test_path (str): Path to test parquet file
            val_path (str): Path to validation parquet file
        """
        self.train = pd.read_parquet(train_path)
        self.test = pd.read_parquet(test_path)
        self.val = pd.read_parquet(val_path)
    def _generate_item_features(self, items: DataFrame):
        """Generates the item representations
        Args:
            items (DataFrame): Dataframe containing only relevant metadata
        """
        pass
    def evaluate(self, k=10, val=False) -> dict:
        """Evaluate the recommendations
        Args:
            k (int, optional): Amount of recommendations to consider. Defaults to 10.
            val (bool, optional): Whether to use the validation instead of the test dataset. Defaults to False.
        Returns:
            dict: a dict containing the hitrate@k, recall@k and nDCG@k
        """
        gt = self.val if val else self.test
        # FIX: rename on a copy — the old `inplace=True` rename permanently
        # mutated self.test / self.val as a side effect of evaluation.
        gt = gt.rename(columns={"item_id": "items"})
        # (renamed from `eval`, which shadowed the builtin)
        results = self.recommendations
        results = results.merge(gt, left_index=True, right_index=True)
        results_dict = dict()
        # Cap to k recommendations
        results['recommendations'] = results['recommendations'].apply(lambda rec: rec[:k])
        # compute HR@k: 1 if at least one ground-truth item was recommended
        results['HR@k'] = results.apply(lambda row: int(any(item in row['recommendations'] for item in row['items'])), axis=1)
        results_dict[f'HR@{k}'] = results['HR@k'].mean()
        # compute nDCG@k: DCG over the recommendation list, divided by the
        # ideal DCG for min(k, #ground-truth items) hits
        results['nDCG@k'] = results.apply(lambda row: np.sum([int(rec in row['items'])/(np.log2(i+2)) for i, rec in enumerate(row['recommendations'])]), axis=1)
        results['nDCG@k'] = results.apply(lambda row: row['nDCG@k']/np.sum([1/(np.log2(i+2)) for i in range(min(k, len(row['items'])))]), axis=1)
        results_dict[f'nDCG@{k}'] = results['nDCG@k'].mean()
        # compute recall@k
        results['items'] = results['items'].apply(set)
        results['recommendations'] = results['recommendations'].apply(set)
        results['recall@k'] = results.apply(lambda row: len(row['recommendations'].intersection(row['items']))/len(row['items']), axis=1)
        results_dict[f'recall@{k}'] = results['recall@k'].mean()
        # compute ideal recall@k (best achievable with only k slots)
        results['ideal_recall@k'] = results.apply(lambda row: min(k, len(row['items']))/len(row['items']), axis=1)
        results_dict[f'ideal_recall@{k}'] = results['ideal_recall@k'].mean()
        # compute normalised recall@k
        results['nRecall@k'] = results.apply(lambda row: row['recall@k']/row['ideal_recall@k'], axis=1)
        results_dict[f'nRecall@{k}'] = results['nRecall@k'].mean()
        return results_dict
    def qualitative_evaluation(self, users:list=[], export_path:str=None) -> DataFrame:
        """Map owned/recommended item ids to app names for manual inspection.
        Args:
            users (list, optional): positional indices of users to inspect; all users when empty.
            export_path (str, optional): if given, the result is also written to this CSV path.
        Returns:
            DataFrame: owned and recommended item names per user
        """
        eval_data = self.recommendations if len(users) == 0 else self.recommendations.iloc[users]
        new_data = DataFrame({"owned_items": eval_data["item_id"].apply(lambda row: [self.metadata.at[id, "app_name"] for id in row]),
                    "recommended_items": eval_data["recommendations"].apply(lambda row: [self.metadata.at[id, "app_name"] for id in row])}, index=eval_data.index)
        if export_path:
            new_data.to_csv(export_path)
        return new_data
class ContentBasedRecommender(BaseRecommender):
    """Recommends items whose tag vectors (optionally tf-idf weighted and
    normalised) are nearest, in cosine distance, to the mean vector of the
    items a user already owns."""

    # Maps the `tfidf` constructor argument to TfidfTransformer settings.
    # (replaces the old if/elif chain; unknown names disable tf-idf)
    _TFIDF_VARIANTS = {
        'default': {'smooth_idf': False, 'sublinear_tf': False},
        'smooth': {'smooth_idf': True, 'sublinear_tf': False},
        'sublinear': {'smooth_idf': False, 'sublinear_tf': True},
        'smooth_sublinear': {'smooth_idf': True, 'sublinear_tf': True},
    }

    def __init__(self, items_path: str, train_path: str, test_path: str, val_path: str, sparse: bool = True, tfidf='default', normalize=False, columns: list = None) -> None:
        """Content based recommender
        Args:
            items_path (str): Path to pickle file containing the items
            train_path (str): Path to train data parquet file
            test_path (str): Path to test data parquet file
            val_path (str): Path to validation data parquet file
            sparse (bool, optional): If sparse representation should be used. Defaults to True.
            tfidf (str, optional): Which tf-idf method to use ('default', 'smooth',
                'sublinear' or 'smooth_sublinear'; any other value disables tf-idf).
            normalize (bool, optional): If normalization should be used. Defaults to False.
            columns (list, optional): Columns to use for feature representation.
                None (the default) selects ["genres", "tags"].
        """
        self.sparse = sparse
        self.normalize = normalize
        self.recommendations = None
        self.normalizer = Normalizer(copy=False)
        # FIX: None sentinel instead of a shared mutable default argument.
        self.columns = ["genres", "tags"] if columns is None else columns
        # Select tf-idf method to use
        variant = self._TFIDF_VARIANTS.get(tfidf)
        self.tfidf = TfidfTransformer(**variant) if variant is not None else None
        # Select algorithm to use for neighbour computation
        algorithm = 'auto'
        self.method = NearestNeighbors(n_neighbors=10, algorithm=algorithm, metric='cosine')
        super().__init__(items_path, train_path, test_path, val_path)
    def _process_item_features(self, items: DataFrame) -> DataFrame:
        """Processes the item metadata for feature generation
        Args:
            items (DataFrame): Dataframe containing items metadata
        Returns:
            DataFrame: Dataframe containing only relevant data for feature generation
        """
        # Split the frame into (feature columns, remaining metadata columns).
        return items.filter(self.columns), items.filter([col for col in items.columns if col not in self.columns+["index"]])
    def _generate_item_features(self, items: DataFrame) -> DataFrame:
        """Generates feature vector of items and appends to returned DataFrame
        Args:
            items (DataFrame): dataframe containing the items
        Returns:
            DataFrame: dataframe with feature vector appended
        """
        items, metadata = self._process_item_features(items)
        # Combine all features into one column
        columns = items.columns.tolist()
        for col in columns:
            items[col] = items[col].fillna("").apply(set)
        items["tags"] = items.apply(lambda x: list(
            set.union(*([x[col] for col in columns]))), axis=1)
        if "tags" in columns:
            columns.remove("tags")
        items = items.drop(columns, axis=1)
        # Compute one-hot encoded vector of tags
        mlb = MultiLabelBinarizer(sparse_output=self.sparse)
        if self.sparse:
            items = items.join(DataFrame.sparse.from_spmatrix(mlb.fit_transform(items.pop(
                "tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
        else:
            items = items.join(DataFrame(mlb.fit_transform(items.pop(
                "tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
        return items, metadata
    def generate_recommendations(self, amount=10, read_max=None) -> None:
        """Generate recommendations based on user review data
        Args:
            amount (int, optional): Amount of items to recommend. Defaults to 10.
            read_max (int, optional): Max amount of users to read. Defaults to None.
        """
        items = self.items
        df = self.train.iloc[:read_max].copy(deep=True) if read_max else self.train
        # Drop id so only feature vector is left
        if self.sparse:
            X = scipy.sparse.csr_matrix(items.values)
        else:
            X = np.array(items.values)
        if self.tfidf:
            # Use tf-idf
            X = self.tfidf.fit_transform(X)
        if self.normalize:
            X = self.normalizer.fit_transform(X)
        # Transformed feature vector back into items
        if self.sparse:
            items = DataFrame.sparse.from_spmatrix(X)
        else:
            items = DataFrame(X)
        self.method.set_params(n_neighbors=amount)
        nbrs = self.method.fit(X)
        recommendation_list = []
        for index, row in tqdm(df.iterrows()):
            # Compute uservector and recommendations for all users
            owned_items = items.iloc[row["item_id"], :]
            # If user has no items, no usable data is available
            assert not owned_items.empty
            # Computing average, assuming all user items are indication of interest
            user_vector = owned_items.mean()
            if self.normalize:
                user_vector = self.normalizer.transform([user_vector.to_numpy()])
            else:
                user_vector = [user_vector.to_numpy()]
            # Start overhead of 20%
            gen_am = amount // 5
            recommendations = []
            while len(recommendations) < amount:
                # calculate amount of items to be generated
                gen_am += amount - len(recommendations)
                nns = nbrs.kneighbors(user_vector, gen_am, return_distance=True)
                # Filter out items in the training set (parameter renamed from
                # `id`, which shadowed the builtin)
                recommendations = list(filter(lambda item: item not in row["item_id"], nns[1][0]))
            recommendation_list.append(recommendations[:amount])
        df["recommendations"] = recommendation_list
        self.recommendations = df
class ImprovedRecommender(ContentBasedRecommender):
    def __init__(self, items_path: str, train_path: str, test_path: str, val_path: str, reviews_path: str, sparse: bool = True, dim_red=None, tfidf='default', normalize:bool=False, columns:list=["specs", "publisher", "developer", "tags"], weighting_scheme={}) -> None:
        """Improved content based recommender
        Extends ContentBasedRecommender with optional dimensionality reduction
        and a per-item weighting scheme (playtime, review sentiment, explicit
        user reviews) when averaging a user's item vectors.
        Args:
            items_path (str): Path to pickle file containing the items
            train_path (str): Path to train data parquet file
            test_path (str): Path to test data parquet file
            val_path (str): Path to validation data parquet file
            reviews_path (str): Path to reviews parquet file
            sparse (bool, optional): If sparse representation should be used. Defaults to True.
            dim_red (Object, optional): Which dimensionality reduction method to use (any
                object with fit_transform). Defaults to None.
            tfidf (str, optional): Which tf-idf method to use. Defaults to 'default'.
            normalize (bool, optional): If normalization should be used. Defaults to False.
            columns (list, optional): Columns to use for feature representation.
                Defaults to ["specs", "publisher", "developer", "tags"].
                NOTE(review): mutable default arguments (columns, weighting_scheme) —
                harmless here since they are never mutated, but fragile.
            weighting_scheme (dict, optional): dict with keys 'playtime' (bool),
                'sentiment' ('rating'|'n_reviews'|'mixed'|False) and 'reviews' (bool);
                empty/falsy disables weighting. See set_weighting_scheme.
        """
        self.dim_red = dim_red
        self.reviews = pd.read_parquet(reviews_path)
        if weighting_scheme:
            self.set_weighting_scheme(weighting_scheme)
        else:
            self.weighting_scheme = None
        super(ImprovedRecommender, self).__init__(items_path, train_path, test_path, val_path, sparse, tfidf, normalize, columns)
    def set_weighting_scheme(self, weighting_scheme):
        """Validate and store the weighting scheme; stores None when all
        components are disabled so the fast unweighted path is taken."""
        assert all([key in weighting_scheme for key in ['playtime', 'sentiment', 'reviews']])
        assert isinstance(weighting_scheme['playtime'], bool)
        assert isinstance(weighting_scheme['reviews'], bool)
        assert weighting_scheme['sentiment'] in ['rating', 'n_reviews', 'mixed', False]
        if not (weighting_scheme['sentiment'] or weighting_scheme['reviews'] or weighting_scheme['playtime']):
            weighting_scheme = None
        self.weighting_scheme = weighting_scheme
    def generate_recommendations(self, amount=10, read_max=None, seed=42069, silence=False) -> None:
        """Generate recommendations based on user review data
        Args:
            amount (int, optional): Amount of items to recommend. Defaults to 10.
            read_max (int, optional): Max amount of users to read (random sample). Defaults to None.
            seed (int, optional): Random state for the user sample. Defaults to 42069.
            silence (bool, optional): Suppress the progress bars. Defaults to False.
        """
        items = self.items
        training_data = self.train.sample(n=read_max, random_state=seed) if read_max else self.train
        # training_data = self.train.iloc[:read_max].copy(deep=True) if read_max else self.train
        if self.sparse and self.dim_red:
            # Dimensionality reduction yields dense output; fall back to dense.
            self.sparse = False
            warnings.warn("Sparse was set to 'True' but dimensionality reduction is used, using dense matrix representation instead.", RuntimeWarning)
        # Drop id so only feature vector is left
        if self.sparse:
            X = scipy.sparse.csr_matrix(items.values)
        else:
            X = np.array(items.values)
        if self.tfidf:
            # Use tf-idf
            X = self.tfidf.fit_transform(X)
            if not self.sparse:
                X = X.todense()
        if self.dim_red:
            # Use dimensionality reduction
            X = self.dim_red.fit_transform(X)
        if self.normalize:
            X = self.normalizer.fit_transform(X)
        # Combine transformed feature vector back into items
        items = X
        self.method.set_params(n_neighbors=amount)
        nbrs = self.method.fit(X)
        recommendation_list = []
        # One (weighted) mean feature vector per user.
        user_matrix = np.zeros([training_data.shape[0], items.shape[1]])
        i = 0
        # itertuples yields (index, item_id, playtime_forever, playtime_2weeks,
        # normalized_playtime_forever_sum, normalized_playtime_forever_max) —
        # the column order produced by _preprocess_train.
        for index, it_ids, time_for, time_2w, n_time_for_sum, n_time_for_max in tqdm(training_data.itertuples(), disable=silence):
            # Compute uservector and recommendations for all users
            inventory_items = items[it_ids]
            user_vector = None
            weights = None
            # if self.weighting_scheme and inventory_items.shape[0] >= 5:
            if self.weighting_scheme:
                # Start from max-normalised playtime weights; 'feedback' marks
                # items whose weight comes from an explicit review.
                weight_info = pd.DataFrame({'playtime_weights': n_time_for_max.tolist(), 'weight': 1, 'feedback': False, 'sentiment': 1}, index=it_ids)
                if self.weighting_scheme['sentiment'] in ['rating', 'mixed']:
                    weight_info['sentiment'] *= self.metadata.iloc[it_ids]['sentiment_rating']
                if self.weighting_scheme['sentiment'] in ['n_reviews', 'mixed']:
                    weight_info['sentiment'] *= self.metadata.iloc[it_ids]['sentiment_n_reviews']
                if self.weighting_scheme['reviews'] and index in self.reviews.index:
                    # use explicit feedback
                    for like, review in zip(self.reviews.at[index, 'recommend'], self.reviews.at[index, 'reviews']):
                        if review in weight_info.index:
                            weight_info.at[review, 'weight'] = 1 if like else 0
                            weight_info.at[review, 'feedback'] = True
                    if weight_info[weight_info['weight'] == 1].empty: # this ensures that sentiment is used if user has disliked all of its items in the training data
                        weight_info['feedback'] = False
                        weight_info['weight'] = 1
                # use implicit feedback where explicit is not defined
                if self.weighting_scheme['sentiment']:
                    weight_info['weight'] = np.where(weight_info['feedback'] == False, weight_info['sentiment'], weight_info['weight'])
                if self.weighting_scheme['playtime']:
                    weight_info['weight'] *= np.where(weight_info['feedback'] == False, weight_info['playtime_weights'], np.ones(inventory_items.shape[0]))
                weights = weight_info['weight'].to_numpy()
            # Compute mean of item features (weighted when feedback is used)
            if self.sparse and not self.dim_red:
                inventory_items = inventory_items.toarray()
            user_vector = np.average(inventory_items, weights=weights, axis=0)
            user_matrix[i] = user_vector
            i += 1
        if self.normalize:
            user_matrix = self.normalizer.transform(user_matrix)
        for i, user_vector in tqdm(enumerate(user_matrix), disable=silence):
            # Start overhead of 20%
            gen_am = amount//5
            recommendations = []
            while len(recommendations) < amount:
                # calculate amount of items to be generated
                gen_am += amount - len(recommendations)
                nns = nbrs.kneighbors([user_vector], gen_am, return_distance=True)
                # Filter out items in reviews
                recommendations = list(filter(lambda id: id not in training_data.iat[i, 0], nns[1][0]))
            recommendation_list.append(recommendations[:amount])
        self.recommendations = pd.concat((training_data, DataFrame({'recommendations': recommendation_list}, index=training_data.index)), axis=1)
class PopBasedRecommender(BaseRecommender):
    def __init__(self, train_path: str, test_path: str, val_path: str) -> None:
        """Popularity based recommender
        Args:
            train_path (str): Path to train data parquet file
            test_path (str): Path to test data parquet file
            val_path (str): Path to validation data parquet file
        """
        self.train = pd.read_parquet(train_path)
        self.test = pd.read_parquet(test_path)
        self.val = pd.read_parquet(val_path)
    def generate_recommendations(self, amount:int=10, read_max:int=None) -> None:
        """Generates recommendations based on popularity of the items
        Each user is recommended the globally most-owned items that are not
        already in their inventory.
        Args:
            amount (int, optional): Amount of items to recommend. Defaults to 10.
            read_max (int, optional): Max amount of users to read. Defaults to None.
        """
        df = self.train.iloc[:read_max].copy(deep=True) if read_max else self.train
        # Global popularity ranking: explode all inventories, count occurrences.
        n_game_pop = df["item_id"].explode()
        n_game_pop.dropna(inplace=True)
        ranked = list(n_game_pop.value_counts().index)
        def _top_unowned(owned):
            # FIX: test membership against a set (O(1) per candidate) instead
            # of scanning the user's item list for every catalogue item.
            owned_set = set(owned)
            return [rec for rec in ranked if rec not in owned_set][:amount]
        df["recommendations"] = df["item_id"].apply(_top_unowned)
        self.recommendations = df
| [
"pandas.DataFrame",
"numpy.average",
"pandas.DataFrame.sparse.from_spmatrix",
"numpy.log2",
"numpy.float32",
"numpy.zeros",
"numpy.ones",
"sklearn.preprocessing.MultiLabelBinarizer",
"scipy.sparse.csr_matrix",
"sklearn.neighbors.NearestNeighbors",
"pandas.read_parquet",
"numpy.array",
"numpy... | [((1151, 1162), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1160, 1162), False, 'from pandas import DataFrame\n'), ((5028, 5055), 'pandas.read_parquet', 'pd.read_parquet', (['train_path'], {}), '(train_path)\n', (5043, 5055), True, 'import pandas as pd\n'), ((5076, 5102), 'pandas.read_parquet', 'pd.read_parquet', (['test_path'], {}), '(test_path)\n', (5091, 5102), True, 'import pandas as pd\n'), ((5122, 5147), 'pandas.read_parquet', 'pd.read_parquet', (['val_path'], {}), '(val_path)\n', (5137, 5147), True, 'import pandas as pd\n'), ((9394, 9416), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'copy': '(False)'}), '(copy=False)\n', (9404, 9416), False, 'from sklearn.preprocessing import MultiLabelBinarizer, Normalizer\n'), ((10087, 10157), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(10)', 'algorithm': 'algorithm', 'metric': '"""cosine"""'}), "(n_neighbors=10, algorithm=algorithm, metric='cosine')\n", (10103, 10157), False, 'from sklearn.neighbors import BallTree, KDTree, NearestNeighbors\n'), ((11572, 11618), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'sparse_output': 'self.sparse'}), '(sparse_output=self.sparse)\n', (11591, 11618), False, 'from sklearn.preprocessing import MultiLabelBinarizer, Normalizer\n'), ((15758, 15787), 'pandas.read_parquet', 'pd.read_parquet', (['reviews_path'], {}), '(reviews_path)\n', (15773, 15787), True, 'import pandas as pd\n'), ((18203, 18253), 'numpy.zeros', 'np.zeros', (['[training_data.shape[0], items.shape[1]]'], {}), '([training_data.shape[0], items.shape[1]])\n', (18211, 18253), True, 'import numpy as np\n'), ((22058, 22085), 'pandas.read_parquet', 'pd.read_parquet', (['train_path'], {}), '(train_path)\n', (22073, 22085), True, 'import pandas as pd\n'), ((22106, 22132), 'pandas.read_parquet', 'pd.read_parquet', (['test_path'], {}), '(test_path)\n', (22121, 22132), True, 'import pandas as pd\n'), ((22152, 22177), 'pandas.read_parquet', 
'pd.read_parquet', (['val_path'], {}), '(val_path)\n', (22167, 22177), True, 'import pandas as pd\n'), ((810, 836), 'pandas.read_pickle', 'pd.read_pickle', (['items_path'], {}), '(items_path)\n', (824, 836), True, 'import pandas as pd\n'), ((954, 981), 'pandas.read_parquet', 'pd.read_parquet', (['train_path'], {}), '(train_path)\n', (969, 981), True, 'import pandas as pd\n'), ((1003, 1029), 'pandas.read_parquet', 'pd.read_parquet', (['test_path'], {}), '(test_path)\n', (1018, 1029), True, 'import pandas as pd\n'), ((1072, 1097), 'pandas.read_parquet', 'pd.read_parquet', (['val_path'], {}), '(val_path)\n', (1087, 1097), True, 'import pandas as pd\n'), ((9577, 9631), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(False)', 'sublinear_tf': '(False)'}), '(smooth_idf=False, sublinear_tf=False)\n', (9593, 9631), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((12562, 12599), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['items.values'], {}), '(items.values)\n', (12585, 12599), False, 'import scipy\n'), ((12630, 12652), 'numpy.array', 'np.array', (['items.values'], {}), '(items.values)\n', (12638, 12652), True, 'import numpy as np\n'), ((12933, 12966), 'pandas.DataFrame.sparse.from_spmatrix', 'DataFrame.sparse.from_spmatrix', (['X'], {}), '(X)\n', (12963, 12966), False, 'from pandas import DataFrame\n'), ((13001, 13013), 'pandas.DataFrame', 'DataFrame', (['X'], {}), '(X)\n', (13010, 13013), False, 'from pandas import DataFrame\n'), ((17300, 17448), 'warnings.warn', 'warnings.warn', (['"""Sparse was set to \'True\' but dimensionality reduction is used, using dense matrix representation instead."""', 'RuntimeWarning'], {}), '(\n "Sparse was set to \'True\' but dimensionality reduction is used, using dense matrix representation instead."\n , RuntimeWarning)\n', (17313, 17448), False, 'import warnings\n'), ((17529, 17566), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['items.values'], 
{}), '(items.values)\n', (17552, 17566), False, 'import scipy\n'), ((17597, 17619), 'numpy.array', 'np.array', (['items.values'], {}), '(items.values)\n', (17605, 17619), True, 'import numpy as np\n'), ((20662, 20714), 'numpy.average', 'np.average', (['inventory_items'], {'weights': 'weights', 'axis': '(0)'}), '(inventory_items, weights=weights, axis=0)\n', (20672, 20714), True, 'import numpy as np\n'), ((9689, 9742), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(True)', 'sublinear_tf': '(False)'}), '(smooth_idf=True, sublinear_tf=False)\n', (9705, 9742), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((21569, 21647), 'pandas.DataFrame', 'DataFrame', (["{'recommendations': recommendation_list}"], {'index': 'training_data.index'}), "({'recommendations': recommendation_list}, index=training_data.index)\n", (21578, 21647), False, 'from pandas import DataFrame\n'), ((2781, 2794), 'numpy.float32', 'np.float32', (['p'], {}), '(p)\n', (2791, 2794), True, 'import numpy as np\n'), ((9803, 9856), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(False)', 'sublinear_tf': '(True)'}), '(smooth_idf=False, sublinear_tf=True)\n', (9819, 9856), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((20088, 20183), 'numpy.where', 'np.where', (["(weight_info['feedback'] == False)", "weight_info['sentiment']", "weight_info['weight']"], {}), "(weight_info['feedback'] == False, weight_info['sentiment'],\n weight_info['weight'])\n", (20096, 20183), True, 'import numpy as np\n'), ((9924, 9976), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(True)', 'sublinear_tf': '(True)'}), '(smooth_idf=True, sublinear_tf=True)\n', (9940, 9976), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((20355, 20388), 'numpy.ones', 'np.ones', (['inventory_items.shape[0]'], {}), 
'(inventory_items.shape[0])\n', (20362, 20388), True, 'import numpy as np\n'), ((6672, 6686), 'numpy.log2', 'np.log2', (['(i + 2)'], {}), '(i + 2)\n', (6679, 6686), True, 'import numpy as np\n'), ((4241, 4272), 'numpy.array', 'np.array', (["x['playtime_forever']"], {}), "(x['playtime_forever'])\n", (4249, 4272), True, 'import numpy as np\n'), ((4275, 4305), 'numpy.array', 'np.array', (["x['playtime_2weeks']"], {}), "(x['playtime_2weeks'])\n", (4283, 4305), True, 'import numpy as np\n'), ((4487, 4518), 'numpy.array', 'np.array', (["x['playtime_forever']"], {}), "(x['playtime_forever'])\n", (4495, 4518), True, 'import numpy as np\n'), ((4521, 4551), 'numpy.array', 'np.array', (["x['playtime_2weeks']"], {}), "(x['playtime_2weeks'])\n", (4529, 4551), True, 'import numpy as np\n'), ((6818, 6832), 'numpy.log2', 'np.log2', (['(i + 2)'], {}), '(i + 2)\n', (6825, 6832), True, 'import numpy as np\n'), ((4326, 4357), 'numpy.array', 'np.array', (["x['playtime_forever']"], {}), "(x['playtime_forever'])\n", (4334, 4357), True, 'import numpy as np\n'), ((4360, 4390), 'numpy.array', 'np.array', (["x['playtime_2weeks']"], {}), "(x['playtime_2weeks'])\n", (4368, 4390), True, 'import numpy as np\n'), ((4572, 4603), 'numpy.array', 'np.array', (["x['playtime_forever']"], {}), "(x['playtime_forever'])\n", (4580, 4603), True, 'import numpy as np\n'), ((4606, 4636), 'numpy.array', 'np.array', (["x['playtime_2weeks']"], {}), "(x['playtime_2weeks'])\n", (4614, 4636), True, 'import numpy as np\n')] |
import collections
import copy
import dataclasses
import pathlib
import madgrad
import numpy as np
import sklearn
import sklearn.metrics
import sklearn.utils
import torch
from fastprogress.fastprogress import progress_bar
import mabe.data
import mabe.loss
import mabe.model
@dataclasses.dataclass
class TrainingConfig:
    """Hyperparameters and switches for one cross-validation training run."""

    # Index of the cross-validation split to train on.
    split_idx: int
    # HDF5 file with precomputed input features.
    # NOTE(review): relies on `mabe.config` being reachable through the `mabe`
    # package (only `mabe.data` etc. are imported here) — confirm.
    feature_path: pathlib.Path = mabe.config.ROOT_PATH / "features.hdf5"
    batch_size: int = 32
    num_epochs: int = 40
    # Length of the subsampled sequence window fed to the model.
    subsample_length: int = 256
    # CPC model sizes (see Trainer.__init__ / mabe.model.ConvCPC).
    num_embeddings: int = 128
    num_context: int = 512
    num_ahead: int = 32 * 8
    num_ahead_subsampling: int = 32
    num_embedder_blocks: int = 3
    # Dropout rates passed to the ConvCPC model.
    input_dropout: float = 0.0
    head_dropout: float = 0.0
    dropout: float = 0.0
    clf_loss_scaling: float = 1.0
    # smooth_eps for the label-smoothed cross-entropy losses.
    label_smoothing: float = 0.2
    # "SGD" or "MADGRAD" (dispatched in Trainer.__init__).
    optimizer: str = "SGD"
    learning_rate: float = 0.01
    weight_decay: float = 1e-4
    # "cosine_annealing" or "none" (dispatched in Trainer.__init__).
    scheduler: str = "cosine_annealing"
    # Presumably magnitude of random input noise for augmentation — usage not
    # visible in this file; confirm against the training loop.
    augmentation_random_noise: float = 0.0
    use_extra_features: bool = False
    # Scalers for the auxiliary loss terms.
    extra_task_loss_scaler: float = 0.1
    dark_annotator_loss_scaler: float = 0.2
    dark_knowledge_loss_scaler: float = 0.5
    fade_out_dark_knowledge: bool = False
    # When True, only 2 batches per epoch are run (smoke test).
    test_run: bool = False
    # Apply label smoothing to the extra (task 3) losses as well.
    label_smoothing_task3: bool = True
    use_best_task0: bool = False
@dataclasses.dataclass
class TrainingResult:
    """Artifacts collected from one completed training run."""

    # The configuration the run was trained with.
    config: TrainingConfig
    # CPC training losses over the run.
    losses: list
    # Per-task classification losses and validation F1 histories.
    clf_losses: dict
    clf_val_f1s: dict
    # Best validation F1 and corresponding parameter snapshots, keyed by task.
    best_val_f1: dict
    best_params: dict
    # Model parameters at the end of training.
    final_params: tuple
    # Test-set predictions and raw logits.
    test_predictions: dict
    test_logits: dict
    task3_test_logits: dict
    # Parameter snapshots taken after each epoch.
    params_by_epoch: list
class Trainer:
    """Trains a ConvCPC encoder plus a multi-annotator classification head.

    Holds the models, optimizer, scheduler, data split and per-task
    bookkeeping (losses, validation F1 scores, best parameter snapshots)
    for one cross-validation run.
    """

    config: TrainingConfig
    cpc: mabe.model.ConvCPC
    logreg: mabe.model.MultiAnnotatorLogisticRegressionHead
    # Only set when config.scheduler == "cosine_annealing".
    scheduler: torch.optim.lr_scheduler._LRScheduler
    optimizer: torch.optim.Optimizer
    split: mabe.data.CVSplit
    data: mabe.data.DataWrapper
    device: str
    # One CrossEntropyLoss per classification task (index 0 = main task).
    clf_loss: list
    dark_clf_loss: torch.nn.modules.loss._Loss
    losses: list
    # Per-task training losses, keyed by task index.
    clf_losses: dict
    # Was annotated `dict`, but __init__ initialises it as a list.
    dark_losses: list
    # Per-task validation F1 history, keyed by task index.
    clf_val_f1s: dict
    # Per-task (cpc params, logreg params) snapshots of the best epoch.
    best_params: dict
    best_val_f1: dict
    params_by_epoch: list
def __init__(self, config, data, split, device):
self.config = config
self.data = data
self.split = split
self.device = device
if config.test_run:
self.batches_per_epoch = 2
else:
self.batches_per_epoch = int(
sum(data.sample_lengths) / config.subsample_length / config.batch_size
)
self.num_extra_clf_tasks = (
len(np.unique(data.clf_tasks)) - 2
) # task12 clf and -1 for test data
self.num_features = data.X[0].shape[-1]
self.cpc = mabe.model.ConvCPC(
self.num_features,
config.num_embeddings,
config.num_context,
config.num_ahead,
config.num_ahead_subsampling,
config.subsample_length,
num_embedder_blocks=config.num_embedder_blocks,
input_dropout=config.input_dropout,
head_dropout=config.head_dropout,
dropout=config.dropout,
split_idx=config.split_idx,
num_extra_features=data.num_extra_features,
).to(device)
self.logreg = mabe.model.MultiAnnotatorLogisticRegressionHead(
config.num_context,
data.num_annotators,
data.num_extra_features,
self.num_extra_clf_tasks,
).to(device)
if config.optimizer == "SGD":
self.optimizer = torch.optim.SGD(
list(self.cpc.parameters()) + list(self.logreg.parameters()),
weight_decay=config.weight_decay,
lr=config.learning_rate,
momentum=0.9,
nesterov=True,
)
elif config.optimizer == "MADGRAD":
self.optimizer = madgrad.MADGRAD(
list(self.cpc.parameters()) + list(self.logreg.parameters()),
weight_decay=config.weight_decay,
lr=config.learning_rate,
)
self.clf_loss = []
for task in range(self.num_extra_clf_tasks + 1):
task_indices = np.argwhere(data.clf_tasks_labeled == task).flatten()
task_train_Y = np.concatenate([data.Y_labeled[i] for i in task_indices]).astype(np.int)
# TODO: class_weights only on train samples?
class_weights = sklearn.utils.class_weight.compute_class_weight(
"balanced", classes=np.unique(task_train_Y), y=task_train_Y
)
_, class_counts = np.unique(task_train_Y, return_counts=True)
p_class = class_counts / np.sum(class_counts)
if config.label_smoothing_task3:
self.clf_loss.append(
mabe.loss.CrossEntropyLoss(
weight=torch.from_numpy(class_weights).to(device).float(),
ignore_index=-1,
smooth_eps=config.label_smoothing,
smooth_dist=torch.from_numpy(p_class).to(device).float(),
).to(device)
)
else:
self.clf_loss.append(
mabe.loss.CrossEntropyLoss(
weight=torch.from_numpy(class_weights).to(device).float(),
ignore_index=-1,
).to(device)
)
# TODO: weight?
self.dark_clf_loss = mabe.loss.CrossEntropyLoss().to(device)
if config.scheduler == "cosine_annealing":
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, config.num_epochs * self.batches_per_epoch
)
elif config.scheduler == "none":
pass
else:
assert False
self.losses = []
self.clf_losses = collections.defaultdict(list)
self.dark_losses = []
self.clf_val_f1s = collections.defaultdict(list)
self.best_params = {}
self.best_val_f1 = {}
self.best_val_f1_combined = 0.0
self.params_by_epoch = []
for task in range(self.num_extra_clf_tasks + 1):
self.best_params[task] = (
self.get_cpu_params(self.cpc),
self.get_cpu_params(self.logreg),
)
def validation_f1(self, task: int):
with torch.no_grad():
cpc = self.cpc.eval()
logreg = self.logreg.eval()
predictions = []
labels = []
annotators = []
crop_pre, crop_post = cpc.get_crops(self.device)
def add_padding(seq):
return np.concatenate(
(
np.zeros_like(seq)[:crop_pre],
seq,
np.zeros_like(seq)[:crop_post],
)
)
with torch.no_grad():
for idx in self.split.val_indices_labeled:
if self.data.clf_tasks[idx] != task:
continue
y = self.data.Y_labeled[idx]
a = np.array([self.data.annotators_labeled[idx]]).repeat(len(y))
if task > 0:
assert np.all(a == 0) # only first annotator for task 3
x = add_padding(self.data.X_labeled[idx].astype(np.float32))
if self.config.use_extra_features:
x_extra = self.data.X_labeled_extra[idx].astype(np.float32)
x_extra = torch.from_numpy(x_extra).to(self.device, non_blocking=True)
x = torch.transpose(torch.from_numpy(x[None, :, :]), 2, 1).to(
self.device, non_blocking=True
)
x_emb = cpc.embedder(x)
c = cpc.apply_contexter(x_emb, self.device)
logreg_features = c[0].T
l = logreg(logreg_features, x_extra, a, task)
p = torch.argmax(l, dim=-1)
predictions.append(p.cpu().numpy())
labels.append(y)
annotators.append(a)
if len(predictions):
annotators = np.concatenate(annotators).astype(np.int)
predictions = np.concatenate(predictions).astype(np.int)
labels_array = np.concatenate(labels).astype(np.int)
if task == 0:
# validation loss only for first annotator
predictions = predictions[annotators == 0]
labels_array = labels_array[annotators == 0]
return mabe.loss.macro_f1_score(labels_array, predictions, 4)
else:
assert labels_array.max() == 1
assert labels_array.min() == 0
# calculate F1 score for behavior, not macro F1
# return mabe.loss.f1_score_for_label(labels_array, predictions, 2, 1)
return sklearn.metrics.f1_score(labels_array, predictions)
else:
return None
    def clf_task_loss(
        self,
        batch,
        X_extra_batch,
        Y_batch,
        contexts,
        annotators_batch,
        task,
    ):
        """Supervised cross-entropy loss of the logreg head for one task.

        Selects the batch sequences labeled for *task*, flattens their labels
        and scores the logreg logits with the class-weighted loss built at
        construction time.
        """
        has_train_labels = batch.clf_tasks == task
        Y_batch_flat = Y_batch[has_train_labels].flatten().long()
        valids = Y_batch_flat >= 0
        # The batch must contain at least one labeled frame for this task.
        assert torch.any(valids)
        logreg_features = contexts[has_train_labels]
        annotators_batch = annotators_batch[has_train_labels]
        if self.config.use_extra_features:
            X_extra_batch = X_extra_batch[has_train_labels]
        # Task 0 is the 4-class behavior task; task-3 subtasks are binary.
        num_classes = 4 if task == 0 else 2
        clf_batch_loss = self.clf_loss[task](
            self.logreg(logreg_features, X_extra_batch, annotators_batch, task).reshape(
                -1, num_classes
            ),
            Y_batch_flat,
        )
        # Sanity check: annotator id -1 may only occur together with label -1
        # (the ignore_index of the loss), i.e. on padding positions.
        assert np.all(
            (annotators_batch.flatten() >= 0)
            | ((annotators_batch.flatten() == -1) & (Y_batch_flat.cpu().data.numpy() == -1))
        )
        return clf_batch_loss
    def dark_clf_losses(
        self,
        contexts,
        X_extra_batch,
        Y_batch_dark_behaviors,
        Y_batch_dark_annotators,
        annotators_batch,
    ):
        """Distillation ("dark knowledge") losses against soft targets.

        Returns the sum of a behavior-subtask loss (tasks 1..N, scaled by
        config.extra_task_loss_scaler) and a weighted annotator-average loss
        for the main 4-class task.
        """
        # Keep only sequences that actually carry dark (soft) labels.
        has_dark_labels = (Y_batch_dark_behaviors >= 0.0).sum(dim=(1, 2, 3)) > 0
        logreg_features = contexts[has_dark_labels]
        # NOTE(review): annotator ids are zeroed out for the dark losses —
        # presumably the soft targets correspond to annotator 0; confirm.
        annotators_batch = np.zeros_like(annotators_batch[has_dark_labels.cpu().data.numpy()])
        if self.config.use_extra_features:
            X_extra_batch = X_extra_batch[has_dark_labels]
        dark_behavior_losses_batch = []
        num_classes = 2
        for task in range(1, self.num_extra_clf_tasks + 1):
            behavior = task - 1
            behavior_logits = Y_batch_dark_behaviors[has_dark_labels][:, :, behavior].reshape(
                -1, num_classes
            )
            dark_clf_batch_loss = self.dark_clf_loss(
                self.logreg(logreg_features, X_extra_batch, annotators_batch, task).reshape(
                    -1, num_classes
                ),
                behavior_logits,
            )
            dark_behavior_losses_batch.append(dark_clf_batch_loss)
        dark_behavior_loss_batch = (
            sum(dark_behavior_losses_batch) * self.config.extra_task_loss_scaler
        )
        dark_annotator_losses_batch = []
        num_classes = 4
        task = 0
        sum_annotators = 0
        for annotator in range(self.data.num_annotators):
            annotator_logits = Y_batch_dark_annotators[has_dark_labels][:, :, annotator].reshape(
                -1, num_classes
            )
            dark_clf_batch_loss = self.dark_clf_loss(
                self.logreg(logreg_features, X_extra_batch, annotators_batch, task).reshape(
                    -1, num_classes
                ),
                annotator_logits,
            )
            # First annotator gets full weight, the others are scaled down.
            scaler = 1 if annotator == 0 else self.config.dark_annotator_loss_scaler
            dark_clf_batch_loss *= scaler
            sum_annotators += scaler
            dark_annotator_losses_batch.append(dark_clf_batch_loss)
        # Weighted mean over annotators.
        dark_annotator_loss_batch = sum(dark_annotator_losses_batch) / sum_annotators
        return dark_behavior_loss_batch + dark_annotator_loss_batch
def train_batch(self, epoch):
self.optimizer.zero_grad()
cpc = self.cpc.train()
batch = self.split.get_train_batch(
self.config.batch_size,
random_noise=self.config.augmentation_random_noise,
extra_features=self.config.use_extra_features,
dark_knowledge=self.config.dark_knowledge_loss_scaler > 0.0,
)
(
contexts,
X_extra_batch,
Y_batch,
Y_batch_dark_behaviors,
Y_batch_dark_annotators,
annotators_batch,
batch_loss,
) = cpc(batch, device=self.device, with_loss=True)
self.losses.append(batch_loss.cpu().item())
batch_clf_task_losses = []
for task in range(self.num_extra_clf_tasks + 1):
task_loss = self.clf_task_loss(
batch,
X_extra_batch,
Y_batch,
contexts,
annotators_batch,
task,
)
self.clf_losses[task].append(task_loss.item())
loss_scaler = 1
if task > 0:
loss_scaler = self.config.extra_task_loss_scaler
task_loss *= loss_scaler
batch_clf_task_losses.append(task_loss)
batch_clf_task_loss = sum(batch_clf_task_losses)
if self.config.dark_knowledge_loss_scaler > 0.0:
dark_knowledge_loss = self.dark_clf_losses(
contexts,
X_extra_batch,
Y_batch_dark_behaviors,
Y_batch_dark_annotators,
annotators_batch,
)
self.dark_losses.append(dark_knowledge_loss.item())
else:
dark_knowledge_loss = 0.0
epoch_scaler = 1.0
if self.config.fade_out_dark_knowledge:
epoch_scaler = (self.config.num_epochs / (epoch + 1)) / self.config.num_epochs
batch_loss = (
batch_loss
+ self.config.clf_loss_scaling * batch_clf_task_loss
+ self.config.dark_knowledge_loss_scaler * dark_knowledge_loss * epoch_scaler
)
batch_loss.backward()
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
@staticmethod
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group["lr"]
def running(self, losses):
return np.mean([i for i in losses[-self.batches_per_epoch :] if i is not None])
    def log_batch(self, bar):
        """Update the progress-bar comment with running losses and best F1s."""
        # Mean running classifier loss over all task-3 subtasks.
        task3_running_clf_loss = []
        for task in range(1, self.num_extra_clf_tasks + 1):
            task3_running_clf_loss.append(self.running(self.clf_losses[task]))
        task3_running_clf_loss = np.mean(task3_running_clf_loss)
        # Mean best validation F1 over subtasks that have validation data
        # (entries are None for subtasks without validation sequences).
        task3_running_val_f1 = []
        for task in range(1, self.num_extra_clf_tasks + 1):
            task3_running_val_f1.append(self.best_val_f1[task])
        task3_running_val_f1 = np.mean([i for i in task3_running_val_f1 if i is not None])
        bar.comment = (
            f"Train: {self.running(self.losses):.3f} | "
            + f"CLF Train: {self.running(self.clf_losses[0]):.3f} | "
            + f"CLF Train [T3]: {task3_running_clf_loss:.3f} | "
            + f"Dark: {self.running(self.dark_losses):.3f} | "
            + f"Best CLF Val F1: {self.best_val_f1[0]:.3f} | "
            + f"Best CLF Val F1 [T3]: {task3_running_val_f1:.3f} | "
            + f"LR: {self.get_lr(self.optimizer):.4f}"
        )
@staticmethod
def get_cpu_params(model):
return copy.deepcopy({k: v.cpu().detach() for k, v in model.state_dict().items()})
    def finalize_epoch(self):
        """Validate every task, track per-task best params/F1, snapshot params."""
        for task in range(self.num_extra_clf_tasks + 1):
            val_f1 = self.validation_f1(task)
            if val_f1 is not None:
                if val_f1 > self.best_val_f1[task]:
                    # New best for this task: keep a CPU copy of both models.
                    self.best_params[task] = (
                        self.get_cpu_params(self.cpc),
                        self.get_cpu_params(self.logreg),
                    )
                self.best_val_f1[task] = max(val_f1, self.best_val_f1[task])
            self.clf_val_f1s[task].append(val_f1)
        # mean of validation f1s for all task3 subtasks
        """
        val_f1 = np.mean(
            [
                self.clf_val_f1s[task][-1]
                for task in range(1, self.num_extra_clf_tasks + 1)
                if self.clf_val_f1s[task][-1] is not None
            ]
        )
        if val_f1 > self.best_val_f1_combined:
            # no validation data for task 3.3, use mean of other task 3 subtasks
            task = 4
            self.best_params[task] = (
                self.get_cpu_params(self.cpc),
                self.get_cpu_params(self.logreg),
            )
            self.best_val_f1_combined = val_f1
        """
        # Per-epoch parameter snapshot (for post-hoc model selection).
        self.params_by_epoch.append(
            (
                self.get_cpu_params(self.cpc),
                self.get_cpu_params(self.logreg),
            )
        )
    def get_result(self) -> TrainingResult:
        """Bundle training history, best/final parameters and test predictions."""
        final_params = (
            self.get_cpu_params(self.cpc),
            self.get_cpu_params(self.logreg),
        )
        # Test-time inference uses the best parameters tracked per task.
        test_predictions, test_logits, task3_test_logits = mabe.util.predict_test_data(
            self.cpc, self.logreg, self.data, self.device, self.config, self.best_params
        )
        result = TrainingResult(
            config=self.config,
            losses=self.losses,
            clf_losses=self.clf_losses,
            clf_val_f1s=self.clf_val_f1s,
            best_val_f1=self.best_val_f1,
            best_params=self.best_params,
            params_by_epoch=self.params_by_epoch,
            final_params=final_params,
            test_predictions=test_predictions,
            test_logits=test_logits,
            task3_test_logits=task3_test_logits,
        )
        return result
    def train_model(self) -> TrainingResult:
        """Full training loop: initial validation, epochs of batches, result."""
        # Seed the best-F1 bookkeeping with the untrained model's scores
        # (entries may be None for tasks without validation data).
        for task in range(self.num_extra_clf_tasks + 1):
            val_f1 = self.validation_f1(task)
            self.best_val_f1[task] = val_f1
            self.clf_val_f1s[task].append(val_f1)
        # use combined bar for epochs and batches
        bar = progress_bar(range(self.config.num_epochs * self.batches_per_epoch))
        bar_iter = iter(bar)
        for i_epoch in range(self.config.num_epochs):
            for _ in range(self.batches_per_epoch):
                next(bar_iter)
                self.train_batch(i_epoch)
                self.log_batch(bar)
            self.finalize_epoch()
        return self.get_result()
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.concatenate",
"torch.argmax",
"torch.any",
"numpy.all",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"collections.defaultdict",
"sklearn.metrics.f1_score",
"numpy.mean",
"numpy.array",
"numpy.argwhere",
"torch.no_grad",
"numpy.unique",
"torch.from... | [((5868, 5897), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5891, 5897), False, 'import collections\n'), ((5955, 5984), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5978, 5984), False, 'import collections\n'), ((9423, 9440), 'torch.any', 'torch.any', (['valids'], {}), '(valids)\n', (9432, 9440), False, 'import torch\n'), ((14785, 14856), 'numpy.mean', 'np.mean', (['[i for i in losses[-self.batches_per_epoch:] if i is not None]'], {}), '([i for i in losses[-self.batches_per_epoch:] if i is not None])\n', (14792, 14856), True, 'import numpy as np\n'), ((15097, 15128), 'numpy.mean', 'np.mean', (['task3_running_clf_loss'], {}), '(task3_running_clf_loss)\n', (15104, 15128), True, 'import numpy as np\n'), ((15319, 15378), 'numpy.mean', 'np.mean', (['[i for i in task3_running_val_f1 if i is not None]'], {}), '([i for i in task3_running_val_f1 if i is not None])\n', (15326, 15378), True, 'import numpy as np\n'), ((4582, 4625), 'numpy.unique', 'np.unique', (['task_train_Y'], {'return_counts': '(True)'}), '(task_train_Y, return_counts=True)\n', (4591, 4625), True, 'import numpy as np\n'), ((5586, 5693), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['self.optimizer', '(config.num_epochs * self.batches_per_epoch)'], {}), '(self.optimizer, config.\n num_epochs * self.batches_per_epoch)\n', (5628, 5693), False, 'import torch\n'), ((6381, 6396), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6394, 6396), False, 'import torch\n'), ((2548, 2573), 'numpy.unique', 'np.unique', (['data.clf_tasks'], {}), '(data.clf_tasks)\n', (2557, 2573), True, 'import numpy as np\n'), ((4663, 4683), 'numpy.sum', 'np.sum', (['class_counts'], {}), '(class_counts)\n', (4669, 4683), True, 'import numpy as np\n'), ((6910, 6925), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6923, 6925), False, 'import torch\n'), ((9007, 9058), 
'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels_array', 'predictions'], {}), '(labels_array, predictions)\n', (9031, 9058), False, 'import sklearn\n'), ((4173, 4216), 'numpy.argwhere', 'np.argwhere', (['(data.clf_tasks_labeled == task)'], {}), '(data.clf_tasks_labeled == task)\n', (4184, 4216), True, 'import numpy as np\n'), ((4254, 4311), 'numpy.concatenate', 'np.concatenate', (['[data.Y_labeled[i] for i in task_indices]'], {}), '([data.Y_labeled[i] for i in task_indices])\n', (4268, 4311), True, 'import numpy as np\n'), ((4497, 4520), 'numpy.unique', 'np.unique', (['task_train_Y'], {}), '(task_train_Y)\n', (4506, 4520), True, 'import numpy as np\n'), ((8046, 8069), 'torch.argmax', 'torch.argmax', (['l'], {'dim': '(-1)'}), '(l, dim=-1)\n', (8058, 8069), False, 'import torch\n'), ((8260, 8286), 'numpy.concatenate', 'np.concatenate', (['annotators'], {}), '(annotators)\n', (8274, 8286), True, 'import numpy as np\n'), ((8328, 8355), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (8342, 8355), True, 'import numpy as np\n'), ((8398, 8420), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (8412, 8420), True, 'import numpy as np\n'), ((7275, 7289), 'numpy.all', 'np.all', (['(a == 0)'], {}), '(a == 0)\n', (7281, 7289), True, 'import numpy as np\n'), ((6736, 6754), 'numpy.zeros_like', 'np.zeros_like', (['seq'], {}), '(seq)\n', (6749, 6754), True, 'import numpy as np\n'), ((6820, 6838), 'numpy.zeros_like', 'np.zeros_like', (['seq'], {}), '(seq)\n', (6833, 6838), True, 'import numpy as np\n'), ((7150, 7195), 'numpy.array', 'np.array', (['[self.data.annotators_labeled[idx]]'], {}), '([self.data.annotators_labeled[idx]])\n', (7158, 7195), True, 'import numpy as np\n'), ((7579, 7604), 'torch.from_numpy', 'torch.from_numpy', (['x_extra'], {}), '(x_extra)\n', (7595, 7604), False, 'import torch\n'), ((7681, 7712), 'torch.from_numpy', 'torch.from_numpy', (['x[None, :, :]'], {}), '(x[None, :, :])\n', (7697, 
7712), False, 'import torch\n'), ((4847, 4878), 'torch.from_numpy', 'torch.from_numpy', (['class_weights'], {}), '(class_weights)\n', (4863, 4878), False, 'import torch\n'), ((5035, 5060), 'torch.from_numpy', 'torch.from_numpy', (['p_class'], {}), '(p_class)\n', (5051, 5060), False, 'import torch\n'), ((5267, 5298), 'torch.from_numpy', 'torch.from_numpy', (['class_weights'], {}), '(class_weights)\n', (5283, 5298), False, 'import torch\n')] |
# Project Recognition: two-level classification
import time
from statistics import mean
import numpy as np
import pandas as pd
import sklearn.metrics as skm
from sklearn import preprocessing, svm
from sklearn.decomposition import FastICA, PCA
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier
from sklearn.manifold import Isomap
from sklearn.cluster import KMeans
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.model_selection import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
from preprocess import read_and_preprocess
from tfModel import tfModel
## OPTIONS ###
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Experiment switches: TF model vs. sklearn ensemble, optional dimensionality
# reduction and optional cluster-label feature.
useTF = True
usePCA = False
useICA = False
useIsomap = False
useLLE = False
useClustering = False
standardiseClusterLabels = False
nEpochs = 10
nComponents = 14
kNeighbors = 15
nClusters = 6
linkType = 'ward'
# Two reducers of each kind so each game's pipeline is fitted independently.
pca1 = PCA(n_components=nComponents)
pca2 = PCA(n_components=nComponents)
ica1 = FastICA(n_components=nComponents, max_iter=800, random_state=0)
ica2 = FastICA(n_components=nComponents, max_iter=800, random_state=0)
iso1 = Isomap(n_neighbors=kNeighbors, n_components=nComponents)
iso2 = Isomap(n_neighbors=kNeighbors, n_components=nComponents)
lle1 = LocallyLinearEmbedding(n_components=nComponents, random_state=0)
lle2 = LocallyLinearEmbedding(n_components=nComponents, random_state=0)
print(f"Using tensorflow : {useTF}")
print(f"Using PCA : {usePCA}")
print(f"Using ICA : {useICA}")
print(f"Using Isomap : {useIsomap}")
print(f"Using LLE : {useLLE}")
print(f"Using Clustering : {useClustering}")
if usePCA or useICA or useIsomap or useLLE:
    print(f"Number of components {nComponents}")
print("\n")
### Read data ###
# X keeps a "playerID" column, used both for stratification and as the
# second-level classification target.
X, y = read_and_preprocess(True)
kFolds = 4
### First Classification ###
# Train test split
start = time.perf_counter()
skf = StratifiedKFold(n_splits=kFolds, random_state=0, shuffle=True)
iterationNumber = 0
foldAccuracy = []
# k-fold cross-validation, stratified on player identity.
for train_index, test_index in skf.split(X, X['playerID']):
    # Partition sets
    X_train1 = X.iloc[train_index]
    y_train1 = y.iloc[train_index]
    X_test1 = X.iloc[test_index]
    y_test1 = y.iloc[test_index]
    iterationNumber += 1
    # Drop playerID before level-1 training, but keep copies for level 2.
    x_train1PID = X_train1["playerID"]
    X_train1 = X_train1.drop(labels="playerID", axis=1)
    x_test1PID = X_test1["playerID"]
    X_test1 = X_test1.drop(labels="playerID", axis=1)
    # Standardise
    scaler1 = preprocessing.StandardScaler().fit(X_train1)
    X_train1 = pd.DataFrame(scaler1.transform(X_train1.values), columns=X_train1.columns, index=X_train1.index)
    X_test1 = pd.DataFrame(scaler1.transform(X_test1.values), columns=X_test1.columns, index=X_test1.index)
    ### First Classification
    # Level 1: predict which game a swipe belongs to.
    model1 = KNeighborsClassifier(n_neighbors=7).fit(X_train1, y_train1)
    y_predGameTrain = model1.predict(X_train1)
    y_predGameTest = model1.predict(X_test1)
    ### Second classification ###
    X_train1["playerID"] = x_train1PID
    # Group swipes by game
    # NOTE(review): assumes level-1 predicts exactly two distinct games.
    gb = X_train1.groupby(y_predGameTrain)
    groupedByGame = [gb.get_group(x) for x in gb.groups]
    trainGame1 = groupedByGame[0]
    trainGame2 = groupedByGame[1]
    # Game 1: Training
    X_train21 = trainGame1.loc[:, trainGame1.columns != "playerID"]
    y_train21 = trainGame1["playerID"]
    scaler21 = preprocessing.StandardScaler().fit(X_train21)
    X_train21 = pd.DataFrame(scaler21.transform(X_train21.values), columns=X_train21.columns, index=X_train21.index)
    # Models
    # NOTE(review): only mlp..mlp4, forest and svc feed the voting ensemble;
    # the remaining estimators below are unused leftovers.
    tree = DecisionTreeClassifier(criterion='entropy', random_state=0)
    knn = KNeighborsClassifier(n_neighbors=11)
    svc = svm.SVC(gamma=1, kernel='poly')
    svc2 = svm.SVC(gamma=0.5, kernel='poly')
    svc3 = svm.SVC(gamma=1.5, kernel='poly')
    svc4 = svm.SVC(gamma=0.4, kernel='poly')
    svc5 = svm.SVC(gamma=2.5, kernel='poly')
    mlp = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80, 80), max_iter=300, random_state=0)
    mlp2 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80, 80, 80), max_iter=300, random_state=0)
    mlp3 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80), max_iter=300, random_state=0)
    mlp4 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80, 80, 80, 80), max_iter=300,
                         random_state=0)
    forest = RandomForestClassifier(n_estimators=75, max_features=8, random_state=0, n_jobs=1)
    bagging = BaggingClassifier(max_features=8, n_estimators=40, n_jobs=1, random_state=0)
    model21 = VotingClassifier(
        estimators=[('mlp', mlp), ('mlp2', mlp2), ('mlp3', mlp3), ('mlp4', mlp4), ('forest', forest), ('svc', svc)],
        voting='hard')
if useClustering:
# clusteringTrain1 = KMeans(n_clusters=nClusters, n_init=3, random_state=0)
clusteringTrain1 = Birch(n_clusters=nClusters)
labels = clusteringTrain1.fit_predict(X_train21)
if standardiseClusterLabels:
labels = preprocessing.StandardScaler().fit_transform(labels.reshape(-1, 1))
X_train21['ClusterLabel'] = labels
    if useTF:
        # Output size = number of distinct players in this game.
        model21 = tfModel(len(set(y_train21)))
    # Optional dimensionality reduction (fitted on the training split).
    if usePCA:
        X_train21 = pca1.fit_transform(X_train21)
    if useICA:
        X_train21 = ica1.fit_transform(X_train21)
    if useIsomap:
        X_train21 = iso1.fit_transform(X_train21)
    if useLLE:
        X_train21 = lle1.fit_transform(X_train21)
    # Training
    if useTF:
        if not (usePCA or useICA or useIsomap or useLLE):
            # Reducers already return ndarrays; only convert if none ran.
            X_train21 = X_train21.to_numpy()
        y_train21CPY = y_train21
        y_train21 = y_train21.to_numpy()
        # LabelEncoder is fitted here so predictions can be mapped back to
        # player ids at test time; one-hot targets for the TF model.
        LE1 = preprocessing.LabelEncoder()
        LE1.fit(y_train21)
        OneHot1 = OneHotEncoder()
        y_train21 = OneHot1.fit_transform(y_train21.reshape(-1, 1)).toarray()
        model21.fit(X_train21, y_train21, validation_split=0.1, epochs=nEpochs)
        if (iterationNumber == 1):
            print(model21.summary())
    else:
        model21.fit(X_train21, y_train21)
        if (iterationNumber == 1):
            print(model21)
    # Game 2: Training (mirrors the game-1 pipeline with its own scaler,
    # reducers and model).
    X_train22 = trainGame2.loc[:, trainGame2.columns != "playerID"]
    y_train22 = trainGame2["playerID"]
    scaler22 = preprocessing.StandardScaler().fit(X_train22)
    X_train22 = pd.DataFrame(scaler22.transform(X_train22.values), columns=X_train22.columns, index=X_train22.index)
    # Models
    tree = DecisionTreeClassifier(criterion='entropy', random_state=0)
    knn = KNeighborsClassifier(n_neighbors=11)
    svc = svm.SVC(gamma=1, kernel='poly')
    svc2 = svm.SVC(gamma=0.5, kernel='poly')
    svc3 = svm.SVC(gamma=1.5, kernel='poly')
    svc4 = svm.SVC(gamma=0.4, kernel='poly')
    svc5 = svm.SVC(gamma=2.5, kernel='poly')
    mlp = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80, 80), max_iter=300, random_state=0)
    mlp2 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80, 80, 80), max_iter=300, random_state=0)
    mlp3 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80), max_iter=300, random_state=0)
    mlp4 = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(80, 80, 80, 80, 80), max_iter=300,
                         random_state=0)
    forest = RandomForestClassifier(n_estimators=75, max_features=8, random_state=0, n_jobs=1)
    bagging = BaggingClassifier(max_features=8, n_estimators=40, n_jobs=1, random_state=0)
    model22 = VotingClassifier(
        estimators=[('mlp', mlp), ('mlp2', mlp2), ('mlp3', mlp3), ('mlp4', mlp4), ('forest', forest), ('svc', svc)],
        voting='hard')
    if useClustering:
        clusteringTrain2 = KMeans(n_clusters=nClusters, n_init=3, random_state=0)
        labels = clusteringTrain2.fit_predict(X_train22)
        if standardiseClusterLabels:
            labels = preprocessing.StandardScaler().fit_transform(labels.reshape(-1, 1))
        X_train22['ClusterLabel'] = labels
    if useTF:
        model22 = tfModel(len(set(y_train22)))
    if usePCA:
        X_train22 = pca2.fit_transform(X_train22)
    if useICA:
        X_train22 = ica2.fit_transform(X_train22)
    if useIsomap:
        X_train22 = iso2.fit_transform(X_train22)
    if useLLE:
        X_train22 = lle2.fit_transform(X_train22)
    # Training
    if useTF:
        if not (usePCA or useICA or useIsomap or useLLE):
            X_train22 = X_train22.to_numpy()
        y_train22 = y_train22.to_numpy()
        LE2 = preprocessing.LabelEncoder()
        LE2.fit(y_train22)
        OneHot2 = OneHotEncoder()
        y_train22 = OneHot2.fit_transform(y_train22.reshape(-1, 1)).toarray()
        model22.fit(X_train22, y_train22, validation_split=0.1, epochs=nEpochs)
        if (iterationNumber == 1):
            print(model22.summary())
    else:
        model22.fit(X_train22, y_train22)
        if (iterationNumber == 1):
            print(model22)
    # Second classification test
    X_test1["playerID"] = x_test1PID
    # Split the test swipes by the game predicted at level 1.
    gb = X_test1.groupby(y_predGameTest)
    groupedByGame = [gb.get_group(x) for x in gb.groups]
    testGame1 = groupedByGame[0]
    testGame2 = groupedByGame[1]
    # Game 1: Test
    # NOTE(review): mask built from trainGame1.columns — works because train
    # and test frames share columns, but testGame1.columns would be clearer.
    X_test21 = testGame1.loc[:, trainGame1.columns != "playerID"]
    y_test21 = testGame1["playerID"]
    X_test21 = pd.DataFrame(scaler21.transform(X_test21.values), columns=X_test21.columns, index=X_test21.index)
    if useClustering:
        # NOTE(review): clusters are re-fitted on the test split rather than
        # reusing the training clustering — verify this is intentional.
        clusteringTest1 = KMeans(n_clusters=nClusters, n_init=3, random_state=0)
        labels = clusteringTest1.fit_predict(X_test21)
        if standardiseClusterLabels:
            labels = preprocessing.StandardScaler().fit_transform(labels.reshape(-1, 1))
        X_test21['ClusterLabel'] = labels
    if usePCA:
        X_test21 = pca1.transform(X_test21)
    if useICA:
        X_test21 = ica1.transform(X_test21)
    if useIsomap:
        X_test21 = iso1.transform(X_test21)
    if useLLE:
        X_test21 = lle1.transform(X_test21)
    if useTF:
        y_test21CPY = y_test21
        if not (usePCA or useICA or useIsomap or useLLE):
            X_test21 = X_test21.to_numpy()
        y_test21 = y_test21.to_numpy()
        y_pred21 = model21.predict(X_test21)
        # Argmax over the one-hot output, mapped back to player ids through
        # the training-time LabelEncoder.
        y_pred21 = y_pred21.argmax(axis=-1)
        y_pred21 = LE1.inverse_transform(y_pred21)
    else:
        y_pred21 = model21.predict(X_test21)
    testing_accuracy1 = skm.accuracy_score(y_test21, y_pred21)
    print(f"Testing accuracy 1 = {testing_accuracy1}")
    # Game 2: Test (mirrors the game-1 test path).
    X_test22 = testGame2.loc[:, trainGame2.columns != "playerID"]
    y_test22 = testGame2["playerID"]
    X_test22 = pd.DataFrame(scaler22.transform(X_test22.values), columns=X_test22.columns, index=X_test22.index)
    if useClustering:
        clusteringTest2 = KMeans(n_clusters=nClusters, n_init=3, random_state=0)
        labels = clusteringTest2.fit_predict(X_test22)
        if standardiseClusterLabels:
            labels = preprocessing.StandardScaler().fit_transform(labels.reshape(-1, 1))
        X_test22['ClusterLabel'] = labels
    if usePCA:
        X_test22 = pca2.transform(X_test22)
    if useICA:
        X_test22 = ica2.transform(X_test22)
    if useIsomap:
        X_test22 = iso2.transform(X_test22)
    if useLLE:
        X_test22 = lle2.transform(X_test22)
    if useTF:
        if not (usePCA or useICA or useIsomap or useLLE):
            X_test22 = X_test22.to_numpy()
        y_test22 = y_test22.to_numpy()
        y_pred22 = model22.predict(X_test22)
        y_pred22 = y_pred22.argmax(axis=-1)
        y_pred22 = LE2.inverse_transform(y_pred22)
    else:
        y_pred22 = model22.predict(X_test22)
    testing_accuracy2 = skm.accuracy_score(y_test22, y_pred22)
    print(f"Testing accuracy 2 = {testing_accuracy2}")
    # Calculate fold accuracy
    # Per-game accuracies weighted by the number of test swipes per game.
    w = [len(y_test21), len(y_test22)]
    acc = [testing_accuracy1, testing_accuracy2]
    weighted_acc = np.average(a=acc, weights=w)
    print(f"Iteration {iterationNumber}: Total accuracy = {weighted_acc}\n")
    foldAccuracy.append(weighted_acc)
print(f"{kFolds}-fold accuracy = {mean(foldAccuracy):.8f}")
# Execution Time
end = time.perf_counter()
print(f"\nExecution time = {end - start:.2f} second(s)")
| [
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.manifold.LocallyLinearEmbedding",
"sklearn.ensemble.VotingClassifier",
"sklearn.neural_network.MLPClassifier",
"sklearn.svm.SVC",
"pandas.set_option",
"sklearn.cluster.KMeans",
... | [((815, 857), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (828, 857), True, 'import pandas as pd\n'), ((859, 898), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (872, 898), True, 'import pandas as pd\n'), ((1128, 1157), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'nComponents'}), '(n_components=nComponents)\n', (1131, 1157), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((1166, 1195), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'nComponents'}), '(n_components=nComponents)\n', (1169, 1195), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((1204, 1267), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'nComponents', 'max_iter': '(800)', 'random_state': '(0)'}), '(n_components=nComponents, max_iter=800, random_state=0)\n', (1211, 1267), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((1276, 1339), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'nComponents', 'max_iter': '(800)', 'random_state': '(0)'}), '(n_components=nComponents, max_iter=800, random_state=0)\n', (1283, 1339), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((1348, 1404), 'sklearn.manifold.Isomap', 'Isomap', ([], {'n_neighbors': 'kNeighbors', 'n_components': 'nComponents'}), '(n_neighbors=kNeighbors, n_components=nComponents)\n', (1354, 1404), False, 'from sklearn.manifold import Isomap\n'), ((1413, 1469), 'sklearn.manifold.Isomap', 'Isomap', ([], {'n_neighbors': 'kNeighbors', 'n_components': 'nComponents'}), '(n_neighbors=kNeighbors, n_components=nComponents)\n', (1419, 1469), False, 'from sklearn.manifold import Isomap\n'), ((1478, 1542), 'sklearn.manifold.LocallyLinearEmbedding', 'LocallyLinearEmbedding', ([], {'n_components': 'nComponents', 'random_state': '(0)'}), '(n_components=nComponents, random_state=0)\n', (1500, 1542), False, 'from 
sklearn.manifold import LocallyLinearEmbedding\n'), ((1551, 1615), 'sklearn.manifold.LocallyLinearEmbedding', 'LocallyLinearEmbedding', ([], {'n_components': 'nComponents', 'random_state': '(0)'}), '(n_components=nComponents, random_state=0)\n', (1573, 1615), False, 'from sklearn.manifold import LocallyLinearEmbedding\n'), ((1971, 1996), 'preprocess.read_and_preprocess', 'read_and_preprocess', (['(True)'], {}), '(True)\n', (1990, 1996), False, 'from preprocess import read_and_preprocess\n'), ((2070, 2089), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2087, 2089), False, 'import time\n'), ((2097, 2159), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'kFolds', 'random_state': '(0)', 'shuffle': '(True)'}), '(n_splits=kFolds, random_state=0, shuffle=True)\n', (2112, 2159), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((12472, 12491), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12489, 12491), False, 'import time\n'), ((3770, 3829), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'random_state': '(0)'}), "(criterion='entropy', random_state=0)\n", (3792, 3829), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3841, 3877), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(11)'}), '(n_neighbors=11)\n', (3861, 3877), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3889, 3920), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(1)', 'kernel': '"""poly"""'}), "(gamma=1, kernel='poly')\n", (3896, 3920), False, 'from sklearn import preprocessing, svm\n'), ((3933, 3966), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(0.5)', 'kernel': '"""poly"""'}), "(gamma=0.5, kernel='poly')\n", (3940, 3966), False, 'from sklearn import preprocessing, svm\n'), ((3979, 4012), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(1.5)', 'kernel': '"""poly"""'}), "(gamma=1.5, kernel='poly')\n", (3986, 
4012), False, 'from sklearn import preprocessing, svm\n'), ((4025, 4058), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(0.4)', 'kernel': '"""poly"""'}), "(gamma=0.4, kernel='poly')\n", (4032, 4058), False, 'from sklearn import preprocessing, svm\n'), ((4071, 4104), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(2.5)', 'kernel': '"""poly"""'}), "(gamma=2.5, kernel='poly')\n", (4078, 4104), False, 'from sklearn import preprocessing, svm\n'), ((4116, 4224), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80, 80),\n max_iter=300, random_state=0)\n", (4129, 4224), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4232, 4345), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80, 80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80, 80, \n 80), max_iter=300, random_state=0)\n", (4245, 4345), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4352, 4456), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80),\n max_iter=300, random_state=0)\n", (4365, 4456), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4464, 4581), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80, 80, 80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80, 80, \n 80, 80), max_iter=300, random_state=0)\n", (4477, 4581), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4616, 
4701), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(75)', 'max_features': '(8)', 'random_state': '(0)', 'n_jobs': '(1)'}), '(n_estimators=75, max_features=8, random_state=0,\n n_jobs=1)\n', (4638, 4701), False, 'from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier\n'), ((4713, 4789), 'sklearn.ensemble.BaggingClassifier', 'BaggingClassifier', ([], {'max_features': '(8)', 'n_estimators': '(40)', 'n_jobs': '(1)', 'random_state': '(0)'}), '(max_features=8, n_estimators=40, n_jobs=1, random_state=0)\n', (4730, 4789), False, 'from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier\n'), ((4805, 4949), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': "[('mlp', mlp), ('mlp2', mlp2), ('mlp3', mlp3), ('mlp4', mlp4), ('forest',\n forest), ('svc', svc)]", 'voting': '"""hard"""'}), "(estimators=[('mlp', mlp), ('mlp2', mlp2), ('mlp3', mlp3),\n ('mlp4', mlp4), ('forest', forest), ('svc', svc)], voting='hard')\n", (4821, 4949), False, 'from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier\n'), ((6713, 6772), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'random_state': '(0)'}), "(criterion='entropy', random_state=0)\n", (6735, 6772), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((6784, 6820), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(11)'}), '(n_neighbors=11)\n', (6804, 6820), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((6832, 6863), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(1)', 'kernel': '"""poly"""'}), "(gamma=1, kernel='poly')\n", (6839, 6863), False, 'from sklearn import preprocessing, svm\n'), ((6876, 6909), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(0.5)', 'kernel': '"""poly"""'}), "(gamma=0.5, kernel='poly')\n", (6883, 6909), False, 'from sklearn 
import preprocessing, svm\n'), ((6922, 6955), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(1.5)', 'kernel': '"""poly"""'}), "(gamma=1.5, kernel='poly')\n", (6929, 6955), False, 'from sklearn import preprocessing, svm\n'), ((6968, 7001), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(0.4)', 'kernel': '"""poly"""'}), "(gamma=0.4, kernel='poly')\n", (6975, 7001), False, 'from sklearn import preprocessing, svm\n'), ((7014, 7047), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(2.5)', 'kernel': '"""poly"""'}), "(gamma=2.5, kernel='poly')\n", (7021, 7047), False, 'from sklearn import preprocessing, svm\n'), ((7059, 7167), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80, 80),\n max_iter=300, random_state=0)\n", (7072, 7167), False, 'from sklearn.neural_network import MLPClassifier\n'), ((7175, 7288), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80, 80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80, 80, \n 80), max_iter=300, random_state=0)\n", (7188, 7288), False, 'from sklearn.neural_network import MLPClassifier\n'), ((7295, 7399), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, hidden_layer_sizes=(80, 80),\n max_iter=300, random_state=0)\n", (7308, 7399), False, 'from sklearn.neural_network import MLPClassifier\n'), ((7407, 7524), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""adam"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(80, 80, 80, 80, 80)', 'max_iter': '(300)', 'random_state': '(0)'}), "(solver='adam', alpha=1e-05, 
hidden_layer_sizes=(80, 80, 80, \n 80, 80), max_iter=300, random_state=0)\n", (7420, 7524), False, 'from sklearn.neural_network import MLPClassifier\n'), ((7559, 7644), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(75)', 'max_features': '(8)', 'random_state': '(0)', 'n_jobs': '(1)'}), '(n_estimators=75, max_features=8, random_state=0,\n n_jobs=1)\n', (7581, 7644), False, 'from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier\n'), ((7656, 7732), 'sklearn.ensemble.BaggingClassifier', 'BaggingClassifier', ([], {'max_features': '(8)', 'n_estimators': '(40)', 'n_jobs': '(1)', 'random_state': '(0)'}), '(max_features=8, n_estimators=40, n_jobs=1, random_state=0)\n', (7673, 7732), False, 'from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier\n'), ((7748, 7892), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': "[('mlp', mlp), ('mlp2', mlp2), ('mlp3', mlp3), ('mlp4', mlp4), ('forest',\n forest), ('svc', svc)]", 'voting': '"""hard"""'}), "(estimators=[('mlp', mlp), ('mlp2', mlp2), ('mlp3', mlp3),\n ('mlp4', mlp4), ('forest', forest), ('svc', svc)], voting='hard')\n", (7764, 7892), False, 'from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, VotingClassifier\n'), ((10702, 10740), 'sklearn.metrics.accuracy_score', 'skm.accuracy_score', (['y_test21', 'y_pred21'], {}), '(y_test21, y_pred21)\n', (10720, 10740), True, 'import sklearn.metrics as skm\n'), ((12000, 12038), 'sklearn.metrics.accuracy_score', 'skm.accuracy_score', (['y_test22', 'y_pred22'], {}), '(y_test22, y_pred22)\n', (12018, 12038), True, 'import sklearn.metrics as skm\n'), ((12238, 12266), 'numpy.average', 'np.average', ([], {'a': 'acc', 'weights': 'w'}), '(a=acc, weights=w)\n', (12248, 12266), True, 'import numpy as np\n'), ((5924, 5952), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (5950, 5952), False, 'from 
sklearn import preprocessing, svm\n'), ((6000, 6015), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (6013, 6015), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((7959, 8013), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'nClusters', 'n_init': '(3)', 'random_state': '(0)'}), '(n_clusters=nClusters, n_init=3, random_state=0)\n', (7965, 8013), False, 'from sklearn.cluster import KMeans\n'), ((8775, 8803), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (8801, 8803), False, 'from sklearn import preprocessing, svm\n'), ((8851, 8866), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (8864, 8866), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((9754, 9808), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'nClusters', 'n_init': '(3)', 'random_state': '(0)'}), '(n_clusters=nClusters, n_init=3, random_state=0)\n', (9760, 9808), False, 'from sklearn.cluster import KMeans\n'), ((11088, 11142), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'nClusters', 'n_init': '(3)', 'random_state': '(0)'}), '(n_clusters=nClusters, n_init=3, random_state=0)\n', (11094, 11142), False, 'from sklearn.cluster import KMeans\n'), ((2681, 2711), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (2709, 2711), False, 'from sklearn import preprocessing, svm\n'), ((2994, 3029), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (3014, 3029), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3578, 3608), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (3606, 3608), False, 'from sklearn import preprocessing, svm\n'), ((6521, 6551), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (6549, 6551), False, 'from sklearn import preprocessing, svm\n'), ((12421, 
12439), 'statistics.mean', 'mean', (['foldAccuracy'], {}), '(foldAccuracy)\n', (12425, 12439), False, 'from statistics import mean\n'), ((5247, 5277), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (5275, 5277), False, 'from sklearn import preprocessing, svm\n'), ((8132, 8162), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (8160, 8162), False, 'from sklearn import preprocessing, svm\n'), ((9925, 9955), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (9953, 9955), False, 'from sklearn import preprocessing, svm\n'), ((11259, 11289), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (11287, 11289), False, 'from sklearn import preprocessing, svm\n')] |
"""
<NAME>
Thu Dec 6 21:26:00 2018
"""
import os
from subprocess import call
from importlib import util
import numpy as np
from numpy import cbrt
import matplotlib as mpl
# Allows matplotlib to run without being install as a framework on OSX
mpl.use('TkAGG')
import matplotlib.pyplot as plt
from datetime import date
from tqdm import tqdm
# Import from solar_system
from solar_system import km2m, au2m, day2sec, julian_day, load_constants
from solar_system import jpl_kernel, jpl_body_id, simulate_leapfrog, calc_mse, plot_energy, U_ij
# Types
from typing import Tuple, List, Dict, Optional
# *************************************************************************************************
# Handle import of module fluxions differently if module
# module is being loaded as __main__ or a module in a package.
if util.find_spec("fluxions") is not None:
import fluxions as fl
else:
cwd = os.getcwd()
os.chdir('..')
import fluxions as fl
os.chdir(cwd)
# *************************************************************************************************
# Set plot style
mpl.rcParams.update({'font.size': 20})
# *************************************************************************************************
def configuration(t0: date, t1: Optional[date] = None,
                  steps_per_day: int = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Fetch positions and velocities of the sun and eight planets from JPL.

    Returns a tuple (q, v) of Nx(3B) arrays in the J2000.0 coordinate frame:
    positions in meters, velocities in meters per second. The list of bodies
    comes from the enclosing scope.
    """
    # One step per day unless the caller says otherwise
    spd: int = steps_per_day if steps_per_day is not None else 1
    dt: float = 1.0 / float(spd)
    # Convert the date range to julian days; a missing t1 yields a single step
    jd_start = julian_day(t0)
    jd_stop = julian_day(t1) if t1 is not None else jd_start + dt
    # Array of julian-day sample times
    jd: np.ndarray = np.arange(jd_start, jd_stop, dt)
    # Output arrays: one row per time point, 3 columns (x, y, z) per body
    n_steps: int = len(jd)
    n_cols: int = 3 * len(bodies)
    q: np.ndarray = np.zeros((n_steps, n_cols))
    v: np.ndarray = np.zeros((n_steps, n_cols))
    # Query the JPL ephemeris kernel for each body in turn
    for k, body in enumerate(bodies):
        cols = slice(3 * k, 3 * k + 3)
        pos, vel = jpl_kernel[0, jpl_body_id[body]].compute_and_differentiate(jd)
        # Unit conversions: km -> m and km/day -> m/s
        q[:, cols] = pos.T * km2m
        v[:, cols] = vel.T * (km2m / day2sec)
    return q, v
# *************************************************************************************************
def plot(q: np.ndarray, bodies: List[str], plot_colors: Dict[str, str],
         sim_name: str, fname: Optional[str] = None):
    """
    Chart the planetary orbits in the xy plane of the J2000.0 frame.

    q is an Nx3B array: rows are time points, columns are (x, y, z) per body.
    The view is a 10 AU box centered on the sun. Saves to fname when given,
    then shows the figure.
    """
    dims = q.shape[1]
    # Distances in AU; every third column starting at 0 is x, starting at 1 is y
    xs = q[:, slice(0, dims, 3)] / au2m
    ys = q[:, slice(1, dims, 3)] / au2m
    # Titled square figure
    fig, ax = plt.subplots(figsize=[12, 12])
    ax.set_title(f'Inner Planetary Orbits in 2018; Weekly from {sim_name}')
    ax.set_xlabel('x in J2000.0 Frame; Astronomical Units (au)')
    ax.set_ylabel('y in J2000.0 Frame; Astronomical Units (au)')
    # 5 AU half-width window, ticked every AU
    half_width = 5.0
    tick_step = 1.0
    tick_vals = np.arange(-half_width, half_width + tick_step, tick_step)
    ax.set_xlim(-half_width, half_width)
    ax.set_ylim(-half_width, half_width)
    ax.set_xticks(tick_vals)
    ax.set_yticks(tick_vals)
    # Marker radius scales with cbrt of body radius (i.e. with volume^(1/9) of size)
    base_size = 4.0
    sizes = {b: cbrt(radius_tbl[b] / radius_tbl['earth']) * base_size for b in bodies}
    # One point-cloud trace per body
    for col, body in enumerate(bodies):
        ax.plot(xs[:, col], ys[:, col], label=body, color=plot_colors[body],
                linewidth=0, markersize=sizes[body], marker='o')
    fig.legend(loc=7, bbox_to_anchor=(0.85, 0.5))
    ax.grid()
    if fname is not None:
        fig.savefig(fname, bbox_inches='tight')
    plt.show()
# *************************************************************************************************
def make_frame(fig, ax, plot_x: np.ndarray, plot_y: np.ndarray, frame_num: int,
               bodies: List[str], plot_colors: Dict[str, str], markersize_tbl: Dict[str, float],
               fname: str):
    """
    Render one movie frame of the planetary orbits and save it to disk.

    plot_x / plot_y hold one coordinate per body, already converted to AU.
    The frame is written as '{fname}_{frame_num:05d}.png'.
    """
    # Start from a blank axis so frames do not accumulate points
    ax.clear()
    ax.set_title(f'Inner Planetary Orbits in 2018')
    ax.set_xlabel('x in J2000.0 Frame; Astronomical Units (au)')
    ax.set_ylabel('y in J2000.0 Frame; Astronomical Units (au)')
    # 2 AU half-width window with 1 AU ticks
    half_width = 2.0
    tick_vals = np.arange(-half_width, half_width + 1.0, 1.0)
    ax.set_xlim(-half_width, half_width)
    ax.set_ylim(-half_width, half_width)
    ax.set_xticks(tick_vals)
    ax.set_yticks(tick_vals)
    # Only the sun and the four inner planets appear in the movie
    for idx, body in enumerate(bodies[0:5]):
        ax.plot(plot_x[idx], plot_y[idx], label=body, color=plot_colors[body],
                linewidth=0, markersize=markersize_tbl[body], marker='o')
    ax.grid()
    # Zero-padded frame number so ffmpeg can pick the files up in order
    fig.savefig(f'{fname}_{frame_num:05d}.png')
def make_movie(q: np.ndarray, step: int, bodies: List[str], plot_colors: Dict[str, str], fname: str):
    """
    Render every step-th row of q as a movie frame, assemble the frames into
    an mp4 with ffmpeg, add the soundtrack, then delete the frame images.

    q is an Nx3B array. Rows index time points; the 3B columns are (x, y, z)
    for the bodies in order. fname is the path prefix for the frame images.
    """
    import glob  # local import: only needed here, for frame cleanup
    # Get N and number of dims
    N, dims = q.shape
    # Convert all distances from meters to astronomical units (AU)
    plot_x = q[:, slice(0, dims, 3)] / au2m
    plot_y = q[:, slice(1, dims, 3)] / au2m
    # Marker sizes proportional to (radius / earth radius)^(1/4)
    radius_earth = radius_tbl['earth']
    markersize_earth = 8.0
    markersize_tbl = {body: (radius_tbl[body] / radius_earth) ** 0.25 * markersize_earth for body in bodies}
    # Shared figure reused for every frame
    fig, ax = plt.subplots(figsize=[12, 12])
    print(f'Generating movie with {N} frames...')
    for fn in tqdm(range(N // step)):
        # The row number in the array is the frame number times the step size
        rn: int = fn * step
        make_frame(fig, ax, plot_x[rn], plot_y[rn], fn, bodies, plot_colors, markersize_tbl, fname)
    # Assemble the frames into a movie (video only). The input pattern is
    # derived from fname so it stays correct if the caller changes the prefix.
    cmd1: List[str] = ['ffmpeg', '-r', '24', '-f', 'image2', '-s', '864x864', '-i', f'{fname}_%05d.png',
                       '-vcodec', 'libx264', '-crf', '20', '-pix_fmt', 'yuv420p', 'movie/planets_video.mp4']
    call(cmd1)
    # Add the soundtrack to the movie
    cmd2: List[str] = ['ffmpeg', '-i', 'movie/planets_video.mp4', '-i', 'movie/holst_jupiter.mp3',
                       '-c:v', 'copy', '-shortest', 'movie/planets.mp4']
    call(cmd2)
    # BUG FIX: subprocess.call without shell=True does not expand globs, so the
    # original ['rm', 'movie/*.png'] never deleted anything. Expand the pattern
    # in Python and remove exactly the frames generated above.
    for frame_file in glob.glob(f'{fname}_*.png'):
        os.remove(frame_file)
# *************************************************************************************************
def accel(q: np.ndarray):
    """
    Gravitational acceleration on every body, flattened into one vector.

    q lays the bodies out as consecutive (x, y, z) triples. Reads G, B,
    mass and slices from the enclosing scope.
    """
    # Accumulator, same length as q
    a: np.ndarray = np.zeros(len(q))
    # Each unordered pair of bodies contributes equal and opposite forces
    for i in range(B):
        for j in range(i + 1, B):
            # Separation vector and distance from body i to body j
            r_vec = q[slices[j]] - q[slices[i]]
            r = np.linalg.norm(r_vec)
            r_hat = r_vec / r
            # Newtonian gravity: |F| = G * m_i * m_j / r^2
            f_mag: float = (G * mass[i] * mass[j]) / (r ** 2)
            # Attraction: i accelerates toward j and j toward i
            a[slices[i]] += f_mag * r_hat / mass[i]
            a[slices[j]] -= f_mag * r_hat / mass[j]
    return a
# *************************************************************************************************
def energy(q, v):
    """Kinetic, potential and total energy of the system at every time point.

    Returns the tuple (H, T, U) where H = T + U. Reads G, B, mass and
    slices from the enclosing scope.
    """
    n_pts: int = len(q)
    T: np.ndarray = np.zeros(n_pts)
    U: np.ndarray = np.zeros(n_pts)
    # Kinetic energy: sum over bodies of (1/2) m v^2
    for b in range(B):
        vel = v[:, slices[b]]
        T += 0.5 * mass[b] * np.sum(vel * vel, axis=1)
    # Potential energy: -G m_i m_j / r over each distinct pair
    for i in range(B):
        for j in range(i + 1, B):
            # Pairwise separation at every time point
            sep = q[:, slices[j]] - q[:, slices[i]]
            r = np.linalg.norm(sep, axis=1)
            U -= G * mass[i] * mass[j] * 1.0 / r
    # Total mechanical energy
    return T + U, T, U
# *************************************************************************************************
# Symbolic coordinate variables for the nine-body system (sun + eight planets).
# Body k gets the triple (xk, yk, zk); these feed the Fluxion potential below.
x0, y0, z0 = fl.Vars('x0', 'y0', 'z0')
x1, y1, z1 = fl.Vars('x1', 'y1', 'z1')
x2, y2, z2 = fl.Vars('x2', 'y2', 'z2')
x3, y3, z3 = fl.Vars('x3', 'y3', 'z3')
x4, y4, z4 = fl.Vars('x4', 'y4', 'z4')
x5, y5, z5 = fl.Vars('x5', 'y5', 'z5')
x6, y6, z6 = fl.Vars('x6', 'y6', 'z6')
x7, y7, z7 = fl.Vars('x7', 'y7', 'z7')
x8, y8, z8 = fl.Vars('x8', 'y8', 'z8')
# Flat list of all 27 coordinates, in the same body order as `bodies` below;
# this ordering defines the layout of the position vectors q used throughout.
q_vars = [x0, y0, z0,
          x1, y1, z1,
          x2, y2, z2,
          x3, y3, z3,
          x4, y4, z4,
          x5, y5, z5,
          x6, y6, z6,
          x7, y7, z7,
          x8, y8, z8]
def make_force(q_vars, mass):
    """Build a gravitational force function from a Fluxion potential energy.

    Returns a callable mapping a flat position vector q (ordered like q_vars)
    to the force vector -dU/dq in that same ordering.
    """
    n_bodies: int = len(mass)
    # Total potential is the sum of pair potentials over distinct body pairs
    U = fl.Const(0.0)
    for i in range(n_bodies):
        for j in range(i + 1, n_bodies):
            U += U_ij(q_vars, mass, i, j)
    # Fluxions orders its variables alphabetically, which differs from the
    # ordering of q_vars; build permutations translating between the two.
    vn_q = np.array([q.var_name for q in q_vars])
    vn_fl = np.array(sorted(U.var_names))
    # q2fl[k]: index into q of the k-th (alphabetical) fluxion variable
    q2fl = np.array([np.argmax(vn_q == name) for name in vn_fl])
    # fl2q[k]: index into the fluxion gradient of the k-th q_vars entry
    fl2q = np.array([np.argmax(vn_fl == name) for name in vn_q])
    # Force is minus the gradient of the potential, re-permuted into q order
    return lambda q: -U.diff(q[q2fl]).squeeze()[fl2q]
def accel_fl(q: np.ndarray):
    """Acceleration of every body from the Fluxion-derived force function.

    Divides the net force on each body by its mass; relies on the module-level
    force, slices and mass objects.
    """
    dims: int = len(q)
    # Three coordinates per body
    n_bodies: int = dims // 3
    # Net force at the given configuration
    f = force(q)
    a = np.zeros(dims)
    # a = F / m, one body at a time
    for b in range(n_bodies):
        a[slices[b]] = f[slices[b]] / mass[b]
    return a
# *************************************************************************************************
# main
# Load the gravitational constant G plus lookup tables keyed by body name
G, body_name, mass_tbl, radius_tbl = load_constants()
# The celestial bodies in this simulation, in q-vector order
bodies = ['sun', 'mercury', 'venus', 'earth', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune']
# Number of bodies in this simulation
B: int = len(bodies)
# Number of dimensions (three coordinates per body)
dims: int = B*3
# Masses of all nine bodies, aligned with `bodies`
mass = np.array([mass_tbl[body] for body in bodies])
# Slices selecting the (x, y, z) triple of each body in a flat vector
slices = [slice(b*3, (b+1)*3) for b in range(B)]
# Colors for plotting each body
plot_colors = {
    'sun': 'orange',
    'mercury': 'gray',
    'venus': 'yellow',
    'earth': 'blue',
    'mars': 'red',
    'jupiter':'orange',
    'saturn':'gold',
    'uranus':'blue',
    'neptune':'blue'
    }
# Force function derived from the Fluxion potential energy of the system
force = make_force(q_vars, mass)
def main():
    """Compare a leapfrog integration of the solar system against JPL data."""
    # Each day is divided into this many leapfrog steps
    steps_per_day: int = 16
    # Simulation window: the calendar year 2018
    t0 = date(2018, 1, 1)
    t1 = date(2019, 1, 1)
    # Reference trajectory straight from the JPL ephemeris
    q_jpl, v_jpl = configuration(t0, t1, steps_per_day)
    # Our own leapfrog integration of the same system
    q_sim, v_sim = simulate_leapfrog(configuration, accel, t0, t1, steps_per_day)
    # Energy time series for both trajectories
    H_jpl, T_jpl, U_jpl = energy(q_jpl, v_jpl)
    H_sim, T_sim, U_sim = energy(q_sim, v_sim)
    # Orbit charts sampled at weekly intervals
    plot_step: int = 7 * steps_per_day
    plot(q_jpl[::plot_step], bodies, plot_colors, 'JPL', 'figs/eight_planets_jpl.png')
    plot(q_sim[::plot_step], bodies, plot_colors, 'Leapfrog', 'figs/eight_planets_leapfrog.png')
    # How far the two trajectories drift apart
    mse = calc_mse(q_jpl, q_sim)
    print(f'MSE between leapfrog simulation with {steps_per_day} steps per day and JPL:')
    print(f'{mse:0.3e} astronomical units.')
    # Energy drift over the year, normalized by the initial kinetic energy
    energy_chng_jpl = (H_jpl[-1] - H_jpl[0]) / T_jpl[0]
    energy_chng_sim = (H_sim[-1] - H_sim[0]) / T_sim[0]
    print(f'\nEnergy change as fraction of original KE during simulation with {steps_per_day} steps per day:')
    print(f'JPL: {energy_chng_jpl:0.2e}.')
    print(f'Leapfrog: {energy_chng_sim:0.2e}.')
    # Optional diagnostic, disabled: kinetic / potential energy time series
    # N: int = len(q_jpl)
    # plot_days = np.linspace(0.0, (t1-t0).days, N)
    # plot_energy(plot_days, H_jpl, T_jpl, U_jpl)
    # Movie frames are expensive; only render them once
    if os.path.isfile('movie/planets_video.mp4'):
        print(f'Found movie/planets_video.mp4, not regenerating it.')
    else:
        make_movie(q_jpl, steps_per_day//2, bodies, plot_colors, 'movie/planets')


# Move post-processing
if __name__ == '__main__':
    main()
| [
"numpy.sum",
"numpy.argmax",
"os.path.isfile",
"numpy.arange",
"numpy.linalg.norm",
"os.chdir",
"solar_system.load_constants",
"fluxions.Vars",
"matplotlib.rcParams.update",
"solar_system.julian_day",
"solar_system.U_ij",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.cbrt"... | [((244, 260), 'matplotlib.use', 'mpl.use', (['"""TkAGG"""'], {}), "('TkAGG')\n", (251, 260), True, 'import matplotlib as mpl\n'), ((1098, 1136), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (1117, 1136), True, 'import matplotlib as mpl\n'), ((10962, 10987), 'fluxions.Vars', 'fl.Vars', (['"""x0"""', '"""y0"""', '"""z0"""'], {}), "('x0', 'y0', 'z0')\n", (10969, 10987), True, 'import fluxions as fl\n'), ((11001, 11026), 'fluxions.Vars', 'fl.Vars', (['"""x1"""', '"""y1"""', '"""z1"""'], {}), "('x1', 'y1', 'z1')\n", (11008, 11026), True, 'import fluxions as fl\n'), ((11040, 11065), 'fluxions.Vars', 'fl.Vars', (['"""x2"""', '"""y2"""', '"""z2"""'], {}), "('x2', 'y2', 'z2')\n", (11047, 11065), True, 'import fluxions as fl\n'), ((11079, 11104), 'fluxions.Vars', 'fl.Vars', (['"""x3"""', '"""y3"""', '"""z3"""'], {}), "('x3', 'y3', 'z3')\n", (11086, 11104), True, 'import fluxions as fl\n'), ((11118, 11143), 'fluxions.Vars', 'fl.Vars', (['"""x4"""', '"""y4"""', '"""z4"""'], {}), "('x4', 'y4', 'z4')\n", (11125, 11143), True, 'import fluxions as fl\n'), ((11157, 11182), 'fluxions.Vars', 'fl.Vars', (['"""x5"""', '"""y5"""', '"""z5"""'], {}), "('x5', 'y5', 'z5')\n", (11164, 11182), True, 'import fluxions as fl\n'), ((11196, 11221), 'fluxions.Vars', 'fl.Vars', (['"""x6"""', '"""y6"""', '"""z6"""'], {}), "('x6', 'y6', 'z6')\n", (11203, 11221), True, 'import fluxions as fl\n'), ((11235, 11260), 'fluxions.Vars', 'fl.Vars', (['"""x7"""', '"""y7"""', '"""z7"""'], {}), "('x7', 'y7', 'z7')\n", (11242, 11260), True, 'import fluxions as fl\n'), ((11274, 11299), 'fluxions.Vars', 'fl.Vars', (['"""x8"""', '"""y8"""', '"""z8"""'], {}), "('x8', 'y8', 'z8')\n", (11281, 11299), True, 'import fluxions as fl\n'), ((13081, 13097), 'solar_system.load_constants', 'load_constants', ([], {}), '()\n', (13095, 13097), False, 'from solar_system import km2m, au2m, day2sec, julian_day, load_constants\n'), ((13370, 13415), 
'numpy.array', 'np.array', (['[mass_tbl[body] for body in bodies]'], {}), '([mass_tbl[body] for body in bodies])\n', (13378, 13415), True, 'import numpy as np\n'), ((819, 845), 'importlib.util.find_spec', 'util.find_spec', (['"""fluxions"""'], {}), "('fluxions')\n", (833, 845), False, 'from importlib import util\n'), ((905, 916), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (914, 916), False, 'import os\n'), ((921, 935), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (929, 935), False, 'import os\n'), ((966, 979), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (974, 979), False, 'import os\n'), ((2050, 2073), 'numpy.arange', 'np.arange', (['jd0', 'jd1', 'dt'], {}), '(jd0, jd1, dt)\n', (2059, 2073), True, 'import numpy as np\n'), ((2401, 2420), 'numpy.zeros', 'np.zeros', (['(N, dims)'], {}), '((N, dims))\n', (2409, 2420), True, 'import numpy as np\n'), ((2441, 2460), 'numpy.zeros', 'np.zeros', (['(N, dims)'], {}), '((N, dims))\n', (2449, 2460), True, 'import numpy as np\n'), ((3998, 4028), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[12, 12]'}), '(figsize=[12, 12])\n', (4010, 4028), True, 'import matplotlib.pyplot as plt\n'), ((4297, 4322), 'numpy.arange', 'np.arange', (['(-a)', '(a + da)', 'da'], {}), '(-a, a + da, da)\n', (4306, 4322), True, 'import numpy as np\n'), ((5157, 5167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5165, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5961, 5986), 'numpy.arange', 'np.arange', (['(-a)', '(a + da)', 'da'], {}), '(-a, a + da, da)\n', (5970, 5986), True, 'import numpy as np\n'), ((7299, 7329), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[12, 12]'}), '(figsize=[12, 12])\n', (7311, 7329), True, 'import matplotlib.pyplot as plt\n'), ((7919, 7929), 'subprocess.call', 'call', (['cmd1'], {}), '(cmd1)\n', (7923, 7929), False, 'from subprocess import call\n'), ((8145, 8155), 'subprocess.call', 'call', (['cmd2'], {}), '(cmd2)\n', (8149, 8155), False, 'from subprocess 
import call\n'), ((8230, 8240), 'subprocess.call', 'call', (['cmd3'], {}), '(cmd3)\n', (8234, 8240), False, 'from subprocess import call\n'), ((8639, 8653), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (8647, 8653), True, 'import numpy as np\n'), ((9963, 9974), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (9971, 9974), True, 'import numpy as np\n'), ((9995, 10006), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (10003, 10006), True, 'import numpy as np\n'), ((11770, 11783), 'fluxions.Const', 'fl.Const', (['(0.0)'], {}), '(0.0)\n', (11778, 11783), True, 'import fluxions as fl\n'), ((11956, 11994), 'numpy.array', 'np.array', (['[q.var_name for q in q_vars]'], {}), '([q.var_name for q in q_vars])\n', (11964, 11994), True, 'import numpy as np\n'), ((12806, 12820), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (12814, 12820), True, 'import numpy as np\n'), ((14011, 14027), 'datetime.date', 'date', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (14015, 14027), False, 'from datetime import date\n'), ((14035, 14051), 'datetime.date', 'date', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (14039, 14051), False, 'from datetime import date\n'), ((14169, 14231), 'solar_system.simulate_leapfrog', 'simulate_leapfrog', (['configuration', 'accel', 't0', 't1', 'steps_per_day'], {}), '(configuration, accel, t0, t1, steps_per_day)\n', (14186, 14231), False, 'from solar_system import jpl_kernel, jpl_body_id, simulate_leapfrog, calc_mse, plot_energy, U_ij\n'), ((14800, 14822), 'solar_system.calc_mse', 'calc_mse', (['q_jpl', 'q_sim'], {}), '(q_jpl, q_sim)\n', (14808, 14822), False, 'from solar_system import jpl_kernel, jpl_body_id, simulate_leapfrog, calc_mse, plot_energy, U_ij\n'), ((1859, 1873), 'solar_system.julian_day', 'julian_day', (['t0'], {}), '(t0)\n', (1869, 1873), False, 'from solar_system import km2m, au2m, day2sec, julian_day, load_constants\n'), ((1893, 1907), 'solar_system.julian_day', 'julian_day', (['t1'], {}), '(t1)\n', (1903, 1907), 
False, 'from solar_system import km2m, au2m, day2sec, julian_day, load_constants\n'), ((1937, 1951), 'solar_system.julian_day', 'julian_day', (['t0'], {}), '(t0)\n', (1947, 1951), False, 'from solar_system import km2m, au2m, day2sec, julian_day, load_constants\n'), ((15575, 15616), 'os.path.isfile', 'os.path.isfile', (['"""movie/planets_video.mp4"""'], {}), "('movie/planets_video.mp4')\n", (15589, 15616), False, 'import os\n'), ((4594, 4631), 'numpy.cbrt', 'cbrt', (['(radius_tbl[body] / radius_earth)'], {}), '(radius_tbl[body] / radius_earth)\n', (4598, 4631), False, 'from numpy import cbrt\n'), ((9175, 9196), 'numpy.linalg.norm', 'np.linalg.norm', (['dv_01'], {}), '(dv_01)\n', (9189, 9196), True, 'import numpy as np\n'), ((10189, 10212), 'numpy.sum', 'np.sum', (['(vi * vi)'], {'axis': '(1)'}), '(vi * vi, axis=1)\n', (10195, 10212), True, 'import numpy as np\n'), ((10660, 10689), 'numpy.linalg.norm', 'np.linalg.norm', (['dv_ij'], {'axis': '(1)'}), '(dv_ij, axis=1)\n', (10674, 10689), True, 'import numpy as np\n'), ((11857, 11881), 'solar_system.U_ij', 'U_ij', (['q_vars', 'mass', 'i', 'j'], {}), '(q_vars, mass, i, j)\n', (11861, 11881), False, 'from solar_system import jpl_kernel, jpl_body_id, simulate_leapfrog, calc_mse, plot_energy, U_ij\n'), ((12151, 12171), 'numpy.argmax', 'np.argmax', (['(vn_q == v)'], {}), '(vn_q == v)\n', (12160, 12171), True, 'import numpy as np\n'), ((12287, 12308), 'numpy.argmax', 'np.argmax', (['(vn_fl == v)'], {}), '(vn_fl == v)\n', (12296, 12308), True, 'import numpy as np\n')] |
import os
# If server, need to use osmesa for pyopengl/pyrender
if os.cpu_count() > 20:
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
# https://github.com/marian42/mesh_to_sdf/issues/13
# https://pyrender.readthedocs.io/en/latest/install/index.html?highlight=ssh#getting-pyrender-working-with-osmesa
else:
os.environ['PYOPENGL_PLATFORM'] = 'egl' # default one was pyglet, which hangs sometime for unknown reason: https://github.com/marian42/mesh_to_sdf/issues/19;
import numpy as np
import psutil
import concurrent.futures
import trimesh
from mesh_to_sdf import get_surface_point_cloud, BadMeshException
from util.misc import ensure_directory
class PointSampler:
def __init__(self, num_cpus=16,
cpu_offset=0,
num_sdf_available_per_obj=20000, # 200000, was 1000!
num_surface_per_obj=2048,
model_extension='.stl',
**kwargs):
self.MODEL_EXTENSION = model_extension
self.SDF_CLOUD_SAMPLE_SIZE = num_sdf_available_per_obj
self.SURFACE_CLOUD_SAMPLE_SIZE = num_surface_per_obj
self.num_cpus = num_cpus
self.cpu_offset = cpu_offset
def reset_dir(self, directory):
self.DIRECTORY_MODELS = directory
self.DIRECTORY_SDF = directory + 'sdf/'
def get_npy_filename(self, model_filename, qualifier=''):
return self.DIRECTORY_SDF + model_filename[len(self.DIRECTORY_MODELS):-len(self.MODEL_EXTENSION)] + qualifier + '.npy'
############################################################################
def get_bad_mesh_filename(self, model_filename):
return self.DIRECTORY_SDF + model_filename[len(self.DIRECTORY_MODELS):-len(self.MODEL_EXTENSION)] + '.badmesh'
def mark_bad_mesh(self, model_filename):
filename = self.get_bad_mesh_filename(model_filename)
ensure_directory(os.path.dirname(filename))
open(filename, 'w').close()
def is_bad_mesh(self, model_filename):
return os.path.exists(self.get_bad_mesh_filename(model_filename))
############################################################################
def process_model_file(self, file_list, cpu_id):
# Assign CPU
ps = psutil.Process()
ps.cpu_affinity([cpu_id])
for filename in file_list:
sdf_cloud_filename = self.get_npy_filename(filename, '-sdf')
surface_cloud_filename = self.get_npy_filename(filename, '-surface')
if self.is_bad_mesh(filename):
continue
# Load mesh
mesh = trimesh.load(filename)
# mesh = scale_to_unit_sphere(mesh) # do not scale!
mesh_bounds = np.maximum(abs(mesh.bounds[0]), mesh.bounds[1]) # half, use the larger side for all 3 dims, but already centered
# Sample point cloud (surface) of the object
pcl = trimesh.sample.sample_surface(mesh, self.SURFACE_CLOUD_SAMPLE_SIZE)[0] # points and indices
np.save(surface_cloud_filename, pcl)
# Sample sdf (heavy computations)
surface_point_cloud = get_surface_point_cloud(mesh, surface_point_method='scan', scan_count=100, scan_resolution=400, sample_point_count=1000000) # default uses 10m for sample method, scan method creates about 4m points
# surface_point_method: The method to generate a surface point cloud. Either 'scan' or 'sample'. The scanning method creates virtual scans while the sampling method uses the triangles to sample surface points. The sampling method only works with watertight meshes with correct face normals, but avoids some of the artifacts that the scanning method creates.
try:
points, sdf, model_size = surface_point_cloud.sample_sdf_near_surface_with_bounds(mesh_bounds=mesh_bounds, number_of_points=self.SDF_CLOUD_SAMPLE_SIZE, sign_method='depth', min_size=0.015,)
# sign_method: The method to determine the signs of the SDF values. Either 'normal' or 'depth'. The normal method uses normals of the point cloud. It works better for meshes with holes, but sometimes results in "bubble" artifacts. The depth method avoids the bubble artifacts but is less accurate.
combined = np.concatenate((points, sdf[:, np.newaxis]), axis=1)
ensure_directory(os.path.dirname(sdf_cloud_filename))
np.save(sdf_cloud_filename, combined)
# Debug
if model_size < 0.1:
print(model_size, filename)
except BadMeshException:
# tqdm.write("Skipping bad mesh. ({:s})".format(filename))
self.mark_bad_mesh(filename)
continue
return 1
def process_model_file_helper(self, args):
    """Adapter for executor.map: unpack a packed (file batch, cpu index) tuple."""
    file_batch = args[0]
    cpu_index = args[1]
    return self.process_model_file(file_batch, cpu_index)
def sample_new_surface_point_sdf(self, obj_id_list):
    """Sample surface point clouds and SDF clouds for the given object ids.

    Path already reset to the target one; do not combine.

    :param obj_id_list: iterable of object ids; each maps to a mesh file
        ``<DIRECTORY_MODELS><id>.stl``
    :return: 1 on completion
    """
    ensure_directory(self.DIRECTORY_SDF)
    # `obj_id` instead of the original `id`, which shadowed the builtin.
    files = [self.DIRECTORY_MODELS + str(obj_id) + '.stl'
             for obj_id in obj_id_list]
    # Split the file indices into one batch per CPU.
    num_trial = len(files)
    file_id_batch_all = np.array_split(np.arange(num_trial), self.num_cpus)
    args = (([files[i] for i in file_id_batch],
             self.cpu_offset + batch_ind)
            for batch_ind, file_id_batch in enumerate(file_id_batch_all))
    # Order does not matter; list() drains the map so all batches finish.
    # The `with` block already shuts the executor down, so the previous
    # explicit executor.shutdown() call was redundant and is removed.
    with concurrent.futures.ProcessPoolExecutor(self.num_cpus) as executor:
        list(executor.map(self.process_model_file_helper, args))
    return 1
if __name__ == '__main__':
    # Script entry point: sample SDF and surface point clouds for objects
    # 0..254 using 16 worker processes.
    sampler = PointSampler(num_cpus=16,
                           cpu_offset=0,
                           num_sdf_available_per_obj=20000,
                           num_surface_per_obj=2048)
    # NOTE(review): the empty string presumably selects a default target
    # directory inside reset_dir -- confirm against PointSampler.
    sampler.reset_dir(directory='')
    sampler.sample_new_surface_point_sdf(obj_id_list=np.arange(255))
| [
"psutil.Process",
"trimesh.load",
"numpy.save",
"trimesh.sample.sample_surface",
"os.path.dirname",
"util.misc.ensure_directory",
"os.cpu_count",
"numpy.arange",
"mesh_to_sdf.get_surface_point_cloud",
"numpy.concatenate"
] | [((67, 81), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (79, 81), False, 'import os\n'), ((2061, 2077), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (2075, 2077), False, 'import psutil\n'), ((4480, 4516), 'util.misc.ensure_directory', 'ensure_directory', (['self.DIRECTORY_SDF'], {}), '(self.DIRECTORY_SDF)\n', (4496, 4516), False, 'from util.misc import ensure_directory\n'), ((1731, 1756), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1746, 1756), False, 'import os\n'), ((2346, 2368), 'trimesh.load', 'trimesh.load', (['filename'], {}), '(filename)\n', (2358, 2368), False, 'import trimesh\n'), ((2707, 2743), 'numpy.save', 'np.save', (['surface_cloud_filename', 'pcl'], {}), '(surface_cloud_filename, pcl)\n', (2714, 2743), True, 'import numpy as np\n'), ((2807, 2934), 'mesh_to_sdf.get_surface_point_cloud', 'get_surface_point_cloud', (['mesh'], {'surface_point_method': '"""scan"""', 'scan_count': '(100)', 'scan_resolution': '(400)', 'sample_point_count': '(1000000)'}), "(mesh, surface_point_method='scan', scan_count=100,\n scan_resolution=400, sample_point_count=1000000)\n", (2830, 2934), False, 'from mesh_to_sdf import get_surface_point_cloud, BadMeshException\n'), ((4678, 4698), 'numpy.arange', 'np.arange', (['num_trial'], {}), '(num_trial)\n', (4687, 4698), True, 'import numpy as np\n'), ((5314, 5328), 'numpy.arange', 'np.arange', (['(255)'], {}), '(255)\n', (5323, 5328), True, 'import numpy as np\n'), ((2612, 2679), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['mesh', 'self.SURFACE_CLOUD_SAMPLE_SIZE'], {}), '(mesh, self.SURFACE_CLOUD_SAMPLE_SIZE)\n', (2641, 2679), False, 'import trimesh\n'), ((3885, 3937), 'numpy.concatenate', 'np.concatenate', (['(points, sdf[:, np.newaxis])'], {'axis': '(1)'}), '((points, sdf[:, np.newaxis]), axis=1)\n', (3899, 3937), True, 'import numpy as np\n'), ((4000, 4037), 'numpy.save', 'np.save', (['sdf_cloud_filename', 'combined'], {}), '(sdf_cloud_filename, 
combined)\n', (4007, 4037), True, 'import numpy as np\n'), ((3959, 3994), 'os.path.dirname', 'os.path.dirname', (['sdf_cloud_filename'], {}), '(sdf_cloud_filename)\n', (3974, 3994), False, 'import os\n')] |
"""Geometry conversion utilities."""
import re
from typing import NewType, Tuple
import numpy as np
import scipy.constants # type: ignore[import]
# Vector of atomic species symbols, e.g. ("Si", "O")
Species = NewType('Species', Tuple[str, ...])
# Crystal basis: 3x3 matrix whose rows are the lattice vectors (angstrom)
Basis = NewType('Basis', np.ndarray)
# Position matrix of shape (n_atoms, 3)
Tau = NewType('Tau', np.ndarray)
# Unit cell dimensions (a, b, c) in angstrom and angles (alpha, beta, gamma)
# (angles in radians, as produced by cell_parameters)
CellParam = NewType("CellParam", Tuple[float, float, float, float, float, float])
def convert_coords(alat, basis, tau, in_type, out_type):
    # type: (float, Basis, Tau, str, str) -> Tau
    """Convert atomic positions between coordinate types.

    :param alat: lattice parameter in bohr
    :param basis: basis in angstrom
    :param tau: position matrix of shape (n_atoms, 3)
    :param in_type: coordinate type of `tau`
    :param out_type: coordinate type to return
    """
    # TODO: deal with coord types such as "alat = 3.2"; either here or in callers
    if in_type != out_type:
        # Crystal coordinates serve as the common intermediate representation.
        crystal_tau = to_crystal(alat, basis, tau, in_type)
        tau = from_crystal(alat, basis, crystal_tau, out_type)
    return tau
def to_crystal(alat, basis, tau, in_type):
    # type: (float, Basis, Tau, str) -> Tau
    """Convert positions from `in_type` coordinates into crystal coordinates."""
    bohr_to_ang = scipy.constants.value("Bohr radius") / scipy.constants.angstrom
    # Guard-clause dispatch on the coordinate label.
    if in_type == 'crystal':
        return tau
    if in_type == 'alat':
        return alat * bohr_to_ang * tau @ np.linalg.inv(basis)
    if in_type == 'angstrom':
        return tau @ np.linalg.inv(basis)
    raise ValueError("Coord. type {}".format(in_type))
def from_crystal(alat, basis, tau, out_type):
    # type: (float, Basis, Tau, str) -> Tau
    """Convert positions from crystal coordinates into `out_type` coordinates."""
    ang_to_bohr = scipy.constants.angstrom / scipy.constants.value("Bohr radius")
    # lattice vectors are rows of the basis
    if out_type == 'crystal':
        return tau
    if out_type == 'alat':
        return (1 / alat) * ang_to_bohr * tau @ basis
    if out_type == 'angstrom':
        return tau @ basis
    raise ValueError("Coord. type {}".format(out_type))
def _basis_to_bohr(basis: Basis, in_type: str) -> Basis:
    """Scale basis to bohr units.

    :param basis: basis matrix in `in_type` units
    :param in_type: "angstrom", "bohr", or "alat = <value>" (value in bohr)
    :raises ValueError: if `in_type` is not recognized
    """
    ang_to_bohr = scipy.constants.angstrom / scipy.constants.value("Bohr radius")
    if in_type == "angstrom":
        return ang_to_bohr * basis
    if in_type == "bohr":
        return basis
    # "alat = <value>" embeds the scale factor in the coordinate label.
    # Fix: the original evaluated alat_re.match(in_type) twice per call
    # (and recompiled the pattern every call); match once and reuse.
    # re.match uses the module-level pattern cache.
    match = re.match(r"^alat *= +([.\d]+)$", in_type)
    if match:
        return float(match.group(1)) * basis
    raise ValueError(f"Bad basis coordinates {in_type}")
def cell_alat(basis: Basis, in_type="bohr") -> float:
    """Calculate alat (defined as the length of the first lattice vector, in bohr)."""
    return np.linalg.norm(_basis_to_bohr(basis, in_type)[0])
def convert_basis(basis: Basis, in_type: str, out_type: str) -> Basis:
    """Scale a basis between unit systems.

    :param basis: basis in input coordinates
    :param in_type: input units; "alat" input should contain the value of alat
    :param out_type: desired output coordinates; "alat" output will redefine alat
    :returns: rescaled basis
    :raises ValueError: if `out_type` is not recognized
    """
    bohr_to_ang = scipy.constants.value("Bohr radius") / scipy.constants.angstrom
    # Bohr is the common intermediate representation.
    basis_bohr = _basis_to_bohr(basis, in_type)
    if out_type == "angstrom":
        return bohr_to_ang * basis_bohr
    if out_type == "bohr":
        return basis_bohr
    if out_type == "alat":
        # Rescale by this cell's own alat.
        return basis_bohr / cell_alat(basis_bohr)
    raise ValueError(f"Bad basis coordinates {out_type}")
def cell_volume(basis):
    # type: (Basis) -> float
    """Return the unit cell volume in A^3 (determinant of the basis matrix)."""
    determinant = np.linalg.det(basis)
    return float(determinant)
def cell_parameters(basis):
    # type: (Basis) -> CellParam
    """Return unit cell edge lengths (angstrom) and angles (radians).

    The absolute value of the dot product folds every angle into
    [0, pi/2]: an obtuse cell angle is reported as its acute complement.
    """
    def _angle_between(u, v):
        # type: (np.ndarray, np.ndarray) -> float
        cos_uv = np.abs(np.dot(u, v)) / np.linalg.norm(u) / np.linalg.norm(v)
        return np.arccos(cos_uv)

    len_a, len_b, len_c = (np.linalg.norm(basis[axis]) for axis in range(3))
    alpha = _angle_between(basis[1], basis[2])
    beta = _angle_between(basis[0], basis[2])
    gamma = _angle_between(basis[0], basis[1])
    return CellParam((len_a, len_b, len_c, alpha, beta, gamma))
| [
"numpy.linalg.det",
"numpy.linalg.norm",
"numpy.linalg.inv",
"numpy.dot",
"typing.NewType",
"numpy.arccos",
"re.compile"
] | [((187, 222), 'typing.NewType', 'NewType', (['"""Species"""', 'Tuple[str, ...]'], {}), "('Species', Tuple[str, ...])\n", (194, 222), False, 'from typing import NewType, Tuple\n'), ((253, 281), 'typing.NewType', 'NewType', (['"""Basis"""', 'np.ndarray'], {}), "('Basis', np.ndarray)\n", (260, 281), False, 'from typing import NewType, Tuple\n'), ((320, 346), 'typing.NewType', 'NewType', (['"""Tau"""', 'np.ndarray'], {}), "('Tau', np.ndarray)\n", (327, 346), False, 'from typing import NewType, Tuple\n'), ((436, 505), 'typing.NewType', 'NewType', (['"""CellParam"""', 'Tuple[float, float, float, float, float, float]'], {}), "('CellParam', Tuple[float, float, float, float, float, float])\n", (443, 505), False, 'from typing import NewType, Tuple\n'), ((2351, 2385), 're.compile', 're.compile', (['"""^alat *= +([.\\\\d]+)$"""'], {}), "('^alat *= +([.\\\\d]+)$')\n", (2361, 2385), False, 'import re\n'), ((2950, 2979), 'numpy.linalg.norm', 'np.linalg.norm', (['basis_bohr[0]'], {}), '(basis_bohr[0])\n', (2964, 2979), True, 'import numpy as np\n'), ((4326, 4350), 'numpy.linalg.norm', 'np.linalg.norm', (['basis[0]'], {}), '(basis[0])\n', (4340, 4350), True, 'import numpy as np\n'), ((4363, 4387), 'numpy.linalg.norm', 'np.linalg.norm', (['basis[1]'], {}), '(basis[1])\n', (4377, 4387), True, 'import numpy as np\n'), ((4400, 4424), 'numpy.linalg.norm', 'np.linalg.norm', (['basis[2]'], {}), '(basis[2])\n', (4414, 4424), True, 'import numpy as np\n'), ((3935, 3955), 'numpy.linalg.det', 'np.linalg.det', (['basis'], {}), '(basis)\n', (3948, 3955), True, 'import numpy as np\n'), ((4295, 4312), 'numpy.arccos', 'np.arccos', (['cos_ab'], {}), '(cos_ab)\n', (4304, 4312), True, 'import numpy as np\n'), ((4249, 4269), 'numpy.linalg.norm', 'np.linalg.norm', (['vec2'], {}), '(vec2)\n', (4263, 4269), True, 'import numpy as np\n'), ((1537, 1557), 'numpy.linalg.inv', 'np.linalg.inv', (['basis'], {}), '(basis)\n', (1550, 1557), True, 'import numpy as np\n'), ((4226, 4246), 'numpy.linalg.norm', 
'np.linalg.norm', (['vec1'], {}), '(vec1)\n', (4240, 4246), True, 'import numpy as np\n'), ((1611, 1631), 'numpy.linalg.inv', 'np.linalg.inv', (['basis'], {}), '(basis)\n', (1624, 1631), True, 'import numpy as np\n'), ((4204, 4222), 'numpy.dot', 'np.dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (4210, 4222), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
import h5py
import numpy as np
from tronn.plot.visualization import plot_pwm
from tronn.util.pwms import PWM
def main():
    """Plot forward and reverse-complement PWM logos from a tf-modisco file.

    Command-line args: tfmodisco_file metacluster_name pattern_name plot_prefix
    """
    # args
    tfmodisco_file = sys.argv[1]
    metacluster_name = sys.argv[2]
    pattern_name = sys.argv[3]
    plot_prefix = sys.argv[4]
    background_freq = 0.25  # uniform background; keep it simple
    # read in tfmodisco file, plot both fwd and reverse
    # any trimming??
    with h5py.File(tfmodisco_file, "r") as hf:
        # per-position base probabilities (fwd strand) for the pattern
        probs = hf["metacluster_idx_to_submetacluster_results"][
            metacluster_name]["seqlets_to_patterns_result"]["patterns"][pattern_name]["sequence"]["fwd"][:]
    # File is closed here; `probs` is an in-memory array (the original held
    # the HDF5 file open for the whole plotting phase).
    probs[probs == 0] = 0.0001  # avoid log2(0) below
    # Convert probabilities to log2 odds against the background.
    # Vectorized; replaces the previous per-row Python loop.
    weights = np.log2(probs / background_freq).transpose(1, 0)
    # load to PWM class, trim low-information flanks
    pwm = PWM(weights)
    pwm = pwm.chomp(ic_thresh=0.4)
    # plot fwd
    plot_file = "{}.{}.{}.fwd.pdf".format(plot_prefix, metacluster_name, pattern_name)
    plot_pwm(pwm.get_probs().transpose(), plot_file)
    # plot rev
    plot_file = "{}.{}.{}.rev.pdf".format(plot_prefix, metacluster_name, pattern_name)
    plot_pwm(pwm.reverse_complement().get_probs().transpose(), plot_file)


if __name__ == "__main__":
    # Guard the entry point so importing this module doesn't run the script.
    main()
| [
"tronn.util.pwms.PWM",
"h5py.File",
"numpy.array"
] | [((468, 498), 'h5py.File', 'h5py.File', (['tfmodisco_file', '"""r"""'], {}), "(tfmodisco_file, 'r')\n", (477, 498), False, 'import h5py\n'), ((1054, 1066), 'tronn.util.pwms.PWM', 'PWM', (['weights'], {}), '(weights)\n', (1057, 1066), False, 'from tronn.util.pwms import PWM\n'), ((972, 989), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (980, 989), True, 'import numpy as np\n'), ((889, 912), 'numpy.array', 'np.array', (['probs[idx, :]'], {}), '(probs[idx, :])\n', (897, 912), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
def mask3d(nx, ny, nz, center_r=None, undersampling=0.5):
    """Create a random 3D k-space undersampling mask with a fully sampled center.

    :param nx: mask size along x
    :param ny: mask size along y
    :param nz: mask size along z
    :param center_r: per-axis half-widths of the fully sampled center block;
        0 means "full extent along that axis", any negative value disables
        the center block entirely. Defaults to [15, 15, 0].
    :param undersampling: fraction of points sampled at random
    :return: float array of shape (nx, ny, nz) with entries in {0, 1}
    """
    if center_r is None:  # avoid a mutable default argument
        center_r = [15, 15, 0]
    mask_shape = np.array([nx, ny, nz])
    Npts = mask_shape.prod()  # total number of data points
    k = int(round(Npts * undersampling))  # number of randomly sampled points
    ri = np.random.choice(Npts, k, replace=False)  # index for undersampling
    ma = np.zeros(Npts)  # initialize an all zero vector
    ma[ri] = 1  # set sampled data points to 1
    mask = ma.reshape(mask_shape)

    def _center_range(axis):
        """Fully sampled index range along `axis`, or None if disabled."""
        r = center_r[axis]
        if r > 0:
            return np.arange(-r, r + 1) + mask_shape[axis] // 2
        # BUG fix: the original compared with `r is 0`, relying on CPython's
        # small-integer caching; use numeric equality instead.
        if r == 0:
            return np.arange(mask_shape[axis])
        return None

    ranges = [_center_range(axis) for axis in range(3)]
    if all(r is not None for r in ranges):
        # center k-space is fully sampled
        mask[np.ix_(*ranges)] = np.ones(tuple(r.shape[0] for r in ranges))
    return mask
def calc_SNR(y, y_):
    """Signal-to-noise ratio in dB of reconstruction `y` against reference `y_`."""
    recon = np.array(y).flatten()
    ref = np.array(y_).flatten()
    noise_power = np.linalg.norm(ref - recon) ** 2
    signal_power = np.linalg.norm(ref) ** 2
    return 10 * np.log10(signal_power / noise_power)
def tempfft(input, inv):
    """FFT along the temporal axis (axis 1) with orthonormal scaling.

    :param input: complex tensor shaped (nb, nt, nx, ny) or (nb, nt, nx, ny, c)
    :param inv: truthy selects the inverse transform (result scaled by
        sqrt(nt)); falsy selects the forward transform (scaled by 1/sqrt(nt))
    :return: transformed tensor in the original axis order
    """
    # NOTE: `input` shadows the Python builtin of the same name.
    if len(input.shape) == 4:
        nb, nt, nx, ny = np.float32(input.shape)
        # Temporal length as a complex scalar so it can scale the spectrum.
        nt = tf.constant(np.complex64(nt + 0j))
        if inv:
            # tf.signal.ifft transforms the last axis, so rotate nt there
            # and back.
            x = tf.transpose(input, perm=[0, 2, 3, 1])
            # x = tf.signal.fftshift(x, 3)
            x = tf.signal.ifft(x)
            x = tf.transpose(x, perm=[0, 3, 1, 2])
            x = x * tf.sqrt(nt)
        else:
            x = tf.transpose(input, perm=[0, 2, 3, 1])
            x = tf.signal.fft(x)
            # x = tf.signal.fftshift(x, 3)
            x = tf.transpose(x, perm=[0, 3, 1, 2])
            x = x / tf.sqrt(nt)
    else:
        # 5-D case: an extra trailing axis (presumably coils/channels --
        # confirm against callers); same scheme with nt rotated to the end.
        nb, nt, nx, ny, _ = np.float32(input.shape)
        nt = tf.constant(np.complex64(nt + 0j))
        if inv:
            x = tf.transpose(input, perm=[0, 2, 3, 4, 1])
            # x = tf.signal.fftshift(x, 4)
            x = tf.signal.ifft(x)
            x = tf.transpose(x, perm=[0, 4, 1, 2, 3])
            x = x * tf.sqrt(nt)
        else:
            x = tf.transpose(input, perm=[0, 2, 3, 4, 1])
            x = tf.signal.fft(x)
            # x = tf.signal.fftshift(x, 4)
            x = tf.transpose(x, perm=[0, 4, 1, 2, 3])
            x = x / tf.sqrt(nt)
    return x
def mse(recon, label):
    """Mean squared error; complex64 inputs are compared on stacked real/imag parts."""
    if recon.dtype == tf.complex64:
        diff = recon - label
        parts = tf.stack([tf.math.real(diff), tf.math.imag(diff)], axis=-1)
        return tf.reduce_mean(parts ** 2)
    diff = recon - label
    return tf.reduce_mean(diff ** 2)
def fft2c_mri(x):
    """Centered 2D FFT with orthonormal (1/sqrt(N)) scaling over axes 2 and 3.

    :param x: complex tensor; the unpacking below assumes layout (nb, nt, nx, ny)
    :return: k-space tensor of the same shape, scaled by 1/sqrt(nx*ny)
    """
    # nb nx ny nt
    # NOTE(review): the layout comment above disagrees with the unpacking
    # below, which reads (nb, nt, nx, ny) -- confirm intended layout.
    X = tf.signal.ifftshift(x, 2)
    # tf.signal.fft transforms the last axis, so move nx there and back.
    X = tf.transpose(X, perm=[0, 1, 3, 2])  # permute to make nx dimension the last one.
    X = tf.signal.fft(X)
    X = tf.transpose(X, perm=[0, 1, 3, 2])  # permute back to original order.
    nb, nt, nx, ny = np.float32(x.shape)
    # Axis lengths as complex scalars so they can scale the complex spectrum.
    nx = tf.constant(np.complex64(nx + 0j))
    ny = tf.constant(np.complex64(ny + 0j))
    X = tf.signal.fftshift(X, 2) / tf.sqrt(nx)  # orthonormal 1/sqrt(nx)
    X = tf.signal.ifftshift(X, 3)
    X = tf.signal.fft(X)
    X = tf.signal.fftshift(X, 3) / tf.sqrt(ny)  # orthonormal 1/sqrt(ny)
    return X
def ifft2c_mri(X):
    """Centered inverse 2D FFT with orthonormal (sqrt(N)) scaling over axes 2 and 3.

    Inverse of fft2c_mri.

    :param X: complex k-space tensor; the unpacking below assumes (nb, nt, nx, ny)
    :return: image-space tensor of the same shape, scaled by sqrt(nx*ny)
    """
    # nb nx ny nt
    # NOTE(review): the layout comment above disagrees with the unpacking
    # below, which reads (nb, nt, nx, ny) -- confirm intended layout.
    x = tf.signal.ifftshift(X, 2)
    # tf.signal.ifft transforms the last axis, so move nx there and back.
    x = tf.transpose(x, perm=[0, 1, 3, 2])  # permute a to make nx dimension the last one.
    x = tf.signal.ifft(x)
    x = tf.transpose(x, perm=[0, 1, 3, 2])  # permute back to original order.
    nb, nt, nx, ny = np.float32(X.shape)
    # Axis lengths as complex scalars so they can scale the complex result.
    nx = tf.constant(np.complex64(nx + 0j))
    ny = tf.constant(np.complex64(ny + 0j))
    x = tf.signal.fftshift(x, 2) * tf.sqrt(nx)  # orthonormal sqrt(nx)
    x = tf.signal.ifftshift(x, 3)
    x = tf.signal.ifft(x)
    x = tf.signal.fftshift(x, 3) * tf.sqrt(ny)  # orthonormal sqrt(ny)
    return x
def sos(x):
    """Sum-of-squares combination over axis 1.

    x: nb, ncoil, nt, nx, ny; complex64
    """
    power = tf.math.reduce_sum(tf.abs(x ** 2), axis=1)
    return power ** (1.0 / 2)
def softthres(x, thres):
    """Complex soft-thresholding: shrink |x| toward zero by `thres`, keeping phase."""
    magnitude = tf.abs(x)
    # 1e-10 guards against division by zero where |x| == 0.
    shrink = tf.nn.relu(magnitude - thres) / (magnitude + 1e-10)
    shrink = tf.cast(shrink, tf.complex64)
    return shrink * x
| [
"numpy.ones",
"tensorflow.signal.fft",
"numpy.linalg.norm",
"numpy.arange",
"tensorflow.signal.fftshift",
"numpy.complex64",
"tensorflow.sqrt",
"tensorflow.signal.ifft",
"tensorflow.abs",
"tensorflow.nn.relu",
"tensorflow.math.real",
"tensorflow.cast",
"numpy.random.choice",
"tensorflow.ma... | [((159, 181), 'numpy.array', 'np.array', (['[nx, ny, nz]'], {}), '([nx, ny, nz])\n', (167, 181), True, 'import numpy as np\n'), ((309, 349), 'numpy.random.choice', 'np.random.choice', (['Npts', 'k'], {'replace': '(False)'}), '(Npts, k, replace=False)\n', (325, 349), True, 'import numpy as np\n'), ((386, 400), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (394, 400), True, 'import numpy as np\n'), ((3281, 3306), 'tensorflow.signal.ifftshift', 'tf.signal.ifftshift', (['x', '(2)'], {}), '(x, 2)\n', (3300, 3306), True, 'import tensorflow as tf\n'), ((3315, 3349), 'tensorflow.transpose', 'tf.transpose', (['X'], {'perm': '[0, 1, 3, 2]'}), '(X, perm=[0, 1, 3, 2])\n', (3327, 3349), True, 'import tensorflow as tf\n'), ((3404, 3420), 'tensorflow.signal.fft', 'tf.signal.fft', (['X'], {}), '(X)\n', (3417, 3420), True, 'import tensorflow as tf\n'), ((3429, 3463), 'tensorflow.transpose', 'tf.transpose', (['X'], {'perm': '[0, 1, 3, 2]'}), '(X, perm=[0, 1, 3, 2])\n', (3441, 3463), True, 'import tensorflow as tf\n'), ((3520, 3539), 'numpy.float32', 'np.float32', (['x.shape'], {}), '(x.shape)\n', (3530, 3539), True, 'import numpy as np\n'), ((3684, 3709), 'tensorflow.signal.ifftshift', 'tf.signal.ifftshift', (['X', '(3)'], {}), '(X, 3)\n', (3703, 3709), True, 'import tensorflow as tf\n'), ((3718, 3734), 'tensorflow.signal.fft', 'tf.signal.fft', (['X'], {}), '(X)\n', (3731, 3734), True, 'import tensorflow as tf\n'), ((3843, 3868), 'tensorflow.signal.ifftshift', 'tf.signal.ifftshift', (['X', '(2)'], {}), '(X, 2)\n', (3862, 3868), True, 'import tensorflow as tf\n'), ((3877, 3911), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 1, 3, 2]'}), '(x, perm=[0, 1, 3, 2])\n', (3889, 3911), True, 'import tensorflow as tf\n'), ((3968, 3985), 'tensorflow.signal.ifft', 'tf.signal.ifft', (['x'], {}), '(x)\n', (3982, 3985), True, 'import tensorflow as tf\n'), ((3994, 4028), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 1, 3, 2]'}), '(x, 
perm=[0, 1, 3, 2])\n', (4006, 4028), True, 'import tensorflow as tf\n'), ((4085, 4104), 'numpy.float32', 'np.float32', (['X.shape'], {}), '(X.shape)\n', (4095, 4104), True, 'import numpy as np\n'), ((4249, 4274), 'tensorflow.signal.ifftshift', 'tf.signal.ifftshift', (['x', '(3)'], {}), '(x, 3)\n', (4268, 4274), True, 'import tensorflow as tf\n'), ((4283, 4300), 'tensorflow.signal.ifft', 'tf.signal.ifft', (['x'], {}), '(x)\n', (4297, 4300), True, 'import tensorflow as tf\n'), ((4544, 4553), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (4550, 4553), True, 'import tensorflow as tf\n'), ((4620, 4647), 'tensorflow.cast', 'tf.cast', (['coef', 'tf.complex64'], {}), '(coef, tf.complex64)\n', (4627, 4647), True, 'import tensorflow as tf\n'), ((1387, 1438), 'numpy.ones', 'np.ones', (['(cxr.shape[0], cyr.shape[0], czr.shape[0])'], {}), '((cxr.shape[0], cyr.shape[0], czr.shape[0]))\n', (1394, 1438), True, 'import numpy as np\n'), ((1585, 1607), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_ - y)'], {}), '(y_ - y)\n', (1599, 1607), True, 'import numpy as np\n'), ((1766, 1789), 'numpy.float32', 'np.float32', (['input.shape'], {}), '(input.shape)\n', (1776, 1789), True, 'import numpy as np\n'), ((2336, 2359), 'numpy.float32', 'np.float32', (['input.shape'], {}), '(input.shape)\n', (2346, 2359), True, 'import numpy as np\n'), ((3103, 3132), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(residual ** 2)'], {}), '(residual ** 2)\n', (3117, 3132), True, 'import tensorflow as tf\n'), ((3190, 3219), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(residual ** 2)'], {}), '(residual ** 2)\n', (3204, 3219), True, 'import tensorflow as tf\n'), ((3561, 3584), 'numpy.complex64', 'np.complex64', (['(nx + 0.0j)'], {}), '(nx + 0.0j)\n', (3573, 3584), True, 'import numpy as np\n'), ((3605, 3628), 'numpy.complex64', 'np.complex64', (['(ny + 0.0j)'], {}), '(ny + 0.0j)\n', (3617, 3628), True, 'import numpy as np\n'), ((3636, 3660), 'tensorflow.signal.fftshift', 'tf.signal.fftshift', (['X', 
'(2)'], {}), '(X, 2)\n', (3654, 3660), True, 'import tensorflow as tf\n'), ((3663, 3674), 'tensorflow.sqrt', 'tf.sqrt', (['nx'], {}), '(nx)\n', (3670, 3674), True, 'import tensorflow as tf\n'), ((3743, 3767), 'tensorflow.signal.fftshift', 'tf.signal.fftshift', (['X', '(3)'], {}), '(X, 3)\n', (3761, 3767), True, 'import tensorflow as tf\n'), ((3770, 3781), 'tensorflow.sqrt', 'tf.sqrt', (['ny'], {}), '(ny)\n', (3777, 3781), True, 'import tensorflow as tf\n'), ((4126, 4149), 'numpy.complex64', 'np.complex64', (['(nx + 0.0j)'], {}), '(nx + 0.0j)\n', (4138, 4149), True, 'import numpy as np\n'), ((4170, 4193), 'numpy.complex64', 'np.complex64', (['(ny + 0.0j)'], {}), '(ny + 0.0j)\n', (4182, 4193), True, 'import numpy as np\n'), ((4201, 4225), 'tensorflow.signal.fftshift', 'tf.signal.fftshift', (['x', '(2)'], {}), '(x, 2)\n', (4219, 4225), True, 'import tensorflow as tf\n'), ((4228, 4239), 'tensorflow.sqrt', 'tf.sqrt', (['nx'], {}), '(nx)\n', (4235, 4239), True, 'import tensorflow as tf\n'), ((4309, 4333), 'tensorflow.signal.fftshift', 'tf.signal.fftshift', (['x', '(3)'], {}), '(x, 3)\n', (4327, 4333), True, 'import tensorflow as tf\n'), ((4336, 4347), 'tensorflow.sqrt', 'tf.sqrt', (['ny'], {}), '(ny)\n', (4343, 4347), True, 'import tensorflow as tf\n'), ((4445, 4459), 'tensorflow.abs', 'tf.abs', (['(x ** 2)'], {}), '(x ** 2)\n', (4451, 4459), True, 'import tensorflow as tf\n'), ((4565, 4590), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x_abs - thres)'], {}), '(x_abs - thres)\n', (4575, 4590), True, 'import tensorflow as tf\n'), ((614, 654), 'numpy.arange', 'np.arange', (['(-center_r[0])', '(center_r[0] + 1)'], {}), '(-center_r[0], center_r[0] + 1)\n', (623, 654), True, 'import numpy as np\n'), ((717, 741), 'numpy.arange', 'np.arange', (['mask_shape[0]'], {}), '(mask_shape[0])\n', (726, 741), True, 'import numpy as np\n'), ((854, 894), 'numpy.arange', 'np.arange', (['(-center_r[1])', '(center_r[1] + 1)'], {}), '(-center_r[1], center_r[1] + 1)\n', (863, 894), True, 'import 
numpy as np\n'), ((957, 981), 'numpy.arange', 'np.arange', (['mask_shape[1]'], {}), '(mask_shape[1])\n', (966, 981), True, 'import numpy as np\n'), ((1094, 1134), 'numpy.arange', 'np.arange', (['(-center_r[2])', '(center_r[2] + 1)'], {}), '(-center_r[2], center_r[2] + 1)\n', (1103, 1134), True, 'import numpy as np\n'), ((1197, 1221), 'numpy.arange', 'np.arange', (['mask_shape[2]'], {}), '(mask_shape[2])\n', (1206, 1221), True, 'import numpy as np\n'), ((1348, 1369), 'numpy.ix_', 'np.ix_', (['cxr', 'cyr', 'czr'], {}), '(cxr, cyr, czr)\n', (1354, 1369), True, 'import numpy as np\n'), ((1521, 1532), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1529, 1532), True, 'import numpy as np\n'), ((1552, 1564), 'numpy.array', 'np.array', (['y_'], {}), '(y_)\n', (1560, 1564), True, 'import numpy as np\n'), ((1815, 1838), 'numpy.complex64', 'np.complex64', (['(nt + 0.0j)'], {}), '(nt + 0.0j)\n', (1827, 1838), True, 'import numpy as np\n'), ((1871, 1909), 'tensorflow.transpose', 'tf.transpose', (['input'], {'perm': '[0, 2, 3, 1]'}), '(input, perm=[0, 2, 3, 1])\n', (1883, 1909), True, 'import tensorflow as tf\n'), ((1969, 1986), 'tensorflow.signal.ifft', 'tf.signal.ifft', (['x'], {}), '(x)\n', (1983, 1986), True, 'import tensorflow as tf\n'), ((2003, 2037), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 3, 1, 2]'}), '(x, perm=[0, 3, 1, 2])\n', (2015, 2037), True, 'import tensorflow as tf\n'), ((2100, 2138), 'tensorflow.transpose', 'tf.transpose', (['input'], {'perm': '[0, 2, 3, 1]'}), '(input, perm=[0, 2, 3, 1])\n', (2112, 2138), True, 'import tensorflow as tf\n'), ((2155, 2171), 'tensorflow.signal.fft', 'tf.signal.fft', (['x'], {}), '(x)\n', (2168, 2171), True, 'import tensorflow as tf\n'), ((2231, 2265), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 3, 1, 2]'}), '(x, perm=[0, 3, 1, 2])\n', (2243, 2265), True, 'import tensorflow as tf\n'), ((2385, 2408), 'numpy.complex64', 'np.complex64', (['(nt + 0.0j)'], {}), '(nt + 0.0j)\n', (2397, 2408), 
True, 'import numpy as np\n'), ((2441, 2482), 'tensorflow.transpose', 'tf.transpose', (['input'], {'perm': '[0, 2, 3, 4, 1]'}), '(input, perm=[0, 2, 3, 4, 1])\n', (2453, 2482), True, 'import tensorflow as tf\n'), ((2542, 2559), 'tensorflow.signal.ifft', 'tf.signal.ifft', (['x'], {}), '(x)\n', (2556, 2559), True, 'import tensorflow as tf\n'), ((2576, 2613), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 4, 1, 2, 3]'}), '(x, perm=[0, 4, 1, 2, 3])\n', (2588, 2613), True, 'import tensorflow as tf\n'), ((2676, 2717), 'tensorflow.transpose', 'tf.transpose', (['input'], {'perm': '[0, 2, 3, 4, 1]'}), '(input, perm=[0, 2, 3, 4, 1])\n', (2688, 2717), True, 'import tensorflow as tf\n'), ((2734, 2750), 'tensorflow.signal.fft', 'tf.signal.fft', (['x'], {}), '(x)\n', (2747, 2750), True, 'import tensorflow as tf\n'), ((2810, 2847), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 4, 1, 2, 3]'}), '(x, perm=[0, 4, 1, 2, 3])\n', (2822, 2847), True, 'import tensorflow as tf\n'), ((2058, 2069), 'tensorflow.sqrt', 'tf.sqrt', (['nt'], {}), '(nt)\n', (2065, 2069), True, 'import tensorflow as tf\n'), ((2286, 2297), 'tensorflow.sqrt', 'tf.sqrt', (['nt'], {}), '(nt)\n', (2293, 2297), True, 'import tensorflow as tf\n'), ((2634, 2645), 'tensorflow.sqrt', 'tf.sqrt', (['nt'], {}), '(nt)\n', (2641, 2645), True, 'import tensorflow as tf\n'), ((2868, 2879), 'tensorflow.sqrt', 'tf.sqrt', (['nt'], {}), '(nt)\n', (2875, 2879), True, 'import tensorflow as tf\n'), ((3021, 3048), 'tensorflow.math.real', 'tf.math.real', (['residual_cplx'], {}), '(residual_cplx)\n', (3033, 3048), True, 'import tensorflow as tf\n'), ((3050, 3077), 'tensorflow.math.imag', 'tf.math.imag', (['residual_cplx'], {}), '(residual_cplx)\n', (3062, 3077), True, 'import tensorflow as tf\n'), ((1637, 1655), 'numpy.linalg.norm', 'np.linalg.norm', (['y_'], {}), '(y_)\n', (1651, 1655), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
# Toy TF1 linear-regression script: fit y = W*x + B to data generated from
# y = 3*x + 1 by full-batch gradient descent.
# NOTE(review): x_data is float16 while tf.random_uniform/tf.zeros default to
# float32 -- confirm the dtype mix is intended (float32 is the usual choice).
x_data = np.random.rand(100).astype(np.float16)
W=tf.Variable(tf.random_uniform([1],-1,1))  # slope, initialized uniformly in [-1, 1)
B=tf.Variable(tf.zeros([1]))  # intercept, initialized to 0
# Ground-truth parameters.
k=3
b=1
# Targets and model prediction.
y_data=x_data*k+b
y=W*x_data+B
# Mean squared error loss.
loss=tf.reduce_mean(tf.square(y_data-y))
opt=tf.train.GradientDescentOptimizer(0.5)  # learning rate 0.5
train=opt.minimize(loss)
# NOTE: initialize_all_variables() is the deprecated TF1 name for
# global_variables_initializer().
init=tf.initialize_all_variables()
sess=tf.Session()
sess.run(init)
for it in range(201):
    sess.run(train)
    if not it % 20:
        # Report fitted W and B every 20 steps; they should approach 3 and 1.
        print(it,sess.run(W),sess.run(B))
| [
"tensorflow.random_uniform",
"tensorflow.Session",
"tensorflow.zeros",
"tensorflow.initialize_all_variables",
"tensorflow.square",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer"
] | [((250, 288), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.5)'], {}), '(0.5)\n', (283, 288), True, 'import tensorflow as tf\n'), ((320, 349), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (347, 349), True, 'import tensorflow as tf\n'), ((355, 367), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (365, 367), True, 'import tensorflow as tf\n'), ((106, 135), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]', '(-1)', '(1)'], {}), '([1], -1, 1)\n', (123, 135), True, 'import tensorflow as tf\n'), ((149, 162), 'tensorflow.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (157, 162), True, 'import tensorflow as tf\n'), ((224, 245), 'tensorflow.square', 'tf.square', (['(y_data - y)'], {}), '(y_data - y)\n', (233, 245), True, 'import tensorflow as tf\n'), ((53, 72), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (67, 72), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import numpy as np
import pandas as pd
import re
import string
import sys
import unidecode
import yaml
from collections import defaultdict
from gensim import corpora, models
from itertools import chain
from joblib import Parallel, delayed
from nltk import ngrams
from nltk.corpus import stopwords as nltk_stopwords
from nltk.tokenize import TweetTokenizer
from operator import itemgetter
from scipy import sparse as sps
def normalize_token(token, **kwargs):
    """Normalize a single tweet token according to keyword flags.

    Removal flags (remove_hashtags/links/mentions/numeric) map the token to
    the empty string; otherwise optional hashtag camel-case splitting or
    hashtag/mention placeholder substitution, lowercasing, and ASCII
    transliteration are applied, in that order.
    """
    removed = (
        (kwargs.get("remove_hashtags") and token.startswith("#")) or
        (kwargs.get("remove_links") and token.startswith("http")) or
        (kwargs.get("remove_mentions") and token.startswith("@")) or
        (kwargs.get("remove_numeric") and token.isnumeric())
    )
    if removed:
        return ""
    if kwargs.get("split_hashtags") and token.startswith("#"):
        # Split camel-cased hashtags: "#FooBar" -> "#Foo:Bar"
        camel_parts = re.finditer(
            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', token[1:])
        token = "#" + ":".join(part.group(0) for part in camel_parts)
    elif kwargs.get("normalize_hashtags") and token.startswith("#"):
        token = "<hashtag>"
    if kwargs.get("normalize_mentions") and token.startswith("@"):
        token = "<user>"
    if kwargs.get("tweet_lowercase"):
        token = token.lower()
    return unidecode.unidecode(token)
def split_hashtags(token):
    """Expand a colon-joined hashtag ("#a:b" -> ["a", "b"]); pass other tokens through."""
    if not token.startswith("#"):
        return [token]
    return token[1:].split(":")
def normalize_tweet(tweet, stopwords=frozenset(), punctuation=frozenset(), **kwargs):
    """Normalize a tokenized tweet and optionally append word/char n-grams.

    :param tweet: list of raw tokens
    :param stopwords: tokens dropped before normalization
    :param punctuation: punctuation tokens dropped before normalization
    :param kwargs: flags forwarded to normalize_token, plus split_hashtags,
        word_ngrams (iterable of n values) and char_ngrams (iterable of n values)
    :return: list of normalized tokens (plus n-gram tokens, if requested)
    """
    tweet = [normalize_token(t, **kwargs).strip() for t in tweet
             if t not in stopwords and t not in punctuation and t.strip() != ""]
    # BUG fix: tokens reduced to "" by removal flags used to survive here
    # and pollute the n-grams below; drop them.
    tweet = [t for t in tweet if t != ""]
    if kwargs.get("split_hashtags"):
        tweet = list(chain(*map(split_hashtags, tweet)))
    # BUG fix: the guard previously checked the misspelled key
    # "words_ngrams" while the body read "word_ngrams", so word n-grams
    # were silently never generated.
    if kwargs.get("word_ngrams", None):
        word_ngrams = (
            "_".join(ngram)
            for n in kwargs["word_ngrams"]
            for ngram in ngrams(tweet, n)
        )
        tweet = list(chain(tweet, word_ngrams))
    if kwargs.get("char_ngrams", None):
        char_ngrams = (
            "".join(cngram)
            for token in tweet
            for n in kwargs["char_ngrams"]
            for cngram in ngrams(token, n)
        )
        tweet = list(chain(tweet, char_ngrams))
    return tweet
def extract_hashtags(tokens, hashtag_ignore=set()):
    """Return the sorted unique hashtags in `tokens`, skipping ignored tags.

    A hashtag is any token starting with "#" other than a bare "#";
    `hashtag_ignore` holds lowercase tag names without the leading "#".
    """
    found = set()
    for token in tokens:
        if not token.startswith("#") or token.strip() == "#":
            continue
        if token.lower()[1:] in hashtag_ignore:
            continue
        found.add(token)
    return sorted(found)
def extract_mentions(tokens, mentions_ignore=set()):
    """Return the sorted unique @-mentions in `tokens`, skipping ignored handles.

    A mention is any token starting with "@" other than a bare "@";
    `mentions_ignore` holds lowercase handles without the leading "@".
    """
    mentions = {
        token for token in tokens
        if token.startswith("@")
        and token.strip() != "@"
        and token.lower()[1:] not in mentions_ignore
    }
    return sorted(mentions)
def extract_ngrams(tokens, n=3):
    """Return the sorted unique underscore-joined n-grams of `tokens`."""
    joined = {"_".join(gram) for gram in ngrams(tokens, n=n)}
    return sorted(joined)
def extract_toptfidf(tfidf_tweet, k=5):
    """Return the terms of the `k` highest-scoring (term, score) pairs."""
    ranked = sorted(tfidf_tweet, key=lambda pair: pair[1], reverse=True)
    return [pair[0] for pair in ranked[:k]]
def build_adjacency_matrix(graph_type, data):
    """Build a weighted edge list connecting rows that share `graph_type` items.

    Each row contributes a zero-weight self loop (so NetworkX keeps track of
    isolated nodes); every later row sharing at least one item gets an edge
    weighted by the size of the shared set. Only the upper triangle is
    stored (the relation is symmetric).
    """
    edges = []
    for index, source in data.iterrows():
        edges.append((source["ID"], source["ID"], 0))
        # TODO: Is this the best way to weight edges?
        for _, target in data.loc[index + 1:].iterrows():
            weight = len(set(source[graph_type]) & set(target[graph_type]))
            if weight > 0:
                edges.append((source["ID"], target["ID"], weight))
    return graph_type, edges
def main(args):
    """Build the feature/graph pipeline for a tweet stance dataset.

    Reads the CSV at ``args.dataset_input`` and writes, under
    ``args.output_basename``: a bag-of-words corpus (.bow.mm), a TF-IDF
    corpus (.tfidf.mm), one gzipped CSV edge list per requested graph
    type, and the data configuration used (.data.yml).
    """
    print("Loading data", file=sys.stderr)
    dataset = pd.read_csv(args.dataset_input)
    # Record of the preprocessing options; dumped to .data.yml at the end.
    data_config = dict(
        char_ngrams=args.char_ngrams,
        ignore_hashtags=args.ignore_hashtags,
        ignore_mentions=args.ignore_mentions,
        min_docs=args.min_docs,
        max_docs=args.max_docs,
        normalize_hashtags=args.normalize_hashtags,
        normalize_mentions=args.normalize_mentions,
        reduce_tweet_word_len=args.reduce_tweet_word_len,
        remove_hashtags=args.remove_hashtags,
        remove_links=args.remove_links,
        remove_mentions=args.remove_mentions,
        remove_numeric=args.remove_numeric,
        remove_punctuation=args.remove_punctuation,
        remove_stopwords=args.remove_stopwords,
        split_hashtags=args.split_hashtags,
        tweet_lowercase=args.tweet_lowercase,
        word_ngrams=args.word_ngrams
    )
    if args.supervised_only:
        # Drop rows without a stance label.
        print("Filtering unsupervised", file=sys.stderr)
        dataset = dataset[dataset["Stance"] != "UNK"]
    print("Tokenizing tweets", file=sys.stderr)
    tweet_tokenizer = TweetTokenizer(
        reduce_len=args.reduce_tweet_word_len
    )
    dataset["TokenizedTweet"] = dataset["Tweet"].apply(
        tweet_tokenizer.tokenize
    )
    print("Normalizing tweets", file=sys.stderr)
    # Filter sets are empty unless the corresponding flag is on.
    punctuation_symbols = set(string.punctuation) \
        if args.remove_punctuation else set()
    stopwords = set(nltk_stopwords.words("english")) \
        if args.remove_stopwords else set()
    dataset["NormalizedTweet"] = dataset["TokenizedTweet"].apply(
        lambda t: normalize_tweet(
            tweet=t,
            stopwords=stopwords,
            punctuation=punctuation_symbols,
            char_ngrams=args.char_ngrams,
            normalize_hashtags=args.normalize_hashtags,
            normalize_mentions=args.normalize_mentions,
            remove_hashtags=args.remove_hashtags,
            remove_links=args.remove_links,
            remove_mentions=args.remove_mentions,
            remove_numeric=args.remove_numeric,
            split_hashtags=args.split_hashtags,
            tweet_lowercase=args.tweet_lowercase,
            word_ngrams=args.word_ngrams
        )
    )
    print("Building vocabulary", file=sys.stderr)
    tweets_vocab = corpora.Dictionary(dataset["NormalizedTweet"])
    # Prune tokens by document frequency (absolute min, fractional max).
    tweets_vocab.filter_extremes(
        no_below=args.min_docs,
        no_above=args.max_docs
    )
    print("Building bag-of-words features", file=sys.stderr)
    bow_corpus = dataset["NormalizedTweet"].apply(
        tweets_vocab.doc2bow
    ).tolist()
    corpora.MmCorpus.serialize(
        "{}.bow.mm".format(args.output_basename),
        bow_corpus
    )
    print("Building TF-IDF features", file=sys.stderr)
    tfidf_model = models.TfidfModel(
        bow_corpus,
        dictionary=tweets_vocab
    )
    tfidf_corpus = tfidf_model[bow_corpus]
    corpora.MmCorpus.serialize(
        "{}.tfidf.mm".format(args.output_basename),
        tfidf_corpus
    )
    print("Extracting graph information", file=sys.stderr)
    # One dataset column + entry in graph_types per requested graph.
    graph_types = []
    if args.graph_hashtags:
        # NOTE: the inner `lambda t` shadows the outer `t` (works, but
        # the two t's are different objects).
        dataset["hashtags"] = dataset["TokenizedTweet"].apply(
            lambda t: extract_hashtags(
                t,
                set(map(lambda t: t.lower(), args.ignore_hashtags))
            )
        )
        graph_types.append("hashtags")
    if args.graph_mentions:
        dataset["mentions"] = dataset["TokenizedTweet"].apply(
            lambda t: extract_mentions(
                t,
                set(map(lambda t: t.lower(), args.ignore_mentions))
            )
        )
        graph_types.append("mentions")
    for n in args.graph_ngrams:
        dataset["{}-gram".format(n)] = dataset["TokenizedTweet"].apply(
            lambda t: extract_ngrams(t, n)
        )
        graph_types.append("{}-gram".format(n))
    for k in args.graph_tfidf:
        # NOTE(review): indexes tfidf_corpus by the "ID" column value;
        # assumes IDs are 0..n-1 positions into the corpus — confirm this
        # still holds when --supervised-only filters rows.
        dataset["top-{}-tfidf".format(k)] = dataset["ID"].apply(
            lambda idx: extract_toptfidf(tfidf_corpus[idx], k)
        )
        graph_types.append("top-{}-tfidf".format(k))
    print("Building graphs", file=sys.stderr)
    # One co-occurrence edge list per graph type, built in parallel.
    adjacencies = dict(
        Parallel(n_jobs=-1, verbose=10)(
            delayed(build_adjacency_matrix)(
                graph_type, dataset.loc[:, ["ID", graph_type]]
            ) for graph_type in graph_types
        )
    )
    if args.graph_document_word:
        # Heterogeneous document/word graph in the style of Yao et al 2019
        # (TextGCN): document-word edges weighted by TF-IDF, word-word
        # edges weighted by PMI over a sliding co-occurrence window.
        print("Building document_word_graph", file=sys.stderr)
        tweets_corpus = dataset["NormalizedTweet"].apply(
            tweets_vocab.doc2idx
        ).tolist()
        # Word-Word Co-occurrence Matrix
        word_word_count = defaultdict(int)
        window_size = args.graph_document_word_window
        for tweet in tweets_corpus:
            for idx, ctoken in enumerate(tweet):
                if ctoken == -1:
                    # doc2idx maps out-of-vocabulary tokens to -1.
                    continue
                for wtoken in tweet[max(idx-window_size, 0):idx+window_size+1]:
                    if wtoken == -1:
                        continue
                    word_word_count[(ctoken, wtoken)] += 1
        data = list(word_word_count.values())
        rows, cols = list(zip(*word_word_count.keys()))
        cooccurrence_matrix_shape = (len(tweets_vocab),) * 2
        cooccurrence_matrix = sps.coo_matrix(
            (data, (rows, cols)),
            shape=cooccurrence_matrix_shape
        )
        # Drop the self-co-occurrence counted by the window loop.
        cooccurrence_matrix.setdiag(0)
        # PPMI Matrix
        word_totals = np.array(cooccurrence_matrix.sum(axis=0))[0]
        total = word_totals.sum()
        word_probs = word_totals/total
        ppmi = cooccurrence_matrix / total
        ppmi.data /= (word_probs[ppmi.row] * word_probs[ppmi.col])
        # NOTE(review): the > 0 filter runs on the probability ratio
        # (always positive), not on log(ratio), so negative PMI values
        # survive the log below — confirm whether true PPMI (clipping
        # after the log) was intended.
        ppmi.row = ppmi.row[ppmi.data > 0]
        ppmi.col = ppmi.col[ppmi.data > 0]
        ppmi.data = ppmi.data[ppmi.data > 0]
        ppmi.data = np.log(ppmi.data)
        ppmi = sps.triu(ppmi)
        # Adjacency matrix
        # Documents occupy indices [0, n_docs); words are offset after them.
        base_word_index = dataset.shape[0]
        adjacency_shape = (base_word_index + len(tweets_vocab),) * 2
        rows = []
        cols = []
        data = []
        for tidx, tweet in enumerate(tfidf_corpus):
            for widx, tfidf_score in tweet:
                rows.append(tidx)
                cols.append(widx + base_word_index)
                data.append(tfidf_score)
        rows.extend(ppmi.row + base_word_index)
        cols.extend(ppmi.col + base_word_index)
        data.extend(ppmi.data)
        adjacency = sps.coo_matrix((data, (rows, cols)), shape=adjacency_shape)
        adjacency.setdiag(1)
        adjacencies["document_word"] = list(zip(adjacency.row, adjacency.col, adjacency.data))
    print("Saving graphs", file=sys.stderr)
    # One gzipped CSV edge list per graph.
    for graph_type, adjacency in adjacencies.items():
        pd.DataFrame(
            adjacency,
            columns=["row", "col", "weight"]
        ).to_csv(
            "{}.{}.csv.gz".format(args.output_basename, graph_type),
            index=False
        )
    print("Saving data configuration", file=sys.stderr)
    with open("{}.data.yml".format(args.output_basename), "w") as fh:
        yaml.dump(data_config, fh)
if __name__ == "__main__":
    # CLI definition; option names mirror the keys stored in .data.yml.
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_input",
                        help="Path to the dataset csv file.")
    parser.add_argument("output_basename",
                        help="Basename (path included) to store the outputs")
    parser.add_argument("--char-ngrams",
                        default=[],
                        help="Build features of character n-grams.",
                        nargs="+",
                        type=int)
    # Graph construction options.
    parser.add_argument("--graph-document-word",
                        action="store_true",
                        help="Build graph of document words (Yao et al 2019).")
    parser.add_argument("--graph-document-word-window",
                        default=5,
                        help="Word co-occurrence window (Yao et al 2019).",
                        type=int)
    parser.add_argument("--graph-hashtags",
                        action="store_true",
                        help="Build graph of hashtags.")
    parser.add_argument("--graph-mentions",
                        action="store_true",
                        help="Build graph of mentions.")
    parser.add_argument("--graph-ngrams",
                        default=[],
                        help="Build graph of n-grams.",
                        nargs="+",
                        type=int)
    parser.add_argument("--graph-tfidf",
                        default=[],
                        help="Build graph of top k tfidf tokens.",
                        nargs="+",
                        type=int)
    parser.add_argument("--ignore-hashtags",
                        default=[],
                        help="List of hashtag to ignore when building graph.",
                        nargs="+",
                        type=str)
    parser.add_argument("--ignore-mentions",
                        default=[],
                        help="List of mentions to ignore when building graph.",
                        nargs="+",
                        type=str)
    # Vocabulary pruning thresholds (see Dictionary.filter_extremes).
    parser.add_argument("--max-docs",
                        default=1.0,
                        help="Maximum fraction of documents for TF-IDF.",
                        type=float)
    parser.add_argument("--min-docs",
                        default=2,
                        help="Minimum document frequency for TF-IDF.",
                        type=int)
    # Tweet normalization flags (forwarded to normalize_tweet).
    parser.add_argument("--normalize-hashtags",
                        action="store_true",
                        help="Normalize hashtags in tweets.")
    parser.add_argument("--normalize-mentions",
                        action="store_true",
                        help="Normalize mentions in tweets.")
    parser.add_argument("--remove-hashtags",
                        action="store_true",
                        help="Remove hashtags from tweets.")
    parser.add_argument("--remove-links",
                        action="store_true",
                        help="Remove hyperlinks from tweets.")
    parser.add_argument("--remove-mentions",
                        action="store_true",
                        help="Remove mentions from tweets.")
    parser.add_argument("--remove-numeric",
                        action="store_true",
                        help="Remove numeric tokens from tweets.")
    parser.add_argument("--remove-punctuation",
                        action="store_true",
                        help="Remove punctuation symbols from tweets.")
    parser.add_argument("--remove-stopwords",
                        action="store_true",
                        help="Remove stopwords from tweets.")
    parser.add_argument("--reduce-tweet-word-len",
                        action="store_true",
                        help="Reduce the lenght of words in TweetTokenizer.")
    parser.add_argument("--split-hashtags",
                        action="store_true",
                        help="Camel case splitting of hashtags.")
    parser.add_argument("--supervised-only",
                        action="store_true",
                        help="Build data only from labeled corpora.")
    parser.add_argument("--tweet-lowercase",
                        action="store_true",
                        help="Lowercase the tweets.")
    parser.add_argument("--word-ngrams",
                        default=[],
                        help="Build features of word n-grams.",
                        nargs="+",
                        type=int)
    args = parser.parse_args()
    main(args)
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"re.finditer",
"yaml.dump",
"collections.defaultdict",
"pandas.DataFrame",
"gensim.corpora.Dictionary",
"scipy.sparse.triu",
"scipy.sparse.coo_matrix",
"itertools.chain",
"nltk.tokenize.TweetTokenizer",
"nltk.ngrams",
"nltk.corpus.stopwords.words... | [((1345, 1371), 'unidecode.unidecode', 'unidecode.unidecode', (['token'], {}), '(token)\n', (1364, 1371), False, 'import unidecode\n'), ((3809, 3840), 'pandas.read_csv', 'pd.read_csv', (['args.dataset_input'], {}), '(args.dataset_input)\n', (3820, 3840), True, 'import pandas as pd\n'), ((4842, 4895), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {'reduce_len': 'args.reduce_tweet_word_len'}), '(reduce_len=args.reduce_tweet_word_len)\n', (4856, 4895), False, 'from nltk.tokenize import TweetTokenizer\n'), ((6023, 6069), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (["dataset['NormalizedTweet']"], {}), "(dataset['NormalizedTweet'])\n", (6041, 6069), False, 'from gensim import corpora, models\n'), ((6511, 6565), 'gensim.models.TfidfModel', 'models.TfidfModel', (['bow_corpus'], {'dictionary': 'tweets_vocab'}), '(bow_corpus, dictionary=tweets_vocab)\n', (6528, 6565), False, 'from gensim import corpora, models\n'), ((10874, 10899), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10897, 10899), False, 'import argparse\n'), ((932, 1011), 're.finditer', 're.finditer', (['""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"""', 'token[1:]'], {}), "('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', token[1:])\n", (943, 1011), False, 'import re\n'), ((8371, 8387), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8382, 8387), False, 'from collections import defaultdict\n'), ((8993, 9062), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(data, (rows, cols))'], {'shape': 'cooccurrence_matrix_shape'}), '((data, (rows, cols)), shape=cooccurrence_matrix_shape)\n', (9007, 9062), True, 'from scipy import sparse as sps\n'), ((9560, 9577), 'numpy.log', 'np.log', (['ppmi.data'], {}), '(ppmi.data)\n', (9566, 9577), True, 'import numpy as np\n'), ((9593, 9607), 'scipy.sparse.triu', 'sps.triu', (['ppmi'], {}), '(ppmi)\n', (9601, 9607), True, 'from scipy import sparse as sps\n'), ((10176, 
10235), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(data, (rows, cols))'], {'shape': 'adjacency_shape'}), '((data, (rows, cols)), shape=adjacency_shape)\n', (10190, 10235), True, 'from scipy import sparse as sps\n'), ((10805, 10831), 'yaml.dump', 'yaml.dump', (['data_config', 'fh'], {}), '(data_config, fh)\n', (10814, 10831), False, 'import yaml\n'), ((2027, 2052), 'itertools.chain', 'chain', (['tweet', 'word_ngrams'], {}), '(tweet, word_ngrams)\n', (2032, 2052), False, 'from itertools import chain\n'), ((2295, 2320), 'itertools.chain', 'chain', (['tweet', 'char_ngrams'], {}), '(tweet, char_ngrams)\n', (2300, 2320), False, 'from itertools import chain\n'), ((5173, 5204), 'nltk.corpus.stopwords.words', 'nltk_stopwords.words', (['"""english"""'], {}), "('english')\n", (5193, 5204), True, 'from nltk.corpus import stopwords as nltk_stopwords\n'), ((7895, 7926), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(10)'}), '(n_jobs=-1, verbose=10)\n', (7903, 7926), False, 'from joblib import Parallel, delayed\n'), ((1979, 1995), 'nltk.ngrams', 'ngrams', (['tweet', 'n'], {}), '(tweet, n)\n', (1985, 1995), False, 'from nltk import ngrams\n'), ((2247, 2263), 'nltk.ngrams', 'ngrams', (['token', 'n'], {}), '(token, n)\n', (2253, 2263), False, 'from nltk import ngrams\n'), ((10467, 10524), 'pandas.DataFrame', 'pd.DataFrame', (['adjacency'], {'columns': "['row', 'col', 'weight']"}), "(adjacency, columns=['row', 'col', 'weight'])\n", (10479, 10524), True, 'import pandas as pd\n'), ((2874, 2893), 'nltk.ngrams', 'ngrams', (['tokens'], {'n': 'n'}), '(tokens, n=n)\n', (2880, 2893), False, 'from nltk import ngrams\n'), ((7940, 7971), 'joblib.delayed', 'delayed', (['build_adjacency_matrix'], {}), '(build_adjacency_matrix)\n', (7947, 7971), False, 'from joblib import Parallel, delayed\n'), ((3003, 3016), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (3013, 3016), False, 'from operator import itemgetter\n')] |
import concurrent.futures as cf
import operator
import numpy as np
# Required for fast concatenation of MDTraj trajectories.
try:
import mdtraj
except ImportError:
mdtraj = None
from functools import partial, reduce
from tqdm import tqdm
from westpa.analysis.core import Walker, Trace
from typing import Callable
class SegmentCollector:
    """Manages retrieval of trajectory segments for a trajectory descriptor.

    Parameters
    ----------
    traj_descr : Trajectory
        The trajectory descriptor whose segments are retrieved.
    use_threads : bool, default False
        If True, segments are fetched asynchronously through a thread
        pool, which can help when retrieval is I/O bound.
    max_workers : int, optional
        Upper bound on the number of worker threads; when None the
        :type:`ThreadPoolExecutor` default applies.
    show_progress : bool, default False
        If True, display a progress bar while fetching multiple segments.
    """

    def __init__(self, traj_descr, use_threads=False, max_workers=None, show_progress=False):
        self.traj_descr = traj_descr
        self.use_threads = use_threads
        self.max_workers = max_workers
        self.show_progress = show_progress

    @property
    def traj_descr(self):
        return self._traj_descr

    @traj_descr.setter
    def traj_descr(self, value):
        if not isinstance(value, Trajectory):
            raise TypeError(f'traj_descr must be an instance of {Trajectory}')
        self._traj_descr = value

    @property
    def use_threads(self):
        return self._use_threads

    @use_threads.setter
    def use_threads(self, value):
        if not isinstance(value, bool):
            raise TypeError('use_threads must be True or False')
        self._use_threads = value

    @property
    def max_workers(self):
        return self._max_workers

    @max_workers.setter
    def max_workers(self, value):
        if value is not None and value <= 0:
            raise ValueError('max_workers must be greater than 0')
        self._max_workers = value

    @property
    def show_progress(self):
        return self._show_progress

    @show_progress.setter
    def show_progress(self, value):
        if not isinstance(value, bool):
            raise ValueError('show_progress must be True or False')
        self._show_progress = value

    def get_segment(self, walker):
        """Return the trajectory of a given walker.

        Parameters
        ----------
        walker : Walker
            A walker.

        Returns
        -------
        sequence
            The trajectory of `walker`.
        """
        return self.traj_descr.__get__(walker, Walker)

    def get_segments(self, walkers):
        """Return the trajectories of a group of walkers.

        Parameters
        ----------
        walkers : Iterable[Walker]
            A group of walkers.

        Returns
        -------
        list of sequences
            The trajectory of each walker, in input order.
        """
        walkers = tuple(walkers)
        progress_opts = dict(
            desc='Retrieving segments',
            disable=(not self.show_progress),
            position=0,
            total=len(walkers),
        )
        if not self.use_threads:
            return list(tqdm(map(self.get_segment, walkers), **progress_opts))
        with cf.ThreadPoolExecutor(self.max_workers) as executor:
            # Remember each future's submission index so results can be
            # restored to input order after completion.
            order = {executor.submit(self.get_segment, w): i
                     for i, w in enumerate(walkers)}
            done = list(tqdm(cf.as_completed(order), **progress_opts))
        done.sort(key=order.get)
        return [future.result() for future in done]
class Trajectory:
    """A data descriptor for walker and trace trajectories.
    Parameters
    ----------
    fget : callable
        Function for getting a trajectory segment. Must take a single
        :type:`Walker` object as input and return a sequence representing
        the trajectory of the walker.
    name : str, optional
        Name of the :type:`Walker` and :type:`Trace` attribute to which
        to assign the trajectory descriptor. If not provided, `name` will
        default to the function name of `fget`.
    concatenator : callable, optional
        Function for concatenating trajectories. Must take a sequence of
        trajectories as input and return their concatenation. The default
        `concatenator` is :func:`concatenate`.
    cache_segments : bool, default True
        Whether to cache trajectory segments.
    See Also
    --------
    :func:`trajectory_segment`
        Decorator that transforms a function for getting trajectory
        segments into a :type:`Trajectory` descriptor.
    """
    def __init__(self, fget=None, *, name=None, concatenator=None, cache_segments=True):
        # NOTE(review): __init__ must return None; returning this partial
        # raises TypeError at instantiation time, so the keyword-only
        # decorator-factory form Trajectory(name=...) appears unusable as
        # written — confirm intended usage (a __new__ override or a plain
        # factory function would be needed instead).
        if fget is None:
            return partial(self.__init__, name=name, concatenator=concatenator, cache_segments=cache_segments)
        if name is None:
            name = fget.__name__
        self.fget = fget
        self.name = name
        self.concatenator = concatenator
        self.cache_segments = cache_segments
        self._segment_collector = SegmentCollector(self)
        # Attach self to Walker and Trace classes.
        # Check both classes first, then assign, so a name collision never
        # leaves only one of the two classes modified.
        for cls in Walker, Trace:
            if hasattr(cls, name):
                msg = f"class '{cls.__name__}' already has attribute '{name}'"
                raise AttributeError(msg)
        for cls in Walker, Trace:
            setattr(cls, name, self)
    @property
    def private_name(self):
        """str: Name of the :type:`Walker` instance attribute used for
        caching segments.
        """
        return '_' + self.name
    @property
    def segment_collector(self):
        """SegmentCollector: Segment retrieval manager."""
        return self._segment_collector
    @property
    def fget(self):
        """callable: Function for getting trajectory segments."""
        return self._fget
    @fget.setter
    def fget(self, value):
        if not isinstance(value, Callable):
            raise TypeError('fget must be callable')
        self._fget = value
    @property
    def cache_segments(self):
        """bool: Whether to cache trajectory segments."""
        return self._cache_segments
    @cache_segments.setter
    def cache_segments(self, value):
        if not isinstance(value, bool):
            raise TypeError('cache_segments must be True or False')
        self._cache_segments = value
    @property
    def concatenator(self):
        """callable: Function for concatenating trajectories."""
        return self._concatenator
    @concatenator.setter
    def concatenator(self, value):
        # None selects the module-level default concatenator.
        if value is None:
            value = concatenate
        elif not isinstance(value, Callable):
            raise TypeError('concatenator must be callable')
        self._concatenator = value
    def __get__(self, instance, owner):
        # Accessed on the class itself: return the descriptor.
        if instance is None:
            return self
        if owner is Walker:
            # Serve from the per-instance cache when present; otherwise
            # compute, validate, and (optionally) cache the segment.
            if hasattr(instance, self.private_name):
                value = getattr(instance, self.private_name)
            else:
                value = self.fget(instance)
                self._validate_segment(value)
                if self.cache_segments:
                    setattr(instance, self.private_name, value)
            return value
        if owner is Trace:
            # A trace's trajectory is the concatenation of its walkers'.
            segments = self.segment_collector.get_segments(instance)
            return self.concatenator(segments)
        msg = f'owner must be Walker or Trace, not {owner.__name__}'
        raise TypeError(msg)
    def __set__(self, instance, value):
        # Defining __set__ makes this a read-only data descriptor.
        raise AttributeError("can't set attribute")
    def __call__(self, arg):
        # Allow the descriptor to be used as a plain function on either
        # a Walker or a Trace.
        if isinstance(arg, Walker):
            return self.__get__(arg, Walker)
        if isinstance(arg, Trace):
            return self.__get__(arg, Trace)
        raise TypeError('argument must be a Walker or Trace')
    def _validate_segment(self, value):
        # Segments must at least support indexing so they can be
        # concatenated later.
        if not hasattr(value, '__getitem__'):
            msg = f"'{type(value).__name__}' object can't be concatenated"
            raise TypeError(msg)
def trajectory_segment(fget=None, *, cache=True):
    """Decorator turning a segment-getter function into a trajectory
    attribute of the same name.

    Parameters
    ----------
    fget : callable
        Function for getting a trajectory segment. Must take a single
        :type:`Walker` object as input and return a sequence.
    cache : bool, default True
        Whether to cache trajectory segments.

    Returns
    -------
    Trajectory
        The newly created trajectory attribute, also reachable as
        ``getattr(Walker, fget.__name__)`` and
        ``getattr(Trace, fget.__name__)``.
    """
    return Trajectory(fget, cache_segments=cache)
def concatenate(trajectories):
    """Return the concatenation of a sequence of trajectories.

    NumPy arrays are joined with :func:`numpy.concatenate`; MDTraj
    trajectories (when mdtraj is importable) use ``Trajectory.join``;
    anything else is folded with ``operator.concat``.

    Parameters
    ----------
    trajectories : sequence of sequences
        A sequence of trajectories.

    Returns
    -------
    sequence
        The concatenation of `trajectories`.
    """
    first = trajectories[0]
    if isinstance(first, np.ndarray):
        return np.concatenate(trajectories)
    if mdtraj and isinstance(first, mdtraj.Trajectory):
        return first.join(trajectories[1:], check_topology=False)
    return reduce(operator.concat, trajectories)
| [
"functools.partial",
"numpy.concatenate",
"functools.reduce",
"concurrent.futures.ThreadPoolExecutor",
"concurrent.futures.as_completed"
] | [((9635, 9672), 'functools.reduce', 'reduce', (['operator.concat', 'trajectories'], {}), '(operator.concat, trajectories)\n', (9641, 9672), False, 'from functools import partial, reduce\n'), ((9453, 9481), 'numpy.concatenate', 'np.concatenate', (['trajectories'], {}), '(trajectories)\n', (9467, 9481), True, 'import numpy as np\n'), ((5126, 5222), 'functools.partial', 'partial', (['self.__init__'], {'name': 'name', 'concatenator': 'concatenator', 'cache_segments': 'cache_segments'}), '(self.__init__, name=name, concatenator=concatenator, cache_segments\n =cache_segments)\n', (5133, 5222), False, 'from functools import partial, reduce\n'), ((3457, 3496), 'concurrent.futures.ThreadPoolExecutor', 'cf.ThreadPoolExecutor', (['self.max_workers'], {}), '(self.max_workers)\n', (3478, 3496), True, 'import concurrent.futures as cf\n'), ((3665, 3695), 'concurrent.futures.as_completed', 'cf.as_completed', (['future_to_key'], {}), '(future_to_key)\n', (3680, 3695), True, 'import concurrent.futures as cf\n')] |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Generator, Any
import click
import os
import string
import random
import numpy as np
from read_vectors_files import fvecs_read, ivecs_read
from jina.flow import Flow
RANDOM_SEED = 14  # fixed seed so get_random_ws() yields a reproducible workspace name
os.environ['PARALLEL'] = str(1)  # presumably read by the flow-*.yml configs — confirm
os.environ['SHARDS'] = str(1)
os.environ['TMP_DATA_DIR'] = '/tmp/jina/faiss/siftsmall'  # location of the SIFT dataset files
def get_random_ws(workspace_path, length=8):
    """Return a pseudo-random subdirectory path under *workspace_path*.

    The module RNG is reseeded with RANDOM_SEED first, so the generated
    directory name is the same on every call.
    """
    random.seed(RANDOM_SEED)
    alphabet = string.ascii_lowercase
    suffix = ''.join(random.choice(alphabet) for _ in range(length))
    return os.path.join(workspace_path, suffix)
def read_data(db_file_path: str):
    """Load the .fvecs vector file at *db_file_path* via fvecs_read."""
    return fvecs_read(db_file_path)
def save_topk(resp, output_file, top_k):
    """Write the top-k matches of each query to *output_file* and print it.

    Parameters
    ----------
    resp
        A search response; assumes ``resp.search.docs`` with per-doc
        ``matches`` carrying ``id``, ``score.value`` and ``blob``
        (Jina response proto) — TODO confirm.
    output_file : str
        Path of the report file to (over)write.
    top_k : int
        k used for the recall@k figure appended at the end.
    """
    results = []
    with open(output_file, 'w') as fw:
        query_id = 0
        for d in resp.search.docs:
            result = []
            fw.write('-' * 20)
            fw.write('\n')
            fw.write('query id {}:'.format(query_id))
            fw.write('\n')
            fw.write('matched vectors' + "*" * 10)
            fw.write('\n')
            for idx, match in enumerate(d.matches):
                # NOTE(review): the id is recorded (and so counted in the
                # recall computation) even when the score filter below
                # skips printing it — confirm this is intended.
                result.append(match.id)
                score = match.score.value
                if score < 0.0:
                    continue
                m_fn = np.frombuffer(match.blob.buffer, match.blob.dtype)
                fw.write('\n')
                fw.write('Idx: {:>2d}:(DocId {}, Ranking score: {:f}): \n{}'.
                         format(idx, match.id, score, m_fn))
                fw.write('\n')
            fw.write('\n')
            results.append(result)
            query_id += 1
        fw.write(f'recall@{top_k}: {recall_at_k(np.array(results), top_k)}')
    # Bug fix: the previous `print(open(output_file, 'r').read())` never
    # closed the file handle; use a context manager.
    with open(output_file, 'r') as fr:
        print(fr.read())
def recall_at_k(results, k):
    """
    Computes how many times the true nearest neighbour is returned as one of the k closest vectors from a query.
    Taken from https://gist.github.com/mdouze/046c1960bc82801e6b40ed8ee677d33e

    :param results: 2-D array of retrieved ids, one row per query.
    :param k: number of top results considered per query.
    :return: fraction of queries whose true nearest neighbour appears in
        the first k retrieved ids.
    """
    groundtruth_path = os.path.join(os.environ['TMP_DATA_DIR'], 'siftsmall_groundtruth.ivecs')
    groundtruth = ivecs_read(groundtruth_path)
    # Renamed from `eval`, which shadowed the builtin of the same name.
    recall = (results[:, :k] == groundtruth[:, :1]).sum() / float(results.shape[0])
    return recall
@click.command()
@click.option('--task', '-t')
@click.option('--batch_size', '-n', default=50)
@click.option('--top_k', '-k', default=5)
def main(task, batch_size, top_k):
    """Run the 'index' or 'query' flow on the siftsmall dataset."""
    # Fresh (but deterministic) workspace for the flow pods.
    os.environ['WORKDIR'] = get_random_ws(os.environ['TMP_DATA_DIR'])
    if task == 'index':
        data_path = os.path.join(os.environ['TMP_DATA_DIR'], 'siftsmall_base.fvecs')
        if os.path.exists(data_path):
            # NOTE(review): this only warns — indexing still proceeds
            # below. Also the check is on the input data file, not on a
            # previously built index/workspace; confirm which path was
            # meant to be guarded.
            print(f'\n +---------------------------------------------------------------------------------+ \
                    \n |                                   🤖🤖🤖                                        | \
                    \n | The directory {data_path} already exists. Please remove it before indexing again. | \
                    \n |                                   🤖🤖🤖                                        | \
                    \n +---------------------------------------------------------------------------------+')
        flow = Flow().load_config('flow-index.yml')
        with flow.build() as fl:
            fl.index_ndarray(read_data(data_path), batch_size=batch_size)
    elif task == 'query':
        data_path = os.path.join(os.environ['TMP_DATA_DIR'], 'siftsmall_query.fvecs')
        flow = Flow().load_config('flow-query.yml')
        with flow.build() as fl:
            # Callback: dump the top-k matches of every response to a file.
            ppr = lambda x: save_topk(x, os.path.join(os.environ['TMP_DATA_DIR'], 'query_results.txt'), top_k)
            fl.search_ndarray(read_data(data_path), output_fn=ppr, top_k=top_k)
    else:
        raise NotImplementedError(
            f'unknown task: {task}. A valid task is either `index` or `query`.')
if __name__ == '__main__':
    # click parses --task/--batch_size/--top_k from the command line.
    main()
| [
"read_vectors_files.ivecs_read",
"read_vectors_files.fvecs_read",
"numpy.frombuffer",
"click.option",
"os.path.exists",
"random.choice",
"click.command",
"random.seed",
"jina.flow.Flow",
"numpy.array",
"os.path.join"
] | [((2277, 2292), 'click.command', 'click.command', ([], {}), '()\n', (2290, 2292), False, 'import click\n'), ((2294, 2322), 'click.option', 'click.option', (['"""--task"""', '"""-t"""'], {}), "('--task', '-t')\n", (2306, 2322), False, 'import click\n'), ((2324, 2370), 'click.option', 'click.option', (['"""--batch_size"""', '"""-n"""'], {'default': '(50)'}), "('--batch_size', '-n', default=50)\n", (2336, 2370), False, 'import click\n'), ((2372, 2412), 'click.option', 'click.option', (['"""--top_k"""', '"""-k"""'], {'default': '(5)'}), "('--top_k', '-k', default=5)\n", (2384, 2412), False, 'import click\n'), ((478, 502), 'random.seed', 'random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (489, 502), False, 'import random\n'), ((615, 647), 'os.path.join', 'os.path.join', (['workspace_path', 'dn'], {}), '(workspace_path, dn)\n', (627, 647), False, 'import os\n'), ((695, 719), 'read_vectors_files.fvecs_read', 'fvecs_read', (['db_file_path'], {}), '(db_file_path)\n', (705, 719), False, 'from read_vectors_files import fvecs_read, ivecs_read\n'), ((2057, 2128), 'os.path.join', 'os.path.join', (["os.environ['TMP_DATA_DIR']", '"""siftsmall_groundtruth.ivecs"""'], {}), "(os.environ['TMP_DATA_DIR'], 'siftsmall_groundtruth.ivecs')\n", (2069, 2128), False, 'import os\n'), ((2147, 2175), 'read_vectors_files.ivecs_read', 'ivecs_read', (['groundtruth_path'], {}), '(groundtruth_path)\n', (2157, 2175), False, 'from read_vectors_files import fvecs_read, ivecs_read\n'), ((2562, 2626), 'os.path.join', 'os.path.join', (["os.environ['TMP_DATA_DIR']", '"""siftsmall_base.fvecs"""'], {}), "(os.environ['TMP_DATA_DIR'], 'siftsmall_base.fvecs')\n", (2574, 2626), False, 'import os\n'), ((2638, 2663), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (2652, 2663), False, 'import os\n'), ((557, 579), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (570, 579), False, 'import random\n'), ((3420, 3485), 'os.path.join', 'os.path.join', 
(["os.environ['TMP_DATA_DIR']", '"""siftsmall_query.fvecs"""'], {}), "(os.environ['TMP_DATA_DIR'], 'siftsmall_query.fvecs')\n", (3432, 3485), False, 'import os\n'), ((1335, 1385), 'numpy.frombuffer', 'np.frombuffer', (['match.blob.buffer', 'match.blob.dtype'], {}), '(match.blob.buffer, match.blob.dtype)\n', (1348, 1385), True, 'import numpy as np\n'), ((3230, 3236), 'jina.flow.Flow', 'Flow', ([], {}), '()\n', (3234, 3236), False, 'from jina.flow import Flow\n'), ((3501, 3507), 'jina.flow.Flow', 'Flow', ([], {}), '()\n', (3505, 3507), False, 'from jina.flow import Flow\n'), ((1723, 1740), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (1731, 1740), True, 'import numpy as np\n'), ((3612, 3673), 'os.path.join', 'os.path.join', (["os.environ['TMP_DATA_DIR']", '"""query_results.txt"""'], {}), "(os.environ['TMP_DATA_DIR'], 'query_results.txt')\n", (3624, 3673), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from pathlib import Path
import pandas as pd, numpy as np
from itertools import combinations
from scipy.spatial.distance import pdist, squareform
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
# All inputs/outputs live in the Outputs folder next to the working folder.
script_folder = Path.cwd()
outputs_folder = script_folder.parent / 'Outputs'

# PCA cell embeddings; keep only the first 18 components.
pca = pd.read_csv(outputs_folder / 'Seurat_integration_PCA_cell_embeddings.txt',
                  sep='\t', header=0, index_col=0, encoding='utf-8').iloc[:, :18]

# SNN cluster assignment per cell barcode.
clusters = pd.read_csv(outputs_folder / 'Seurat_integration_SNN_clusters.txt',
                       sep='\t', header=0, index_col=0, encoding='utf-8')

# Cell-type annotation per cell barcode.
celltypes = pd.read_csv(outputs_folder / 'WT_and_KO_cells_celltypes.txt',
                       sep='\t', header=0, index_col=0, encoding='utf-8')

# Group the barcodes of clusters 8 and 10 by annotated cell type.
cluster2celltype = {}
for cluster_id in [8, 10]:
    by_type = {}
    for barcode in clusters.index[clusters['Cluster'] == cluster_id].tolist():
        by_type.setdefault(celltypes.loc[barcode, 'Maintype'], []).append(barcode)
    cluster2celltype[cluster_id] = by_type

# For every pair of cell types within a cluster, run PERMANOVA on the
# euclidean distances between the cells' PCA embeddings, writing one
# Excel sheet per cluster.
fname = outputs_folder / 'Permanova_results.xlsx'
with pd.ExcelWriter(fname) as writer:
    for cluster_id in [8, 10]:
        frames = []
        for celltype_A, celltype_B in combinations(sorted(cluster2celltype[cluster_id].keys()), 2):
            barcodes_A = cluster2celltype[cluster_id][celltype_A]
            barcodes_B = cluster2celltype[cluster_id][celltype_B]
            cells = barcodes_A + barcodes_B
            grouping = [celltype_A] * len(barcodes_A) + [celltype_B] * len(barcodes_B)
            X = pca.loc[cells, :].copy()
            dist_mat = DistanceMatrix(squareform(pdist(X, metric='euclidean')), cells)
            np.random.seed(0)  # reseed so every test uses identical permutations
            result = permanova(dist_mat, grouping, permutations=1000)
            result.name = ('%s vs %s'%(celltype_A, celltype_B))
            frames.append(result)
        result = pd.concat(frames, axis='columns')
        sheet_name = 'cluster %d'%(cluster_id)
        result.T.to_excel(writer, sheet_name=sheet_name, header=True, index=True,
                          index_label='', encoding='utf-8')
| [
"numpy.random.seed",
"pandas.read_csv",
"skbio.stats.distance.permanova",
"scipy.spatial.distance.pdist",
"pathlib.Path.cwd",
"pandas.concat",
"pandas.ExcelWriter",
"skbio.DistanceMatrix"
] | [((288, 298), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (296, 298), False, 'from pathlib import Path\n'), ((426, 495), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)', 'encoding': '"""utf-8"""'}), "(fname, sep='\\t', header=0, index_col=0, encoding='utf-8')\n", (437, 495), True, 'import pandas as pd, numpy as np\n'), ((594, 663), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)', 'encoding': '"""utf-8"""'}), "(fname, sep='\\t', header=0, index_col=0, encoding='utf-8')\n", (605, 663), True, 'import pandas as pd, numpy as np\n'), ((734, 803), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)', 'encoding': '"""utf-8"""'}), "(fname, sep='\\t', header=0, index_col=0, encoding='utf-8')\n", (745, 803), True, 'import pandas as pd, numpy as np\n'), ((1206, 1227), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fname'], {}), '(fname)\n', (1220, 1227), True, 'import pandas as pd, numpy as np\n'), ((1967, 2000), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '"""columns"""'}), "(frames, axis='columns')\n", (1976, 2000), True, 'import pandas as pd, numpy as np\n'), ((1726, 1751), 'skbio.DistanceMatrix', 'DistanceMatrix', (['dm', 'cells'], {}), '(dm, cells)\n', (1740, 1751), False, 'from skbio import DistanceMatrix\n'), ((1764, 1781), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1778, 1781), True, 'import pandas as pd, numpy as np\n'), ((1803, 1851), 'skbio.stats.distance.permanova', 'permanova', (['dist_mat', 'grouping'], {'permutations': '(1000)'}), '(dist_mat, grouping, permutations=1000)\n', (1812, 1851), False, 'from skbio.stats.distance import permanova\n'), ((1673, 1701), 'scipy.spatial.distance.pdist', 'pdist', (['X'], {'metric': '"""euclidean"""'}), "(X, metric='euclidean')\n", (1678, 1701), False, 'from scipy.spatial.distance import pdist, squareform\n')] |
"""
==================================================================
Plot of accuracy and time as sample_size and num_features increase
==================================================================
We show that the increase in computation time is linear when
increasing the number of features or the sample size increases.
"""
import matplotlib.pyplot as plt
import numpy as np
from time import time
from sklearn_extra.robust import RobustWeightedEstimator
from sklearn.linear_model import SGDClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(42)
x_label = "Number of features"
dimensions = np.linspace(50, 5000, num=8).astype(int)
sample_sizes = np.linspace(50, 5000, num=8).astype(int)
accuracies = []
times = []
# Get the accuracy and time of computations for a dataset with varying number
# of features
for d in dimensions:
# Make an example in dimension d. Use a scale factor for the problem to be
# easy even in high dimension.
X, y = make_classification(
n_samples=200, n_features=d, scale=1 / np.sqrt(2 * d), random_state=rng
)
stime = time()
clf = RobustWeightedEstimator(
SGDClassifier(loss="hinge", penalty="l1"),
loss="hinge",
random_state=rng,
)
accuracies.append(np.mean(cross_val_score(clf, X, y, cv=10)))
times.append(time() - stime)
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(dimensions, accuracies)
axs[0, 0].set_xlabel(x_label)
axs[0, 0].set_ylabel("accuracy")
axs[0, 1].plot(dimensions, times)
axs[0, 1].set_xlabel(x_label)
axs[0, 1].set_ylabel("Time to fit and predict (s)")
accuracies = []
times = []
# Get the accuracy and time of computations for a dataset with varying number
# of samples
for n in sample_sizes:
X, y = make_classification(n_samples=n, n_features=5, random_state=rng)
stime = time()
clf = RobustWeightedEstimator(
SGDClassifier(loss="hinge", penalty="l1"),
loss="hinge",
random_state=rng,
)
accuracies.append(np.mean(cross_val_score(clf, X, y, cv=10)))
times.append(time() - stime)
axs[1, 0].plot(dimensions, accuracies)
axs[1, 0].set_xlabel(x_label)
axs[1, 0].set_ylabel("accuracy")
axs[1, 1].plot(dimensions, times)
axs[1, 1].set_xlabel(x_label)
axs[1, 1].set_ylabel("Time to fit and predict (s)")
plt.show()
| [
"matplotlib.pyplot.show",
"sklearn.linear_model.SGDClassifier",
"sklearn.model_selection.cross_val_score",
"sklearn.datasets.make_classification",
"numpy.random.RandomState",
"time.time",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((621, 646), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (642, 646), True, 'import numpy as np\n'), ((1432, 1450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (1444, 1450), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2376, 2378), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1180), 'time.time', 'time', ([], {}), '()\n', (1178, 1180), False, 'from time import time\n'), ((1824, 1888), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'n', 'n_features': '(5)', 'random_state': 'rng'}), '(n_samples=n, n_features=5, random_state=rng)\n', (1843, 1888), False, 'from sklearn.datasets import make_classification\n'), ((1901, 1907), 'time.time', 'time', ([], {}), '()\n', (1905, 1907), False, 'from time import time\n'), ((691, 719), 'numpy.linspace', 'np.linspace', (['(50)', '(5000)'], {'num': '(8)'}), '(50, 5000, num=8)\n', (702, 719), True, 'import numpy as np\n'), ((747, 775), 'numpy.linspace', 'np.linspace', (['(50)', '(5000)'], {'num': '(8)'}), '(50, 5000, num=8)\n', (758, 775), True, 'import numpy as np\n'), ((1224, 1265), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""', 'penalty': '"""l1"""'}), "(loss='hinge', penalty='l1')\n", (1237, 1265), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1951, 1992), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""', 'penalty': '"""l1"""'}), "(loss='hinge', penalty='l1')\n", (1964, 1992), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1351, 1384), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'X', 'y'], {'cv': '(10)'}), '(clf, X, y, cv=10)\n', (1366, 1384), False, 'from sklearn.model_selection import cross_val_score\n'), ((1404, 1410), 'time.time', 'time', ([], {}), '()\n', (1408, 1410), False, 'from time import time\n'), ((2078, 
2111), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'X', 'y'], {'cv': '(10)'}), '(clf, X, y, cv=10)\n', (2093, 2111), False, 'from sklearn.model_selection import cross_val_score\n'), ((2131, 2137), 'time.time', 'time', ([], {}), '()\n', (2135, 2137), False, 'from time import time\n'), ((1123, 1137), 'numpy.sqrt', 'np.sqrt', (['(2 * d)'], {}), '(2 * d)\n', (1130, 1137), True, 'import numpy as np\n')] |
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import seaborn as sns
import streamlit as st
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from .constants import RANDOM_STATE, DATASET_PATH
def app():
    """Render the "Unoptimized SVM" Streamlit page.

    Loads the dataset from DATASET_PATH, lets the user choose sampling and
    train-split sizes, trains an RBF-kernel SVC on min-max-scaled features,
    and reports predictions, a confusion-matrix heatmap, and the MCC score.
    """
    st.title('Unoptimized SVM')
    st.header('Dataset Setting')
    # Two side-by-side inputs: optional down-sampling and the train/test split.
    col1, col2 = st.beta_columns(2)
    with col1:
        sampling_size = st.number_input('Sampling Size (%):', 5, 100, 100, 5)
    with col2:
        train_size = st.number_input('Train Size (%):', 60, 90, 80, 5)
    df = pd.read_csv(DATASET_PATH)
    if sampling_size != 100:
        # Randomly sub-sample the dataset (reproducible via RANDOM_STATE).
        sampling_size = sampling_size/100
        df = df.sample(frac=sampling_size, random_state=RANDOM_STATE)
    # First 195 columns are features; labels are technique/subtechnique.
    X = df.iloc[:, :195]
    y = df['technique']
    y_sub = df['subtechnique']
    train_size = train_size/100
    # Stratify on the finer-grained subtechnique so both splits cover it.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y,
        train_size=train_size,
        random_state=RANDOM_STATE,
        stratify=y_sub
    )
    dataset_summarize = pd.DataFrame(
        [[X_train.shape[1], X_train.shape[0], X_test.shape[0], X.shape[0]]],
        columns=['Num. Features', 'Num. Train Samples',
                 'Num. Test Samples', 'Total Samples']
    )
    # The 'hack' index hides the default integer row label in st.table.
    st.table(dataset_summarize.assign(hack='').set_index('hack'))
    if st.button('Train & Test'):
        st.write('**Start The Training Process**')
        # Scale features to [-1, 1]; fit the scaler on the training split only.
        scaler = MinMaxScaler(feature_range=(-1, 1))
        scaler.fit(X_train)
        X_train_ = scaler.transform(X_train)
        X_test_ = scaler.transform(X_test)
        # Default-hyperparameter ("unoptimized") RBF SVM, one-vs-one multiclass.
        clf = SVC(kernel='rbf', decision_function_shape='ovo')
        clf.fit(X_train_, y_train)
        st.subheader('Evaluate The Model')
        y_pred = clf.predict(X_test_)
        test_sample = pd.DataFrame(X_test)
        test_sample['target'] = y_test
        test_sample['prediction'] = y_pred
        st.write('Test Samples + Prediction')
        st.dataframe(test_sample)
        fig = plt.figure()
        conf_matrix = confusion_matrix(y_test, y_pred)
        sns.heatmap(
            conf_matrix,
            cmap=sns.color_palette("light:b", as_cmap=True),
            cbar=False,
            annot=True,
            xticklabels=np.unique(y_test),
            yticklabels=np.unique(y_test),
            fmt="d"
        )
        plt.ylabel("Actual", fontweight='bold')
        plt.xlabel("Predicted", fontweight='bold')
        st.pyplot(fig)
        # Matthews correlation coefficient: robust single-number summary
        # for (possibly imbalanced) multiclass classification.
        mcc = matthews_corrcoef(y_test, y_pred)
st.subheader(f'MCC: `{mcc:.2%}`') | [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"streamlit.title",
"matplotlib.pyplot.figure",
"sklearn.svm.SVC",
"numpy.unique",
"pandas.DataFrame",
"streamlit.subheader",
"streamlit.button",
"streamlit.beta_columns",
"streamlit.header",
... | [((436, 463), 'streamlit.title', 'st.title', (['"""Unoptimized SVM"""'], {}), "('Unoptimized SVM')\n", (444, 463), True, 'import streamlit as st\n'), ((469, 497), 'streamlit.header', 'st.header', (['"""Dataset Setting"""'], {}), "('Dataset Setting')\n", (478, 497), True, 'import streamlit as st\n'), ((515, 533), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (530, 533), True, 'import streamlit as st\n'), ((723, 748), 'pandas.read_csv', 'pd.read_csv', (['DATASET_PATH'], {}), '(DATASET_PATH)\n', (734, 748), True, 'import pandas as pd\n'), ((1042, 1134), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': 'train_size', 'random_state': 'RANDOM_STATE', 'stratify': 'y_sub'}), '(X, y, train_size=train_size, random_state=RANDOM_STATE,\n stratify=y_sub)\n', (1058, 1134), False, 'from sklearn.model_selection import StratifiedKFold, train_test_split\n'), ((1194, 1371), 'pandas.DataFrame', 'pd.DataFrame', (['[[X_train.shape[1], X_train.shape[0], X_test.shape[0], X.shape[0]]]'], {'columns': "['Num. Features', 'Num. Train Samples', 'Num. Test Samples', 'Total Samples']"}), "([[X_train.shape[1], X_train.shape[0], X_test.shape[0], X.shape\n [0]]], columns=['Num. Features', 'Num. Train Samples',\n 'Num. 
Test Samples', 'Total Samples'])\n", (1206, 1371), True, 'import pandas as pd\n'), ((1476, 1501), 'streamlit.button', 'st.button', (['"""Train & Test"""'], {}), "('Train & Test')\n", (1485, 1501), True, 'import streamlit as st\n'), ((573, 626), 'streamlit.number_input', 'st.number_input', (['"""Sampling Size (%):"""', '(5)', '(100)', '(100)', '(5)'], {}), "('Sampling Size (%):', 5, 100, 100, 5)\n", (588, 626), True, 'import streamlit as st\n'), ((663, 712), 'streamlit.number_input', 'st.number_input', (['"""Train Size (%):"""', '(60)', '(90)', '(80)', '(5)'], {}), "('Train Size (%):', 60, 90, 80, 5)\n", (678, 712), True, 'import streamlit as st\n'), ((1511, 1553), 'streamlit.write', 'st.write', (['"""**Start The Training Process**"""'], {}), "('**Start The Training Process**')\n", (1519, 1553), True, 'import streamlit as st\n'), ((1572, 1607), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1584, 1607), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1738, 1786), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'decision_function_shape': '"""ovo"""'}), "(kernel='rbf', decision_function_shape='ovo')\n", (1741, 1786), False, 'from sklearn.svm import SVC\n'), ((1831, 1865), 'streamlit.subheader', 'st.subheader', (['"""Evaluate The Model"""'], {}), "('Evaluate The Model')\n", (1843, 1865), True, 'import streamlit as st\n'), ((1927, 1947), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {}), '(X_test)\n', (1939, 1947), True, 'import pandas as pd\n'), ((2038, 2075), 'streamlit.write', 'st.write', (['"""Test Samples + Prediction"""'], {}), "('Test Samples + Prediction')\n", (2046, 2075), True, 'import streamlit as st\n'), ((2084, 2109), 'streamlit.dataframe', 'st.dataframe', (['test_sample'], {}), '(test_sample)\n', (2096, 2109), True, 'import streamlit as st\n'), ((2125, 2137), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2135, 2137), True, 'import 
matplotlib.pyplot as plt\n'), ((2160, 2192), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2176, 2192), False, 'from sklearn.metrics import confusion_matrix, matthews_corrcoef\n'), ((2472, 2511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual"""'], {'fontweight': '"""bold"""'}), "('Actual', fontweight='bold')\n", (2482, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2562), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {'fontweight': '"""bold"""'}), "('Predicted', fontweight='bold')\n", (2530, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2571, 2585), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (2580, 2585), True, 'import streamlit as st\n'), ((2601, 2634), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2618, 2634), False, 'from sklearn.metrics import confusion_matrix, matthews_corrcoef\n'), ((2643, 2676), 'streamlit.subheader', 'st.subheader', (['f"""MCC: `{mcc:.2%}`"""'], {}), "(f'MCC: `{mcc:.2%}`')\n", (2655, 2676), True, 'import streamlit as st\n'), ((2256, 2298), 'seaborn.color_palette', 'sns.color_palette', (['"""light:b"""'], {'as_cmap': '(True)'}), "('light:b', as_cmap=True)\n", (2273, 2298), True, 'import seaborn as sns\n'), ((2372, 2389), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (2381, 2389), True, 'import numpy as np\n'), ((2415, 2432), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (2424, 2432), True, 'import numpy as np\n')] |
import random
import numpy as np
from Agents.pedestrian import pedestrian
from Agents.vehicle import vehicle
from TB_common_functions import log, calculateDistance
# from Agents.traffic_light import traffic_light
def interpolate_path(path_tuple, increments_resolution):
    """Densify a 2-D waypoint path with intermediate points.

    For every segment longer than ``increments_resolution`` meters,
    intermediate points are inserted via linear interpolation and rounded
    to one decimal. Segments at or below the resolution contribute no
    points beyond what earlier segments already produced.

    Returns the first waypoint followed by (x, y) tuples for the
    interpolated points.
    """
    densified = [path_tuple[0]]
    for start_pt, end_pt in zip(path_tuple[:-1], path_tuple[1:]):
        dx, dy = abs(np.subtract(start_pt, end_pt))
        segment_length = np.hypot(dx, dy)
        if segment_length <= increments_resolution:
            continue
        leftover = segment_length % increments_resolution
        n_samples = int(np.ceil((segment_length - leftover) / increments_resolution)) + 1
        if leftover > 0:
            # An extra sample covers the fractional tail of the segment.
            n_samples += 1
        xs = [round(v, 1) for v in np.linspace(start_pt[0], end_pt[0], n_samples)[1:]]
        ys = [round(v, 1) for v in np.linspace(start_pt[1], end_pt[1], n_samples)[1:]]
        densified.extend(zip(xs, ys))
    return densified
# =================================================================================================================================================
# Case study to provide log files for game transaltion to OpenScenario files
#
# =================================================================================================================================================
class GameRun():
    """Scripted two-vehicle intersection scenario for a CARLA-style world.

    Spawns two vehicles on fixed waypoint paths through an intersection
    (a deliberately dangerous configuration), steps them each tick, and
    logs actor state for later translation to OpenScenario files.
    """
    def __init__(self,world):
        # world: simulator world handle providing get_snapshot() (timestamps).
        self.spawn_height = 2
        self.tests_ended = False
        self.world = world
        self.logging_time_increment = 0.1
        self.tests_logs = log(self.logging_time_increment)
        # self.i = 0
    def set(self):
        """Reset scenario state: create actors, assign paths, spawn vehicles."""
        self.timeout_thresh = 30  # seconds of logged time before the run ends
        self.actors_list = []
        self.veh1 = vehicle(1) # vehicle turning right
        self.veh2 = vehicle(2) # vehicle turning right
        # self.ped1 = pedestrian(2,"adult") # pedestrian crossing zebra line
        # self.ped1.set_speed(1.5)
        self.actors_list.append(self.veh1)
        self.actors_list.append(self.veh2)
        # self.actors_list.append(self.ped1)
        self.test_ended = False
        # NOTE(review): both `test_ended` and `tests_ended` exist — confirm the
        # distinction (per-run vs whole-series) is intentional.
        self.tests_logs = log(self.logging_time_increment)
        # spawn(self,x,y,z,yaw):
        # Setting Veh 1
        # Hard-coded map waypoints (x, y) for each vehicle's route.
        self.veh1_path = [[-5.26,-98.13],[-4.36,-124.83], [-6.16,-132.83], [-12.96,-135.23], [-39.36,-135.43]]
        # self.veh2_path = [[16.63,-134.53],[-65.76,-136.13]] # safe
        self.veh2_path = [[30.63,-134.23],[-65.76,-136.13]] # crash
        # self.veh2_path = [[-26.26,-7.95],[-16.04,-16.99], [-8.64,-25.19], [-6.84,-35.19], [-5.44,-74.89],[-4.44,-127.99], [-8.84,-134.99], [-31.54,-135.39]] # crash
        # self.ped1_path = [[-22.24,-62.49], [-24.64,-81.39], [-17.44,-100.79], [-49.84,-97.19]]
        # # Setting Ped 1
        # if self.i == 0:
        #     # Creating several paths for contious runs
        #     ped1_path_series_start = [-283.90, 176.50] # Start of Trajectory series for pedestrian with varying starting point
        #     ped1_path_series_end = [-295.80,182.10] # End of Trajectory series for pedestrian with varying starting point
        #     step = 2
        #     x = [-ped1_path_series_start[0], -ped1_path_series_end[0]]
        #     print("x[0] = ",x[0])
        #     print("x[1] = ",x[1])
        #     print("step = ",step)
        #     x_new = np.arange(x[0],x[1],step)
        #     print("x_new = ",x_new)
        #     y = [ped1_path_series_start[1], ped1_path_series_end[1]]
        #     self.y_new = np.interp(x_new, x, y)
        #     self.y_new = np.append(self.y_new,ped1_path_series_end[1])
        #     self.x_new = x_new * -1
        #     self.x_new = np.append(self.x_new,ped1_path_series_end[0])
        # self.ped1_path = [(self.x_new[self.i],self.y_new[self.i]),(-295.80,182.10), (-300.50,172.50), (-288.30,165.70)]
        # # self.ped1_path = [[-279.80,170.10], [-310.50,185.40]]
        # self.i = self.i + 1
        # if self.i == len(self.x_new):
        #     self.tests_ended = True
        # print("i",self.i)
        # print("self.x_new",self.x_new)
        # print("length of x_new",len(self.x_new))
        # interpolate path(s)
        # Densify paths so actors move in ~1 m increments between waypoints.
        interpolation_resolution_min = 1
        self.veh1_path_interpolated = interpolate_path(self.veh1_path, interpolation_resolution_min)
        self.veh2_path_interpolated = interpolate_path(self.veh2_path, interpolation_resolution_min)
        # self.ped1_path_interpolated = interpolate_path(self.ped1_path, interpolation_resolution_min)
        # self.veh3_path_interpolated = interpolate_path(self.veh3_path, interpolation_resolution_min)
        self.veh1.set_path(self.veh1_path_interpolated)
        self.veh2.set_path(self.veh2_path_interpolated)
        # self.ped1.set_path(self.ped1_path_interpolated)
        # self.veh3.set_path(self.veh3_path_interpolated)
        # Spawn each vehicle at its first waypoint; 240 is the initial yaw (deg).
        self.veh1.spawn(self.veh1_path[0][0],self.veh1_path[0][1], self.spawn_height, 240)
        self.veh2.spawn(self.veh2_path[0][0],self.veh2_path[0][1], self.spawn_height, 240)
        # self.ped1.spawn(self.ped1_path[0][0],self.ped1_path[0][1], self.spawn_height, 0)
        # self.veh3.spawn(self.veh3_path[0][0],self.veh3_path[0][1], self.spawn_height, 155)
        # self.veh1_speed = 5
        # self.veh2_speed = 0
        # self.veh3_speed = 5
    def step(self):
        """Advance both vehicles one tick, log state, and check the timeout."""
        self.veh1.step()
        self.veh2.step()
        # self.ped1.step()
        # self.veh3.step()
        # Log
        t = self.world.get_snapshot().timestamp.elapsed_seconds
        # NOTE(review): frame_count looks like a cumulative frame number, so
        # 1/frame_count is not frames-per-second — confirm what log() expects.
        fps = 1 / (self.world.get_snapshot().timestamp.frame_count)
        self.tests_logs.append(0,0,self.actors_list,t,fps)
        # End the run once the logged time exceeds the timeout threshold.
        if self.tests_logs.time_array[-1] >= self.timeout_thresh:
            self.test_ended = True
            self.tests_ended = True
    def destroy(self):
        """Remove actors from the world and flush logs to disk."""
        self.veh1.destroy()
        self.veh2.destroy()
        # self.ped1.destroy()
        # self.veh3.destroy()
        # self.tests_logs.write_file("GameRunTown3_intersection_safe.txt")
        self.tests_logs.write_file("GameRunTown3_intersection_dangerous.txt")
        # self.tests_logs.write_file("AssertionCheckingCaseStudyLogs_NearMiss.txt")
        # self.tests_logs.write_file("AssertionCheckingCaseStudyLogs_Crash.txt")
pass | [
"numpy.subtract",
"numpy.ceil",
"Agents.vehicle.vehicle",
"TB_common_functions.log",
"numpy.hypot",
"numpy.linspace"
] | [((461, 487), 'numpy.hypot', 'np.hypot', (['diff[0]', 'diff[1]'], {}), '(diff[0], diff[1])\n', (469, 487), True, 'import numpy as np\n'), ((1777, 1809), 'TB_common_functions.log', 'log', (['self.logging_time_increment'], {}), '(self.logging_time_increment)\n', (1780, 1809), False, 'from TB_common_functions import log, calculateDistance\n'), ((1909, 1919), 'Agents.vehicle.vehicle', 'vehicle', (['(1)'], {}), '(1)\n', (1916, 1919), False, 'from Agents.vehicle import vehicle\n'), ((1959, 1969), 'Agents.vehicle.vehicle', 'vehicle', (['(2)'], {}), '(2)\n', (1966, 1969), False, 'from Agents.vehicle import vehicle\n'), ((2255, 2287), 'TB_common_functions.log', 'log', (['self.logging_time_increment'], {}), '(self.logging_time_increment)\n', (2258, 2287), False, 'from TB_common_functions import log, calculateDistance\n'), ((407, 452), 'numpy.subtract', 'np.subtract', (['path_tuple[i]', 'path_tuple[i + 1]'], {}), '(path_tuple[i], path_tuple[i + 1])\n', (418, 452), True, 'import numpy as np\n'), ((746, 809), 'numpy.linspace', 'np.linspace', (['path_tuple[i][0]', 'path_tuple[i + 1][0]', 'num_points'], {}), '(path_tuple[i][0], path_tuple[i + 1][0], num_points)\n', (757, 809), True, 'import numpy as np\n'), ((865, 928), 'numpy.linspace', 'np.linspace', (['path_tuple[i][1]', 'path_tuple[i + 1][1]', 'num_points'], {}), '(path_tuple[i][1], path_tuple[i + 1][1], num_points)\n', (876, 928), True, 'import numpy as np\n'), ((599, 661), 'numpy.ceil', 'np.ceil', (['((dist - num_points_remainder) / increments_resolution)'], {}), '((dist - num_points_remainder) / increments_resolution)\n', (606, 661), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) <NAME>.
# Distributed under the terms of the MIT License.
"""
This module implements functions to calculate the ionic conductivity.
"""
from typing import Union
import numpy as np
from tqdm.notebook import tqdm
from scipy import stats
from MDAnalysis import Universe, AtomGroup
__author__ = "<NAME>, <NAME>"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Feb 9, 2021"
"""
Algorithms in this section are adapted from DOI: 10.1051/sfn/201112010 and
http://stackoverflow.com/questions/34222272/computing-mean-square-displacement-using-python-and-fft#34222273
"""
def autocorr_fft(x: np.ndarray) -> np.ndarray:
    """Calculate the autocorrelation function of *x* via the FFT.

    Args:
        x (numpy.array): function on which to compute the autocorrelation

    Returns a numpy.array with the (lag-averaged) autocorrelation function.
    """
    n_frames = len(x)
    # Zero-pad to 2*n_frames so the circular correlation equals the linear one.
    spectrum = np.fft.fft(x, n=2 * n_frames)
    power = spectrum * spectrum.conjugate()
    acf = np.fft.ifft(power)[:n_frames].real
    # Lag m is an average over the (n_frames - m) overlapping pairs.
    pair_counts = n_frames - np.arange(n_frames)
    return acf / pair_counts
def msd_fft(r: np.ndarray) -> np.ndarray:
    """Calculate the mean square displacement of *r* using the FFT.

    Uses the S1 - 2*S2 decomposition, where S2 is the sum of the per-axis
    autocorrelations (computed by :func:`autocorr_fft`) and S1 is built
    from a running sum of squared displacements.

    Args:
        r (numpy.array): atom positions over time, shape (frames, dims)

    Returns a numpy.array with the MSD as a function of lag time.
    """
    n_frames = len(r)
    sq_disp = np.square(r).sum(axis=1)
    sq_disp = np.append(sq_disp, 0)  # sentinel so sq_disp[-1] wraps to 0
    s2 = sum(autocorr_fft(r[:, axis]) for axis in range(r.shape[1]))
    running = 2 * sq_disp.sum()
    s1 = np.zeros(n_frames)
    for lag in range(n_frames):
        running = running - sq_disp[lag - 1] - sq_disp[n_frames - lag]
        s1[lag] = running / (n_frames - lag)
    return s1 - 2 * s2
def calc_cond_msd(
    u: Universe,
    anions: AtomGroup,
    cations: AtomGroup,
    run_start: int,
    cation_charge: Union[int, float] = 1,
    anion_charge: Union[int, float] = -1,
) -> np.ndarray:
    """Calculate the conductivity "mean square displacement" over time.

    For every frame, accumulates the charge-weighted sum of ion
    center-of-mass positions, sum_i(q_i * r_i); the MSD of that vector
    gives the collective (Einstein) conductivity signal.

    Note:
        Coordinates must be unwrapped (in dcd file when creating MDAnalysis
        Universe). Ion selections may contain one atom per ion or all atoms
        of the ion, and may mix multiple cation/anion species.

    Args:
        u: MDAnalysis universe
        anions: MDAnalysis AtomGroup containing all anions
        cations: MDAnalysis AtomGroup containing all cations
        run_start (int): index of trajectory from which to start analysis
        cation_charge (int): net charge of cation
        anion_charge (int): net charge of anion

    Returns a numpy.array containing the conductivity "MSD" over time.
    """
    # Split into per-molecule groups so each ion gets its own center of mass.
    cation_residues = cations.split("residue")
    anion_residues = anions.split("residue")
    charge_positions = []
    for _ in tqdm(u.trajectory[run_start:]):
        frame_qr = np.zeros(3)
        for residue in anion_residues:
            frame_qr += residue.center_of_mass() * anion_charge
        for residue in cation_residues:
            frame_qr += residue.center_of_mass() * cation_charge
        charge_positions.append(frame_qr)
    return msd_fft(np.array(charge_positions))
def get_beta(
    msd: np.ndarray,
    time_array: np.ndarray,
    start: int,
    end: int,
) -> tuple:
    """Fit the MSD to t**beta over [start, end) and return beta.

    beta = 1 corresponds to the diffusive regime.

    Args:
        msd (numpy.array): mean squared displacement
        time_array (numpy.array): times at which position data was collected
        start (int): first index of the fitting region
        end (int): index one past the end of the fitting region

    Returns (beta, beta_range): the mean local log-log slope and the spread
    of slopes within the region (a noise/curvature measure).
    """
    # On a log-log plot the local slope of the MSD is the exponent beta.
    local_slopes = np.gradient(np.log(msd[start:end]), np.log(time_array[start:end]))
    return np.mean(local_slopes), np.max(local_slopes) - np.min(local_slopes)
return beta, beta_range
def choose_msd_fitting_region(
    msd: np.ndarray,
    time_array: np.ndarray,
) -> tuple:
    """Choose the most diffusive fitting region of a mean-squared displacement.

    The MSD behaves as t**beta; beta = 1 marks the diffusive regime, which
    should hold for roughly a decade of time. Ten candidate start indices,
    spaced logarithmically between 2 and len(time_array)/10, are each paired
    with an end index one decade later, and the region with beta closest to
    1 (and acceptably low slope noise) wins.

    Note:
        If no region reaches beta > 0.9, a warning is printed that the
        computed conductivity may be unreliable.

    Args:
        msd (numpy.array): mean squared displacement
        time_array (numpy.array): times at which position data was collected

    Returns (start index, end index, beta of the chosen region).
    """
    beta_best = 0  # sentinel: forces the first candidate to be accepted
    slope_tolerance = 2  # acceptable level of noise in beta values
    for candidate in np.logspace(np.log10(2), np.log10(len(time_array) / 10), 10):
        lo = int(candidate)
        hi = int(candidate * 10)  # fit over one decade
        beta, beta_range = get_beta(msd, time_array, lo, hi)
        # Accept if closer to linear (beta = 1) and not too noisy, or
        # unconditionally on the very first pass.
        improves = np.abs(beta - 1) < np.abs(beta_best - 1) and beta_range < slope_tolerance
        if improves or beta_best == 0:
            beta_best = beta
            start_final = lo
            end_final = hi
    if beta_best < 0.9:
        print(f"WARNING: MSD is not sufficiently linear (beta = {beta_best}). Consider running simulations longer.")
    return start_final, end_final, beta_best
def conductivity_calculator(
    time_array: np.ndarray,
    cond_array: np.ndarray,
    v: Union[int, float],
    name: str,
    start: int,
    end: int,
    T: Union[int, float],
    units: str = "real",
) -> float:
    """Calculate (and print) the overall ionic conductivity of the system.

    Fits a line to the conductivity "MSD" over the chosen region and applies
    the Einstein relation sigma = slope / (6 kB T V), with unit conversion.

    Args:
        time_array (numpy.array): times at which position data was collected
        cond_array (numpy.array): conductivity "mean squared displacement"
        v (float): simulation volume (Angstroms^3)
        name (str): system name used in the printed report
        start (int): first index of the linear fitting region
        end (int): index one past the end of the linear fitting region
        T: temperature
        units (str): unit system, either 'real' or 'lj'

    Raises:
        ValueError: if `units` is not one of the supported systems.

    Returns the overall ionic conductivity (float; mS/cm for 'real' units).
    """
    if units == "real":
        # real units: convert Angstrom^2 e^2 / ps to mS/cm.
        ang2cm = 1e-8  # Angstroms to cm
        ps2s = 1e-12  # picoseconds to seconds
        e2c = 1.60217662e-19  # elementary charge to Coulomb
        kb = 1.38064852e-23  # Boltzmann constant, J/K
        convert = e2c * e2c / ps2s / ang2cm * 1000
        cond_units = "mS/cm"
    elif units == "lj":
        kb = 1
        convert = 1
        cond_units = "q^2/(tau sigma epsilon)"
    else:
        raise ValueError("units selection not supported")
    fit = stats.linregress(time_array[start:end], cond_array[start:end])
    cond = fit.slope / 6 / kb / T / v * convert
    print(f"Conductivity of {name}: {cond} {cond_units}")
    return cond
| [
"numpy.fft.ifft",
"numpy.abs",
"numpy.log",
"numpy.fft.fft",
"numpy.square",
"numpy.zeros",
"tqdm.notebook.tqdm",
"numpy.ones",
"numpy.append",
"numpy.max",
"numpy.min",
"numpy.arange",
"scipy.stats.linregress",
"numpy.array",
"numpy.log10"
] | [((939, 961), 'numpy.fft.fft', 'np.fft.fft', (['x'], {'n': '(2 * N)'}), '(x, n=2 * N)\n', (949, 961), True, 'import numpy as np\n'), ((1031, 1047), 'numpy.fft.ifft', 'np.fft.ifft', (['PSD'], {}), '(PSD)\n', (1042, 1047), True, 'import numpy as np\n'), ((1472, 1487), 'numpy.append', 'np.append', (['D', '(0)'], {}), '(D, 0)\n', (1481, 1487), True, 'import numpy as np\n'), ((1582, 1593), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1590, 1593), True, 'import numpy as np\n'), ((2872, 2902), 'tqdm.notebook.tqdm', 'tqdm', (['u.trajectory[run_start:]'], {}), '(u.trajectory[run_start:])\n', (2876, 2902), False, 'from tqdm.notebook import tqdm\n'), ((7193, 7255), 'scipy.stats.linregress', 'stats.linregress', (['time_array[start:end]', 'cond_array[start:end]'], {}), '(time_array[start:end], cond_array[start:end])\n', (7209, 7255), False, 'from scipy import stats\n'), ((1098, 1113), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (1107, 1113), True, 'import numpy as np\n'), ((2922, 2933), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2930, 2933), True, 'import numpy as np\n'), ((3171, 3183), 'numpy.array', 'np.array', (['qr'], {}), '(qr)\n', (3179, 3183), True, 'import numpy as np\n'), ((3834, 3856), 'numpy.log', 'np.log', (['msd[start:end]'], {}), '(msd[start:end])\n', (3840, 3856), True, 'import numpy as np\n'), ((3858, 3887), 'numpy.log', 'np.log', (['time_array[start:end]'], {}), '(time_array[start:end])\n', (3864, 3887), True, 'import numpy as np\n'), ((3908, 3927), 'numpy.array', 'np.array', (['msd_slope'], {}), '(msd_slope)\n', (3916, 3927), True, 'import numpy as np\n'), ((3946, 3963), 'numpy.max', 'np.max', (['msd_slope'], {}), '(msd_slope)\n', (3952, 3963), True, 'import numpy as np\n'), ((3966, 3983), 'numpy.min', 'np.min', (['msd_slope'], {}), '(msd_slope)\n', (3972, 3983), True, 'import numpy as np\n'), ((5104, 5115), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (5112, 5115), True, 'import numpy as np\n'), ((1085, 1095), 
'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (1092, 1095), True, 'import numpy as np\n'), ((1439, 1451), 'numpy.square', 'np.square', (['r'], {}), '(r)\n', (1448, 1451), True, 'import numpy as np\n'), ((5469, 5485), 'numpy.abs', 'np.abs', (['(beta - 1)'], {}), '(beta - 1)\n', (5475, 5485), True, 'import numpy as np\n'), ((5488, 5509), 'numpy.abs', 'np.abs', (['(beta_best - 1)'], {}), '(beta_best - 1)\n', (5494, 5509), True, 'import numpy as np\n')] |
# coding: utf-8
# # Digit Recognition
#
# ## 1. Introduction
#
# In this analysis, the handwritten digits are identified using support vector machines and radial basis functions.
#
# ### 1.1 Libraries
#
# The essential libraries used here are numpy, matplotlib, and scikit-learn. For convenience, pandas and IPython.display are used for displaying tables, and tqdm is used for progress bars.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from itertools import product
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score, cross_val_predict, ShuffleSplit, KFold
from tqdm import tqdm
from IPython.display import display, Math, Latex, HTML
get_ipython().magic('matplotlib inline')
# Compact numpy printing for notebook output, and a shared tqdm bar format.
np.set_printoptions(precision=4,threshold=200)
tqdm_bar_fmt='{percentage:3.0f}%|{bar}|'
# ### 1.2 Dataset
#
# The US Postal Service Zip Code dataset is used, which contains handwritten digits zero to nine. The data has been preprocessed, whereby features of intensity and symmetry are extracted.
# In[2]:
def download_data():
    """Fetch the USPS zip-code feature datasets.

    Downloads the preprocessed train/test tables (digit label plus the
    intensity and symmetry features) and casts the digit column to int.

    Returns (train, test) as pandas DataFrames.
    """
    base_url = "http://www.amlbook.com/data/zip/features.{}"
    column_names = ['digit','intensity','symmetry']
    frames = []
    for split in ("train", "test"):
        frame = pd.read_table(base_url.format(split), names=column_names,
                              header=None, delim_whitespace=True)
        frame.digit = frame.digit.astype(int)
        frames.append(frame)
    return tuple(frames)
def process_data(train,test):
    """Split the train/test DataFrames into feature matrices and label vectors.

    Column 0 is the digit label; the remaining columns are the features.

    Returns (X_train, y_train, X_test, y_test) as numpy arrays.
    """
    def _split_xy(frame):
        # Features are every column after the first; labels are column 0.
        return frame.iloc[:, 1:].values, frame.iloc[:, 0].values
    X_train, y_train = _split_xy(train)
    X_test, y_test = _split_xy(test)
    return X_train, y_train, X_test, y_test
# In[3]:
# Download the USPS zip-code features and unpack into numpy train/test arrays.
train,test = download_data()
X_train,y_train,X_test,y_test = process_data(train,test)
# ## 2. Support Vector Machines for Digit Recognition
# ### 2.1 Polynomial Kernels
#
# We wish to implement the following polynomial kernel for our support vector machine:
#
# $$K\left(\mathbf{x_n,x_m}\right) = \left(1+\mathbf{x_n^Tx_m}\right)^Q$$
#
# This is implemented in scikit-learn in the subroutine [sklearn.svm.SVC](http://scikit-learn.org/stable/modules/svm.html), where the kernel function takes the form:
#
# $$\left(\gamma \langle x,x' \rangle + r\right)^d$$
#
# where $d$ is specified by the keyword `degree`, and $r$ by `coef0`.
# ### 2.1.1 One vs Rest Classification
# In the following subroutine, the data is split into "one-vs-rest", where $y=1$ corresponds to a match to the digit, and $y=0$ corresponds to all the other digits. The training step is implemented in the call to `clf.fit()`.
# In[4]:
def get_misclassification_ovr(X_train,y_train,X_test,y_test,digit,
                              Q=2,r=1.0,C=0.01,kernel='poly',verbose=False):
    """Train a one-vs-rest polynomial-kernel SVM for a single digit.

    The labels are binarized: 1 for `digit`, 0 for every other digit,
    implementing the kernel K(xn, xm) = (r + xn.T @ xm)**Q.

    Args:
        X_train, y_train, X_test, y_test: feature/label arrays
        digit: the digit treated as the positive class
        Q: polynomial degree; r: kernel coef0; C: soft-margin penalty
        kernel: SVC kernel name
        verbose: when True, print errors and the support-vector count

    Returns (E_in, E_out, number of support vectors).
    """
    classifier = SVC(C=C, kernel=kernel, degree=Q, coef0=r, gamma=1.0,
                     decision_function_shape='ovr', verbose=False)
    labels_in = (y_train == digit).astype(int)
    labels_out = (y_test == digit).astype(int)
    classifier.fit(X_train, labels_in)
    # Misclassification rates on the training and test sets.
    E_in = np.mean(labels_in != classifier.predict(X_train))
    E_out = np.mean(labels_out != classifier.predict(X_test))
    n_support_vectors = len(classifier.support_vectors_)
    if verbose is True:
        print()
        print("Q = {}, C = {}: Support vectors: {}".format(Q, C, n_support_vectors))
        print("{} vs all: E_in = {}".format(digit,E_in))
        print("{} vs all: E_out = {}".format(digit,E_out))
    return E_in, E_out, n_support_vectors
# The following code trains on the data for the cases: 0 vs all, 1 vs all, ..., 9 vs all. For each of the digits, 0 to 9, the errors $E_{in}, E_{out}$ and the number of support vectors are recorded and stored in a pandas dataframe.
# In[5]:
# One-vs-rest: train a polynomial-kernel SVM for each digit 0-9 and record
# E_in, E_out and the support-vector count per classifier.
results = pd.DataFrame()
i=0
for digit in tqdm(range(10),bar_format=tqdm_bar_fmt):
    ei, eo, n = get_misclassification_ovr(X_train,y_train,X_test,y_test,digit)
    df = pd.DataFrame({'digit': digit, 'E_in': ei, 'E_out': eo, 'n': n}, index=[i])
    results = results.append(df)
    i += 1
# In[6]:
# Display the even digits, then the odd digits, then the whole table as text.
display(HTML(results[['digit','E_in','E_out','n']].iloc[::2].to_html(index=False)))
# In[7]:
display(HTML(results[['digit','E_in','E_out','n']].iloc[1::2].to_html(index=False)))
# In[8]:
from tabulate import tabulate
print(tabulate(results, headers='keys', tablefmt='simple'))
# ### 2.1.2 One vs One Classification
#
# One vs one classification makes better use of the data, but is more computationally expensive. The following subroutine splits the data so that $y=0$ for the first digit, and $y=1$ for the second digit. The rows of data corresponding to all other digits are removed.
# In[9]:
def get_misclassification_ovo(X_train,y_train,X_test,y_test,digit1,digit2,
                          Q=2,r=1.0,C=0.01,kernel='poly'):
    """Train a one-vs-one polynomial SVM on the digit1-vs-digit2 subproblem.

    Rows belonging to any other digit are dropped; the kept labels are 1 for
    `digit1` and 0 for `digit2`.

    Returns:
        (E_in, E_out, n_support_vectors) for the pairwise classifier.
    """
    classifier = SVC(C=C, kernel=kernel, degree=Q, coef0=r, gamma=1.0,
                     decision_function_shape='ovo', verbose=False)
    mask_train = np.logical_or(y_train == digit1, y_train == digit2)
    mask_test = np.logical_or(y_test == digit1, y_test == digit2)
    labels_train = (y_train[mask_train] == digit1).astype(int)
    labels_test = (y_test[mask_test] == digit1).astype(int)
    feats_train = X_train[mask_train]
    feats_test = X_test[mask_test]
    classifier.fit(feats_train, labels_train)
    err_in = np.mean(labels_train != classifier.predict(feats_train))
    err_out = np.mean(labels_test != classifier.predict(feats_test))
    return err_in, err_out, len(classifier.support_vectors_)
# In the following code, a 1-vs-5 classifier is tested for $Q=2,5$ and $C=0.001,0.01,0.1,1$.
# In[10]:
# Sweep the 1-vs-5 classifier over the (C, Q) grid.
C_arr = [0.0001, 0.001, 0.01, 1]
Q_arr = [2, 5]
CQ_arr = list(product(C_arr, Q_arr))
# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# accumulate plain dicts and build the frame once instead.
_rows = []
for C, Q in tqdm(CQ_arr, bar_format=tqdm_bar_fmt):
    ei, eo, n = get_misclassification_ovo(X_train, y_train, X_test, y_test,
                                          digit1=1, digit2=5, Q=Q, r=1.0, C=C)
    _rows.append({'C': C, 'Q': Q, 'E_in': ei, 'E_out': eo, 'n': n})
results = pd.DataFrame(_rows)
display(HTML(results[['C','Q','E_in','E_out','n']].to_html(index=False)))
# ### 2.1.3 Polynomial Kernel with Cross-Validation
#
# For k-fold cross-validation, the subroutine [`sklearn.model_selection.KFold`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)`()` is employed to split the data into folds. Random shuffling is enabled with the `shuffle=True` option.
# In[11]:
def select_digits_ovo(X,y,digit1,digit2):
    """Restrict (X, y) to the two-digit subproblem digit1 vs digit2.

    Returns copies of the kept feature rows and binarized labels
    (1 where y == digit1, 0 where y == digit2).
    """
    keep = (y == digit1) | (y == digit2)
    feats = X[keep].copy()
    labels = (y[keep] == digit1).astype(int).copy()
    return feats, labels
# In[12]:
def get_misclassification_ovo_cv(X_in,y_in,Q=2,r=1.0,C=0.01,kernel='poly'):
    """10-fold cross-validation for a one-vs-one polynomial SVM.

    Folds are shuffled, so repeated calls give different splits.

    Returns:
        A list with one validation misclassification rate per fold.
    """
    folds = KFold(n_splits=10, shuffle=True)
    folds.get_n_splits(X_in)
    fold_errors = []
    for idx_trn, idx_val in folds.split(X_in):
        clf = SVC(C=C, kernel=kernel, degree=Q, coef0=r, gamma=1.0,
                  decision_function_shape='ovo', verbose=False)
        clf.fit(X_in[idx_trn], y_in[idx_trn])
        mistakes = y_in[idx_val] != clf.predict(X_in[idx_val])
        fold_errors.append(np.mean(mistakes))
    return fold_errors
# In this example, our parameters are $Q=2, C \in \{0.0001,0.001,0.01,0.1,1\}$ and we are considering the 1-vs-5 classifier.
# In[13]:
# Cross-validation setup: C grid, the 1-vs-5 pair, and Q = 2.
C_arr = [0.0001, 0.001, 0.01, 0.1, 1]
d1 = 1
d2 = 5
Q = 2
X_in, y_in = select_digits_ovo(X_train, y_train, d1, d2)
# Due to the effect of random shuffling, 100 runs are carried out, and the results tabulated.
# In[14]:
# 100 repetitions of shuffled 10-fold CV over the C grid.  For every fold we
# fit one SVM per C, record the per-C validation errors, and count which C
# achieved the smallest error on that fold.
E_cv_arr = []   # one length-len(C_arr) error vector appended per fold
count_arr = []  # one per-repetition count vector: how often each C won a fold
for n in tqdm(range(100),bar_format=tqdm_bar_fmt):
    counts = np.zeros(len(C_arr),int)
    kf = KFold(n_splits=10, shuffle=True)
    kf.get_n_splits(X_in)
    for train_index, test_index in kf.split(X_in):
        X_trn, X_tst = X_in[train_index], X_in[test_index]
        y_trn, y_tst = y_in[train_index], y_in[test_index]
        E_cv = []
        for C in C_arr:
            clf = SVC(C=C, kernel='poly', degree=Q, coef0=1.0, gamma=1.0,
                      decision_function_shape='ovo', verbose=False)
            model = clf.fit(X_trn,y_trn)
            E_cv.append(np.mean(y_tst != clf.predict(X_tst)))
        # Credit the C with the smallest validation error on this fold.
        counts[np.argmin(E_cv)] += 1
        E_cv_arr.append(np.array(E_cv))
    count_arr.append(counts)
# The number of times each particular value of $C$ is picked (for having the smallest $E_{cv}$) is calculated, as well as the mean cross validation error. The data is tabulated as follows:
# In[15]:
# Aggregate the 100 runs: total win count per C and mean CV error per C.
count_matrix = np.array(count_arr)
ecv_matrix = np.array(E_cv_arr)
df = pd.DataFrame({'C': C_arr})
df['count'] = count_matrix.sum(axis=0)
df['E_cv'] = ecv_matrix.mean(axis=0)
display(HTML(df.to_html(index=False)))
# In[16]:
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_subplot(121)
_ = df.plot(y=['count'], ax=ax1, marker='x', color='red', markersize=10, grid=True)
ax2 = fig.add_subplot(122)
_ = df.plot(y=['E_cv'], ax=ax2, marker='o', color='green', markersize=7, grid=True)
# ### 2.2 Scaling Law for Support Vector Machines
#
# This is based on [scikit-learn example code](http://scikit-learn.org/stable/tutorial/basic/tutorial.html), but modified to demonstrate how the training time scales with the size of the data. The dataset in this example is the original MNIST data.
# In[17]:
# fetch_mldata was deprecated in scikit-learn 0.20 and removed in 0.22
# (mldata.org is defunct); fetch_openml serves the same 70k-sample MNIST set.
# as_frame=False keeps .data/.target as numpy arrays, as the code below expects.
from sklearn.datasets import fetch_openml
from sklearn import svm
import timeit
digits = fetch_openml('mnist_784', version=1, as_frame=False)  # cached in ~/scikit_learn_data
n_max = len(digits.target)
# Random permutation so each timing run below uses a random subset.
selection = np.random.permutation(n_max)
# In[18]:
# Time SVC training on growing random subsets of MNIST.
n_arr = [500, 1000, 1500, 2000, 5000, 10000]
t_arr = []
for n in n_arr:
    subset = selection[:n]
    X, y = digits.data[subset], digits.target[subset]
    clf = svm.SVC(gamma=0.001, C=100.)
    start = timeit.default_timer()
    clf.fit(X, y)
    elapsed = timeit.default_timer() - start
    t_arr.append(elapsed)
    print("n = {}, time = {}".format(n, t_arr[-1]))
# In[19]:
plt.plot(np.array(n_arr), np.array(t_arr), 'bx-')
plt.xlabel('no. of samples')
plt.ylabel('time (s)')
plt.grid()
# ## 3. Radial Basis Functions
#
# ### 3.1 Background
#
# The hypothesis is given by:
#
# $$h\left(\mathbf{x}\right) = \sum\limits_{k=1}^K w_k \exp\left(-\gamma \Vert \mathbf{x} - \mu_k \Vert^2\right)$$
#
# This is implemented in the subroutine [`sklearn.svm.SVC`](http://scikit-learn.org/stable/modules/svm.html)`(..., kernel='rbf', ...)` as shown in the code below.
# In[20]:
def get_misclassification_rbf(X_train,y_train,X_test,y_test,C,digit1=1,digit2=5):
    """RBF-kernel one-vs-one SVM on the digit1-vs-digit2 subproblem.

    Labels are binarized to floats (1.0 for digit1, 0.0 for digit2).

    Returns:
        (E_in, E_out, n_support_vectors) for the pairwise classifier.
    """
    classifier = SVC(C=C, kernel='rbf', gamma=1.0,
                     decision_function_shape='ovo', verbose=False)
    mask_train = np.logical_or(y_train == digit1, y_train == digit2)
    mask_test = np.logical_or(y_test == digit1, y_test == digit2)
    labels_train = (y_train[mask_train] == digit1).astype(float)
    labels_test = (y_test[mask_test] == digit1).astype(float)
    feats_train = X_train[mask_train]
    feats_test = X_test[mask_test]
    classifier.fit(feats_train, labels_train)
    err_in = np.mean(labels_train != classifier.predict(feats_train))
    err_out = np.mean(labels_test != classifier.predict(feats_test))
    return err_in, err_out, len(classifier.support_vectors_)
# For $C \in \{0.01, 1, 100, 10^4, 10^6\}$, the in-sample and out-of-sample errors, as well as the number of support vectors are tabulated as follows:
# In[21]:
# Sweep the RBF 1-vs-5 classifier over C.
# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# accumulate plain dicts and build the frame once instead.
C_arr = [0.01, 1., 100., 1e4, 1e6]
_rows = []
for C in tqdm(C_arr, bar_format=tqdm_bar_fmt):
    ei, eo, n = get_misclassification_rbf(X_train, y_train, X_test, y_test,
                                          C, digit1=1, digit2=5)
    _rows.append({'C': C, 'E_in': ei, 'E_out': eo, 'n': n})
results = pd.DataFrame(_rows)
display(HTML(results.to_html(index=False)))
# The table is also plotted graphically below:
# In[22]:
def plot_Ein_Eout(df):
    """Plot E_in/E_out vs C (left panel) and support-vector count vs C (right)."""
    fig = plt.figure(figsize=(12, 5))
    err_ax = fig.add_subplot(121)
    # Both error curves share the same axes and log-scaled C axis.
    for column, mk, ms in (('E_in', 'o', 7), ('E_out', 'x', 7)):
        df.plot(ax=err_ax, x='C', y=column, kind='line', marker=mk, markersize=ms, logx=True)
    err_ax.legend(loc='best', frameon=False)
    err_ax.grid(True)
    err_ax.set_title('In-Sample vs Out-of-Sample Errors')
    sv_ax = fig.add_subplot(122)
    df.plot(ax=sv_ax, x='C', y='n', kind='line', marker='+', markersize=10, logx=True)
    sv_ax.legend(loc='best', frameon=False)
    sv_ax.grid(True)
    sv_ax.set_title('Number of Support Vectors')
plot_Ein_Eout(results)
| [
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.set_printoptions",
"itertools.product",
"timeit.default_timer",
"sklearn.model_selection.KFold",
"numpy.argmin",
"pandas.read_table",
"matplotlib.pyplot.figure",
"tabulate.tabulate",
"numpy.logical_or",
"numpy.array",
"sklearn.svm.SVC",
"numpy.random... | [((749, 796), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'threshold': '(200)'}), '(precision=4, threshold=200)\n', (768, 796), True, 'import numpy as np\n'), ((3777, 3791), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3789, 3791), True, 'import pandas as pd\n'), ((5656, 5670), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5668, 5670), True, 'import pandas as pd\n'), ((5687, 5724), 'tqdm.tqdm', 'tqdm', (['CQ_arr'], {'bar_format': 'tqdm_bar_fmt'}), '(CQ_arr, bar_format=tqdm_bar_fmt)\n', (5691, 5724), False, 'from tqdm import tqdm\n'), ((8532, 8558), 'pandas.DataFrame', 'pd.DataFrame', (["{'C': C_arr}"], {}), "({'C': C_arr})\n", (8544, 8558), True, 'import pandas as pd\n'), ((8715, 8742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (8725, 8742), True, 'import matplotlib.pyplot as plt\n'), ((9360, 9390), 'sklearn.datasets.fetch_mldata', 'fetch_mldata', (['"""MNIST original"""'], {}), "('MNIST original')\n", (9372, 9390), False, 'from sklearn.datasets import fetch_mldata\n'), ((9479, 9507), 'numpy.random.permutation', 'np.random.permutation', (['n_max'], {}), '(n_max)\n', (9500, 9507), True, 'import numpy as np\n'), ((9916, 9944), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""no. of samples"""'], {}), "('no. 
of samples')\n", (9926, 9944), True, 'import matplotlib.pyplot as plt\n'), ((9945, 9967), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""time (s)"""'], {}), "('time (s)')\n", (9955, 9967), True, 'import matplotlib.pyplot as plt\n'), ((9968, 9978), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9976, 9978), True, 'import matplotlib.pyplot as plt\n'), ((11276, 11290), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11288, 11290), True, 'import pandas as pd\n'), ((11304, 11340), 'tqdm.tqdm', 'tqdm', (['C_arr'], {'bar_format': 'tqdm_bar_fmt'}), '(C_arr, bar_format=tqdm_bar_fmt)\n', (11308, 11340), False, 'from tqdm import tqdm\n'), ((1274, 1359), 'pandas.read_table', 'pd.read_table', (['train_url'], {'names': 'column_names', 'header': 'None', 'delim_whitespace': '(True)'}), '(train_url, names=column_names, header=None, delim_whitespace=True\n )\n', (1287, 1359), True, 'import pandas as pd\n'), ((1363, 1442), 'pandas.read_table', 'pd.read_table', (['test_url'], {'names': 'column_names', 'header': 'None', 'delim_whitespace': '(True)'}), '(test_url, names=column_names, header=None, delim_whitespace=True)\n', (1376, 1442), True, 'import pandas as pd\n'), ((2843, 2946), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': 'kernel', 'degree': 'Q', 'coef0': 'r', 'gamma': '(1.0)', 'decision_function_shape': '"""ovr"""', 'verbose': '(False)'}), "(C=C, kernel=kernel, degree=Q, coef0=r, gamma=1.0,\n decision_function_shape='ovr', verbose=False)\n", (2846, 2946), False, 'from sklearn.svm import SVC\n'), ((3938, 4012), 'pandas.DataFrame', 'pd.DataFrame', (["{'digit': digit, 'E_in': ei, 'E_out': eo, 'n': n}"], {'index': '[i]'}), "({'digit': digit, 'E_in': ei, 'E_out': eo, 'n': n}, index=[i])\n", (3950, 4012), True, 'import pandas as pd\n'), ((4298, 4350), 'tabulate.tabulate', 'tabulate', (['results'], {'headers': '"""keys"""', 'tablefmt': '"""simple"""'}), "(results, headers='keys', tablefmt='simple')\n", (4306, 4350), False, 'from tabulate import tabulate\n'), 
((4827, 4930), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': 'kernel', 'degree': 'Q', 'coef0': 'r', 'gamma': '(1.0)', 'decision_function_shape': '"""ovo"""', 'verbose': '(False)'}), "(C=C, kernel=kernel, degree=Q, coef0=r, gamma=1.0,\n decision_function_shape='ovo', verbose=False)\n", (4830, 4930), False, 'from sklearn.svm import SVC\n'), ((4957, 5008), 'numpy.logical_or', 'np.logical_or', (['(y_train == digit1)', '(y_train == digit2)'], {}), '(y_train == digit1, y_train == digit2)\n', (4970, 5008), True, 'import numpy as np\n'), ((5106, 5155), 'numpy.logical_or', 'np.logical_or', (['(y_test == digit1)', '(y_test == digit2)'], {}), '(y_test == digit1, y_test == digit2)\n', (5119, 5155), True, 'import numpy as np\n'), ((5624, 5645), 'itertools.product', 'product', (['C_arr', 'Q_arr'], {}), '(C_arr, Q_arr)\n', (5631, 5645), False, 'from itertools import product\n'), ((5882, 5956), 'pandas.DataFrame', 'pd.DataFrame', (["{'C': C, 'Q': Q, 'E_in': ei, 'E_out': eo, 'n': n}"], {'index': '[i]'}), "({'C': C, 'Q': Q, 'E_in': ei, 'E_out': eo, 'n': n}, index=[i])\n", (5894, 5956), True, 'import pandas as pd\n'), ((6501, 6540), 'numpy.logical_or', 'np.logical_or', (['(y == digit1)', '(y == digit2)'], {}), '(y == digit1, y == digit2)\n', (6514, 6540), True, 'import numpy as np\n'), ((6736, 6768), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (6741, 6768), False, 'from sklearn.model_selection import cross_val_score, cross_val_predict, ShuffleSplit, KFold\n'), ((7706, 7738), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (7711, 7738), False, 'from sklearn.model_selection import cross_val_score, cross_val_predict, ShuffleSplit, KFold\n'), ((8580, 8599), 'numpy.array', 'np.array', (['count_arr'], {}), '(count_arr)\n', (8588, 8599), True, 'import numpy as np\n'), ((8630, 8648), 'numpy.array', 'np.array', (['E_cv_arr'], {}), 
'(E_cv_arr)\n', (8638, 8648), True, 'import numpy as np\n'), ((9679, 9708), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(0.001)', 'C': '(100.0)'}), '(gamma=0.001, C=100.0)\n', (9686, 9708), False, 'from sklearn import svm\n'), ((9717, 9739), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9737, 9739), False, 'import timeit\n'), ((9877, 9892), 'numpy.array', 'np.array', (['n_arr'], {}), '(n_arr)\n', (9885, 9892), True, 'import numpy as np\n'), ((9893, 9908), 'numpy.array', 'np.array', (['t_arr'], {}), '(t_arr)\n', (9901, 9908), True, 'import numpy as np\n'), ((10457, 10536), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': '"""rbf"""', 'gamma': '(1.0)', 'decision_function_shape': '"""ovo"""', 'verbose': '(False)'}), "(C=C, kernel='rbf', gamma=1.0, decision_function_shape='ovo', verbose=False)\n", (10460, 10536), False, 'from sklearn.svm import SVC\n'), ((10565, 10616), 'numpy.logical_or', 'np.logical_or', (['(y_train == digit1)', '(y_train == digit2)'], {}), '(y_train == digit1, y_train == digit2)\n', (10578, 10616), True, 'import numpy as np\n'), ((10716, 10765), 'numpy.logical_or', 'np.logical_or', (['(y_test == digit1)', '(y_test == digit2)'], {}), '(y_test == digit1, y_test == digit2)\n', (10729, 10765), True, 'import numpy as np\n'), ((11486, 11552), 'pandas.DataFrame', 'pd.DataFrame', (["{'C': C, 'E_in': ei, 'E_out': eo, 'n': n}"], {'index': '[i]'}), "({'C': C, 'E_in': ei, 'E_out': eo, 'n': n}, index=[i])\n", (11498, 11552), True, 'import pandas as pd\n'), ((11735, 11762), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (11745, 11762), True, 'import matplotlib.pyplot as plt\n'), ((6992, 7095), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': 'kernel', 'degree': 'Q', 'coef0': 'r', 'gamma': '(1.0)', 'decision_function_shape': '"""ovo"""', 'verbose': '(False)'}), "(C=C, kernel=kernel, degree=Q, coef0=r, gamma=1.0,\n decision_function_shape='ovo', verbose=False)\n", (6995, 7095), False, 
'from sklearn.svm import SVC\n'), ((7994, 8099), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': '"""poly"""', 'degree': 'Q', 'coef0': '(1.0)', 'gamma': '(1.0)', 'decision_function_shape': '"""ovo"""', 'verbose': '(False)'}), "(C=C, kernel='poly', degree=Q, coef0=1.0, gamma=1.0,\n decision_function_shape='ovo', verbose=False)\n", (7997, 8099), False, 'from sklearn.svm import SVC\n'), ((8232, 8247), 'numpy.argmin', 'np.argmin', (['E_cv'], {}), '(E_cv)\n', (8241, 8247), True, 'import numpy as np\n'), ((8278, 8292), 'numpy.array', 'np.array', (['E_cv'], {}), '(E_cv)\n', (8286, 8292), True, 'import numpy as np\n'), ((9774, 9796), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9794, 9796), False, 'import timeit\n')] |
#!/usr/bin/python
#
# Copyright 2020 The Authors of "Rate-Distortion Optimization Guided Autoencoder for Isometric Embedding in Euclidean Latent Space."
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc
import argparse,shutil
import os,sys,glob,scipy.misc
import matplotlib.pyplot as plt
import ms_ssim
import inputpipeline
from metric import Psnr, msssim
import math
import ssim_matrix
from sklearn.linear_model import LinearRegression
def analysis_transform(tensor, num_filters):
    """Builds the analysis transform (image batch -> sampled latent code).

    Four strided tfc.SignalConv2D layers (each downsampling by 2, GDN
    activations except the last) are flattened and fed to a dense encoder
    head configured by the module-level ``args`` (activation, dim1, z).

    Args:
        tensor: input image batch (NHWC float tensor).
        num_filters: channel count for every convolutional layer.

    Returns:
        (z, mean, sigma): reparameterized latent sample, its mean, and the
        log-variance tensor.

    NOTE(review): ``z``, ``mean`` and ``sigma`` are only assigned in the
    ``args.activation == 'softplus'`` branch; with 'sigmoid' or 'None' the
    final ``return`` raises NameError — confirm only 'softplus' is used.
    """
    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (9, 9), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)
        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)
        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=False, activation=None)
            tensor = layer(tensor)
        with tf.variable_scope('reshape'):
            tensor = tf.layers.flatten(tensor)
        if args.activation == 'sigmoid':
            with tf.variable_scope('encoder'):
                tensor = tf.nn.sigmoid(tf.layers.dense(tensor, args.dim1))
                tensor = tf.layers.dense(tensor, args.z)
        elif args.activation == 'softplus':
            with tf.variable_scope('encoder'):
                tensor = tf.nn.softplus(tf.layers.dense(tensor, args.dim1))
                # mean of z
                mean = tf.layers.dense(tensor, args.z)
                # mean of sigma (log-variance head, same width as the mean)
                sigma = tf.layers.dense(tensor, args.z)
                # dense layer
                # Sampler: Normal (gaussian) random distribution
                eps = tf.random_normal(tf.shape(mean), dtype=tf.float32, mean=0., stddev=1.0,
                                       name='epsilon')
                # reparameterization trick: z = mean + std * eps
                z = mean + tf.exp(sigma / 2) * eps
                # x = tf.layers.dense(x, 128, tf.nn.tanh)
        elif args.activation == 'None':
            with tf.variable_scope('encoder'):
                tensor = tf.layers.dense(tensor, args.z)
    return z, mean, sigma
def synthesis_transform(tensor, num_filters):
    """Builds the synthesis transform (latent code -> image batch).

    A dense decoder head (shape controlled by the module-level ``args``)
    maps the latent to a 4x4xnum_filters feature map, which four upsampling
    tfc.SignalConv2D layers (inverse-GDN activations except the last)
    expand back to a 3-channel image.  Reuse is enabled (tf.AUTO_REUSE) so
    the same weights serve multiple calls (see ``train``/``sample_image_v2``).

    Args:
        tensor: latent code batch, width args.z (or whatever the caller feeds).
        num_filters: channel count for the convolutional layers.

    Returns:
        Reconstructed image tensor (NHWC, 3 channels).
    """
    with tf.variable_scope("synthesis", reuse=tf.AUTO_REUSE):
        if args.activation == 'sigmoid':
            with tf.variable_scope('decoder'):
                tensor = tf.nn.sigmoid(tf.layers.dense(tensor, args.dim1))
                tensor = tf.layers.dense(tensor, 4 * 4 * num_filters)
        elif args.activation == 'softplus':
            with tf.variable_scope('decoder'):
                tensor = tf.nn.softplus(tf.layers.dense(tensor, args.dim1))
                # args.ac2 toggles a softplus on the final dense layer.
                if args.ac2 == 'True':
                    tensor = tf.nn.softplus(tf.layers.dense(tensor, 4 * 4 * num_filters))
                else:
                    tensor = tf.layers.dense(tensor, 4 * 4 * num_filters)
        elif args.activation == 'None':
            with tf.variable_scope('decoder'):
                tensor = tf.layers.dense(tensor, 4 * 4 * num_filters)
        with tf.variable_scope('reshape'):
            # dense layer
            tensor = tf.reshape(tensor, [-1, 4, 4, num_filters])
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)
        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters // 2, (5, 5), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)
        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                3, (9, 9), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=None)
            tensor = layer(tensor)
        return tensor
def quantize_image(image):
    """Convert a [0, 1] float image tensor to uint8 pixels (round, then saturate)."""
    scaled = tf.round(image * 255)
    return tf.saturate_cast(scaled, tf.uint8)
def train():
    """Build the VAE graph and run the training loop.

    All configuration comes from the module-level ``args``: checkpoint
    directory, data glob, loss selection (loss1/loss2), lambda weights,
    split/finetune switches, and step counts.  Side effects: writes
    params.log, a CSV training log, periodic reconstruction grids (PNG),
    and PSNR/MS-SSIM logs into args.checkpoint_dir.

    Returns:
        -1 on an invalid args.loss1; otherwise runs until StopAtStepHook
        ends the MonitoredTrainingSession.
    """
    # Prepare the checkpoint directory and dump all args to params.log.
    if not os.path.exists(args.checkpoint_dir):
        # shutil.rmtree(args.checkpoint_dir)
        os.makedirs(args.checkpoint_dir)
    log_name = os.path.join(args.checkpoint_dir, 'params.log')
    if os.path.exists(log_name):
        print('remove file:%s' % log_name)
        os.remove(log_name)
    params = open(log_name, 'w')
    for arg in vars(args):
        str_ = '%s: %s.\n' % (arg, getattr(args, arg))
        print(str_)
        params.write(str_)
    params.close()
    tf.logging.set_verbosity(tf.logging.INFO)
    # tf Graph input (only pictures)
    # NOTE(review): data_glob is only assigned for the 'celeba' data set;
    # any other args.data_set raises NameError below — confirm intended.
    if args.data_set.lower() == 'celeba':
        data_glob = imgs_path = args.img_path + '/*.png'
        print(imgs_path)
    ip_train = inputpipeline.InputPipeline(
        inputpipeline.get_dataset(data_glob),
        args.patch_size, batch_size=args.batch_size,
        shuffle=True,
        num_preprocess_threads=6,
        num_crops_per_img=6)
    X = ip_train.get_batch()
    # Construct model
    #encoder_op = analysis_transform(X, 64)
    encoder_op, mean, var = analysis_transform(X, 64)
    # With args.split set, decode both the mean and the sampled latent so a
    # second distortion term (d2) can compare the two reconstructions.
    if args.split == 'None':
        X_pred = synthesis_transform(encoder_op, 64)
    else:
        X_pred = synthesis_transform(mean, 64)
        X_pred2 = synthesis_transform(encoder_op, 64)
    # Define loss and optimizer, minimize the squared error
    mse_loss = tf.reduce_mean(tf.squared_difference(255 * X, 255 * X_pred))
    msssim_loss = ms_ssim.MultiScaleSSIM(X * 255, X_pred * 255, data_format='NHWC')
    # d1: distortion between input and reconstruction (mse or ssim).
    if args.loss1 =="mse":
        # mse loss
        d1 = tf.reduce_mean(tf.squared_difference( X, X_pred))
    elif args.loss1 == 'ssim':
        d1 = tf.reduce_mean(ssim_matrix.ssim(X * 255, (X - X_pred) * 255, X_pred, max_val=255, mode='train',compensation=1))
    else:
        print('error invalid loss1')
        return -1
    # d2: distortion between mean-decode and sample-decode (split mode only).
    if args.split != 'None':
        if args.loss2 =="mse":
            # mse loss
            d2 = tf.reduce_mean(tf.squared_difference(X_pred, X_pred2))
        elif args.loss2 == 'ssim':
            d2 = tf.reduce_mean(ssim_matrix.ssim(X_pred * 255, (X_pred - X_pred2) * 255, X_pred2, max_val=255, mode='train',compensation=1))
    # KL loss (standard Gaussian VAE KL divergence, summed over latent dims)
    kl_div_loss = 1 + var - tf.square(mean) - tf.exp(var)
    kl_div_loss = -0.5 * tf.reduce_sum(kl_div_loss, 1)
    kl_div_loss = tf.reduce_mean(kl_div_loss)
    # total loss
    if args.split != 'None':
        train_loss = args.lambda1 * d1 + args.lambda2 * d2 + kl_div_loss
    else:
        train_loss = args.lambda1 * d1 + kl_div_loss
    step = tf.train.create_global_step()
    # Lower learning rate when fine-tuning from an existing checkpoint.
    if args.finetune != 'None':
        learning_rate = 0.00001
    else:
        learning_rate = 0.0001
    main_lr = tf.train.AdamOptimizer(learning_rate)
    optimizer = main_lr.minimize(train_loss, global_step=step)
    tf.summary.scalar("loss", train_loss)
    #tf.summary.scalar("bpp", bpp)
    tf.summary.scalar("mse", mse_loss)
    logged_tensors = [
        tf.identity(train_loss, name="train_loss"),
        # tf.identity(bpp, name="train_bpp"),
        tf.identity(msssim_loss, name="ms-ssim")
    ]
    tf.summary.image("original", quantize_image(X))
    tf.summary.image("reconstruction", quantize_image(X_pred))
    hooks = [
        tf.train.StopAtStepHook(last_step=args.num_steps),
        tf.train.NanTensorHook(train_loss),
        tf.train.LoggingTensorHook(logged_tensors, every_n_secs=60),
        tf.train.SummarySaverHook(save_steps=args.save_steps, summary_op=tf.summary.merge_all()),
        tf.train.CheckpointSaverHook(save_steps=args.save_steps, checkpoint_dir=args.checkpoint_dir)
    ]
    # uint8 versions of reconstruction and input for periodic image dumps.
    X_rec = tf.clip_by_value(X_pred, 0, 1)
    X_rec = tf.round(X_rec * 255)
    X_rec = tf.cast(X_rec, tf.uint8)
    X_ori = tf.clip_by_value(X, 0, 1)
    X_ori = tf.round(X_ori * 255)
    X_ori = tf.cast(X_ori, tf.uint8)
    if args.finetune != 'None':
        init_fn_ae = tf.contrib.framework.assign_from_checkpoint_fn(args.finetune,tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
    train_count = 0
    parameter = 'VAE_%s' % (args.loss2)
    with tf.train.MonitoredTrainingSession(
            hooks=hooks) as sess:
        if args.finetune != 'None':
            init_fn_ae(sess)
            print('load from %s'%(args.finetune))
        while not sess.should_stop():
            # One optimizer step; fetch losses and image tensors together.
            if args.split != 'None':
                _, train_loss_ , d1_, d2_, kl_div_loss_, rec_img, X_ori_ = sess.run(
                    [optimizer, train_loss, d1, d2, kl_div_loss, X_rec, X_ori])
                if (train_count + 1) % args.display_steps == 0:
                    f_log = open('%s/log.csv' % (args.checkpoint_dir), 'a')
                    f_log.write('%d,loss,%f, kl,%f, d1,%f, d2,%f\n' % (train_count + 1, train_loss_, kl_div_loss_, d1_, d2_))
                    print('%d,loss,%f, kl,%f, d1,%f, d2,%f\n' % (train_count + 1, train_loss_, kl_div_loss_, d1_, d2_))
                    f_log.close()
            else:
                _, train_loss_ , d1_, kl_div_loss_, rec_img, X_ori_ = sess.run(
                    [optimizer, train_loss, d1, kl_div_loss, X_rec, X_ori])
                if (train_count + 1) % args.display_steps == 0:
                    f_log = open('%s/log.csv' % (args.checkpoint_dir), 'a')
                    f_log.write('%d,loss,%f, kl,%f, d1,%f\n' % (train_count + 1, train_loss_, kl_div_loss_, d1_))
                    print('%d,loss,%f, kl,%f, d1,%f\n' % (train_count + 1, train_loss_, kl_div_loss_, d1_))
                    f_log.close()
            # Periodically tile the batch into a square grid, save it, and
            # log PSNR / MS-SSIM of reconstruction vs input.
            if (train_count + 1) % args.save_steps == 0:
                num = math.floor(math.sqrt(rec_img.shape[0]))
                show_img = np.zeros([num * args.patch_size, num * args.patch_size, 3])
                ori_img = np.zeros([num * args.patch_size, num * args.patch_size, 3])
                for i in range(num):
                    for j in range(num):
                        show_img[i * args.patch_size:(i + 1) * args.patch_size,
                        j * args.patch_size:(j + 1) * args.patch_size, :] = rec_img[num * i + j, :, :, :]
                        ori_img[i * args.patch_size:(i + 1) * args.patch_size,
                        j * args.patch_size:(j + 1) * args.patch_size, :] = X_ori_[num * i + j, :, :, :]
                save_name = os.path.join(args.checkpoint_dir, 'rec_%s_%s.png' % (parameter, train_count + 1))
                # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.3;
                # this requires an old SciPy — confirm pinned environment.
                scipy.misc.imsave(save_name, show_img)
                psnr_ = Psnr(ori_img, show_img)
                msssim_ = msssim(ori_img, show_img)
                # print('FOR calculation %s_%s_%s_%s_la1%s_la2%s_%s'%(
                #    args.activation, args.dim1, args.dim2, args.z, args.lambda1, args.lambda2, train_count))
                print("PSNR (dB), %.2f,Multiscale SSIM, %.4f,Multiscale SSIM (dB), %.2f" % (
                    psnr_, msssim_, -10 * np.log10(1 - msssim_)))
                f_log_ssim = open('%s/log_ssim_%s.csv' % (args.checkpoint_dir, parameter), 'a')
                f_log_ssim.write('%s,%d,PSNR (dB), %.2f,Multiscale SSIM, %.4f,Multiscale SSIM (dB), %.2f\n' % (
                    parameter, train_count + 1,
                    psnr_, msssim_, -10 * np.log10(1 - msssim_)
                ))
                f_log_ssim.close()
            train_count += 1
def read_img(n):
    """Load the first n*n data-set images as center-cropped, resized patches.

    Args:
        n: grid side length; n*n images are loaded.

    Returns:
        (imgs, show_img): float array of shape
        (n*n, patch_size, patch_size, 3) and the n-by-n tiled montage,
        which is also written to <checkpoint_dir>/clic_rdae_ori.png.
    """
    import random  # NOTE(review): unused import
    # NOTE(review): images_path is only assigned for 'celeba'; any other
    # args.data_set raises NameError on the glob below — confirm intended.
    if args.data_set.lower() == 'celeba':
        images_path = args.img_path + '/*.png'
    images = glob.glob(images_path)
    images = sorted(images)
    # print(imgs.shape)
    imgs = np.zeros([n * n, args.patch_size, args.patch_size, 3])
    show_img = np.zeros([n * args.patch_size, n * args.patch_size, 3])
    for i in range(n * n):
        img_p = images[i]
        # NOTE(review): scipy.misc.imread/imresize were removed in
        # SciPy >= 1.3; this requires an old SciPy — confirm pinned env.
        img = scipy.misc.imread(img_p).astype(np.float)
        h, w = img.shape[:2]
        # Center-crop the longer side to a square before resizing.
        if h > w:
            j = (h - w) // 2
            temp = scipy.misc.imresize(img[j:h - j, :, :], [args.patch_size, args.patch_size])
        else:
            j = (w - h) // 2
            temp = scipy.misc.imresize(img[:, j:w - j, :], [args.patch_size, args.patch_size])
        imgs[i, :, :, :] = temp
    # Tile the n*n patches into one montage image.
    for i in range(n):
        for j in range(n):
            show_img[i * args.patch_size:(i + 1) * args.patch_size, j * args.patch_size:(j + 1) * args.patch_size,
            :] = imgs[n * i + j, :, :, :]
    save_name = os.path.join(args.checkpoint_dir, 'clic_rdae_ori.png')
    scipy.misc.imsave(save_name, show_img)
    return imgs.astype(np.float), show_img
def read_png(filename):
    """Load a PNG file as a float32 tensor scaled to [0, 1]."""
    raw = tf.read_file(filename)
    decoded = tf.image.decode_image(raw, channels=3)
    # uint8 pixels -> [0, 1] floats.
    return tf.cast(decoded, tf.float32) / 255
def plot_analysis():
    """Encode the whole data set and analyze per-dimension latent variance.

    Restores the latest checkpoint, runs every batch through the encoder,
    stacks all latent samples, then saves a bar plot of the sorted
    per-dimension variances plus .npy dumps of the stds, their sort index,
    and the per-dimension means into args.checkpoint_dir.
    """
    cdim = args.cdim
    preprocess_threads = 6
    # read_img
    train_path = args.img_path + '/*.png'
    train_files = glob.glob(train_path)
    if not train_files:
        raise RuntimeError(
            "No training images found with glob '{}'.".format(train_path))
    # One-pass (non-repeating) input pipeline: the loop below ends on
    # tf.errors.OutOfRangeError after a single epoch.
    train_dataset = tf.data.Dataset.from_tensor_slices(train_files)
    train_dataset = train_dataset.shuffle(buffer_size=len(train_files))  # .repeat()
    train_dataset = train_dataset.map(
        read_png, num_parallel_calls=preprocess_threads)
    train_dataset = train_dataset.map(
        lambda x: tf.random_crop(x, (args.patch_size, args.patch_size, 3)))
    train_dataset = train_dataset.batch(args.batch_size)
    train_dataset = train_dataset.prefetch(32)
    x = train_dataset.make_one_shot_iterator().get_next()
    # Construct model
    encoder_op, mean, var = analysis_transform(x, 64)
    x_pred = synthesis_transform(encoder_op,64)
    # Bring both images back to 0..255 range.
    x_pred = tf.clip_by_value(x_pred, 0, 1)
    x_pred = tf.round(x_pred * 255)
    x_pred = tf.cast(x_pred, tf.uint8)
    #end construct model
    #images_path = '../../data/CelebA/img_align_celeba_png/*.png'
    images_path = args.img_path + '/*.png'
    images = glob.glob(images_path)
    images = sorted(images)
    #temp = scipy.misc.imresize(img[:, j:-j, :], [args.patch_size, args.patch_size])
    batch_num = math.floor(len(images) / args.batch_size)
    print(len(images), batch_num)
    num = int(math.floor(math.sqrt(args.batch_size)))
    show_img = np.zeros([num * args.patch_size, num * args.patch_size, 3])
    parameter = 'VAE_%s' % (args.loss2)
    n = 0
    with tf.Session() as sess:
        # restore model
        latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
        #latest = os.path.join(args.checkpoint_dir, 'model.ckpt-%s' % (args.num_steps))
        tf.train.Saver().restore(sess, save_path=latest)
        try:
            # Accumulate latents (and mean/precision stats) over the epoch.
            while True:
                #if n > 100:
                #    break
                #rec_img, z, mean_, var_ = sess.run([x_pred, encoder_op, mean, var], feed_dict={inputs: x})
                rec_img, z, mean_, var_ = sess.run([x_pred, encoder_op, mean, var])
                if n == 0:
                    zs = z
                    means_ = mean_
                    vars_ = 1/(np.power( np.exp(var_ * 0.5),2))
                else:
                    zs = np.vstack((zs, z))
                    means_ = np.vstack((means_, mean_))
                    vars_ = np.vstack((vars_, 1/(np.power( np.exp(var_ * 0.5),2))))
                n += 1
        # NOTE(review): the trailing backslash below joins the next line,
        # making print('end!') the one-line except body — works, but the
        # backslash looks accidental.
        except tf.errors.OutOfRangeError:\
            print('end!')
        print('zs', zs.shape)
        # Per-dimension spread of the sampled latents across the data set.
        stds = np.squeeze(np.std(zs, axis=0))
        means = np.squeeze(np.mean(zs, axis=0))
        print('stds', stds.shape, 'means', means.shape)
        std_sorted = np.sort(stds)[::-1]
        std_index = np.argsort(stds)[::-1]
        var_sorted =np.power(std_sorted, 2)
        df = var_sorted.cumsum() / var_sorted.sum()
        x_1 = np.arange(0, var_sorted.shape[0], 1)
        fig, ax1 = plt.subplots()
        ax1.bar(x_1, var_sorted)
        ax1.set_xlabel('z(sorted by variance(descending order))')
        ax1.set_ylabel('variance of z')
        fig_name = os.path.join(args.checkpoint_dir, 'variance_df_%s.png' % (parameter))
        plt.savefig(fig_name)
        std_name = os.path.join(args.checkpoint_dir, 'std_df_%s.npy' % (parameter))
        np.save(std_name, stds)
        std_iname = os.path.join(args.checkpoint_dir, 'std_index_%s.npy' % (parameter))
        np.save(std_iname, std_index)
        mean_name = os.path.join(args.checkpoint_dir, 'mean_%s.npy' % (parameter))
        np.save(mean_name, means)
def sample_vw(n_batch, n_dim):
    """Sample n_batch pairs (v, w) of n_dim-dimensional vectors.

    For each pair: draw random spherical angles (the first cosine redrawn
    uniformly from [-1, 1], its sine kept consistent via arccos), form the
    spherical-coordinate vector, combine it with the first basis vector,
    and rotate both by a random angle gm.

    Returns:
        Two numpy arrays of shape (n_batch, n_dim).
    """
    vs, ws = [], []
    for _ in range(n_batch):
        basis = np.zeros([n_dim])
        basis[0] = 1
        angles = np.random.rand(n_dim) * 2 * np.pi
        radius = 1
        sines = np.sin(angles)
        cosines = np.cos(angles)
        cosines[0] = (np.random.rand(1) - 0.5) * 2
        sines[0] = np.sin(np.arccos(cosines[0]))
        # Spherical-coordinate components: product of leading sines times
        # the current cosine.  (The original's special case for the final
        # index was unreachable, so every component uses this formula.)
        coords = np.array([radius * np.prod(sines[0:k]) * cosines[k]
                           for k in range(angles.shape[0])])
        gm = np.random.rand(1) * 2 * np.pi
        x = -(cosines[0] / sines[0]) * basis + (1.0 / sines[0]) * coords
        y = basis
        v_rot = -np.sin(gm) * x + np.cos(gm) * y
        w_rot = (np.cos(gm) * sines[0] - np.sin(gm) * cosines[0]) * x + (np.sin(gm) * sines[0] + np.cos(gm) * cosines[0]) * y
        vs.append(v_rot)
        ws.append(w_rot)
    return np.array(vs), np.array(ws)
def sample_image_v2():
    """Latent-space traversal and decoder-isometry analysis for a trained VAE.

    Builds the analysis/synthesis graph, restores the latest checkpoint from
    ``args.checkpoint_dir`` and then:

    1. decodes latent traversals along the highest-variance latent dimensions
       (indices taken from the std-index file produced by the analysis step)
       and saves them as 9x9 image grids;
    2. perturbs encoded means with random direction pairs (v, w) from
       ``sample_vw`` and regresses image-space inner products against
       latent-space inner products, saving scatter plots that show how
       isometric the decoder mapping is.

    Relies on the module-level ``args`` namespace and on helpers defined
    elsewhere in this file (``analysis_transform``, ``synthesis_transform``,
    ``read_img``, ``sample_vw``) plus the project module ``ssim_matrix``.
    Side effects: writes several .png files into ``args.checkpoint_dir`` and
    prints diagnostics to stdout.
    """
    sample_num = 9
    cdim = args.cdim
    # Placeholders: latent codes to decode, and raw image batches to encode.
    decoder_inputs = tf.placeholder(tf.float32, [sample_num * sample_num, args.z])
    inputs = tf.placeholder(tf.float32, [sample_num * sample_num, args.patch_size, args.patch_size, cdim])
    # Construct model
    encoder_op, mean_op, var_op = analysis_transform(inputs, 64)
    #x_pred = synthesis_transform(y_hat, 64)
    x_pred = synthesis_transform(encoder_op,64)
    # Decode a batch of latents drawn from the standard-normal prior.
    z_p = tf.random_normal(shape=(sample_num*sample_num, args.z), mean=0.0, stddev=1.0)
    x_pred_s = synthesis_transform(z_p, 64)
    # Bring both images back to 0..255 range.
    x_pred_r = tf.clip_by_value(x_pred, 0, 1)
    x_pred_r = tf.round(x_pred_r * 255)
    x_pred_r = tf.cast(x_pred_r, tf.uint8)
    x_pred_s_r = tf.clip_by_value(x_pred_s, 0, 1)
    x_pred_s_r = tf.round(x_pred_s_r * 255)
    x_pred_s_r = tf.cast(x_pred_s_r, tf.uint8)
    # Decoder driven directly by fed-in latent codes (used for traversals).
    x_pred_2 = synthesis_transform(decoder_inputs, 64)
    x_pred_2_r = tf.clip_by_value(x_pred_2, 0, 1)
    x_pred_2_r = tf.round(x_pred_2_r * 255)
    x_pred_2_r = tf.cast(x_pred_2_r, tf.uint8)
    # metric
    ###add
    ssim_loss_gt = tf.image.ssim(inputs * 255, x_pred * 255, max_val=255)
    mse2_loss_gt = tf.reduce_mean(tf.squared_difference(inputs, x_pred), reduction_indices=[1,2,3])
    # Five independent latent inputs: base code, two perturbed codes (v/w
    # directions), and two epsilon-shifted codes (plain / sigma-scaled).
    decoder_inputs_loss1 = tf.placeholder(tf.float32, [sample_num * sample_num, args.z])
    decoder_inputs_loss2 = tf.placeholder(tf.float32, [sample_num * sample_num, args.z])
    decoder_inputs_loss3 = tf.placeholder(tf.float32, [sample_num * sample_num, args.z])
    decoder_inputs_loss4 = tf.placeholder(tf.float32, [sample_num * sample_num, args.z])
    decoder_inputs_loss5 = tf.placeholder(tf.float32, [sample_num * sample_num, args.z])
    #y = tf.reshape(encoder_op, [-1, 1, 1, z_num])
    x_pred_loss1 = synthesis_transform(decoder_inputs_loss1, 64)
    x_pred_loss2 = synthesis_transform(decoder_inputs_loss2, 64)
    x_pred_loss3 = synthesis_transform(decoder_inputs_loss3, 64)
    x_pred_loss4 = synthesis_transform(decoder_inputs_loss4, 64)
    x_pred_loss5 = synthesis_transform(decoder_inputs_loss5, 64)
    # metric
    # NOTE(review): loss_2_gt is only defined for loss1 in {"mse", "ssim"};
    # any other value would raise NameError at sess.run time — confirm the
    # CLI restricts loss1 accordingly.
    if args.loss1 == "mse":
        loss_2_gt = (1 - mse2_loss_gt)
    elif args.loss1 == "ssim":
        #dammy
        loss_2_gt = (1 - ssim_loss_gt)
    # SSIM-based similarity between the decoded base image and the two
    # perturbation differences (project helper; see ssim_matrix module).
    loss_2_d = ssim_matrix.ssim(255 * x_pred_loss1, 255 * (x_pred_loss1 - x_pred_loss2),
                                255 * (x_pred_loss1 - x_pred_loss3), max_val=255, compensation=1)
    # define_input
    # images_path = '../../data/CelebA/img_align_celeba_png/*.png'
    images_path = args.img_path + '/*.png'
    images = glob.glob(images_path)
    images = sorted(images)
    #temp = scipy.misc.imresize(img[:, j:-j, :], [args.patch_size, args.patch_size])
    # Load the per-dimension latent statistics saved by the analysis step.
    parameter = 'VAE_%s' % (args.loss2)
    std_name = os.path.join(args.checkpoint_dir, 'std_df_%s.npy' % (parameter))
    std = np.load(std_name)
    mean_name = os.path.join(args.checkpoint_dir, 'mean_%s.npy' % (parameter))
    mean = np.load(mean_name)
    std_iname = os.path.join(args.checkpoint_dir, 'std_index_%s.npy' % (parameter))
    std_index = np.load(std_iname)
    sample_num = 9
    sample_cen = int((sample_num - 1)/2)
    # Hand-picked ranks (by descending variance) to traverse in grid "t".
    sh_ind=[0,1,2,20,21,22,200,201,202]
    samples_t = np.zeros([sample_num , sample_num, args.z])
    samples_h = np.zeros([sample_num, sample_num, args.z])
    std_ranges = []
    std_ranges_h = []
    for i in range(sample_num):
        # Step size per grid column, scaled from the dimension's std.
        std_ranges.append(std[std_index[sh_ind[i]]] / ((sample_num - 1) / 2) * 2)
        std_ranges_h.append(std[std_index[i]] / ((sample_num - 1) / 2) * 2)
    show_img_sample_t = np.zeros([sample_num * args.patch_size, sample_num * args.patch_size, 3])
    show_img_sample_h = np.zeros([sample_num * args.patch_size, sample_num * args.patch_size, 3])
    # Row i traverses one latent dimension; column j offsets it from the mean.
    for i in range(sample_num):
        for j in range(sample_num):
            samples_t[i, j] = mean
            samples_t[i, j][std_index[sh_ind[i]]] += (std_ranges[i] * (j - sample_cen))
            samples_h[i, j] = mean
            samples_h[i, j][std_index[i]] += (std_ranges_h[i]*(j-sample_cen))
    samples_t = samples_t.reshape((-1, args.z))
    samples_h = samples_h.reshape((-1, args.z))
    num = 9
    x, ori_img = read_img(num)
    x = x / 255.
    with tf.Session() as sess:
        # restore model
        latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
        #latest = os.path.join(args.checkpoint_dir, 'model.ckpt-%s' % (args.num_steps))
        tf.train.Saver().restore(sess, save_path=latest)
        encoder_op_ , x_pred_ = sess.run([encoder_op, x_pred], feed_dict={inputs: x})
        # Decode both traversal grids.
        sample_rec_t = sess.run(x_pred_2_r, feed_dict={decoder_inputs: samples_t})
        sample_rec_h = sess.run(x_pred_2_r, feed_dict={decoder_inputs: samples_h})
        # Tile the decoded patches into num x num mosaics.
        for i in range(num):
            for j in range(num):
                show_img_sample_t[i * args.patch_size:(i + 1) * args.patch_size,
                                  j * args.patch_size:(j + 1) * args.patch_size, :] = sample_rec_t[num * i + j, :, :, :]
                show_img_sample_h[i * args.patch_size:(i + 1) * args.patch_size,
                                  j * args.patch_size:(j + 1) * args.patch_size, :] = sample_rec_h[num * i + j, :, :, :]
        save_name = os.path.join(args.checkpoint_dir, '%s_sample_traverse.png' % (parameter))
        scipy.misc.imsave(save_name, show_img_sample_t)
        save_name = os.path.join(args.checkpoint_dir, '%s_traverse_top9.png' % (parameter))
        scipy.misc.imsave(save_name, show_img_sample_h)
        # ---- Isometry analysis over dist_num batches of real images ----
        dist_num = 20
        d_list_c_all = []  # unused; kept as-is
        d_num=50  # unused; kept as-is
        z_mtrs = np.arange(0)  # accumulators start as empty arrays
        x_mtrs = np.arange(0)
        dists = np.arange(0)
        delta = args.delta
        d_eps = np.arange(0)
        d_eps_sigma = np.arange(0)
        epsln = np.ones((sample_num * sample_num, args.z)) * args.delta
        for n in range(dist_num):
            imgs = np.zeros([sample_num * sample_num, args.patch_size, args.patch_size, 3])
            for i in range(num * num):
                # print(i)
                # NOTE(review): assumes files are already patch_size x
                # patch_size; np.float is deprecated in modern NumPy.
                img_p = images[n * sample_num * sample_num + i]
                img = scipy.misc.imread(img_p).astype(np.float)
                imgs[i, :, :, :] = img
            x = imgs.astype(np.float) / 255.
            encoder_op_, mean_op_, x_pred_, loss_gt, var_op_ = sess.run([encoder_op, mean_op, x_pred, loss_2_gt, var_op], feed_dict={inputs: x})
            # Base latent code plus four perturbed variants.
            dec_ip = mean_op_.copy()  # .reshape(-1, args.z)
            dec_ip_d = mean_op_.copy()  # .reshape(-1, args.z)
            dec_ip_d_2 = mean_op_.copy()  # .reshape(-1, args.z)
            dec_ip_d_e = mean_op_.copy()  # .reshape(-1, args.z)
            dec_ip_d_e_sigma = mean_op_.copy()  # .reshape(-1, args.z)
            dec_ip_d_e = dec_ip_d_e + epsln
            # Epsilon scaled by the posterior std (var_op_ holds log-variance).
            dec_ip_d_e_sigma = dec_ip_d_e_sigma + epsln * np.exp(var_op_/2)
            # Random direction pair, scaled to length delta.
            dv, dw = sample_vw(sample_num * sample_num, args.z)
            dv = delta * dv
            dw = delta * dw
            dec_ip_d = dec_ip_d + dv
            dec_ip_d_2 = dec_ip_d_2 + dw
            x_hat, v_hat, w_hat, gv_e, gv_e_sigma, dist_s = sess.run([x_pred_loss1, x_pred_loss2, x_pred_loss3, x_pred_loss4, x_pred_loss5, loss_2_d],
                                                                     feed_dict={decoder_inputs_loss1: dec_ip,
                                                                                decoder_inputs_loss2: dec_ip_d,
                                                                                decoder_inputs_loss3: dec_ip_d_2,
                                                                                decoder_inputs_loss4: dec_ip_d_e,
                                                                                decoder_inputs_loss5: dec_ip_d_e_sigma,
                                                                                inputs: x})
            # Image-space displacements induced by each latent perturbation.
            v_hat = np.reshape(v_hat - x_hat, [sample_num * sample_num, -1])
            w_hat = np.reshape(w_hat - x_hat, [sample_num * sample_num, -1])
            d_ep = np.reshape(gv_e - x_hat, [sample_num * sample_num, -1])
            d_ep_sigma = np.reshape(gv_e_sigma - x_hat, [sample_num * sample_num, -1])
            # Latent-space vs image-space inner products of the (v, w) pair.
            z_mtr = np.sum(dv * dw, axis=1)
            z_mtrs = np.append(z_mtrs, z_mtr)
            #print(z_mtrs.shape)
            x_mtr = np.sum(v_hat * w_hat, axis=1)
            x_mtrs = np.append(x_mtrs, x_mtr)
            #print(x_mtrs.shape)
            d_eps = np.append( d_eps, np.sum(d_ep * d_ep, axis=1) )
            d_eps_sigma = np.append(d_eps_sigma, np.sum(d_ep_sigma * d_ep_sigma, axis=1))
            dists = np.append(dists, dist_s)
            #print(dists.shape)
        # ---- Scatter + linear fit: image inner product vs latent inner product.
        clf = LinearRegression()
        clf.fit(z_mtrs.reshape(-1,1), x_mtrs)
        prd = clf.predict(z_mtrs.reshape(-1,1))
        print('prd',prd.shape)
        var = np.power((x_mtrs-prd),2)
        var = np.sqrt(np.mean(var))  # RMS residual of the fit
        print('var', var)
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        xmin = np.min(z_mtrs)
        xmax = np.max(z_mtrs)
        ymin = np.min(prd) - 2 * var
        ymax = np.max(prd) + 2 * var
        ax.set_xlim(xmin, xmax)
        ax.set_ylim(ymin, ymax)
        ax.scatter(z_mtrs, x_mtrs, s=2)
        fig.text(0.2, 0.8, 'r=%.4f' % (np.corrcoef(z_mtrs, x_mtrs)[0][1]))
        fig.tight_layout()
        fig_pass = os.path.join(args.checkpoint_dir, '%s_isometoric_mse_%s.png' % (parameter, args.delta))
        plt.savefig(fig_pass)
        # ---- Same analysis against the SSIM-based distance.
        clf = LinearRegression()
        clf.fit(z_mtrs.reshape(-1, 1), dists)
        prd = clf.predict(z_mtrs.reshape(-1, 1))
        var = np.power((dists - prd), 2)
        var = np.sqrt(np.mean(var))
        fig = plt.figure()
        # for d in range(8):
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(z_mtrs, dists, s=2)
        xmin = np.min(z_mtrs)
        xmax = np.max(z_mtrs)
        ymin = np.min(prd) - 2 * var
        ymax = np.max(prd) + 2 * var
        ax.set_xlim(xmin, xmax)
        ax.set_ylim(ymin, ymax)
        fig.tight_layout()
        fig.text(0.2, 0.8, 'r=%.4f' % (np.corrcoef(z_mtrs, dists)[0][1]))
        fig_pass = os.path.join(args.checkpoint_dir, '%s_isometoric_ssim_%s.png' % (parameter, args.delta))
        plt.savefig(fig_pass)
if __name__ == "__main__":
    # Command-line interface: first positional argument selects the action,
    # the remaining flags configure data paths, model size and loss choices.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "command", choices=["train", 'test', 'analy','sample', 'sample_1', 'analy_q','inter'],
        help="What to do: 'train' loads training data and trains (or continues "
             "to train) a new model. 'test' load trained model and test.")
    parser.add_argument(
        "input", nargs="?",
        help="Input filename.")
    parser.add_argument(
        "output", nargs="?",
        help="Output filename.")
    parser.add_argument(
        "--batch_size", type=int, default=64,
        help="Batch size for training.")
    parser.add_argument(
        "--patch_size", type=int, default=64,
        help="Patch size for training.")
    parser.add_argument(
        "--data_set", default='CelebA',
        help="Batch size for training.")
    parser.add_argument(
        "--checkpoint_dir", default="sanity",
        help="Directory where to save/load model checkpoints.")
    parser.add_argument(
        "--img_path", default="../../data/CelebA/centered_celeba_64_10per/",
        help="Directory where to save/load model checkpoints.")
    parser.add_argument(
        "--num_steps", type=int, default=500000,
        help="Train up to this number of steps.")
    parser.add_argument(
        "--save_steps", type=int, default=100000,
        help="Train up to this number of steps.")
    parser.add_argument(
        "--display_steps", type=int, default=100,
        help="save loss for plot every this number of steps.")
    parser.add_argument(
        "--lambda1", type=float, default=1000000,
        help="Lambda for distortion tradeoff.")
    parser.add_argument(
        "--lambda2", type=float, default=1000000,
        help="Lambda for rate tradeoff.")
    parser.add_argument(
        "--loss1", type=str, default='mse',
        help="mse, logmse, ssim, logssim")
    parser.add_argument(
        "--loss2", type=str, default='mse',
        help=" mse or ssim")
    parser.add_argument(
        "--z", type=int, default=256,
        help="bottleneck number.")
    parser.add_argument(
        "--cdim", type=int, default=3,
        help="channel.")
    parser.add_argument(
        "--delta", type=float, default=0.01,
        help="bottleneck number.")
    parser.add_argument(
        "--dim1", type=int, default=8192,
        help="AE layer1.")
    parser.add_argument(
        "--activation", default="softplus")
    # NOTE(review): default 'Treu' looks like a typo for 'True' — left as-is
    # because code elsewhere may compare against this exact string; verify.
    parser.add_argument(
        "--ac2", default='Treu')
    parser.add_argument(
        "--finetune", default="None")
    parser.add_argument(
        "--split", default="False")
    parser.add_argument('-gpu', '--gpu_id',
                        help='GPU device id to use [0]', default=0, type=int)
    args = parser.parse_args()
    # cpu mode
    # A negative gpu_id hides all GPUs from TensorFlow (CPU-only run);
    # otherwise only the selected device is made visible.
    if args.gpu_id < 0:
        os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
        os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    z_num = args.z
    # Dispatch on the selected command.  NOTE(review): the choices list also
    # allows 'test', 'sample_1', 'analy_q' and 'inter', which have no branch
    # here (possibly handled further down / truncated in this view).
    if args.command == 'train':
        train()
    # python ae.py train --checkpoint_dir ae_718 --lambda 10 -gpu 0
    elif args.command == 'analy':
        plot_analysis()
    # if args.input is None or a
    elif args.command == 'sample':
        sample_image_v2()
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.image.ssim",
"numpy.ones",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"numpy.max",
"inputpipeline.get_dataset",
"matplotlib.pyplot.subplots",
"numpy.save",
"tensorflow.round",
"numpy.min",
"tensorflow.squ... | [((5935, 5956), 'tensorflow.round', 'tf.round', (['(image * 255)'], {}), '(image * 255)\n', (5943, 5956), True, 'import tensorflow as tf\n'), ((5968, 6001), 'tensorflow.saturate_cast', 'tf.saturate_cast', (['image', 'tf.uint8'], {}), '(image, tf.uint8)\n', (5984, 6001), True, 'import tensorflow as tf\n'), ((6189, 6236), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""params.log"""'], {}), "(args.checkpoint_dir, 'params.log')\n", (6201, 6236), False, 'import os, sys, glob, scipy.misc\n'), ((6245, 6269), 'os.path.exists', 'os.path.exists', (['log_name'], {}), '(log_name)\n', (6259, 6269), False, 'import os, sys, glob, scipy.misc\n'), ((6536, 6577), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (6560, 6577), True, 'import tensorflow as tf\n'), ((7491, 7556), 'ms_ssim.MultiScaleSSIM', 'ms_ssim.MultiScaleSSIM', (['(X * 255)', '(X_pred * 255)'], {'data_format': '"""NHWC"""'}), "(X * 255, X_pred * 255, data_format='NHWC')\n", (7513, 7556), False, 'import ms_ssim\n'), ((8390, 8417), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_div_loss'], {}), '(kl_div_loss)\n', (8404, 8417), True, 'import tensorflow as tf\n'), ((8621, 8650), 'tensorflow.train.create_global_step', 'tf.train.create_global_step', ([], {}), '()\n', (8648, 8650), True, 'import tensorflow as tf\n'), ((8779, 8816), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (8801, 8816), True, 'import tensorflow as tf\n'), ((8886, 8923), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'train_loss'], {}), "('loss', train_loss)\n", (8903, 8923), True, 'import tensorflow as tf\n'), ((8965, 8999), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse"""', 'mse_loss'], {}), "('mse', mse_loss)\n", (8982, 8999), True, 'import tensorflow as tf\n'), ((9714, 9744), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['X_pred', '(0)', '(1)'], {}), 
'(X_pred, 0, 1)\n', (9730, 9744), True, 'import tensorflow as tf\n'), ((9758, 9779), 'tensorflow.round', 'tf.round', (['(X_rec * 255)'], {}), '(X_rec * 255)\n', (9766, 9779), True, 'import tensorflow as tf\n'), ((9793, 9817), 'tensorflow.cast', 'tf.cast', (['X_rec', 'tf.uint8'], {}), '(X_rec, tf.uint8)\n', (9800, 9817), True, 'import tensorflow as tf\n'), ((9831, 9856), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['X', '(0)', '(1)'], {}), '(X, 0, 1)\n', (9847, 9856), True, 'import tensorflow as tf\n'), ((9870, 9891), 'tensorflow.round', 'tf.round', (['(X_ori * 255)'], {}), '(X_ori * 255)\n', (9878, 9891), True, 'import tensorflow as tf\n'), ((9905, 9929), 'tensorflow.cast', 'tf.cast', (['X_ori', 'tf.uint8'], {}), '(X_ori, tf.uint8)\n', (9912, 9929), True, 'import tensorflow as tf\n'), ((13629, 13683), 'numpy.zeros', 'np.zeros', (['[n * n, args.patch_size, args.patch_size, 3]'], {}), '([n * n, args.patch_size, args.patch_size, 3])\n', (13637, 13683), True, 'import numpy as np\n'), ((13700, 13755), 'numpy.zeros', 'np.zeros', (['[n * args.patch_size, n * args.patch_size, 3]'], {}), '([n * args.patch_size, n * args.patch_size, 3])\n', (13708, 13755), True, 'import numpy as np\n'), ((14449, 14503), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""clic_rdae_ori.png"""'], {}), "(args.checkpoint_dir, 'clic_rdae_ori.png')\n", (14461, 14503), False, 'import os, sys, glob, scipy.misc\n'), ((14670, 14692), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (14682, 14692), True, 'import tensorflow as tf\n'), ((14706, 14747), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['string'], {'channels': '(3)'}), '(string, channels=3)\n', (14727, 14747), True, 'import tensorflow as tf\n'), ((14761, 14787), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (14768, 14787), True, 'import tensorflow as tf\n'), ((14976, 14997), 'glob.glob', 'glob.glob', (['train_path'], {}), '(train_path)\n', (14985, 
14997), False, 'import os, sys, glob, scipy.misc\n'), ((15149, 15196), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_files'], {}), '(train_files)\n', (15183, 15196), True, 'import tensorflow as tf\n'), ((15855, 15885), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x_pred', '(0)', '(1)'], {}), '(x_pred, 0, 1)\n', (15871, 15885), True, 'import tensorflow as tf\n'), ((15900, 15922), 'tensorflow.round', 'tf.round', (['(x_pred * 255)'], {}), '(x_pred * 255)\n', (15908, 15922), True, 'import tensorflow as tf\n'), ((15937, 15962), 'tensorflow.cast', 'tf.cast', (['x_pred', 'tf.uint8'], {}), '(x_pred, tf.uint8)\n', (15944, 15962), True, 'import tensorflow as tf\n'), ((16116, 16138), 'glob.glob', 'glob.glob', (['images_path'], {}), '(images_path)\n', (16125, 16138), False, 'import os, sys, glob, scipy.misc\n'), ((16423, 16482), 'numpy.zeros', 'np.zeros', (['[num * args.patch_size, num * args.patch_size, 3]'], {}), '([num * args.patch_size, num * args.patch_size, 3])\n', (16431, 16482), True, 'import numpy as np\n'), ((20071, 20132), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.z]'], {}), '(tf.float32, [sample_num * sample_num, args.z])\n', (20085, 20132), True, 'import tensorflow as tf\n'), ((20147, 20245), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.patch_size, args.patch_size, cdim]'], {}), '(tf.float32, [sample_num * sample_num, args.patch_size, args.\n patch_size, cdim])\n', (20161, 20245), True, 'import tensorflow as tf\n'), ((20442, 20521), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(sample_num * sample_num, args.z)', 'mean': '(0.0)', 'stddev': '(1.0)'}), '(shape=(sample_num * sample_num, args.z), mean=0.0, stddev=1.0)\n', (20458, 20521), True, 'import tensorflow as tf\n'), ((20630, 20660), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x_pred', '(0)', '(1)'], {}), '(x_pred, 0, 1)\n', (20646, 20660), 
True, 'import tensorflow as tf\n'), ((20677, 20701), 'tensorflow.round', 'tf.round', (['(x_pred_r * 255)'], {}), '(x_pred_r * 255)\n', (20685, 20701), True, 'import tensorflow as tf\n'), ((20718, 20745), 'tensorflow.cast', 'tf.cast', (['x_pred_r', 'tf.uint8'], {}), '(x_pred_r, tf.uint8)\n', (20725, 20745), True, 'import tensorflow as tf\n'), ((20766, 20798), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x_pred_s', '(0)', '(1)'], {}), '(x_pred_s, 0, 1)\n', (20782, 20798), True, 'import tensorflow as tf\n'), ((20817, 20843), 'tensorflow.round', 'tf.round', (['(x_pred_s_r * 255)'], {}), '(x_pred_s_r * 255)\n', (20825, 20843), True, 'import tensorflow as tf\n'), ((20862, 20891), 'tensorflow.cast', 'tf.cast', (['x_pred_s_r', 'tf.uint8'], {}), '(x_pred_s_r, tf.uint8)\n', (20869, 20891), True, 'import tensorflow as tf\n'), ((20968, 21000), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x_pred_2', '(0)', '(1)'], {}), '(x_pred_2, 0, 1)\n', (20984, 21000), True, 'import tensorflow as tf\n'), ((21019, 21045), 'tensorflow.round', 'tf.round', (['(x_pred_2_r * 255)'], {}), '(x_pred_2_r * 255)\n', (21027, 21045), True, 'import tensorflow as tf\n'), ((21064, 21093), 'tensorflow.cast', 'tf.cast', (['x_pred_2_r', 'tf.uint8'], {}), '(x_pred_2_r, tf.uint8)\n', (21071, 21093), True, 'import tensorflow as tf\n'), ((21142, 21196), 'tensorflow.image.ssim', 'tf.image.ssim', (['(inputs * 255)', '(x_pred * 255)'], {'max_val': '(255)'}), '(inputs * 255, x_pred * 255, max_val=255)\n', (21155, 21196), True, 'import tensorflow as tf\n'), ((21328, 21389), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.z]'], {}), '(tf.float32, [sample_num * sample_num, args.z])\n', (21342, 21389), True, 'import tensorflow as tf\n'), ((21418, 21479), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.z]'], {}), '(tf.float32, [sample_num * sample_num, args.z])\n', (21432, 21479), True, 'import tensorflow as tf\n'), 
((21508, 21569), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.z]'], {}), '(tf.float32, [sample_num * sample_num, args.z])\n', (21522, 21569), True, 'import tensorflow as tf\n'), ((21598, 21659), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.z]'], {}), '(tf.float32, [sample_num * sample_num, args.z])\n', (21612, 21659), True, 'import tensorflow as tf\n'), ((21688, 21749), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[sample_num * sample_num, args.z]'], {}), '(tf.float32, [sample_num * sample_num, args.z])\n', (21702, 21749), True, 'import tensorflow as tf\n'), ((22327, 22471), 'ssim_matrix.ssim', 'ssim_matrix.ssim', (['(255 * x_pred_loss1)', '(255 * (x_pred_loss1 - x_pred_loss2))', '(255 * (x_pred_loss1 - x_pred_loss3))'], {'max_val': '(255)', 'compensation': '(1)'}), '(255 * x_pred_loss1, 255 * (x_pred_loss1 - x_pred_loss2), \n 255 * (x_pred_loss1 - x_pred_loss3), max_val=255, compensation=1)\n', (22343, 22471), False, 'import ssim_matrix\n'), ((22646, 22668), 'glob.glob', 'glob.glob', (['images_path'], {}), '(images_path)\n', (22655, 22668), False, 'import os, sys, glob, scipy.misc\n'), ((22845, 22907), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('std_df_%s.npy' % parameter)"], {}), "(args.checkpoint_dir, 'std_df_%s.npy' % parameter)\n", (22857, 22907), False, 'import os, sys, glob, scipy.misc\n'), ((22921, 22938), 'numpy.load', 'np.load', (['std_name'], {}), '(std_name)\n', (22928, 22938), True, 'import numpy as np\n'), ((22958, 23018), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('mean_%s.npy' % parameter)"], {}), "(args.checkpoint_dir, 'mean_%s.npy' % parameter)\n", (22970, 23018), False, 'import os, sys, glob, scipy.misc\n'), ((23033, 23051), 'numpy.load', 'np.load', (['mean_name'], {}), '(mean_name)\n', (23040, 23051), True, 'import numpy as np\n'), ((23071, 23136), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', 
"('std_index_%s.npy' % parameter)"], {}), "(args.checkpoint_dir, 'std_index_%s.npy' % parameter)\n", (23083, 23136), False, 'import os, sys, glob, scipy.misc\n'), ((23156, 23174), 'numpy.load', 'np.load', (['std_iname'], {}), '(std_iname)\n', (23163, 23174), True, 'import numpy as np\n'), ((23299, 23341), 'numpy.zeros', 'np.zeros', (['[sample_num, sample_num, args.z]'], {}), '([sample_num, sample_num, args.z])\n', (23307, 23341), True, 'import numpy as np\n'), ((23360, 23402), 'numpy.zeros', 'np.zeros', (['[sample_num, sample_num, args.z]'], {}), '([sample_num, sample_num, args.z])\n', (23368, 23402), True, 'import numpy as np\n'), ((23671, 23744), 'numpy.zeros', 'np.zeros', (['[sample_num * args.patch_size, sample_num * args.patch_size, 3]'], {}), '([sample_num * args.patch_size, sample_num * args.patch_size, 3])\n', (23679, 23744), True, 'import numpy as np\n'), ((23770, 23843), 'numpy.zeros', 'np.zeros', (['[sample_num * args.patch_size, sample_num * args.patch_size, 3]'], {}), '([sample_num * args.patch_size, sample_num * args.patch_size, 3])\n', (23778, 23843), True, 'import numpy as np\n'), ((28726, 28744), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (28742, 28744), False, 'from sklearn.linear_model import LinearRegression\n'), ((28872, 28897), 'numpy.power', 'np.power', (['(x_mtrs - prd)', '(2)'], {}), '(x_mtrs - prd, 2)\n', (28880, 28897), True, 'import numpy as np\n'), ((28964, 28976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28974, 28976), True, 'import matplotlib.pyplot as plt\n'), ((29026, 29040), 'numpy.min', 'np.min', (['z_mtrs'], {}), '(z_mtrs)\n', (29032, 29040), True, 'import numpy as np\n'), ((29053, 29067), 'numpy.max', 'np.max', (['z_mtrs'], {}), '(z_mtrs)\n', (29059, 29067), True, 'import numpy as np\n'), ((29346, 29437), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('%s_isometoric_mse_%s.png' % (parameter, args.delta))"], {}), "(args.checkpoint_dir, '%s_isometoric_mse_%s.png' 
% (parameter,\n args.delta))\n", (29358, 29437), False, 'import os, sys, glob, scipy.misc\n'), ((29439, 29460), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_pass'], {}), '(fig_pass)\n', (29450, 29460), True, 'import matplotlib.pyplot as plt\n'), ((29474, 29492), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (29490, 29492), False, 'from sklearn.linear_model import LinearRegression\n'), ((29593, 29617), 'numpy.power', 'np.power', (['(dists - prd)', '(2)'], {}), '(dists - prd, 2)\n', (29601, 29617), True, 'import numpy as np\n'), ((29664, 29676), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29674, 29676), True, 'import matplotlib.pyplot as plt\n'), ((29788, 29802), 'numpy.min', 'np.min', (['z_mtrs'], {}), '(z_mtrs)\n', (29794, 29802), True, 'import numpy as np\n'), ((29815, 29829), 'numpy.max', 'np.max', (['z_mtrs'], {}), '(z_mtrs)\n', (29821, 29829), True, 'import numpy as np\n'), ((30069, 30161), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('%s_isometoric_ssim_%s.png' % (parameter, args.delta))"], {}), "(args.checkpoint_dir, '%s_isometoric_ssim_%s.png' % (parameter,\n args.delta))\n", (30081, 30161), False, 'import os, sys, glob, scipy.misc\n'), ((30163, 30184), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_pass'], {}), '(fig_pass)\n', (30174, 30184), True, 'import matplotlib.pyplot as plt\n'), ((30231, 30310), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (30254, 30310), False, 'import argparse, shutil\n'), ((1387, 1416), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""analysis"""'], {}), "('analysis')\n", (1404, 1416), True, 'import tensorflow as tf\n'), ((3813, 3864), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""synthesis"""'], {'reuse': 'tf.AUTO_REUSE'}), "('synthesis', reuse=tf.AUTO_REUSE)\n", (3830, 3864), True, 'import tensorflow 
as tf\n'), ((6048, 6083), 'os.path.exists', 'os.path.exists', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (6062, 6083), False, 'import os, sys, glob, scipy.misc\n'), ((6140, 6172), 'os.makedirs', 'os.makedirs', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (6151, 6172), False, 'import os, sys, glob, scipy.misc\n'), ((6324, 6343), 'os.remove', 'os.remove', (['log_name'], {}), '(log_name)\n', (6333, 6343), False, 'import os, sys, glob, scipy.misc\n'), ((6799, 6835), 'inputpipeline.get_dataset', 'inputpipeline.get_dataset', (['data_glob'], {}), '(data_glob)\n', (6824, 6835), False, 'import inputpipeline\n'), ((7426, 7470), 'tensorflow.squared_difference', 'tf.squared_difference', (['(255 * X)', '(255 * X_pred)'], {}), '(255 * X, 255 * X_pred)\n', (7447, 7470), True, 'import tensorflow as tf\n'), ((8303, 8314), 'tensorflow.exp', 'tf.exp', (['var'], {}), '(var)\n', (8309, 8314), True, 'import tensorflow as tf\n'), ((8341, 8370), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kl_div_loss', '(1)'], {}), '(kl_div_loss, 1)\n', (8354, 8370), True, 'import tensorflow as tf\n'), ((9033, 9075), 'tensorflow.identity', 'tf.identity', (['train_loss'], {'name': '"""train_loss"""'}), "(train_loss, name='train_loss')\n", (9044, 9075), True, 'import tensorflow as tf\n'), ((9132, 9172), 'tensorflow.identity', 'tf.identity', (['msssim_loss'], {'name': '"""ms-ssim"""'}), "(msssim_loss, name='ms-ssim')\n", (9143, 9172), True, 'import tensorflow as tf\n'), ((9325, 9374), 'tensorflow.train.StopAtStepHook', 'tf.train.StopAtStepHook', ([], {'last_step': 'args.num_steps'}), '(last_step=args.num_steps)\n', (9348, 9374), True, 'import tensorflow as tf\n'), ((9385, 9419), 'tensorflow.train.NanTensorHook', 'tf.train.NanTensorHook', (['train_loss'], {}), '(train_loss)\n', (9407, 9419), True, 'import tensorflow as tf\n'), ((9430, 9489), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', (['logged_tensors'], {'every_n_secs': '(60)'}), '(logged_tensors, 
every_n_secs=60)\n', (9456, 9489), True, 'import tensorflow as tf\n'), ((9599, 9696), 'tensorflow.train.CheckpointSaverHook', 'tf.train.CheckpointSaverHook', ([], {'save_steps': 'args.save_steps', 'checkpoint_dir': 'args.checkpoint_dir'}), '(save_steps=args.save_steps, checkpoint_dir=\n args.checkpoint_dir)\n', (9627, 9696), True, 'import tensorflow as tf\n'), ((10179, 10225), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'hooks': 'hooks'}), '(hooks=hooks)\n', (10212, 10225), True, 'import tensorflow as tf\n'), ((13532, 13554), 'glob.glob', 'glob.glob', (['images_path'], {}), '(images_path)\n', (13541, 13554), False, 'import os, sys, glob, scipy.misc\n'), ((16549, 16561), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (16559, 16561), True, 'import tensorflow as tf\n'), ((16614, 16676), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': 'args.checkpoint_dir'}), '(checkpoint_dir=args.checkpoint_dir)\n', (16640, 16676), True, 'import tensorflow as tf\n'), ((17878, 17901), 'numpy.power', 'np.power', (['std_sorted', '(2)'], {}), '(std_sorted, 2)\n', (17886, 17901), True, 'import numpy as np\n'), ((17974, 18010), 'numpy.arange', 'np.arange', (['(0)', 'var_sorted.shape[0]', '(1)'], {}), '(0, var_sorted.shape[0], 1)\n', (17983, 18010), True, 'import numpy as np\n'), ((18033, 18047), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18045, 18047), True, 'import matplotlib.pyplot as plt\n'), ((18210, 18277), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('variance_df_%s.png' % parameter)"], {}), "(args.checkpoint_dir, 'variance_df_%s.png' % parameter)\n", (18222, 18277), False, 'import os, sys, glob, scipy.misc\n'), ((18289, 18310), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_name'], {}), '(fig_name)\n', (18300, 18310), True, 'import matplotlib.pyplot as plt\n'), ((18333, 18395), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('std_df_%s.npy' 
% parameter)"], {}), "(args.checkpoint_dir, 'std_df_%s.npy' % parameter)\n", (18345, 18395), False, 'import os, sys, glob, scipy.misc\n'), ((18407, 18430), 'numpy.save', 'np.save', (['std_name', 'stds'], {}), '(std_name, stds)\n', (18414, 18430), True, 'import numpy as np\n'), ((18454, 18519), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('std_index_%s.npy' % parameter)"], {}), "(args.checkpoint_dir, 'std_index_%s.npy' % parameter)\n", (18466, 18519), False, 'import os, sys, glob, scipy.misc\n'), ((18531, 18560), 'numpy.save', 'np.save', (['std_iname', 'std_index'], {}), '(std_iname, std_index)\n', (18538, 18560), True, 'import numpy as np\n'), ((18584, 18644), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('mean_%s.npy' % parameter)"], {}), "(args.checkpoint_dir, 'mean_%s.npy' % parameter)\n", (18596, 18644), False, 'import os, sys, glob, scipy.misc\n'), ((18656, 18681), 'numpy.save', 'np.save', (['mean_name', 'means'], {}), '(mean_name, means)\n', (18663, 18681), True, 'import numpy as np\n'), ((18807, 18824), 'numpy.zeros', 'np.zeros', (['[n_dim]'], {}), '([n_dim])\n', (18815, 18824), True, 'import numpy as np\n'), ((19089, 19102), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (19095, 19102), True, 'import numpy as np\n'), ((19124, 19137), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (19130, 19137), True, 'import numpy as np\n'), ((19525, 19541), 'numpy.array', 'np.array', (['w_init'], {}), '(w_init)\n', (19533, 19541), True, 'import numpy as np\n'), ((19954, 19965), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (19962, 19965), True, 'import numpy as np\n'), ((19967, 19978), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (19975, 19978), True, 'import numpy as np\n'), ((21232, 21269), 'tensorflow.squared_difference', 'tf.squared_difference', (['inputs', 'x_pred'], {}), '(inputs, x_pred)\n', (21253, 21269), True, 'import tensorflow as tf\n'), ((24337, 24349), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (24347, 
24349), True, 'import tensorflow as tf\n'), ((24402, 24464), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': 'args.checkpoint_dir'}), '(checkpoint_dir=args.checkpoint_dir)\n', (24428, 24464), True, 'import tensorflow as tf\n'), ((25328, 25399), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('%s_sample_traverse.png' % parameter)"], {}), "(args.checkpoint_dir, '%s_sample_traverse.png' % parameter)\n", (25340, 25399), False, 'import os, sys, glob, scipy.misc\n'), ((25482, 25551), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('%s_traverse_top9.png' % parameter)"], {}), "(args.checkpoint_dir, '%s_traverse_top9.png' % parameter)\n", (25494, 25551), False, 'import os, sys, glob, scipy.misc\n'), ((25699, 25711), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (25708, 25711), True, 'import numpy as np\n'), ((25730, 25742), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (25739, 25742), True, 'import numpy as np\n'), ((25760, 25772), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (25769, 25772), True, 'import numpy as np\n'), ((25818, 25830), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (25827, 25830), True, 'import numpy as np\n'), ((25854, 25866), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (25863, 25866), True, 'import numpy as np\n'), ((28916, 28928), 'numpy.mean', 'np.mean', (['var'], {}), '(var)\n', (28923, 28928), True, 'import numpy as np\n'), ((29082, 29093), 'numpy.min', 'np.min', (['prd'], {}), '(prd)\n', (29088, 29093), True, 'import numpy as np\n'), ((29116, 29127), 'numpy.max', 'np.max', (['prd'], {}), '(prd)\n', (29122, 29127), True, 'import numpy as np\n'), ((29639, 29651), 'numpy.mean', 'np.mean', (['var'], {}), '(var)\n', (29646, 29651), True, 'import numpy as np\n'), ((29844, 29855), 'numpy.min', 'np.min', (['prd'], {}), '(prd)\n', (29850, 29855), True, 'import numpy as np\n'), ((29878, 29889), 'numpy.max', 'np.max', (['prd'], {}), '(prd)\n', (29884, 
29889), True, 'import numpy as np\n'), ((1432, 1460), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_0"""'], {}), "('layer_0')\n", (1449, 1460), True, 'import tensorflow as tf\n'), ((1694, 1722), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_1"""'], {}), "('layer_1')\n", (1711, 1722), True, 'import tensorflow as tf\n'), ((1956, 1984), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_2"""'], {}), "('layer_2')\n", (1973, 1984), True, 'import tensorflow as tf\n'), ((2218, 2246), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_3"""'], {}), "('layer_3')\n", (2235, 2246), True, 'import tensorflow as tf\n'), ((2269, 2393), 'tensorflow_compression.SignalConv2D', 'tfc.SignalConv2D', (['num_filters', '(5, 5)'], {'corr': '(True)', 'strides_down': '(2)', 'padding': '"""same_zeros"""', 'use_bias': '(False)', 'activation': 'None'}), "(num_filters, (5, 5), corr=True, strides_down=2, padding=\n 'same_zeros', use_bias=False, activation=None)\n", (2285, 2393), True, 'import tensorflow_compression as tfc\n'), ((2476, 2504), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reshape"""'], {}), "('reshape')\n", (2493, 2504), True, 'import tensorflow as tf\n'), ((2528, 2553), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['tensor'], {}), '(tensor)\n', (2545, 2553), True, 'import tensorflow as tf\n'), ((4676, 4704), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reshape"""'], {}), "('reshape')\n", (4693, 4704), True, 'import tensorflow as tf\n'), ((4755, 4798), 'tensorflow.reshape', 'tf.reshape', (['tensor', '[-1, 4, 4, num_filters]'], {}), '(tensor, [-1, 4, 4, num_filters])\n', (4765, 4798), True, 'import tensorflow as tf\n'), ((4815, 4843), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_0"""'], {}), "('layer_0')\n", (4832, 4843), True, 'import tensorflow as tf\n'), ((5088, 5116), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_1"""'], {}), "('layer_1')\n", (5105, 5116), True, 
'import tensorflow as tf\n'), ((5361, 5389), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_2"""'], {}), "('layer_2')\n", (5378, 5389), True, 'import tensorflow as tf\n'), ((5639, 5667), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer_3"""'], {}), "('layer_3')\n", (5656, 5667), True, 'import tensorflow as tf\n'), ((5690, 5801), 'tensorflow_compression.SignalConv2D', 'tfc.SignalConv2D', (['(3)', '(9, 9)'], {'corr': '(False)', 'strides_up': '(2)', 'padding': '"""same_zeros"""', 'use_bias': '(True)', 'activation': 'None'}), "(3, (9, 9), corr=False, strides_up=2, padding='same_zeros',\n use_bias=True, activation=None)\n", (5706, 5801), True, 'import tensorflow_compression as tfc\n'), ((7636, 7668), 'tensorflow.squared_difference', 'tf.squared_difference', (['X', 'X_pred'], {}), '(X, X_pred)\n', (7657, 7668), True, 'import tensorflow as tf\n'), ((8285, 8300), 'tensorflow.square', 'tf.square', (['mean'], {}), '(mean)\n', (8294, 8300), True, 'import tensorflow as tf\n'), ((10048, 10099), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (10065, 10099), True, 'import tensorflow as tf\n'), ((15440, 15496), 'tensorflow.random_crop', 'tf.random_crop', (['x', '(args.patch_size, args.patch_size, 3)'], {}), '(x, (args.patch_size, args.patch_size, 3))\n', (15454, 15496), True, 'import tensorflow as tf\n'), ((16378, 16404), 'math.sqrt', 'math.sqrt', (['args.batch_size'], {}), '(args.batch_size)\n', (16387, 16404), False, 'import math\n'), ((17639, 17657), 'numpy.std', 'np.std', (['zs'], {'axis': '(0)'}), '(zs, axis=0)\n', (17645, 17657), True, 'import numpy as np\n'), ((17687, 17706), 'numpy.mean', 'np.mean', (['zs'], {'axis': '(0)'}), '(zs, axis=0)\n', (17694, 17706), True, 'import numpy as np\n'), ((17791, 17804), 'numpy.sort', 'np.sort', (['stds'], {}), '(stds)\n', (17798, 17804), True, 'import numpy as np\n'), ((17832, 17848), 'numpy.argsort', 'np.argsort', 
(['stds'], {}), '(stds)\n', (17842, 17848), True, 'import numpy as np\n'), ((19223, 19246), 'numpy.arccos', 'np.arccos', (['cos_alpha[0]'], {}), '(cos_alpha[0])\n', (19232, 19246), True, 'import numpy as np\n'), ((25886, 25928), 'numpy.ones', 'np.ones', (['(sample_num * sample_num, args.z)'], {}), '((sample_num * sample_num, args.z))\n', (25893, 25928), True, 'import numpy as np\n'), ((25999, 26071), 'numpy.zeros', 'np.zeros', (['[sample_num * sample_num, args.patch_size, args.patch_size, 3]'], {}), '([sample_num * sample_num, args.patch_size, args.patch_size, 3])\n', (26007, 26071), True, 'import numpy as np\n'), ((27905, 27961), 'numpy.reshape', 'np.reshape', (['(v_hat - x_hat)', '[sample_num * sample_num, -1]'], {}), '(v_hat - x_hat, [sample_num * sample_num, -1])\n', (27915, 27961), True, 'import numpy as np\n'), ((27983, 28039), 'numpy.reshape', 'np.reshape', (['(w_hat - x_hat)', '[sample_num * sample_num, -1]'], {}), '(w_hat - x_hat, [sample_num * sample_num, -1])\n', (27993, 28039), True, 'import numpy as np\n'), ((28062, 28117), 'numpy.reshape', 'np.reshape', (['(gv_e - x_hat)', '[sample_num * sample_num, -1]'], {}), '(gv_e - x_hat, [sample_num * sample_num, -1])\n', (28072, 28117), True, 'import numpy as np\n'), ((28144, 28205), 'numpy.reshape', 'np.reshape', (['(gv_e_sigma - x_hat)', '[sample_num * sample_num, -1]'], {}), '(gv_e_sigma - x_hat, [sample_num * sample_num, -1])\n', (28154, 28205), True, 'import numpy as np\n'), ((28229, 28252), 'numpy.sum', 'np.sum', (['(dv * dw)'], {'axis': '(1)'}), '(dv * dw, axis=1)\n', (28235, 28252), True, 'import numpy as np\n'), ((28275, 28299), 'numpy.append', 'np.append', (['z_mtrs', 'z_mtr'], {}), '(z_mtrs, z_mtr)\n', (28284, 28299), True, 'import numpy as np\n'), ((28357, 28386), 'numpy.sum', 'np.sum', (['(v_hat * w_hat)'], {'axis': '(1)'}), '(v_hat * w_hat, axis=1)\n', (28363, 28386), True, 'import numpy as np\n'), ((28409, 28433), 'numpy.append', 'np.append', (['x_mtrs', 'x_mtr'], {}), '(x_mtrs, x_mtr)\n', 
(28418, 28433), True, 'import numpy as np\n'), ((28655, 28679), 'numpy.append', 'np.append', (['dists', 'dist_s'], {}), '(dists, dist_s)\n', (28664, 28679), True, 'import numpy as np\n'), ((2614, 2642), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (2631, 2642), True, 'import tensorflow as tf\n'), ((2746, 2777), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.z'], {}), '(tensor, args.z)\n', (2761, 2777), True, 'import tensorflow as tf\n'), ((3926, 3954), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (3943, 3954), True, 'import tensorflow as tf\n'), ((4058, 4102), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', '(4 * 4 * num_filters)'], {}), '(tensor, 4 * 4 * num_filters)\n', (4073, 4102), True, 'import tensorflow as tf\n'), ((7732, 7833), 'ssim_matrix.ssim', 'ssim_matrix.ssim', (['(X * 255)', '((X - X_pred) * 255)', 'X_pred'], {'max_val': '(255)', 'mode': '"""train"""', 'compensation': '(1)'}), "(X * 255, (X - X_pred) * 255, X_pred, max_val=255, mode=\n 'train', compensation=1)\n", (7748, 7833), False, 'import ssim_matrix\n'), ((8019, 8057), 'tensorflow.squared_difference', 'tf.squared_difference', (['X_pred', 'X_pred2'], {}), '(X_pred, X_pred2)\n', (8040, 8057), True, 'import tensorflow as tf\n'), ((9565, 9587), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (9585, 9587), True, 'import tensorflow as tf\n'), ((11769, 11828), 'numpy.zeros', 'np.zeros', (['[num * args.patch_size, num * args.patch_size, 3]'], {}), '([num * args.patch_size, num * args.patch_size, 3])\n', (11777, 11828), True, 'import numpy as np\n'), ((11856, 11915), 'numpy.zeros', 'np.zeros', (['[num * args.patch_size, num * args.patch_size, 3]'], {}), '([num * args.patch_size, num * args.patch_size, 3])\n', (11864, 11915), True, 'import numpy as np\n'), ((12399, 12484), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('rec_%s_%s.png' % (parameter, 
train_count + 1))"], {}), "(args.checkpoint_dir, 'rec_%s_%s.png' % (parameter, train_count +\n 1))\n", (12411, 12484), False, 'import os, sys, glob, scipy.misc\n'), ((12562, 12585), 'metric.Psnr', 'Psnr', (['ori_img', 'show_img'], {}), '(ori_img, show_img)\n', (12566, 12585), False, 'from metric import Psnr, msssim\n'), ((12613, 12638), 'metric.msssim', 'msssim', (['ori_img', 'show_img'], {}), '(ori_img, show_img)\n', (12619, 12638), False, 'from metric import Psnr, msssim\n'), ((16775, 16791), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (16789, 16791), True, 'import tensorflow as tf\n'), ((18889, 18910), 'numpy.random.rand', 'np.random.rand', (['n_dim'], {}), '(n_dim)\n', (18903, 18910), True, 'import numpy as np\n'), ((19163, 19180), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (19177, 19180), True, 'import numpy as np\n'), ((19556, 19573), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (19570, 19573), True, 'import numpy as np\n'), ((19731, 19741), 'numpy.cos', 'np.cos', (['gm'], {}), '(gm)\n', (19737, 19741), True, 'import numpy as np\n'), ((24563, 24579), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (24577, 24579), True, 'import tensorflow as tf\n'), ((28511, 28538), 'numpy.sum', 'np.sum', (['(d_ep * d_ep)'], {'axis': '(1)'}), '(d_ep * d_ep, axis=1)\n', (28517, 28538), True, 'import numpy as np\n'), ((28591, 28630), 'numpy.sum', 'np.sum', (['(d_ep_sigma * d_ep_sigma)'], {'axis': '(1)'}), '(d_ep_sigma * d_ep_sigma, axis=1)\n', (28597, 28630), True, 'import numpy as np\n'), ((1631, 1640), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {}), '()\n', (1638, 1640), True, 'import tensorflow_compression as tfc\n'), ((1893, 1902), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {}), '()\n', (1900, 1902), True, 'import tensorflow_compression as tfc\n'), ((2155, 2164), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {}), '()\n', (2162, 2164), True, 'import tensorflow_compression as tfc\n'), ((2684, 2718), 
'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.dim1'], {}), '(tensor, args.dim1)\n', (2699, 2718), True, 'import tensorflow as tf\n'), ((2841, 2869), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (2858, 2869), True, 'import tensorflow as tf\n'), ((3001, 3032), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.z'], {}), '(tensor, args.z)\n', (3016, 3032), True, 'import tensorflow as tf\n'), ((3092, 3123), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.z'], {}), '(tensor, args.z)\n', (3107, 3123), True, 'import tensorflow as tf\n'), ((3996, 4030), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.dim1'], {}), '(tensor, args.dim1)\n', (4011, 4030), True, 'import tensorflow as tf\n'), ((4166, 4194), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (4183, 4194), True, 'import tensorflow as tf\n'), ((5013, 5034), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {'inverse': '(True)'}), '(inverse=True)\n', (5020, 5034), True, 'import tensorflow_compression as tfc\n'), ((5286, 5307), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {'inverse': '(True)'}), '(inverse=True)\n', (5293, 5307), True, 'import tensorflow_compression as tfc\n'), ((5564, 5585), 'tensorflow_compression.GDN', 'tfc.GDN', ([], {'inverse': '(True)'}), '(inverse=True)\n', (5571, 5585), True, 'import tensorflow_compression as tfc\n'), ((8129, 8242), 'ssim_matrix.ssim', 'ssim_matrix.ssim', (['(X_pred * 255)', '((X_pred - X_pred2) * 255)', 'X_pred2'], {'max_val': '(255)', 'mode': '"""train"""', 'compensation': '(1)'}), "(X_pred * 255, (X_pred - X_pred2) * 255, X_pred2, max_val=\n 255, mode='train', compensation=1)\n", (8145, 8242), False, 'import ssim_matrix\n'), ((11712, 11739), 'math.sqrt', 'math.sqrt', (['rec_img.shape[0]'], {}), '(rec_img.shape[0])\n', (11721, 11739), False, 'import math\n'), ((17323, 17341), 'numpy.vstack', 'np.vstack', (['(zs, z)'], {}), '((zs, 
z))\n', (17332, 17341), True, 'import numpy as np\n'), ((17372, 17398), 'numpy.vstack', 'np.vstack', (['(means_, mean_)'], {}), '((means_, mean_))\n', (17381, 17398), True, 'import numpy as np\n'), ((19714, 19724), 'numpy.sin', 'np.sin', (['gm'], {}), '(gm)\n', (19720, 19724), True, 'import numpy as np\n'), ((26943, 26962), 'numpy.exp', 'np.exp', (['(var_op_ / 2)'], {}), '(var_op_ / 2)\n', (26949, 26962), True, 'import numpy as np\n'), ((29270, 29297), 'numpy.corrcoef', 'np.corrcoef', (['z_mtrs', 'x_mtrs'], {}), '(z_mtrs, x_mtrs)\n', (29281, 29297), True, 'import numpy as np\n'), ((30018, 30044), 'numpy.corrcoef', 'np.corrcoef', (['z_mtrs', 'dists'], {}), '(z_mtrs, dists)\n', (30029, 30044), True, 'import numpy as np\n'), ((2912, 2946), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.dim1'], {}), '(tensor, args.dim1)\n', (2927, 2946), True, 'import tensorflow as tf\n'), ((3263, 3277), 'tensorflow.shape', 'tf.shape', (['mean'], {}), '(mean)\n', (3271, 3277), True, 'import tensorflow as tf\n'), ((3588, 3616), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (3605, 3616), True, 'import tensorflow as tf\n'), ((3644, 3675), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.z'], {}), '(tensor, args.z)\n', (3659, 3675), True, 'import tensorflow as tf\n'), ((4237, 4271), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', 'args.dim1'], {}), '(tensor, args.dim1)\n', (4252, 4271), True, 'import tensorflow as tf\n'), ((4457, 4501), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', '(4 * 4 * num_filters)'], {}), '(tensor, 4 * 4 * num_filters)\n', (4472, 4501), True, 'import tensorflow as tf\n'), ((4561, 4589), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (4578, 4589), True, 'import tensorflow as tf\n'), ((4617, 4661), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', '(4 * 4 * num_filters)'], {}), '(tensor, 4 * 4 * num_filters)\n', 
(4632, 4661), True, 'import tensorflow as tf\n'), ((19391, 19409), 'numpy.prod', 'np.prod', (['sin_alpha'], {}), '(sin_alpha)\n', (19398, 19409), True, 'import numpy as np\n'), ((19765, 19775), 'numpy.cos', 'np.cos', (['gm'], {}), '(gm)\n', (19771, 19775), True, 'import numpy as np\n'), ((19793, 19803), 'numpy.sin', 'np.sin', (['gm'], {}), '(gm)\n', (19799, 19803), True, 'import numpy as np\n'), ((19827, 19837), 'numpy.sin', 'np.sin', (['gm'], {}), '(gm)\n', (19833, 19837), True, 'import numpy as np\n'), ((19855, 19865), 'numpy.cos', 'np.cos', (['gm'], {}), '(gm)\n', (19861, 19865), True, 'import numpy as np\n'), ((3446, 3463), 'tensorflow.exp', 'tf.exp', (['(sigma / 2)'], {}), '(sigma / 2)\n', (3452, 3463), True, 'import tensorflow as tf\n'), ((4358, 4402), 'tensorflow.layers.dense', 'tf.layers.dense', (['tensor', '(4 * 4 * num_filters)'], {}), '(tensor, 4 * 4 * num_filters)\n', (4373, 4402), True, 'import tensorflow as tf\n'), ((17251, 17269), 'numpy.exp', 'np.exp', (['(var_ * 0.5)'], {}), '(var_ * 0.5)\n', (17257, 17269), True, 'import numpy as np\n'), ((19465, 19488), 'numpy.prod', 'np.prod', (['sin_alpha[0:i]'], {}), '(sin_alpha[0:i])\n', (19472, 19488), True, 'import numpy as np\n'), ((12956, 12977), 'numpy.log10', 'np.log10', (['(1 - msssim_)'], {}), '(1 - msssim_)\n', (12964, 12977), True, 'import numpy as np\n'), ((13274, 13295), 'numpy.log10', 'np.log10', (['(1 - msssim_)'], {}), '(1 - msssim_)\n', (13282, 13295), True, 'import numpy as np\n'), ((17459, 17477), 'numpy.exp', 'np.exp', (['(var_ * 0.5)'], {}), '(var_ * 0.5)\n', (17465, 17477), True, 'import numpy as np\n')] |
import collections
import datetime
import itertools
import numpy as np
import pandas as pd
import pytz
from .datum import Tag, Field
from .exceptions import MissingFieldError, MissingTagError
def is_tag(args):
    """Predicate for ``filter``: does the (attname, value) pair hold a Tag?"""
    candidate = args[1]
    return isinstance(candidate, Tag)
def is_field(args):
    """Predicate for ``filter``: does the (attname, value) pair hold a Field?"""
    candidate = args[1]
    return isinstance(candidate, Field)
class MeasurementMeta(type):
    """
    Metaclass that turns ``Tag``/``Field`` class attributes into
    dataframe-backed columns.

    At class-creation time it collects every Tag and Field declared on the
    class body (defaulting each datum's ``db_name`` to its attribute name),
    removes the raw datum objects from the class namespace, registers them
    (merged with any inherited ones) in the ordered ``tags_by_attname`` /
    ``fields_by_attname`` mappings, and binds one property per datum so
    instances read/write the underlying dataframe column through
    ``_get_column`` / ``_set_column``.
    """
    def __new__(mcs, name, bases, attrs):
        # Collect declared tags; a tag without an explicit db_name is stored
        # in the database under its attribute name.
        tags = {}
        for key, tag in filter(is_tag, attrs.items()):
            if tag.db_name is None:
                tag.db_name = key
            tags[key] = tag
        # Same collection pass for fields.
        fields = {}
        for key, field in filter(is_field, attrs.items()):
            if field.db_name is None:
                field.db_name = key
            fields[key] = field
        # Strip the raw datum objects so they don't shadow the properties
        # bound below.
        for key in itertools.chain(tags.keys(), fields.keys()):
            del attrs[key]
        # Make the fields and tags aware of their attribute names (these will be
        # utilized as field and tag names within the database)
        # for attname, datum in {**tags, **fields}.items():
        #     datum.name = attname
        new_class = type.__new__(mcs, name, bases, attrs)
        # Build a mapping of field and tag attribute names
        new_class._register_tags(tags)
        new_class._register_fields(fields)
        # Bind tags and fields as properties on instances
        for attname in new_class.tags_and_fields:
            setattr(new_class, attname, property(
                new_class.getter_factory(attname),
                new_class.setter_factory(attname)
            ))
        return new_class
    def _register_tags(cls, tags):
        """Merge *tags* with inherited tags into an ordered, sorted mapping.

        Inherited entries win over same-named new ones.
        """
        try:
            base_tags = cls.tags_by_attname
        except AttributeError:
            # First class in the hierarchy: nothing inherited yet.
            base_tags = {}
        cls.tags_by_attname = collections.OrderedDict([
            (key, base_tags.get(key, tags.get(key)))
            for key in sorted(itertools.chain(base_tags.keys(), tags.keys()))
        ])
    def _register_fields(cls, fields):
        """Merge *fields* with inherited fields into an ordered, sorted mapping.

        Mirrors :meth:`_register_tags`; inherited entries win.
        """
        try:
            base_fields = cls.fields_by_attname
        except AttributeError:
            base_fields = {}
        cls.fields_by_attname = collections.OrderedDict([
            (key, base_fields.get(key, fields.get(key)))
            for key in sorted(
                itertools.chain(base_fields.keys(), fields.keys())
            )
        ])
    @staticmethod
    def getter_factory(name):
        """Build a property getter that reads dataframe column *name*."""
        def getter(instance):
            return instance._get_column(name)
        return getter
    @staticmethod
    def setter_factory(name):
        """Build a property setter that writes dataframe column *name*."""
        def setter(instance, value):
            instance._set_column(name, value)
        return setter
    @property
    def tags_and_fields(cls):
        """Ordered union of tags and fields, sorted by attribute name.

        NOTE(review): the ``hasattr`` guard checks the literal string
        "__tags_and_fields", but the assignment below is name-mangled to
        ``_MeasurementMeta__tags_and_fields``, so the guard never matches and
        the mapping is recomputed on every access. That keeps subclasses
        correct (a cached value would be inherited stale), but it is not the
        cache the code appears to intend — confirm before "fixing".
        """
        if not hasattr(cls, "__tags_and_fields"):
            cls.__tags_and_fields = collections.OrderedDict([
                (
                    key,
                    cls.fields_by_attname.get(
                        key, cls.tags_by_attname.get(key)
                    )
                )
                for key in sorted(
                    itertools.chain(
                        cls.fields_by_attname.keys(),
                        cls.tags_by_attname.keys()
                    )
                )
            ])
        return cls.__tags_and_fields
class Measurement(metaclass=MeasurementMeta):
    """
    Wrapper around a `pandas.DataFrame`, which provides an application layer
    representation of time-series data stored in an influxdb instance.
    Provides utilities to serialize/deserialize the dataframe into payloads
    which can be sent/retrieved from an influxdb instance.

    Tags and fields are declared on subclasses as ``Tag``/``Field`` class
    attributes; the metaclass turns each into a dataframe-backed property.
    """
    @classmethod
    def from_json(cls, content):
        """
        Deserializes a JSON response from an influxDB client, into an
        instance of this class
        :param content: decoded JSON payload received from an influxdb client
        :return: An instance of this class
        :raises ValueError: if no series matches this measurement's name, or
            a column cannot be mapped to a declared tag/field
        """
        # Normalize the three possible payload shapes ("results" envelope,
        # bare "series" list, or a single series dict) into a list of series.
        series = []
        if "results" in content:
            for s in [result["series"] for result in content["results"] if "series" in result]:
                series.extend(s)
        elif "series" in content:
            series = [s for s in content["series"]]
        elif "name" in content:
            series = [content]
        for s in series:
            if s.get("name", None) == cls.__name__:
                # Translate database column names back into attribute names.
                column_names = ["time"]
                for column_name in s["columns"]:
                    if column_name == "time":
                        continue
                    for name, datum in cls.tags_and_fields.items():
                        if datum.db_name == column_name:
                            column_names.append(name)
                            break
                    else:
                        raise ValueError("Unrecognized column name {}".format(column_name))
                df = pd.DataFrame.from_records(
                    s["values"],
                    columns=column_names
                )
                return cls(**{
                    column: df[column] for column in column_names
                })
        raise ValueError("Invalid JSON")

    def __init__(self, time=None, **kwargs):
        """
        :param time: optional sequence of timestamps; coerced to
            ``datetime64[ns]`` and stored in the ``time`` column
        :param kwargs: column data keyed by tag/field attribute name; missing
            columns default to None
        """
        items = [
            (name, kwargs.get(name, None))
            for name in self.tags
        ] + [
            (name, kwargs.get(name, None))
            for name in self.fields
        ] + [
            ('time', np.array(time, dtype='datetime64[ns]') if time is not None else None)
        ]
        # BUGFIX: `pd.DataFrame.from_items` was deprecated in pandas 0.23 and
        # removed in 1.0. An OrderedDict preserves the tags -> fields -> time
        # column order that to_line_protocol() relies on.
        self._data_frame = pd.DataFrame(collections.OrderedDict(items))

    def __len__(self):
        """Number of rows (data points) in the measurement."""
        return len(self.data_frame)

    @property
    def tags(self):
        """Ordered mapping of tag attribute names to ``Tag`` instances."""
        return self.__class__.tags_by_attname

    @property
    def fields(self):
        """Ordered mapping of field attribute names to ``Field`` instances."""
        return self.__class__.fields_by_attname

    @property
    def data_frame(self):
        """
        Returns the underlying pandas dataframe wrapped by this instance
        :return: A `pandas.DataFrame` instance
        """
        return self._data_frame

    @property
    def rec_array(self):
        """Underlying data as a numpy record array (index excluded)."""
        return self._data_frame.to_records(index=False)

    @property
    def time(self):
        """The ``time`` column values as a numpy array."""
        return self._get_column("time")

    @time.setter
    def time(self, time):
        if time is not None:
            self._set_column("time", np.array(time, dtype='datetime64[ns]'))
        else:
            self._set_column("time", None)

    def _get_column(self, name):
        # Backing getter for the properties bound by the metaclass.
        return self.data_frame[name].values

    def _set_column(self, name, value):
        # Backing setter for the properties bound by the metaclass.
        self.data_frame[name] = value

    # Serializing
    def to_line_protocol(self):
        """
        Serializes the underlying dataframe into the InfluxDB line protocol
        :return: A string; one "<measurement>,<tags> <fields> <timestamp>"
            line per dataframe row, with None entries omitted
        :raises MissingTagError: if a required tag has any null value
        :raises MissingFieldError: if a required field has any null value
        """
        # Create the measurement+tags prototype
        tags = []
        tags_prototype = []
        for attname, tag in self.tags.items():
            if tag.required:
                if self.data_frame[attname].isnull().values.any():
                    raise MissingTagError(
                        "Required tag \"{}\" not provided".format(attname)
                    )
            tags.append(tag)
            tags_prototype.append("{tag_name}=%s".format(
                tag_name=tag.db_name
            ))
        # Create the fields prototype
        fields = []
        fields_prototype = []
        for attname, field in self.fields.items():
            # First, do a check for missing required fields
            if field.required:
                if self.data_frame[attname].isnull().values.any():
                    raise MissingFieldError(
                        "Required field \"{}\" not provided".format(attname)
                    )
            fields.append(field)
            fields_prototype.append("{field_name}=%s".format(
                field_name=field.db_name
            ))
        # Generate the line protocol string from the above prototypes.
        # Row layout matches the column order fixed in __init__: the first
        # num_tags entries are tags, the rest are fields then time (zip with
        # `fields` stops before the trailing time entry).
        num_tags = len(tags)
        return "\n".join([
            " ".join([
                ','.join([self.__class__.__name__] + [
                    prototype % tag.format(item)
                    for tag, prototype, item in zip(
                        tags,
                        tags_prototype,
                        row[0:num_tags]
                    )
                    if item is not None
                ])
            ] + [
                ",".join([
                    prototype % field.format(item)
                    for field, prototype, item in zip(
                        fields,
                        fields_prototype,
                        row[num_tags:]
                    )
                    if item is not None
                ])
            ] + [
                str(row.time.value) if row.time else ""
            ]) for row in self.data_frame.itertuples(index=False)
        ])

    # Querying
    # Maps "__<op>" query-keyword suffixes to InfluxQL comparison operators.
    COMPARATORS = dict(
        lt='<',
        lte="<=",
        eq="=",
        neq="<>",
        gte=">=",
        gt=">"
    )

    @staticmethod
    def _format_condition_value(value):
        """Render a Python value as an InfluxQL literal."""
        if isinstance(value, str):
            return "'{}'".format(value)
        elif value is None:
            return "null"
        elif isinstance(value, datetime.datetime):
            # InfluxDB expects RFC3339 timestamps in UTC.
            return value.astimezone(pytz.UTC).strftime(
                "'%Y-%m-%dT%H:%M:%S.%fZ'"
            )
        else:
            return str(value)

    @classmethod
    def _format_condition(cls, argument, value):
        """
        Render a Django-style ``name__op`` keyword (default op: eq) as an
        InfluxQL condition string.
        :raises ValueError: on an unrecognized comparison operator
        """
        try:
            name, compare_type = argument.split("__")
        except ValueError:
            # No "__" suffix: plain equality comparison.
            name, compare_type = argument, "eq"
        try:
            return " ".join([
                name,
                cls.COMPARATORS[compare_type],
                cls._format_condition_value(value)
            ])
        except KeyError as e:
            raise ValueError("Unrecognized comparison operator {}".format(e))

    @classmethod
    def make_query_string(cls, *, limit=None, offset=None, database=None,
                          retention_policy=None, **conditions):
        """
        Build an InfluxQL SELECT statement for this measurement.
        :param limit: optional row limit
        :param offset: optional row offset
        :param database: optional database name (only used together with
            retention_policy)
        :param retention_policy: optional retention policy name
        :param conditions: WHERE conditions given as ``name[__op]=value``
            keyword arguments
        :return: The query string
        """
        if database and retention_policy:
            measurement_name = "{database}.{retention_policy}.{measurement}".format(
                database=database,
                retention_policy=retention_policy,
                measurement=cls.__name__
            )
        else:
            measurement_name = cls.__name__
        query_string = "SELECT {parameters} FROM {measurement_name}".format(
            parameters=",".join(datum.db_name for datum in cls.tags_and_fields.values()),
            measurement_name=measurement_name
        )
        if conditions:
            query_string += " WHERE {conditions}".format(
                conditions=" AND ".join(
                    cls._format_condition(argument, value)
                    for argument, value in conditions.items()
                )
            )
        if limit is not None:
            query_string += " LIMIT {}".format(int(limit))
        if offset is not None:
            query_string += " OFFSET {}".format(int(offset))
        return query_string
| [
"pandas.DataFrame.from_items",
"numpy.array",
"pandas.DataFrame.from_records"
] | [((5459, 5489), 'pandas.DataFrame.from_items', 'pd.DataFrame.from_items', (['items'], {}), '(items)\n', (5482, 5489), True, 'import pandas as pd\n'), ((4805, 4865), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["s['values']"], {'columns': 'column_names'}), "(s['values'], columns=column_names)\n", (4830, 4865), True, 'import pandas as pd\n'), ((6215, 6253), 'numpy.array', 'np.array', (['time'], {'dtype': '"""datetime64[ns]"""'}), "(time, dtype='datetime64[ns]')\n", (6223, 6253), True, 'import numpy as np\n'), ((5352, 5390), 'numpy.array', 'np.array', (['time'], {'dtype': '"""datetime64[ns]"""'}), "(time, dtype='datetime64[ns]')\n", (5360, 5390), True, 'import numpy as np\n')] |
import os
import argparse
import logging
import nmrex
import numpy as np
import copy
def transform(rcsb):
    """Save each NMR model separately, plus mean/median coordinate structures.

    :param rcsb: entry identifier supplied by ``nmrex.db.apply``
        (unused here; ``nmrex.entry.structure()`` reads the current entry)
    """
    structure = nmrex.entry.structure()

    # save separate models
    for model in structure.child_list:
        single = copy.deepcopy(structure)
        single.child_list = [model]
        nmrex.entry.save_structure(single, 'model{}'.format(model.get_id()))

    # Stack atom coordinates: shape (n_models, n_atoms, 3).
    coord = np.array([
        np.array([atom.coord for atom in model.get_atoms()])
        for model in structure.child_list
    ])

    # save mean and median atom coordinates (duplicated save logic factored
    # into _save_aggregate)
    _save_aggregate(structure, coord.mean(axis=0), 'mean')
    _save_aggregate(structure, np.median(coord, 0), 'median')


def _save_aggregate(structure, coords, name):
    """Save a single-model copy of *structure* whose atoms carry *coords*.

    :param structure: multi-model structure to copy (first model is kept)
    :param coords: (n_atoms, 3) array of replacement coordinates
    :param name: output name passed to ``nmrex.entry.save_structure``
    """
    new = copy.deepcopy(structure)
    new.child_list = [new.child_list[0]]
    for i, atom in enumerate(new.child_list[0].get_atoms()):
        atom.coord = coords[i]
    nmrex.entry.save_structure(new, name)
def main():
    """CLI entry point: apply ``transform`` to every entry in a database."""
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s - %(message)s')

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('db', metavar='PATH', type=str,
                            help='path to data base')
    arg_parser.add_argument('-p', metavar='N', type=int, dest='proc', default=1,
                            help='number of processes')
    options = arg_parser.parse_args()

    db_path = os.path.expanduser(options.db)
    nmrex.db.apply(transform, db_path, proc=options.proc)


if __name__ == '__main__':
    main()
"copy.deepcopy",
"argparse.ArgumentParser",
"logging.basicConfig",
"nmrex.entry.save_structure",
"numpy.median",
"nmrex.entry.structure",
"os.path.expanduser"
] | [((124, 147), 'nmrex.entry.structure', 'nmrex.entry.structure', ([], {}), '()\n', (145, 147), False, 'import nmrex\n'), ((630, 649), 'numpy.median', 'np.median', (['coord', '(0)'], {}), '(coord, 0)\n', (639, 649), True, 'import numpy as np\n'), ((661, 685), 'copy.deepcopy', 'copy.deepcopy', (['structure'], {}), '(structure)\n', (674, 685), False, 'import copy\n'), ((907, 931), 'copy.deepcopy', 'copy.deepcopy', (['structure'], {}), '(structure)\n', (920, 931), False, 'import copy\n'), ((1164, 1241), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s - %(message)s"""'}), "(level=logging.INFO, format='%(levelname)s - %(message)s')\n", (1183, 1241), False, 'import logging\n'), ((1280, 1305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1303, 1305), False, 'import argparse\n'), ((229, 253), 'copy.deepcopy', 'copy.deepcopy', (['structure'], {}), '(structure)\n', (242, 253), False, 'import copy\n'), ((856, 895), 'nmrex.entry.save_structure', 'nmrex.entry.save_structure', (['new', '"""mean"""'], {}), "(new, 'mean')\n", (882, 895), False, 'import nmrex\n'), ((1104, 1145), 'nmrex.entry.save_structure', 'nmrex.entry.save_structure', (['new', '"""median"""'], {}), "(new, 'median')\n", (1130, 1145), False, 'import nmrex\n'), ((1606, 1633), 'os.path.expanduser', 'os.path.expanduser', (['args.db'], {}), '(args.db)\n', (1624, 1633), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pickle
import json
from pylab import plot, show, savefig, xlim, figure, ylim, legend, boxplot, setp, axes
from i3Deep import utils
import pandas as pd
import os
import seaborn as sns
import matplotlib
def plot_dice_box_plots():
    """Render grouped box plots of Dice scores (one subplot per task, grouped
    by class and uncertainty predictor) and print per-method summary stats.

    Reads the module-level configuration (base_path, tasks, task_classes,
    uncertainty_methods, width, gridspec_pos, colors, ...) set in __main__.
    """
    sns.set_theme(style="whitegrid")
    style = sns.axes_style()
    fig = plt.figure(constrained_layout=True, figsize=(12 * 1.5, 2.4 * 1.5))
    gs = fig.add_gridspec(1, width)

    # Per-method mean Dice scores, accumulated across all tasks/classes.
    comparison = {"tta": [], "mcdo": [], "ensemble": []}

    for task_idx, task in enumerate(tasks):
        # Give each task's subplot its own background tint.
        style["axes.facecolor"] = colors[task_idx]
        sns.set_theme(style=style)

        per_method_frames = []
        for method_idx, uncertainty_method in enumerate(uncertainty_methods):
            filename = (base_path + task + "/refinement_" + set +
                        "/eval_results/processed/dice/my_method_" +
                        uncertainty_method + ".csv")
            if not os.path.isfile(filename):
                continue
            df = pd.read_csv(filename)
            df = df.iloc[:, 1:]  # drop the index column written by to_csv

            # Print mean/std per class, ignoring NaN entries.
            for col_idx, column in enumerate(df):
                scores = df[column].to_numpy()
                scores = scores[~np.isnan(scores)]
                mean = np.mean(scores)
                std = np.std(scores)
                print("Task: {}, Class: {}, Method: {}, Mean: {}, Std: {}".format(task, task_classes[task_idx][col_idx], uncertainty_method.replace('\n', ' '), round(mean, 3), round(std, 3)))
                comparison[uncertainty_method].append(mean)

            # Long format: one (Class, Dice) row per score, labeled by method.
            df = df.stack().reset_index()
            df = df.iloc[:, 1:]
            df = df.assign(Predictor=uncertainty_names[method_idx])
            df = df.rename(columns={"level_1": "Class", 0: "Dice"})
            per_method_frames.append(df)

        df = pd.concat(per_method_frames)
        # Replace numeric class labels with human-readable names.
        for class_idx, class_name in enumerate(task_classes[task_idx]):
            df["Class"].replace({str(class_idx + 1): class_name}, inplace=True)
        print(df.head())

        axs = fig.add_subplot(gs[gridspec_pos[task_idx][0][0]:gridspec_pos[task_idx][1][0], gridspec_pos[task_idx][0][1]:gridspec_pos[task_idx][1][1]])
        axs = sns.boxplot(x="Class", y="Dice", hue="Predictor", data=df, ax=axs)
        axs.set_ylim(0.4, 1)
        axs.set_title("{}".format(task_names[task_idx]), fontsize=16)

    plt.savefig(base_path + "Evaluation/Results/" + set + "/uncertainty_ablation/uncertainty_ablation.png", dpi=150, bbox_inches='tight')
    plt.clf()

    for key in comparison.keys():
        mean = np.mean(comparison[key])
        print("{}: {}".format(key, mean))
if __name__ == '__main__':
    # Root directory of the nnU-Net raw-data datasets.
    base_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/"
    # Guided-segmentation tasks to evaluate, their display names, and the
    # foreground classes each task predicts (parallel lists).
    tasks = ["Task002_BrainTumour_guided", "Task008_Pancreas_guided", "Task070_guided_all_public_ggo"]
    task_names = ["Brain Tumor", "Pancreas", "COVID-19"]
    task_classes = [["Edema", "Non-Enh. Tumor", "Enh. Tumor"], ["Pancreas", "Cancer"], ["GGO"]]
    # Uncertainty methods (CSV filename suffixes) and their legend labels.
    uncertainty_methods = ["tta", "mcdo", "ensemble"]
    uncertainty_names = ["TTA", "MC Dropout", "Deep Ensembles"]
    # Dataset split to evaluate.  NOTE(review): shadows the builtin set().
    set = "val"
    # Total gridspec column count; each task gets a slice roughly
    # proportional to its number of classes.
    width = 6*3
    print("width: ", width)
    task_widths = [int(round(width*0.5)-1), int(round(width*0.666*0.5)), int(round(width*0.333*0.5))]
    print("task_widths: ", task_widths)
    # Per-task [[row_start, col_start], [row_end, col_end]] gridspec slices.
    gridspec_pos = [[[0, 0], [1, task_widths[0]-1]], [[0, task_widths[0]], [1, task_widths[0]+task_widths[1]-1]], [[0, task_widths[0]+task_widths[1]], [1, width]]]
    print("gridspec_pos: ", gridspec_pos)
    # Per-task subplot background colors.
    colors = ["#FFEEEE", "#EEEEFF", "#EEFFF0"]
    plot_dice_box_plots()
"seaborn.set_theme",
"seaborn.axes_style",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"numpy.std",
"numpy.isnan",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"numpy.mean",
"os.path.isfile",
"pandas.concat",
"matplotlib.pyplot.savefig"
] | [((292, 324), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (305, 324), True, 'import seaborn as sns\n'), ((337, 353), 'seaborn.axes_style', 'sns.axes_style', ([], {}), '()\n', (351, 353), True, 'import seaborn as sns\n'), ((364, 430), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)', 'figsize': '(12 * 1.5, 2.4 * 1.5)'}), '(constrained_layout=True, figsize=(12 * 1.5, 2.4 * 1.5))\n', (374, 430), True, 'import matplotlib.pyplot as plt\n'), ((2337, 2479), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(base_path + 'Evaluation/Results/' + set +\n '/uncertainty_ablation/uncertainty_ablation.png')"], {'dpi': '(150)', 'bbox_inches': '"""tight"""'}), "(base_path + 'Evaluation/Results/' + set +\n '/uncertainty_ablation/uncertainty_ablation.png', dpi=150, bbox_inches=\n 'tight')\n", (2348, 2479), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2484), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2482, 2484), True, 'import matplotlib.pyplot as plt\n'), ((609, 635), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': 'style'}), '(style=style)\n', (622, 635), True, 'import seaborn as sns\n'), ((1736, 1769), 'pandas.concat', 'pd.concat', (['df_uncertainty_methods'], {}), '(df_uncertainty_methods)\n', (1745, 1769), True, 'import pandas as pd\n'), ((2125, 2191), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Class"""', 'y': '"""Dice"""', 'hue': '"""Predictor"""', 'data': 'df', 'ax': 'axs'}), "(x='Class', y='Dice', hue='Predictor', data=df, ax=axs)\n", (2136, 2191), True, 'import seaborn as sns\n'), ((2534, 2558), 'numpy.mean', 'np.mean', (['comparison[key]'], {}), '(comparison[key])\n', (2541, 2558), True, 'import numpy as np\n'), ((965, 986), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (976, 986), True, 'import pandas as pd\n'), ((897, 921), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (911, 921), False, 'import os\n'), 
((1184, 1199), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1191, 1199), True, 'import numpy as np\n'), ((1222, 1236), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (1228, 1236), True, 'import numpy as np\n'), ((1143, 1159), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (1151, 1159), True, 'import numpy as np\n')] |
"""
unemloyment_v1
Gym module implementing the Finnish social security including earnings-related components,
e.g., the unemployment benefit
korjauksia julkaistuun versioon:
- kansaeläkkeen yhteensovitus huomioitu väärin, joten kansaneläke huomioitiin liian pienenä, ei kuitenkaan vaikuttanut
takuueläkkeeseen, joten tulokset eivät välttämättä paljon muutu (tarkasta)
"""
import math
import gym
from gym import spaces, logger, utils, error
from gym.utils import seeding
import numpy as np
import fin_benefits
import random
class UnemploymentLargeEnv(gym.Env):
"""
Description:
The Finnish Unemployment Pension Scheme
Source:
This environment corresponds to the environment of the Finnish Social Security
Observation:
Type: Box(10)
Num Observation Min Max
0 Employment status 0 12
1 Ryhmä 0 6
2 time-in-state 0 2
3 Accrued old-age pension 0 inf
4 Paid old-age pension 0 inf
5 Salary 0 inf
6 Age 25 69
7 Työssä-olo-Ehto 0 28/12
8 Työuran kesto 0 50
9 Työstä pois (aika) 0 100
10 Irtisanottu (jos valittu) 0 1
Employment states:
Type: Int
Num State
0 Unemployed
1 Employed
2 Retired
3 Disabled
4 Työttömyysputki
5 Äitiysvapaa
6 Isyysvapaa
7 Kotihoidontuki
8 Vanhuuseläke+Osa-aikatyö
9 Vanhuuseläke+Kokoaikatyö
10 Osa-aikatyö
11 Työvoiman ulkopuolella, ei tukia
12 Opiskelija
13 Työmarkkinatuki
14 Armeijassa
15 Kuollut (jos kuolleisuus mukana)
Actions:
These really depend on the state (see function step)
Type: Discrete(4)
Num Action
0 Stay in the current state
1 Switch to the other state (work -> unemployed; unemployed -> work)
2 Retire if >=min_retirementage
3 Some other transition
Reward:
Reward is the sum of wage and benefit for every step taken, including the termination step
Starting State:
Starting state in unemployed at age 20
Step:
Each step corresponds to three months in time
Episode Termination:
Age 70
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self,**kwargs):
super().__init__()
self.ansiopvraha_toe=0.5 # = 6kk
self.karenssi_kesto=0.25 #0.25 # = 3kk
self.isyysvapaa_kesto=0.25 # = 3kk
self.aitiysvapaa_kesto=0.75 # = 9kk ml vanhempainvapaa
self.min_tyottputki_ika=61 # vuotta. Ikä, jonka täytyttyä pääsee putkeen
self.tyohistoria_tyottputki=5 # vuotta. vähimmäistyöura putkeenpääsylle
self.kht_kesto=2.0 # kotihoidontuen kesto 2 v
self.tyohistoria_vaatimus=3.0 # 3 vuotta
self.tyohistoria_vaatimus500=5.0 # 5 vuotta
self.ansiopvraha_kesto500=500
self.minage_500=58 # minimi-ikä 500 päivälle
self.ansiopvraha_kesto400=400
self.ansiopvraha_kesto300=300
self.min_salary=1000 # julkaistut laskelmat olettavat tämän
#self.min_salary=10000 # julkaistujen laskelmien jälkeen
self.timestep=0.25
self.max_age=71
self.min_age=20
self.min_retirementage=63.5 #65
self.max_retirementage=68 # 70
self.elinaikakerroin=0.925 # etk:n arvio 1962 syntyneille
reaalinen_palkkojenkasvu=1.016
self.include_mort=False # onko kuolleisuus mukana laskelmissa
self.include_preferencenoise=False # onko työllisyyspreferenssissä hajonta mukana
#self.include300=True # onko työuran kesto mukana laskelmissa
self.perustulo=False # onko Kelan perustulo laskelmissa
self.randomness=True # onko stokastiikka mukana
self.mortstop=True # pysäytä kuolleisuuden jälkeen
self.include_putki=True # työttömyysputki mukana
self.include_pinkslip=True # irtisanomiset mukana
self.use_sigma_reduction=True # kumpi palkkareduktio
gamma=0.92
# etuuksien laskentavuosi
self.year=2018
self.plotdebug=False # tulostetaanko rivi riviltä tiloja
if 'kwargs' in kwargs:
kwarg=kwargs['kwargs']
else:
kwarg={}
for key, value in kwarg.items():
if key=='step':
if value is not None:
self.timestep=value
elif key=='mortstop':
if value is not None:
self.mortstop=value
elif key=='gamma':
if value is not None:
gamma=value
elif key=='use_sigma_reduction':
if value is not None:
self.use_sigma_reduction=value
elif key=='min_age':
if value is not None:
self.min_age=value
elif key=='max_age':
if value is not None:
self.max_age=value
elif key=='min_retirementage':
if value is not None:
self.min_retirementage=value
elif key=='max_retirementage':
if value is not None:
self.max_retirementage=value
elif key=='mortality':
if value is not None:
self.include_mort=value
if key=='ansiopvraha_toe':
if value is not None:
self.ansiopvraha_toe=value
elif key=='ansiopvraha_kesto500':
if value is not None:
self.ansiopvraha_kesto500=value
elif key=='ansiopvraha_kesto400':
if value is not None:
self.ansiopvraha_kesto400=value
elif key=='ansiopvraha_kesto300':
if value is not None:
self.ansiopvraha_kesto300=value
elif key=='perustulo':
if value is not None:
self.perustulo=value
elif key=='randomness':
if value is not None:
self.randomness=value
elif key=='pinkslip':
if value is not None:
self.include_pinkslip=value
elif key=='karenssi_kesto':
if value is not None:
self.karenssi_kesto=value
elif key=='include_putki':
if value is not None:
self.include_putki=value
elif key=='include_preferencenoise':
if value is not None:
self.include_preferencenoise=value
elif key=='plotdebug':
if value is not None:
self.plotdebug=value
elif key=='year':
if value is not None:
self.year=value
# ei skaalata!
#self.ansiopvraha_kesto400=self.ansiopvraha_kesto400/(12*21.5)
#self.ansiopvraha_kesto300=self.ansiopvraha_kesto300/(12*21.5)
#self.plotdebug=True
print('plotdebug',self.plotdebug)
self.gamma=gamma**self.timestep # discounting
self.palkkakerroin=(0.8*1+0.2*1.0/reaalinen_palkkojenkasvu)**self.timestep
self.elakeindeksi=(0.2*1+0.8*1.0/reaalinen_palkkojenkasvu)**self.timestep
self.kelaindeksi=(1.0/reaalinen_palkkojenkasvu)**self.timestep
# paljonko työstä poissaolo vaikuttaa palkkaan
self.salary_const=0.05*self.timestep
self.salary_const_up=0.015*self.timestep # työssäolo palauttaa ansioita tämän verran vuodessa
self.salary_const_student=0.025*self.timestep # opiskelu nostaa leikkausta tämän verran vuodessa
# karttumaprosentit
self.acc=0.015*self.timestep
self.acc_over_52=0.019*self.timestep
self.acc_family=1.15*self.acc
self.acc_family_over_52=1.15*self.acc_over_52
self.acc_unemp=0.75*self.acc
self.acc_unemp_over_52=0.75*self.acc_over_52
# parametrejä
self.max_toe=28/12
self.accbasis_kht=719.0*12
self.accbasis_tmtuki=1413.75*12
self.n_age=self.max_age-self.min_age+1
# male low income, male mid, male high, female low, female mid, female high income
self.n_groups=6
# käytetäänkö exp/log-muunnosta tiloissa vai ei?
self.log_transform=False
self.eps=1e-20
self.salary=np.zeros(self.max_age+1)
# ryhmäkohtaisia muuttujia
#self.disability_intensity=self.get_disability_rate()*self.timestep # tn tulla työkyvyttömäksi
self.disability_intensity=self.get_eff_disab_rate()*self.timestep # tn tulla työkyvyttömäksi
if self.include_pinkslip:
self.pinkslip_intensity=np.zeros(6)
if True:
self.pinkslip_intensity[0:3]=0.07*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, miehet
self.pinkslip_intensity[3:6]=0.03*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, naiset
else:
self.pinkslip_intensity[0]=0.08*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, miehet
self.pinkslip_intensity[1]=0.05*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, miehet
self.pinkslip_intensity[2]=0.03*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, miehet
self.pinkslip_intensity[3]=0.05*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, naiset
self.pinkslip_intensity[4]=0.03*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, naiset
self.pinkslip_intensity[5]=0.02*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, naiset
else:
self.pinkslip_intensity=0 # .05*self.timestep # todennäköisyys tulla irtisanotuksi vuodessa, skaalaa!
self.birth_intensity=self.get_birth_rate()*self.timestep # todennäköisyys saada lapsi, skaalaa!
self.mort_intensity=self.get_mort_rate()*self.timestep # todennäköisyys , skaalaa!
self.student_inrate,self.student_outrate=self.get_student_rate()
self.student_inrate=self.student_inrate*self.timestep
self.student_outrate=self.student_outrate*self.timestep
self.army_outrate=self.get_army_rate()*self.timestep
self.outsider_inrate,self.outsider_outrate=self.get_outsider_rate()
self.outsider_inrate=self.outsider_inrate*self.timestep
self.outsider_outrate=self.outsider_outrate*self.timestep
self.npv,self.npv0=self.comp_npv()
self.set_state_limits()
if self.include_mort: # and not self.mortstop:
if self.include_mort and self.mortstop:
print('Mortality included, stopped')
else:
print('Mortality included, not stopped')
self.n_empl=16 # state of employment, 0,1,2,3,4
self.state_encode=self.state_encode_mort
else:
print('No mortality included')
self.n_empl=15 # state of employment, 0,1,2,3,4
self.state_encode=self.state_encode_nomort
self.n_actions=4 # valittavien toimenpiteiden määrä
self.action_space = spaces.Discrete(4)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
print(self.use_sigma_reduction)
if self.use_sigma_reduction:
self.update_wage_reduction=self.update_wage_reduction_sigma
else:
self.update_wage_reduction=self.update_wage_reduction_baseline
#self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.log_utility=self.log_utility_randomness
if self.perustulo:
self.ben = fin_benefits.BasicIncomeBenefits(**kwargs)
else:
self.ben = fin_benefits.Benefits(**kwargs)
self.ben.set_year(self.year)
self.explain()
def get_n_states(self):
    """Return (number of employment states, number of actions)."""
    n_states = self.n_empl
    n_acts = self.n_actions
    return n_states, n_acts
def get_lc_version(self):
    """Return the version number of the life-cycle model's episode-state encoding."""
    return 1
def comp_npv(self):
    """
    Estimate, per group, the expected number of timesteps a person spends
    on the old-age pension after max_age. Very simple backward recursion
    over the mortality intensities.

    Returns:
        npv  -- discounted expected timestep count (uses self.gamma)
        npv0 -- undiscounted expected timestep count
    """
    discounted = np.zeros(self.n_groups)
    undiscounted = np.zeros(self.n_groups)
    # ages walked backwards from 100 (exclusive of max_age); invariant over groups
    ages = np.arange(100, self.max_age, -self.timestep)
    for g in range(self.n_groups):
        disc_sum = 1
        plain_sum = 1
        for x in ages:
            m = self.mort_intensity[int(np.floor(x)), g]
            # survive with prob (1-m): live one more step plus the future tail
            disc_sum = m * 1 + (1 - m) * (1 + self.gamma * disc_sum)
            plain_sum = m * 1 + (1 - m) * (1 + plain_sum)
        discounted[g] = disc_sum
        undiscounted[g] = plain_sum

    if self.plotdebug:
        print('npv: {}', format(discounted))

    return discounted, undiscounted
def comp_benefits(self,wage,old_wage,pension,employment_state,time_in_state,ika=25,
                  irtisanottu=0,tyohistoria=0,retq=True):
    '''
    Call the fin_benefits module to compute benefits, with taxation taken
    into account.

    Computes the value of benefits when
        wage is the current wage
        old_wage is the previous wage
        pension is the amount of pension paid
        employment_state is the labour-market state: employed (1) /
            unemployed (0) / retired (2), etc. (see the class docstring)
        time_in_state is the time spent in the current state (years)
        ika is the person's age
        irtisanottu is nonzero if the person was laid off (skips the waiting period)
        tyohistoria is the length of the working history (years)
        retq: if True, return (net income, benefit breakdown dict), else net income only

    The parameter dict p is filled with defaults for a single adult and then
    overridden per employment state before calling self.ben.laske_tulot(p).
    '''
    p={}

    # defaults: single adult, no children, no benefits
    p['perustulo']=self.perustulo
    p['opiskelija']=0
    p['toimeentulotuki_vahennys']=0
    p['ika']=ika
    p['lapsia']=0
    p['lapsia_paivahoidossa']=0
    p['aikuisia']=1
    p['veromalli']=0
    p['kuntaryhma']=3
    p['lapsia_kotihoidontuella']=0
    p['lapsia_alle_3v']=0
    p['tyottomyyden_kesto']=1
    p['puoliso_tyottomyyden_kesto']=10
    p['isyysvapaalla']=0
    p['aitiysvapaalla']=0
    p['kotihoidontuella']=0
    p['lapsia_alle_kouluikaisia']=0
    p['tyoelake']=0
    p['elakkeella']=0
    p['sairauspaivarahalla']=0
    if employment_state==1: # employed
        p['tyoton']=0 # could be "unemployed" in the sense of entitlement to an adjusted daily allowance
        p['t']=wage/12
        p['vakiintunutpalkka']=wage/12
        p['saa_ansiopaivarahaa']=0
    elif employment_state==0: # unemployed, on earnings-related daily allowance
        if ika<65:
            #self.render()
            p['tyoton']=1
            p['t']=0
            p['vakiintunutpalkka']=old_wage/12
            if irtisanottu>0:
                p['tyottomyyden_kesto']=12*21.5*time_in_state
            else:
                p['tyottomyyden_kesto']=12*21.5*time_in_state-self.karenssi_kesto # could be done more precisely
            # entitlement: duration within 400-day (with history), 300-day, or 500-day
            # (older + long history) limits, AND either laid off or past the waiting period
            if ((tyohistoria>=self.tyohistoria_vaatimus and p['tyottomyyden_kesto']<=self.ansiopvraha_kesto400) \
                or (p['tyottomyyden_kesto']<=self.ansiopvraha_kesto300) \
                or (ika>=self.minage_500 and tyohistoria>=self.tyohistoria_vaatimus500 and p['tyottomyyden_kesto']<=self.ansiopvraha_kesto500)) \
                and (irtisanottu>0 or time_in_state>=self.karenssi_kesto): # waiting period applies unless laid off
                p['saa_ansiopaivarahaa']=1
            else:
                p['saa_ansiopaivarahaa']=0
        else:
            p['tyoton']=0 # no entitlement to unemployment benefit at 65+
            p['t']=0
            p['vakiintunutpalkka']=0
            p['saa_ansiopaivarahaa']=0
    elif employment_state==13: # labour-market subsidy (työmarkkinatuki)
        if ika<65:
            p['tyoton']=1
            p['t']=0
            p['vakiintunutpalkka']=0
            p['tyottomyyden_kesto']=12*21.5*time_in_state
            p['saa_ansiopaivarahaa']=0
        else:
            p['tyoton']=0 # no entitlement to unemployment benefit at 65+
            p['t']=0
            p['vakiintunutpalkka']=0
            p['saa_ansiopaivarahaa']=0
    elif employment_state==3: # disability pension
        p['tyoton']=0
        p['saa_ansiopaivarahaa']=0
        p['t']=0
        p['vakiintunutpalkka']=0
        p['elakkeella']=1
        #p['elake']=pension
    elif employment_state==4: # unemployment pipeline to retirement
        if ika<65:
            p['tyoton']=1
            p['t']=0
            p['vakiintunutpalkka']=old_wage/12
            p['saa_ansiopaivarahaa']=1
            p['tyottomyyden_kesto']=12*21.5*time_in_state
        else:
            p['tyoton']=0 # no entitlement to unemployment benefit at 65+
            p['t']=0
            p['vakiintunutpalkka']=0
            p['saa_ansiopaivarahaa']=0
    elif employment_state==5: # earnings-related parental leave, mothers
        p['aitiysvapaalla']=1
        p['tyoton']=0
        p['aitiysvapaa_kesto']=0
        p['t']=0
        p['vakiintunutpalkka']=old_wage/12
        p['saa_ansiopaivarahaa']=1
    elif employment_state==6: # earnings-related parental leave, fathers
        p['isyysvapaalla']=1
        p['tyoton']=0
        p['t']=0
        p['vakiintunutpalkka']=old_wage/12
        p['saa_ansiopaivarahaa']=1
    elif employment_state==7: # child home-care leave
        p['kotihoidontuella']=1
        p['lapsia']=1
        p['tyoton']=0
        p['lapsia_alle_3v']=1
        p['kotihoidontuki_kesto']=time_in_state
        p['lapsia_kotihoidontuella']=p['lapsia']
        p['t']=0
        p['vakiintunutpalkka']=old_wage/12
        p['saa_ansiopaivarahaa']=0
    elif employment_state==2: # old-age pension
        if ika>self.min_retirementage:
            p['tyoton']=0
            p['saa_ansiopaivarahaa']=0
            p['t']=0
            p['vakiintunutpalkka']=0
            p['elakkeella']=1
            p['tyoelake']=pension/12
        else:
            p['tyoton']=0
            p['saa_ansiopaivarahaa']=0
            p['t']=0
            p['vakiintunutpalkka']=0
            p['elakkeella']=0
            p['tyoelake']=0
    elif employment_state==8: # old-age pension + work
        # NOTE(review): the class docstring labels state 8 as pension + part-time
        # and 9 as pension + full-time, opposite to the comments here — confirm
        p['tyoton']=0
        p['saa_ansiopaivarahaa']=0
        p['t']=wage/12
        p['vakiintunutpalkka']=0
        p['elakkeella']=1
        p['tyoelake']=pension/12
    elif employment_state==9: # old-age pension + part-time work
        p['tyoton']=0
        p['saa_ansiopaivarahaa']=0
        p['t']=wage/12
        p['vakiintunutpalkka']=0
        p['elakkeella']=1
        p['tyoelake']=pension/12
    elif employment_state==10: # part-time work
        p['tyoton']=0
        p['saa_ansiopaivarahaa']=0
        p['t']=wage/12
        p['vakiintunutpalkka']=0
    elif employment_state==11: # outside the labour force, no benefits
        p['tyoton']=0
        p['toimeentulotuki_vahennys']=0 # assumed not to have refused work
        p['saa_ansiopaivarahaa']=0
        p['t']=0
        p['vakiintunutpalkka']=0
    elif employment_state==12: # student
        p['tyoton']=0
        p['opiskelija']=1
        p['saa_ansiopaivarahaa']=0
        p['t']=0
        p['vakiintunutpalkka']=0
    elif employment_state==14: # in the army; fix! does not affect the results though
        p['tyoton']=0
        p['opiskelija']=1
        p['saa_ansiopaivarahaa']=0
        p['t']=0
        p['vakiintunutpalkka']=0
    else:
        print('Unknown employment_state ',employment_state)

    # the model considers single-person households
    if employment_state==12: # student
        p['asumismenot_toimeentulo']=250
        p['asumismenot_asumistuki']=250
    else: # others
        p['asumismenot_toimeentulo']=500
        p['asumismenot_asumistuki']=500

    p['ansiopvrahan_suojaosa']=1
    p['ansiopvraha_lapsikorotus']=1
    p['puoliso_tulot']=0
    p['puoliso_tyoton']=0
    p['puoliso_vakiintunutpalkka']=0
    p['puoliso_saa_ansiopaivarahaa']=0

    netto,benefitq=self.ben.laske_tulot(p)
    netto=netto*12  # monthly -> yearly

    if retq:
        return netto,benefitq
    else:
        return netto
def seed(self, seed=None):
    """
    OpenAI-interface seed function: initializes the random number generator.
    """
    rng, used_seed = seeding.np_random(seed)
    self.np_random = rng
    return [used_seed]
def env_seed(self, seed=None):
    '''
    Initialize numpy.random for the environment (seeds the global numpy RNG).
    '''
    np.random.seed(seed)
    #return [seed]
def get_mort_rate(self,debug=False):
    '''
    Mortality intensities (per year) for the six groups.

    Base rates come from Statistics Finland 2017 mortality data by sex
    (stored per mille, hence the /1000.0); columns 1 and 4 hold the male and
    female mid-income base rates, and the other columns are scaled from them
    by dfactor. With debug=True all scaling factors are 1.

    Returns an array of shape (101, n_groups), indexed by age.
    '''
    mort=np.zeros((101,self.n_groups))
    if debug:
        dfactor=np.array([1.0,1.0,1.0,1.0,1.0,1.0])
    else:
        # income-group scaling: low income higher, high income lower mortality
        dfactor=np.array([1.3,1.0,0.8,1.15,1.0,0.85])
    # Statistics Finland 2017 mortality data by sex, men
    mort[:,1]=np.array([2.12,0.32,0.17,0.07,0.07,0.10,0.00,0.09,0.03,0.13,0.03,0.07,0.10,0.10,0.10,0.23,0.50,0.52,0.42,0.87,0.79,0.66,0.71,0.69,0.98,0.80,0.77,1.07,0.97,0.76,0.83,1.03,0.98,1.20,1.03,0.76,1.22,1.29,1.10,1.26,1.37,1.43,1.71,2.32,2.22,1.89,2.05,2.15,2.71,2.96,3.52,3.54,4.30,4.34,5.09,4.75,6.17,5.88,6.67,8.00,9.20,10.52,10.30,12.26,12.74,13.22,15.03,17.24,18.14,17.78,20.35,25.57,23.53,26.50,28.57,31.87,34.65,40.88,42.43,52.28,59.26,62.92,68.86,72.70,94.04,99.88,113.11,128.52,147.96,161.89,175.99,199.39,212.52,248.32,260.47,284.01,319.98,349.28,301.37,370.17,370.17])/1000.0
    mort[:,0]=dfactor[0]*mort[:,1]
    mort[:,2]=dfactor[2]*mort[:,1]
    # women
    mort[:,4]=np.array([1.89,0.30,0.11,0.03,0.14,0.03,0.16,0.07,0.13,0.03,0.00,0.07,0.07,0.07,0.18,0.14,0.07,0.31,0.31,0.30,0.33,0.26,0.18,0.33,0.56,0.17,0.32,0.29,0.35,0.24,0.55,0.35,0.23,0.39,0.48,0.38,0.35,0.80,0.42,0.65,0.50,0.68,0.80,1.12,0.99,0.88,1.13,1.01,1.07,1.68,1.79,2.16,1.87,2.32,2.67,2.69,2.88,2.86,3.73,4.19,3.66,4.97,5.20,5.52,6.05,7.17,7.48,7.32,8.88,10.33,10.72,12.77,12.13,13.30,16.18,18.30,17.50,24.63,26.53,29.88,32.65,38.88,46.95,51.30,60.00,64.73,79.35,90.94,105.11,118.46,141.44,155.07,163.11,198.45,207.92,237.21,254.75,311.31,299.59,356.64,356.64])/1000.0
    mort[:,3]=dfactor[3]*mort[:,4]
    mort[:,5]=dfactor[5]*mort[:,4]
    return mort
def get_student_rate(self,debug=False):
    '''
    Intensities for entering (inrate) and leaving (outrate) studies, per
    group and age. Rates are defined for ages 20-69 and zero elsewhere;
    columns 0-2 are men, 3-5 women (same rate within each sex).

    The debug flag is accepted for interface consistency but unused here.

    Returns (inrate, outrate), each of shape (101, n_groups).
    '''
    inrate=np.zeros((101,self.n_groups))

    # entry rates by age, men
    miehet_in=np.array([0.15202 ,0.09165 ,0.08517 ,0.07565 ,0.05787 ,0.04162 ,0.03061 ,0.02336 ,0.01803 ,0.01439 ,0.03214 ,0.02674 ,0.02122 ,0.02005 ,0.01776 ,0.01610 ,0.01490 ,0.01433 ,0.01307 ,0.01175 ,0.01081 ,0.01069 ,0.00921 ,0.00832 ,0.00808 ,0.00783 ,0.00738 ,0.00727 ,0.00712 ,0.00621 ,0.00578 ,0.00540 ,0.00505 ,0.00411 ,0.00434 ,0.00392 ,0.00415 ,0.00362 ,0.00279 ,0.00232 ,0.00184 ,0.00196 ,0.00126 ,0.00239 ,0.00402 ,0.00587 ,0.00587 ,0.00754 ,0 ,0 ])
    # entry rates by age, women
    naiset_in=np.array([0.12538 ,0.09262 ,0.08467 ,0.06923 ,0.05144 ,0.03959 ,0.03101 ,0.02430 ,0.02103 ,0.01834 ,0.03984 ,0.03576 ,0.03300 ,0.03115 ,0.02934 ,0.02777 ,0.02454 ,0.02261 ,0.02127 ,0.01865 ,0.01711 ,0.01631 ,0.01496 ,0.01325 ,0.01251 ,0.01158 ,0.01148 ,0.01034 ,0.00935 ,0.00911 ,0.00848 ,0.00674 ,0.00636 ,0.00642 ,0.00605 ,0.00517 ,0.00501 ,0.00392 ,0.00330 ,0.00291 ,0.00202 ,0.00155 ,0.00118 ,0.00193 ,0.00376 ,0.00567 ,0.00779 ,0.00746 ,0 ,0 ])
    inrate[20:70,0] =miehet_in
    inrate[20:70,1] =miehet_in
    inrate[20:70,2] =miehet_in
    inrate[20:70,3] =naiset_in
    inrate[20:70,4] =naiset_in
    inrate[20:70,5] =naiset_in

    outrate=np.zeros((101,self.n_groups))
    # exit rates by age, men
    miehet_ulos=np.array([0.20000,0.20000,0.27503,0.38096,0.43268,0.42941,0.41466,0.40854,0.38759,0.30057,0.66059,0.69549,0.55428,0.61274,0.58602,0.57329,0.53688,0.58737,0.59576,0.58190,0.50682,0.63749,0.59542,0.53201,0.53429,0.55827,0.51792,0.52038,0.63078,0.57287,0.57201,0.56673,0.69290,0.44986,0.60497,0.45890,0.64129,0.73762,0.68664,0.73908,0.47708,0.92437,0.27979,0.54998,0.60635,0.72281,0.45596,0.48120,0.41834,0.55567])
    # exit rates by age, women
    naiset_ulos=np.array([0.2,0.226044511,0.34859165,0.404346193,0.378947854,0.379027678,0.393658729,0.312799282,0.312126148,0.325150199,0.5946454,0.564144808,0.555376244,0.556615568,0.545757439,0.61520002,0.577306728,0.558805476,0.618014582,0.584596312,0.542579298,0.581755996,0.612559266,0.559683811,0.577041852,0.51024909,0.602288269,0.594473782,0.529303275,0.573062208,0.709297989,0.559692954,0.499632245,0.560546551,0.654820741,0.547514252,0.728319756,0.668454496,0.637200351,0.832907039,0.763936815,0.823014939,0.439925972,0.400593267,0.57729364,0.432838681,0.720728303,0.45569566,0.756655823,0.210470698])
    outrate[20:70,0]=miehet_ulos
    outrate[20:70,1]=miehet_ulos
    outrate[20:70,2]=miehet_ulos
    outrate[20:70,3]=naiset_ulos
    outrate[20:70,4]=naiset_ulos
    outrate[20:70,5]=naiset_ulos

    return inrate,outrate
def get_outsider_rate_old(self,debug=False):
    '''
    Shares entering/leaving sickness daily allowance (legacy version;
    __init__ uses get_outsider_rate instead).

    Rates are defined for ages 20-69 (max_spv=70) and zero elsewhere;
    columns 0-2 are men, 3-5 women. The debug flag is accepted for
    interface consistency but unused here.

    Returns (inrate, outrate), each of shape (101, n_groups).
    '''
    inrate=np.zeros((101,self.n_groups))
    max_spv=70
    # entry rates by age, men
    miehet_in=np.array([0.00598,0.00236,0.00195,0.00179,0.00222,0.00150,0.00363,0.00142,0.00138,0.00149,0.00561,0.00140,0.00291,0.00390,0.00130,0.00548,0.00120,0.00476,0.00118,0.00315,0.00111,0.00346,0.00117,0.00203,0.00105,0.00189,0.00154,0.00104,0.00488,0.00103,0.00273,0.00104,0.00375,0.00108,0.00314,0.00256,0.00188,0.00115,0.00115,0.00112,0.00112,0.00106,0.00112,0.00000,0.00000,0.00000,0.00257,0.00359,0,0 ])
    # entry rates by age, women
    naiset_in=np.array([0.00246,0.00210,0.00212,0.00211,0.00205,0.00217,0.00233,0.00355,0.00246,0.00247,0.00248,0.00239,0.00238,0.00225,0.00209,0.00194,0.00179,0.01151,0.00823,0.00802,0.00990,0.00515,0.00418,0.00644,0.00334,0.00101,0.00098,0.00256,0.00093,0.00092,0.00089,0.00172,0.00089,0.00248,0.00107,0.00170,0.00105,0.00143,0.00140,0.00233,0.00108,0.00104,0.00112,0.00000,0.00000,0.00000,0.00000,0.00000,0,0 ])
    inrate[20:max_spv,0] =miehet_in
    inrate[20:max_spv,1] =miehet_in
    inrate[20:max_spv,2] =miehet_in
    inrate[20:max_spv,3] =naiset_in
    inrate[20:max_spv,4] =naiset_in
    inrate[20:max_spv,5] =naiset_in

    outrate=np.zeros((101,self.n_groups))
    # exit rates by age, men
    miehet_ulos=np.array([0.54528,0.21972,0.05225,0.08766,0.02000,0.07014,0.02000,0.07964,0.05357,0.02000,0.02000,0.12421,0.02000,0.02000,0.09464,0.02000,0.06655,0.02000,0.04816,0.02000,0.09763,0.02000,0.02000,0.02000,0.03777,0.02000,0.02000,0.10725,0.02000,0.05159,0.02000,0.04831,0.02000,0.08232,0.02000,0.02000,0.02000,0.02931,0.07298,0.05129,0.11783,0.07846,0.45489,0.58986,0.15937,0.43817,0.00000,0.00000,0.25798,0.00000])
    # exit rates by age, women
    naiset_ulos=np.array([0.47839484,0.190435122,0.12086902,0.081182033,0.030748876,0.184119897,0.075833908,0.02,0.029741112,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.032506855,0.026333043,0.02,0.023692146,0.050057587,0.037561449,0.02,0.024524018,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.092785925,0.054435714,0.439187202,0.465046705,0.39008036,0.384356347,0.169971142,0.031645066,0,0])
    outrate[20:max_spv,0]=miehet_ulos
    outrate[20:max_spv,1]=miehet_ulos
    outrate[20:max_spv,2]=miehet_ulos
    outrate[20:max_spv,3]=naiset_ulos
    outrate[20:max_spv,4]=naiset_ulos
    outrate[20:max_spv,5]=naiset_ulos

    return inrate,outrate
def get_outsider_rate(self,debug=False):
    '''
    Shares entering/leaving sickness daily allowance, per group and age.
    NOTE(review): in __init__ these are assigned to outsider_inrate /
    outsider_outrate (movements outside the labour force) — confirm the
    docstring wording against how the rates were calibrated.

    Rates are defined for ages 20-69 (max_spv=70) and zero elsewhere;
    columns 0-2 are men, 3-5 women. Earlier calibration vectors that were
    kept as commented-out code have been removed. The debug flag is
    accepted for interface consistency but unused here.

    Returns (inrate, outrate), each of shape (101, n_groups).
    '''
    inrate=np.zeros((101,self.n_groups))
    max_spv=70
    # entry rates by age, men
    miehet_in=np.array([0.00578,0.00226,0.00187,0.00170,0.00153,0.00143,0.00137,0.00134,0.00130,0.00129,0.00210,0.00132,0.00348,0.00358,0.00123,0.00312,0.00112,0.00368,0.00109,0.00162,0.00103,0.00271,0.00095,0.00252,0.00095,0.00094,0.00093,0.00093,0.00400,0.00093,0.00342,0.00097,0.00370,0.00099,0.00259,0.00221,0.00244,0.00106,0.00102,0.00101,0.00099,0.00098,0.00095,0.00000,0.00000,0.00000,0.00000,0.00000,0.00000,0.0 ])
    # entry rates by age, women
    naiset_in=np.array([0.00236,0.00203,0.00205,0.00206,0.00198,0.00210,0.00228,0.00227,0.00241,0.00241,0.00242,0.00234,0.00231,0.00217,0.00202,0.00187,0.01046,0.00997,0.00293,0.00918,0.00231,0.00401,0.00850,0.00266,0.00394,0.00172,0.00089,0.00262,0.00113,0.00084,0.00083,0.00142,0.00080,0.00295,0.00187,0.00086,0.00118,0.00089,0.00166,0.00100,0.00094,0.00092,0.00097,0.00000,0.00000,0.00000,0.00000,0.00000,0.0,0.00])
    inrate[20:max_spv,0] =miehet_in
    inrate[20:max_spv,1] =miehet_in
    inrate[20:max_spv,2] =miehet_in
    inrate[20:max_spv,3] =naiset_in
    inrate[20:max_spv,4] =naiset_in
    inrate[20:max_spv,5] =naiset_in

    outrate=np.zeros((101,self.n_groups))
    # exit rates by age, men
    miehet_ulos=np.array([0.54333,0.18208,0.09452,0.06729,0.05128,0.03646,0.02952,0.04198,0.02505,0.02711,0.02000,0.07864,0.02000,0.02000,0.09971,0.02000,0.05071,0.02000,0.03236,0.02000,0.09185,0.02000,0.03203,0.02000,0.03167,0.03064,0.02161,0.08480,0.02000,0.04616,0.02000,0.06061,0.02000,0.08866,0.02000,0.02000,0.02000,0.07034,0.04439,0.08118,0.06923,0.16061,0.51689,0.55980,0.23310,0.25554,0.01519,0.12491,0.06625,0.00000])
    # exit rates by age, women
    naiset_ulos=np.array([0.371419539,0.205661569,0.135265873,0.102702654,0.055240889,0.048992378,0.107111533,0.059592465,0.032056939,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.06991043,0.02,0.02,0.036157545,0.070163829,0.02,0.032241992,0.02,0.02,0.02027967,0.02,0.043477638,0.02,0.02,0.080038155,0.071876772,0.477291934,0.454819524,0.428913696,0.287380262,0.140803001,0.054164949,0,0])
    outrate[20:max_spv,0]=miehet_ulos
    outrate[20:max_spv,1]=miehet_ulos
    outrate[20:max_spv,2]=miehet_ulos
    outrate[20:max_spv,3]=naiset_ulos
    outrate[20:max_spv,4]=naiset_ulos
    outrate[20:max_spv,5]=naiset_ulos

    return inrate,outrate
def get_army_rate(self,debug=False):
    '''
    Intensities for leaving military service, per group and age.

    Rates are defined for ages 20-69 and zero elsewhere; columns 0-2 are
    men, 3-5 women. A value of 1 means leaving with certainty within the
    year. The debug flag is accepted for interface consistency but unused.

    Returns only the exit rate, shape (101, n_groups); there is no entry
    rate counterpart in this method.
    '''
    outrate=np.zeros((101,self.n_groups))
    # exit rates by age, men
    miehet_ulos=np.array([0.826082957,0.593698994,0.366283368,0.43758429,0.219910436,0.367689675,0.111588214,0.234498521,0.5,0.96438943,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
    # exit rates by age, women
    naiset_ulos=np.array([0.506854911,0.619103706,0.181591468,0.518294319,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
    outrate[20:70,0]=miehet_ulos
    outrate[20:70,1]=miehet_ulos
    outrate[20:70,2]=miehet_ulos
    outrate[20:70,3]=naiset_ulos
    outrate[20:70,4]=naiset_ulos
    outrate[20:70,5]=naiset_ulos

    return outrate
def get_disability_rate(self,debug=False):
    '''
    Disability-pension incidence rates for the six groups.

    Data from ETK's statistical database, scaled by the age-specific
    numbers of men and women. Rates cover ages 20-70; ages above 70 use a
    constant 24.45 per mille scaled by the group factor. With debug=True
    all group factors are 1.

    Returns an array of shape (max_age+1, n_groups), indexed by age.
    '''
    disab=np.zeros((self.max_age+1,self.n_groups))
    # male low, male mid, male high, female low, female mid, female high
    if debug:
        dfactor=np.array([1.0,1.0,1.0,1.0,1.0,1.0])
    else:
        # from Uusitalo's report "Työkyvyttömyyden vuoksi menetetty työura"
        # (work career lost to disability); scaled down so the total number
        # of disabled comes out closer to observed
        dfactor=np.array([1.2,0.8,0.4,1.1,0.8,0.5])*0.9
    # incidence by age, men
    dis_miehet=np.array([0.004697942,0.004435302,0.003631736,0.003141361,0.003457091,0.003005607,0.002905609,0.003029283,0.002289213,0.002137714,0.001854558,0.002813517,0.002607335,0.00292628,0.002937462,0.002784612,0.002846377,0.002776506,0.003017675,0.003129845,0.003349059,0.002991577,0.00305634,0.003446143,0.003633971,0.004045113,0.004002001,0.004517725,0.005527525,0.005565513,0.006319492,0.007399175,0.00731299,0.009142823,0.010254463,0.011784364,0.013783743,0.015299156,0.018282001,0.024051257,0.032338044,0.028290544,0.019444444,0.00454486,0.000330718,0,0,0,0,0,0])
    # incidence by age, women
    dis_naiset=np.array([0.00532654,0.004917401,0.00453191,0.003799551,0.003253733,0.003092307,0.002822592,0.003309772,0.002482279,0.002615887,0.002416545,0.003546203,0.002665276,0.003095104,0.003129633,0.003406418,0.003171677,0.003320357,0.003391292,0.004007371,0.004310094,0.00438571,0.004267343,0.004889399,0.005043702,0.005793425,0.005569451,0.006298434,0.006363081,0.007043361,0.009389811,0.007457667,0.009251373,0.011154836,0.009524088,0.013689796,0.014658423,0.017440417,0.022804727,0.02677838,0.037438459,0.034691279,0.022649573,0.004414073,0.000264568,0,0,0,0,0,0])
    # no early retirement schemes included, so assume the incidence does not fall near retirement
    dis_miehet[41:51]=np.maximum(dis_miehet[41:51],0.02829054)
    dis_naiset[41:51]=np.maximum(dis_naiset[41:51],0.03469128)
    for g in range(3):
        disab[20:71,g]=dfactor[g]*dis_miehet
        disab[70:(self.max_age+1),g]=24.45*dfactor[g]/1000
    for g in range(3,6):
        disab[20:71,g]=dfactor[g]*dis_naiset
        disab[70:(self.max_age+1),g]=24.45*dfactor[g]/1000

    return disab
def get_eff_disab_rate(self,debug=False):
    '''
    Effective disability-pension incidence rates for the six groups.

    "Effective" because these are computed from the observed number of
    disabled persons rather than raw incidence; this is the variant
    actually used by __init__. Rates cover ages 20-70; ages above 70 use a
    constant 24.45 per mille scaled by the group factor. With debug=True
    all group factors are 1.

    Returns an array of shape (max_age+1, n_groups), indexed by age.
    '''
    disab=np.zeros((self.max_age+1,self.n_groups))
    # male low, male mid, male high, female low, female mid, female high
    if debug:
        dfactor=np.array([1.0,1.0,1.0,1.0,1.0,1.0])
    else:
        # from Uusitalo's report "Työkyvyttömyyden vuoksi menetetty työura"
        # (work career lost to disability); scaled so the total number of
        # disabled comes out closer to observed
        dfactor=np.array([1.3,0.95,0.6,1.2,1.0,0.9])
    # effective incidence by age, men
    dis_miehet=np.array([0.0068168,0.003341014,0,0.004279685,0.001118673,0.001802593,0.00217149,0,0,0.002157641,0,0.002545172,0,0.002960375,0.000767293,0,0.002265829,0.000286527,0,0.004899931,0,0.000677208,0.001155069,0.003796412,0.004896709,0.001921327,0.004668376,0.004630126,0.002478899,0.00642266,0.005795605,0.00558426,0.008096878,0.004548654,0.010179089,0.016100661,0.015144889,0.011688053,0.024563474,0.036719657,0.036573355,0.026898066,0.027508352,0.024176173,0.023621633,0.02058014,0.020290345,0.0202976,0.020304995,0.020282729,0.020282729])
    # effective incidence by age, women
    dis_naiset=np.array([0.004962318,0.002850008,0.004703008,0,0.001625749,0.000940874,0.001050232,0,0,4.34852E-05,0.003516261,0,8.21901E-05,0.002276047,0.000443789,0.002472653,0,0.001866348,0.002269429,0.001480588,0.00139571,0.002185668,0.002003531,0.003662852,0.003271301,0.003629155,0.002690071,0.003977974,0.005051223,0.00303663,0.008097507,0.004912787,0.005008356,0.007536173,0.007618452,0.017496524,0.012431715,0.020801345,0.025163258,0.027521298,0.039852895,0.023791604,0.025422742,0.02230225,0.021684456,0.01894045,0.018676988,0.018654938,0.01865384,0.018650795,0.018650795])
    for g in range(3):
        disab[20:71,g]=dfactor[g]*dis_miehet
        disab[70:(self.max_age+1),g]=24.45*dfactor[g]/1000
    for g in range(3,6):
        disab[20:71,g]=dfactor[g]*dis_naiset
        disab[70:(self.max_age+1),g]=24.45*dfactor[g]/1000

    return disab
def get_birth_rate(self,debug=False):
'''
Syntyvyysdata
'''
birth=np.zeros((self.max_age+1,self.n_groups))
if debug:
dfactor=np.array([1.0,1.0,1.0,1.0,1.0,1.0])
else:
dfactor=np.array([0.75,1.0,1.25,0.5,1.0,1.5])
for g in range(self.n_groups):
factor=dfactor[g] # tämä vaikeuttaa sovitetta
birth[15,g]=0.000177167*factor
birth[16,g]=0.001049171*factor
birth[17,g]=0.002303504*factor
birth[18,g]=0.00630474*factor
birth[19,g]=0.014399394*factor
birth[20,g]=0.023042239*factor
birth[21,g]=0.03088312*factor
birth[22,g]=0.039755923*factor
birth[23,g]=0.047483352*factor
birth[24,g]=0.055630287*factor
birth[25,g]=0.067942889*factor
birth[26,g]=0.077108925*factor
birth[27,g]=0.085396679*factor
birth[28,g]=0.096968809*factor
birth[29,g]=0.10081728*factor
birth[30,g]=0.105586061*factor
birth[31,g]=0.1124004*factor
birth[32,g]=0.102667839*factor
birth[33,g]=0.098528489*factor
birth[34,g]=0.084080311*factor
birth[35,g]=0.072335459*factor
birth[36,g]=0.065203338*factor
birth[37,g]=0.053073374*factor
birth[38,g]=0.044054569*factor
birth[39,g]=0.032984136*factor
birth[40,g]=0.024135797*factor
birth[41,g]=0.0174215*factor
birth[42,g]=0.011621238*factor
birth[43,g]=0.006909705*factor
birth[44,g]=0.003977037*factor
birth[45,g]=0.002171444*factor
birth[46,g]=0.00115119*factor
birth[47,g]=0.000712692*factor
birth[48,g]=9.16478E-05*factor
birth[49,g]=0.000113167*factor
# syntyvyys on lasten määrä suhteessa naisten määrään
# ei siis tarvetta kertoa kahdella, vaikka isät pääsevät isyysvapaalle
return birth
def scale_pension(self,pension,age,scale=True):
'''
Elinaikakertoimen ja lykkäyskorotuksen huomiointi
'''
if scale:
return self.elinaikakerroin*pension*self.elakeindeksi*(1+0.048*(age-self.min_retirementage))
else:
return self.elinaikakerroin*pension*self.elakeindeksi
    def move_to_parttime(self,pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction):
        '''
        Transition to part-time (50 %) work, state 10.

        Working increases the employment condition (toe) and career length
        (tyoura); pension accrues on the part-time wage.

        Returns
        -------
        (employment_status,pension,wage,time_in_state,netto,toe,tyoura,
         out_of_work,pinkslip,wage_reduction,benq)
        '''
        employment_status = 10 # switch to part-time work
        intage=int(np.floor(age))
        wage=self.get_wage(intage,wage_reduction)
        parttimewage=0.5*wage
        toe=min(self.max_toe,toe+self.timestep)
        tyoura += self.timestep
        time_in_state=0
        out_of_work=0
        # NOTE: old_wage is zeroed BEFORE the benefit computation below,
        # so comp_benefits sees an old wage of 0
        old_wage=0
        pension=self.pension_accrual(age,parttimewage,pension,state=employment_status)
        netto,benq=self.comp_benefits(parttimewage,old_wage,0,employment_status,time_in_state,age,retq=True)
        pinkslip=0
        time_in_state=self.timestep
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        return employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq
    def move_to_work(self,pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction):
        '''
        Transition to full-time work, state 1.

        Working increases the employment condition (toe) and career length
        (tyoura).

        Returns
        -------
        (employment_status,pension,wage,time_in_state,netto,toe,tyoura,
         out_of_work,pinkslip,wage_reduction,benq)
        '''
        employment_status = 1 # to work
        intage=int(np.floor(age))
        wage=self.get_wage(intage,wage_reduction,pinkslip=pinkslip)
        time_in_state=0
        old_wage=0
        toe=min(self.max_toe,toe+self.timestep)
        tyoura+=self.timestep
        out_of_work=0
        #pinkslip=0
        # NOTE(review): accrual uses wage*0.5 even though this is full-time
        # work; the original author flagged this — confirm intended
        pension=self.pension_accrual(age,wage*0.5,pension,state=employment_status) # FIXME? 0.5?
        netto,benq=self.comp_benefits(wage,old_wage,0,employment_status,time_in_state,age)
        time_in_state=self.timestep
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        return employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq
    def move_to_retwork(self,pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction):
        '''
        Transition to working (at 50 % wage) while drawing old-age pension,
        state 8. The paid pension is indexed and new pension accrues on the
        wage earned alongside it.

        Returns
        -------
        (employment_status,pension,wage,time_in_state,netto,out_of_work,
         wage_reduction,benq)
        '''
        employment_status = 8 # unchanged
        intage=int(np.floor(age))
        wage=self.get_wage(intage,wage_reduction)
        ptwage=wage*0.5
        paid_pension=paid_pension*self.elakeindeksi
        pension=self.pension_accrual(age,ptwage,pension,state=employment_status)
        time_in_state=0
        netto,benq=self.comp_benefits(ptwage,0,paid_pension,employment_status,time_in_state,age)
        time_in_state+=self.timestep
        out_of_work=0
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        # tyoura+= ?? (career length is not updated here — original open question)
        return employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq
    def move_to_student(self,pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction):
        '''
        Transition to student, state 12.

        No further variable updates here because they are done elsewhere
        (per the original author's note).

        Returns
        -------
        (employment_status,pension,wage,time_in_state,netto,out_of_work,
         pinkslip,wage_reduction,benq)
        '''
        employment_status = 12
        intage=int(np.floor(age))
        time_in_state=0
        out_of_work=0
        netto,benq=self.comp_benefits(0,0,0,employment_status,time_in_state,age)
        time_in_state+=self.timestep
        # out_of_work is (redundantly) reset a second time here
        out_of_work=0
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        pinkslip=0
        wage=old_wage
        return employment_status,pension,wage,time_in_state,netto,out_of_work,pinkslip,wage_reduction,benq
    def move_to_retpartwork(self,pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction):
        '''
        Transition to part-time (50 %) work while drawing old-age pension,
        state 9. The paid pension is indexed and new pension accrues on the
        part-time wage.

        Returns
        -------
        (employment_status,pension,wage,time_in_state,netto,out_of_work,
         wage_reduction,benq)
        '''
        employment_status = 9 # unchanged
        intage=int(np.floor(age))
        wage=self.get_wage(intage,wage_reduction)
        ptwage=0.5*wage
        paid_pension=paid_pension*self.elakeindeksi
        pension=self.pension_accrual(age,ptwage,pension,state=employment_status)
        time_in_state=0
        netto,benq=self.comp_benefits(ptwage,0,paid_pension,employment_status,time_in_state,age)
        time_in_state+=self.timestep
        out_of_work=0
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        # tyoura+= ?? (career length is not updated here — original open question)
        return employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq
    def move_to_retirement(self,pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=True,scale_acc=True):
        '''
        Transition to old-age pension, state 2.

        Parameters
        ----------
        all_acc : bool
            If True, (re)compute the paid pension from the accrued pension;
            if False, leave paid_pension as passed in.
        scale_acc : bool
            Passed to scale_pension: whether to apply the deferral increase.

        Returns
        -------
        (employment_status,paid_pension,pension,wage,time_in_state,netto,
         out_of_work,wage_reduction,benq)
        '''
        if age>=self.min_retirementage:
            if all_acc:
                if employment_status in set([2,8,9]): # pension, pension+work, pension+part-time work
                    if age>=self.max_retirementage:
                        # no deferral increase past the maximum retirement age;
                        # remaining accrual is folded into the paid pension
                        paid_pension = self.elinaikakerroin*self.elakeindeksi*pension+self.elakeindeksi*paid_pension
                        pension=0
                    else:
                        paid_pension = self.elakeindeksi*paid_pension
                elif employment_status==3: # disability pension
                    # do nothing
                    employment_status=3
                else:
                    # deferral increase on first retirement from a non-pension state
                    paid_pension = self.scale_pension(pension,age,scale=scale_acc)
                    paid_pension += self.ben.laske_kansanelake(age,paid_pension/12,1)*12 # correct? s.b. self.ben.laske_kansanelake(age,paid_pension/12,1)*12
                    pension=0
            time_in_state=self.timestep
            employment_status = 2
            wage=old_wage
            out_of_work+=self.timestep
            netto,benq=self.comp_benefits(0,0,paid_pension,employment_status,0,age)
            wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        else: # below the minimum retirement age: outside the labour force
            # NOTE(review): status is still set to 2 (retired) here even though
            # the age condition failed — confirm intended
            time_in_state=0
            employment_status = 2
            wage=old_wage
            netto,benq=self.comp_benefits(0,0,0,employment_status,0,age)
            time_in_state+=self.timestep
            wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        return employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq
    def move_to_retdisab(self,pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction):
        '''
        Transition to old-age pension during which working is not possible,
        state 3.

        Returns
        -------
        (employment_status,paid_pension,pension,wage,time_in_state,netto,
         out_of_work,wage_reduction,benq)
        '''
        if age>=self.max_retirementage:
            # fold remaining accrual into the paid pension
            paid_pension= self.elinaikakerroin*self.elakeindeksi*pension+self.elakeindeksi*paid_pension
            pension=0
        employment_status = 3
        out_of_work+=self.timestep
        wage=old_wage
        netto,benq=self.comp_benefits(0,0,paid_pension,employment_status,0,age)
        time_in_state=self.timestep
        # wage reduction is hard-set to 0.9 in this state
        wage_reduction=0.9
        return employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq
    def move_to_unemp(self,pension,old_wage,age,paid_pension,toe,irtisanottu,out_of_work,tyoura,wage_reduction,used_unemp_benefit):
        '''
        Transition to unemployment benefits.

        Depending on the employment condition (toe), career length (tyoura)
        and benefit days already used, the destination state is earnings-
        related benefit (0), the unemployment pipeline / extended days (4),
        or labour-market subsidy (13). At or above the minimum retirement
        age the person retires instead.

        Returns
        -------
        (employment_status,paid_pension,pension,wage,time_in_state,netto,toe,
         out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq)
        '''
        if age>=self.min_retirementage: # no new unemployed past the minimum retirement age; they retire
            pinkslip=0
            employment_status=0
            employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
                self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=True)
            return employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,\
                   wage_reduction,used_unemp_benefit,pinkslip,benq
        else:
            if toe>=self.ansiopvraha_toe: # is the employment condition met?
                # benefit days already used, in days (12 months x 21.5 days)
                kesto=12*21.5*used_unemp_benefit
                if ((tyoura>=self.tyohistoria_vaatimus500 and kesto>=self.ansiopvraha_kesto500 and age>=self.minage_500) \
                    or (tyoura>=self.tyohistoria_vaatimus and kesto>=self.ansiopvraha_kesto400 and (age<self.minage_500 or tyoura<self.tyohistoria_vaatimus500)) \
                    or (tyoura<self.tyohistoria_vaatimus and kesto>=self.ansiopvraha_kesto300)):
                    # earnings-related benefit period exhausted
                    if self.include_putki and age>=self.min_tyottputki_ika and tyoura>=self.tyohistoria_tyottputki:
                        employment_status = 4 # move to extended days (unemployment pipeline)
                    else:
                        employment_status = 13 # move to labour-market subsidy
                else:
                    employment_status = 0 # move to earnings-related benefit
            else:
                employment_status = 13 # move to labour-market subsidy
            time_in_state=0
            toe=0 #max(0,toe-self.timestep) # reset the employment condition
            intage=int(np.floor(age))
            wage=old_wage # self.get_wage(intage,wage_reduction) # old wage is assumed to be the toe wage
            pension=self.pension_accrual(age,old_wage,pension,state=employment_status)
            # NOTE: waiting-period days (omavastuupäivät) are missing!
            # there are 5/(21.5*12*self.timestep) of them, so the factor would be
            # 1-5/(21.5*12*self.timestep)
            netto,benq=self.comp_benefits(0,old_wage,0,employment_status,used_unemp_benefit,age,
                                     irtisanottu=irtisanottu,tyohistoria=tyoura)
            time_in_state=self.timestep
            out_of_work+=self.timestep
            wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
            if irtisanottu: # otherwise no right to earnings-related allowance, due to the karenssi (waiting period)
                used_unemp_benefit+=self.timestep
            pinkslip=irtisanottu
            return employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,\
                   wage_reduction,used_unemp_benefit,pinkslip,benq
    def move_to_outsider(self,pension,old_wage,age,toe,irtisanottu,out_of_work,wage_reduction):
        '''
        Transition to outside the labour force, state 11.

        The employment condition (toe) decays and the pension is only
        indexed (no new accrual).

        Returns
        -------
        (employment_status,paid_pension,pension,wage,time_in_state,toe,netto,
         out_of_work,wage_reduction,pinkslip,benq)
        '''
        employment_status = 11 # switch
        time_in_state=0
        out_of_work+=self.timestep
        intage=int(np.floor(age))
        #old_wage=self.get_wage(intage-1,0)
        toe=max(0,toe-self.timestep)
        wage=old_wage
        pension=pension*self.palkkakerroin
        netto,benq=self.comp_benefits(0,0,0,employment_status,time_in_state,age,irtisanottu=0)
        paid_pension=0
        time_in_state+=self.timestep
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        pinkslip=0
        return employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq
    def move_to_disab(self,pension,old_wage,age,out_of_work,wage_reduction):
        '''
        Transition to disability pension, state 3.

        The paid pension is the scaled accrued pension plus projected accrual
        on the old wage up to the minimum retirement age, passed through the
        total-pension calculator.

        Returns
        -------
        (employment_status,pension,paid_pension,wage,time_in_state,netto,
         out_of_work,wage_reduction,benq)
        '''
        employment_status = 3 # disability pension
        paid_pension=self.elinaikakerroin*pension*self.elakeindeksi + self.acc*old_wage*max(0,self.min_retirementage-age) # should be the 5-year average wage
        paid_pension=self.ben.laske_kokonaiselake(65,paid_pension)
        pension=0
        #old_wage=0
        time_in_state=0
        out_of_work+=self.timestep
        wage=old_wage
        netto,benq=self.comp_benefits(0,0,paid_pension,employment_status,0,age)
        time_in_state+=self.timestep
        wage_reduction=0.60 # matches the definition
        return employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq
def move_to_deceiced(self,pension,old_wage,age):
'''
Siirtymä tilaan kuollut
'''
employment_status = 15 # deceiced
wage=old_wage
pension=pension
netto=0
time_in_state=0
return employment_status,pension,wage,time_in_state,netto
    def move_to_kht(self,pension,old_wage,age,out_of_work,wage_reduction):
        '''
        Transition to child home-care allowance (kotihoidontuki), state 7.

        Pension accrues on the state-7 basis; benefits computed with the
        old wage as reference.

        Returns
        -------
        (employment_status,pension,wage,time_in_state,netto,out_of_work,
         wage_reduction,benq)
        '''
        employment_status = 7 # child home-care allowance
        wage=old_wage
        pension=self.pension_accrual(age,old_wage,pension,state=7)
        time_in_state=0
        out_of_work+=self.timestep
        netto,benq=self.comp_benefits(0,old_wage,0,employment_status,time_in_state,age)
        time_in_state+=self.timestep
        wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
        return employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq
def move_to_fatherleave(self,pension,old_wage,age,out_of_work,wage_reduction):
'''
Siirtymä isyysvapaalle
'''
employment_status = 6 # isyysvapaa
time_in_state=0
wage=old_wage
out_of_work+=self.timestep
pension=self.pension_accrual(age,old_wage,pension,state=6)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,0,age)
time_in_state+=self.timestep
pinkslip=0
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
return employment_status,pension,wage,time_in_state,netto,out_of_work,pinkslip,wage_reduction,benq
def move_to_motherleave(self,pension,old_wage,age,out_of_work,wage_reduction):
'''
Siirtymä äitiysvapaalle
'''
employment_status = 5 # äitiysvapaa
time_in_state=0
wage=old_wage
out_of_work+=self.timestep
pension=self.pension_accrual(age,old_wage,pension,state=5)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,0,age)
time_in_state+=self.timestep
pinkslip=0
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
return employment_status,pension,wage,time_in_state,netto,out_of_work,pinkslip,wage_reduction,benq
#
# def reset_info_state(self):
# '''
# Ei käytössä
# Tapa tallentaa lapsia yms
# '''
# self.infostate=(0,0,-1,-1,-1,-1)
#
# def decode_info_state(self):
# '''
# Ei käytössä
# Tällä menetelmällä voi tallentaa tarkempia tietoja lapsista,puolisoista yms
# '''
# tyoura,lapsia,lapsen_ika,lapsia_paivakodissa,n_lapsilisa,n_tyotturva=self.infostate
# return tyoura,lapsia,lapsen_ika,lapsia_paivakodissa,n_lapsilisa,n_tyotturva
#
# def encode_info_state(self,tyoura,lapsia,lapsen_ika,lapsia_paivakodissa,n_lapsilisa,n_tyotturva):
# '''
# Ei käytössä
# Tällä menetelmällä voi tallentaa tarkempia tietoja lapsista,puolisoista yms
# '''
# self.infostate=(tyoura,lapsia,lapsen_ika,lapsia_paivakodissa,n_lapsilisa,n_tyotturva)
#
# def update_info_state(self):
# '''
# Ei käytössä
# Tällä menetelmällä voi tallentaa tarkempia tietoja lapsista,puolisoista yms
# '''
# tyoura,lapsia,lapsen_ika,lapsia_paivakodissa,n_lapsilisa,n_tyotturva=self.decode_info_state()
#
# lapsia_paivakodissa=0
# n_lapsilisa=0
# n_tyotturva=0
# if lapsia<1:
# return
# for l in range(lapsia):
# lapsen_ika[l]+=self.timestep
# if lapsen_ika[l]>=1.0 and lapsen_ika[l]<7:
# lapsia_paivakodissa += 1
# if lapsen_ika[l]<17:
# n_lapsilisa += 1
# if lapsen_ika[l]<18:
# n_tyotturva += 1
#
# self.encode_info_state(tyoura,lapsia,lapsen_ika,lapsia_paivakodissa,n_lapsilisa,n_tyotturva)
def pension_accrual(self,age,wage,pension,state=1):
'''
Eläkkeen karttumisrutiini
'''
if age>=52 and age<63:
acc=self.acc_over_52
else:
acc=self.acc
if state in set([0,4]):
if age>=52 and age<63:
acc=self.acc_unemp_over_52
else:
acc=self.acc_unemp
if age<self.min_retirementage:
pension=pension*self.palkkakerroin+acc*wage
else: # muuten ei karttumaa
pension=pension*self.palkkakerroin
elif state in set([1,10]):
if age<self.max_retirementage:
pension=pension*self.palkkakerroin+acc*wage
else:
pension=pension*self.palkkakerroin
elif state in set([5,6]):
if age>=52 and age<63:
acc=self.acc_family_over_52
else:
acc=self.acc_family
if age<self.max_retirementage:
pension=pension*self.palkkakerroin+acc*wage
else:
pension=pension*self.palkkakerroin
elif state == 7:
if age<self.max_retirementage:
pension=pension*self.palkkakerroin+acc*self.accbasis_kht
else:
pension=pension*self.palkkakerroin
elif state in set([8,9]):
acc=self.acc # ei korotettua
if age<self.max_retirementage:
pension=pension*self.palkkakerroin+acc*wage
else:
pension=pension*self.palkkakerroin
elif state == 13: # tm-tuki
if age<self.min_retirementage:
pension=pension*self.palkkakerroin+self.accbasis_tmtuki*acc
else:
pension=pension*self.palkkakerroin
else: # 2,3,11,12,14 # ei karttumaa
pension=pension*self.palkkakerroin # vastainen eläke, ei alkanut, ei karttumaa
return pension
def update_wage_reduction_baseline(self,state,wage_reduction):
'''
Pidetään kirjaa siitä, kuinka paljon palkkaa alennetaan työttömyyden keston suhteen,
ja miten siitä palaudutaan
'''
if state in set([1,10]): # töissä
wage_reduction=max(0,wage_reduction-self.salary_const_up)
if state in set([8,9]): # ve+töissä
wage_reduction=max(0,wage_reduction-self.salary_const_up)
elif state==12: # opiskelee
wage_reduction=max(0,wage_reduction-self.salary_const_student)
elif state in set([0,4,13,11]): # työtön tai työelämän ulkopuolella
wage_reduction+=self.salary_const
elif state in set([5,6]): # isyys tai vanhempainvapaa
wage_reduction+=self.salary_const
elif state in set([7,2]): # kotihoidontuki tai ve tai tk
wage_reduction+=self.salary_const
elif state in set([3,14,15]): # ei muutosta
wage_reduction=wage_reduction
else: # ylivuoto, ei tiloja
wage_reduction=wage_reduction
return wage_reduction
def update_wage_reduction_sigma(self,state,wage_reduction):
'''
Pidetään kirjaa siitä, kuinka paljon palkkaa alennetaan työttömyyden keston suhteen,
ja miten siitä palaudutaan
Tämä malli ei mene koskaan nollaan.
'''
if state in set([1,10]): # töissä
wage_reduction=max(0,wage_reduction-self.salary_const_up)
if state in set([8,9]): # ve+töissä
wage_reduction=max(0,wage_reduction-self.salary_const_up)
elif state==12: # opiskelee
wage_reduction=max(0,wage_reduction-self.salary_const_student)
elif state in set([0,4,13,11]): # työtön tai työelämän ulkopuolella, tuleeko skaalaus kahteen kertaan?
#wage_reduction=max(0,1.0-((1-self.salary_const)**self.timestep)*(1-wage_reduction))
wage_reduction=max(0,1.0-(1-self.salary_const)*(1-wage_reduction))
elif state in set([5,6]): # isyys tai vanhempainvapaa, ei vaikutusta
wage_reduction=wage_reduction
elif state in set([7,2]): # kotihoidontuki tai ve
#wage_reduction=max(0,1.0-((1-self.salary_const)**self.timestep)*(1-wage_reduction))
wage_reduction=max(0,1.0-(1-self.salary_const)*(1-wage_reduction))
elif state in set([3,14,15]): # ei muutosta
wage_reduction=wage_reduction
else: # ylivuoto, ei tiloja
wage_reduction=wage_reduction
return wage_reduction
def step(self, action, dynprog=False, debug=False):
'''
Open AI interfacen mukainen step-funktio, joka tekee askeleen eteenpäin
toiminnon action mukaan
Keskeinen funktio simuloinnissa
'''
assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
employment_status,g,pension,old_wage,age,time_in_state,paid_pension,pinkslip,toe,\
tyoura,out_of_work,used_unemp_benefit,wage_reduction,prefnoise\
=self.state_decode(self.state)
# simulointiin vaikuttavia ulkoisia tilamuuttujia, ei toteutettu
#tyoura,lapsia,lapsen1_ika,lapsen2_ika,lapsen3_ika,lapsia_paivakodissa=self.decode_info_state()
info={}
intage=int(np.floor(age))
moved=False
if self.randomness:
# kaikki satunnaisuus kerralla
sattuma = np.random.uniform(size=7)
# siirtymät
move_prob=self.disability_intensity[intage,g]+self.birth_intensity[intage,g]+self.student_inrate[intage,g]+self.outsider_inrate[intage,g]
if sattuma[0]<move_prob:
s1=self.disability_intensity[intage,g]
s2=s1+self.birth_intensity[intage,g]
s3=s2+self.student_inrate[intage,g]
#s4=s3+self.outsider_inrate[intage,g]
# tk-alkavuus, siisti kuntoon!
if sattuma[2]<s1/move_prob: # age<self.min_retirementage and
action=11 # disability
elif sattuma[2]<s2/move_prob:
if g>2: # naiset
employment_status,pension,wage,time_in_state,netto,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_motherleave(pension,old_wage,age,out_of_work,wage_reduction)
pinkslip=0
moved=True
else: # miehet
# ikä valittu äidin iän mukaan. oikeastaan tämä ei mene ihan oikein miehille
if sattuma[4]<0.5:
employment_status,pension,wage,time_in_state,netto,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_fatherleave(pension,old_wage,age,out_of_work,wage_reduction)
moved=True
elif sattuma[2]<s3/move_prob:
if employment_status not in set([2,3,8,9,11,12,14]): # and False:
employment_status,pension,wage,time_in_state,netto,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_student(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
moved=True
#elif sattuma[2]<s4/move_prob: # and False:
else:
if employment_status not in set([2,3,8,9,11,12,14]):
employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq=\
self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
moved=True
# voi aiheuttaa epästabiilisuutta
if sattuma[3]<self.mort_intensity[intage,g] and self.include_mort: # and False:
employment_status,pension,wage,time_in_state,netto=self.move_to_deceiced(pension,old_wage,age)
else:
# tn ei ole koskaan alle rajan, jos tämä on 1
sattuma = np.ones(7)
if employment_status==15: # deceiced
#time_in_state+=self.timestep
if not self.include_mort:
print('emp state 15')
wage=old_wage
nextwage=wage
toe=0
if self.mortstop:
done=True
else:
done = age >= self.max_age
done = bool(done)
self.state = self.state_encode(employment_status,g,pension,wage,age+self.timestep,
time_in_state,paid_pension,pinkslip,toe,tyoura,nextwage,out_of_work,
used_unemp_benefit,wage_reduction)
reward=0
return np.array(self.state), reward, done, {}
elif age>=self.max_retirementage:
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq\
=self.move_to_retirement(pension,0,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=True)
elif employment_status == 0:
time_in_state+=self.timestep
if age>=65:
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq\
=self.move_to_retirement(pension,0,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=True)
elif action == 0 or (action == 2 and age < self.min_retirementage):
employment_status = 0 # unchanged
wage=old_wage # self.get_wage(intage,time_in_state)
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
#toe=max(0.0,toe-self.timestep)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,used_unemp_benefit,age,tyohistoria=tyoura)
if pinkslip or time_in_state>=self.karenssi_kesto: # muuten ei oikeutta ansiopäivärahaan karenssi vuoksi
used_unemp_benefit+=self.timestep
out_of_work+=self.timestep
kesto=12*21.5*used_unemp_benefit
if ((tyoura>=self.tyohistoria_vaatimus500 and kesto>=self.ansiopvraha_kesto500 and age>=self.minage_500) \
or (tyoura>=self.tyohistoria_vaatimus and kesto>=self.ansiopvraha_kesto400 and (age<self.minage_500 or tyoura<self.tyohistoria_vaatimus500)) \
or (tyoura<self.tyohistoria_vaatimus and kesto>=self.ansiopvraha_kesto300)):
if self.include_putki and age>=self.min_tyottputki_ika and tyoura>=self.tyohistoria_tyottputki:
employment_status = 4 # siirto lisäpäiville
pension=self.pension_accrual(age,old_wage,pension,state=4)
else:
employment_status = 13 # siirto työmarkkinatuelle
pension=self.pension_accrual(age,old_wage,pension,state=13)
else:
pension=self.pension_accrual(age,old_wage,pension,state=0)
elif action == 1: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action==2:
if age >= self.min_retirementage: # ve
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,scale_acc=False)
#else:
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip=\
# self.move_to_outsider(pension,old_wage,age,toe,0,out_of_work,wage_reduction)
elif action == 3: # osatyö 50%
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
pinkslip=0
else:
print('error 17')
elif employment_status == 13: # työmarkkinatuki
time_in_state+=self.timestep
if age>=65:
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,0,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=True)
elif action == 0 or (action == 2 and age < self.min_retirementage):
employment_status = 13 # unchanged
wage=old_wage
#wage=self.get_wage(intage,wage_reduction)
toe=max(0.0,toe-self.timestep) # approksimaatio, oletus että työjakso korvautuu työttömyysjaksolla
pension=self.pension_accrual(age,wage,pension,state=13)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,used_unemp_benefit,age,tyohistoria=tyoura)
used_unemp_benefit+=self.timestep
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
out_of_work+=self.timestep
elif action == 1: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action==2:
if age >= self.min_retirementage: # ve
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,scale_acc=False)
#else:
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq=\
# self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
# #employment_status,paid_pension,pension,wage,time_in_state,netto,benq=\
# #self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status)
elif action == 3: # osatyö 50%
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 17')
elif employment_status == 4: # työttömyysputki
time_in_state+=self.timestep
#if age >= self.min_retirementage:
# employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
# self.move_to_retirement(pension,0,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=True)
if action == 0 or (action == 2 and age < self.min_retirementage):
employment_status = 4 # unchanged
wage=old_wage # self.get_wage(intage,time_in_state)
toe=max(0,toe-self.timestep)
pension=self.pension_accrual(age,old_wage,pension,state=4)
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,used_unemp_benefit,age,tyohistoria=tyoura)
out_of_work+=self.timestep
if pinkslip or time_in_state>=self.karenssi_kesto: # muuten ei oikeutta ansiopäivärahaan karenssi vuoksi
used_unemp_benefit+=self.timestep
elif action == 1: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action==2:
if age >= self.min_retirementage: # ve
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,scale_acc=False)
#else:
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq=\
# self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
#employment_status,paid_pension,pension,wage,time_in_state,netto,benq=\
# self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status)
pinkslip=0
elif action == 3: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 1: ',action)
elif employment_status == 1:
time_in_state+=self.timestep
out_of_work=0
if sattuma[1]<self.pinkslip_intensity[g]:
if age<self.min_retirementage:
pinkslip=1
action=1 # unemp
else:
pinkslip=0
action=2 # ve
else:
pinkslip=0
if action == 0 or (action == 2 and age < self.min_retirementage):
employment_status = 1 # unchanged
wage=self.get_wage(intage,wage_reduction)
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
toe=min(self.max_toe,toe+self.timestep)
if toe>=self.ansiopvraha_toe:
used_unemp_benefit=0
tyoura+=self.timestep
out_of_work=0
pension=self.pension_accrual(age,wage,pension,state=1)
netto,benq=self.comp_benefits(wage,0,0,employment_status,time_in_state,age)
elif action == 1: # työttömäksi
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action==2:
if age >= self.min_retirementage: # ve
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction)
#else: # työttömäksi
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq=\
# self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
#employment_status,paid_pension,pension,wage,time_in_state,netto=self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status)
#employment_status,pension,wage,time_in_state,netto,toe=self.move_to_unemp(pension,old_wage,age,toe,pinkslip)
elif action == 3: # osatyö 50%
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,0,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 12')
elif employment_status == 3: # tk, ei voi siirtyä ve:lle
time_in_state+=self.timestep
out_of_work+=self.timestep
if age >= self.min_retirementage:
employment_status = 3 # ve # miten kansaneläke menee?? takuueläke?
else:
employment_status = 3 # unchanged
toe=max(0,toe-self.timestep)
paid_pension=paid_pension*self.elakeindeksi
wage=old_wage
netto,benq=self.comp_benefits(0,0,paid_pension,employment_status,0,age)
elif employment_status == 2: # eläkkeellä, voi palata töihin
if age >= self.min_retirementage: # ve
time_in_state+=self.timestep
if age>=self.max_retirementage:
paid_pension += self.elinaikakerroin*pension
pension=0
if action == 0 or action == 3 or ((action == 1 or action == 2) and age>=self.max_retirementage):
employment_status = 2 # unchanged
paid_pension=paid_pension*self.elakeindeksi
pension=pension*self.palkkakerroin
out_of_work+=self.timestep
wage=self.get_wage(intage,out_of_work)
netto,benq=self.comp_benefits(0,0,paid_pension,employment_status,0,age)
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 1 and age<self.max_retirementage:
employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retwork(pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction)
elif action == 2 and age<self.max_retirementage:
employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retpartwork(pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction)
elif action == 11:
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retdisab(pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction)
else:
print('error 221, action {} age {}'.format(action,age))
else:
# työvoiman ulkopuolella
time_in_state+=self.timestep
out_of_work+=self.timestep
if action == 0:
employment_status = 2 # unchanged
wage=old_wage
toe=max(0,toe-self.timestep)
pension=pension*self.palkkakerroin
netto,benq=1 #self.comp_benefits(0,0,0,employment_status,time_in_state,age)
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 1: # työttömäksi
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,0,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 2: # töihin
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 3: # osatyö 50%
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action == 11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 12')
elif employment_status == 5: # äitiysvapaa
if not moved:
if time_in_state>self.aitiysvapaa_kesto:
pinkslip=0
if action == 0:
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 1: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 2: #
employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_kht(pension,old_wage,age,out_of_work,wage_reduction)
elif action == 3: # osa-aikatyöhön
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('Error 21')
else:
pension=self.pension_accrual(age,old_wage,pension,state=5)
wage=old_wage #self.get_wage(intage,time_in_state)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,0,age)
time_in_state+=self.timestep
out_of_work+=self.timestep
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif employment_status == 6: # isyysvapaa
if not moved:
if time_in_state>=self.isyysvapaa_kesto:
pinkslip=0
if action == 0 or action==2:
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 1: #
# ei vaikutusta palkkaan
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,0,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 2: #
employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_kht(pension,old_wage,age,out_of_work,wage_reduction)
elif action == 3: # osa-aikatöihin
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,0,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('Error 23')
else:
pension=self.pension_accrual(age,old_wage,pension,state=6)
wage=old_wage #self.get_wage(intage,time_in_state)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,0,age)
time_in_state+=self.timestep
out_of_work+=self.timestep
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif employment_status == 7: # kotihoidontuki
time_in_state+=self.timestep
if age >= self.min_retirementage: # ve
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction)
elif action == 0 and (time_in_state<=self.kht_kesto or self.perustulo): # jos perustulo, ei aikarajoitetta
#elif action == 0 and (time_in_state<=self.kht_kesto): # jos perustulo, ei aikarajoitetta
employment_status = 7 # stay
#wage=self.get_wage(intage,wage_reduction) # aiemmissa laskelmissa old_wage
wage=old_wage
toe=max(0,toe-self.timestep)
# if time_in_state>self.kht_kesto:
# toe=max(0,toe-self.timestep)
pension=self.pension_accrual(age,wage,pension,state=7)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,time_in_state,age)
out_of_work+=self.timestep
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 2: #
pinkslip=0
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 1: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 3: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
elif time_in_state>self.kht_kesto: #
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
else:
print('Error 25')
elif employment_status == 8: # töissä ja ve:llä
time_in_state+=self.timestep
# irtisanominen
if sattuma[1]<self.pinkslip_intensity[g]:
action=2 # ve:lle
if age>=self.max_retirementage:
paid_pension += self.elinaikakerroin*pension
pension=0
if action == 0 or action == 3: # jatkaa töissä, ei voi saada työttömyyspäivärahaa
employment_status = 8 # unchanged
wage=self.get_wage(intage,wage_reduction)
pension=self.pension_accrual(age,wage,pension,state=8)
paid_pension=paid_pension*self.elakeindeksi
netto,benq=self.comp_benefits(wage,0,paid_pension,employment_status,time_in_state,age)
out_of_work=0
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 1: # jatkaa osa-aikatöissä, ei voi saada työttömyyspäivärahaa
employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retpartwork(pension,old_wage,age,0,paid_pension,out_of_work,wage_reduction)
elif action==2: # eläkkeelle, eläkeaikana karttunutta eläkettä ei vielä maksuun
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=False)
elif action == 11:
# no more working, move to "disab" with no change in paid_pension
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retdisab(pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction)
else:
print('error 14, action {} age {}'.format(action,age))
elif employment_status == 9: # osatöissä ja ve:llä
time_in_state+=self.timestep
# irtisanominen
if sattuma[1]<self.pinkslip_intensity[g]:
if self.plotdebug:
print('pinkslip')
action=2 # ve:lle
if age>=self.max_retirementage:
paid_pension += self.elinaikakerroin*pension
pension=0
if action == 0 or action == 3: # jatkaa osa-aikatöissä, ei voi saada työttömyyspäivärahaa
employment_status = 9 # unchanged
wage=self.get_wage(intage,wage_reduction)
parttimewage=0.5*wage
pension=self.pension_accrual(age,parttimewage,pension,state=9)
paid_pension=paid_pension*self.elakeindeksi
netto,benq=self.comp_benefits(parttimewage,0,paid_pension,employment_status,time_in_state,age)
out_of_work=0
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action==1: # jatkaa täysin töissä, ei voi saada työttömyyspäivärahaa
employment_status,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retwork(pension,old_wage,age,0,paid_pension,out_of_work,wage_reduction)
elif action==2: # eläkkeelle, eläkeaikana karttunutta eläkettä ei vielä maksuun
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction,all_acc=False)
elif action == 11:
# no more working, move to "disab" with no change in paid_pension
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retdisab(pension,old_wage,age,time_in_state,paid_pension,out_of_work,wage_reduction)
else:
print('error 14, action {} age {}'.format(action,age))
elif employment_status == 10: # osatöissä, ei ve:llä
time_in_state+=self.timestep
# irtisanominen
if sattuma[1]<self.pinkslip_intensity[g]:
if age<self.min_retirementage:
action=1 # unemp
pinkslip=1
else:
action=2 # ve
pinkslip=0
else:
pinkslip=0
if action == 0 or (action == 2 and age < self.min_retirementage):
employment_status = 10 # unchanged
#if time_in_state>1:
# prev_unempl=0 # nollataan työttömyyden vaikutus palkkaan vuoden jälkeen
wage=self.get_wage(intage,wage_reduction)
parttimewage=0.5*wage
tyoura+=self.timestep
toe=min(self.max_toe,toe+self.timestep)
if toe>=self.ansiopvraha_toe:
used_unemp_benefit=0
pension=self.pension_accrual(age,parttimewage,pension,state=10)
netto,benq=self.comp_benefits(parttimewage,0,0,employment_status,time_in_state,age)
out_of_work=0
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 1: # työttömäksi
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action==2:
if age >= self.min_retirementage: # ve
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction)
#else:
# #employment_status,paid_pension,pension,wage,time_in_state,netto=self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status)
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,wage_reduction,pinkslip,benq=\
# self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
elif action==3:
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,0,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action==11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 12')
elif employment_status == 11: # työvoiman ulkopuolella, ei töissä, ei hae töitä
if not moved:
if age>=self.min_retirementage:
employment_status,paid_pension,pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_retirement(pension,old_wage,age,paid_pension,employment_status,out_of_work,wage_reduction)
elif sattuma[5]>=self.outsider_outrate[intage,g]:
time_in_state+=self.timestep
employment_status = 11 # unchanged
wage=old_wage
toe=max(0,toe-self.timestep)
pension=self.pension_accrual(age,wage,pension,state=11)
netto,benq=self.comp_benefits(0,old_wage,0,employment_status,time_in_state,age,tyohistoria=tyoura)
out_of_work+=self.timestep
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 1: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,time_in_state,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 2 or action == 0: #
pinkslip=0
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 3: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
elif action == 11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
pinkslip=0
else:
print('error 19: ',action)
elif employment_status == 12: # opiskelija
if not moved:
out_of_work=0 #self.timestep
pinkslip=0
tyoura=0
if sattuma[5]>=self.student_outrate[intage,g]:
employment_status = 12 # unchanged
time_in_state+=self.timestep
wage=old_wage
toe=max(0,toe-self.timestep)
pension=self.pension_accrual(age,0,pension,state=13)
netto,benq=self.comp_benefits(0,0,0,employment_status,time_in_state,age,tyohistoria=tyoura)
# opiskelu parantaa tuloja
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 0: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,0,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 1 or action == 3:
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 2:
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
#elif action == 3:
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq=\
# self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
elif action == 11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 29: ',action)
elif employment_status == 14: # armeijassa
if not moved:
out_of_work=0 #self.timestep
pinkslip=0
tyoura=0
toe=0
if sattuma[6]>=self.army_outrate[intage,g]: # vain ulos
employment_status = 14 # unchanged
time_in_state+=self.timestep
wage=old_wage
#toe=max(0,toe-self.timestep)
#pension=self.pension_accrual(age,0,pension,state=13)
netto,benq=self.comp_benefits(0,0,0,employment_status,time_in_state,age,tyohistoria=tyoura)
# opiskelu parantaa tuloja
wage_reduction=self.update_wage_reduction(employment_status,wage_reduction)
elif action == 0: #
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_work(pension,old_wage,age,0,toe,tyoura,out_of_work,pinkslip,wage_reduction)
elif action == 1 or action == 3:
employment_status,paid_pension,pension,wage,time_in_state,netto,toe,out_of_work,wage_reduction,used_unemp_benefit,pinkslip,benq=\
self.move_to_unemp(pension,old_wage,age,paid_pension,toe,pinkslip,out_of_work,tyoura,wage_reduction,used_unemp_benefit)
elif action == 2:
employment_status,pension,wage,time_in_state,netto,toe,tyoura,out_of_work,pinkslip,wage_reduction,benq=\
self.move_to_parttime(pension,old_wage,age,toe,tyoura,time_in_state,out_of_work,wage_reduction)
#elif action == 3:
# employment_status,paid_pension,pension,wage,time_in_state,toe,netto,out_of_work,wage_reduction,pinkslip,benq=\
# self.move_to_outsider(pension,old_wage,age,toe,pinkslip,out_of_work,wage_reduction)
elif action == 11: # tk
employment_status,pension,paid_pension,wage,time_in_state,netto,out_of_work,wage_reduction,benq=\
self.move_to_disab(pension,old_wage,age,out_of_work,wage_reduction)
else:
print('error 39: ',action)
else:
print('Unknown employment_status {s} of type {t}'.format(s=employment_status,t=type(employment_status)))
done = age >= self.max_age
done = bool(done)
if not done:
reward = self.log_utility(netto,int(employment_status),age,g=g,pinkslip=pinkslip)
elif self.steps_beyond_done is None:
self.steps_beyond_done = 0
paid_pension += self.elinaikakerroin*pension
pension=0
netto,benq=self.comp_benefits(0,old_wage,paid_pension,employment_status,time_in_state,age)
if employment_status in set([2,3,8,9]):
reward = self.npv[g]*self.log_utility(netto,employment_status,age,pinkslip=0)
# npv0 is undiscounted
benq=self.scale_q(self.npv0[g],benq)
else:
# giving up the pension
reward = 0.0 #-self.npv[g]*self.log_utility(netto,employment_status,age)
pinkslip=0
#time_in_state+=self.timestep
else:
#if not dynprog: # tätä mallia on vaikea ajaa dynaamisella ohjelmoinnilla
if self.steps_beyond_done == 0:
logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
# seuraava palkka tiedoksi valuaatioapproksimaattorille
next_wage=self.get_wage(int(np.floor(age+self.timestep)),wage_reduction)
if self.include_preferencenoise:
self.state = self.state_encode(employment_status,g,pension,wage,age+self.timestep,time_in_state,
paid_pension,pinkslip,toe,tyoura,next_wage,out_of_work,used_unemp_benefit,wage_reduction,
prefnoise=prefnoise)
else:
self.state = self.state_encode(employment_status,g,pension,wage,age+self.timestep,time_in_state,
paid_pension,pinkslip,toe,tyoura,next_wage,out_of_work,used_unemp_benefit,wage_reduction)
if self.plotdebug:
self.render(done=done,reward=reward, netto=netto)
benq['eq']=0.0
return np.array(self.state), reward, done, benq
def scale_q(self, npv, benq):
    """Scale the monetary components of a benefit dict in place by npv.

    npv is the (undiscounted) number-of-years factor used to aggregate
    per-year benefit quantities at the end of an episode. The dict is
    mutated and also returned for convenience; non-monetary entries
    (e.g. 'eq') are left untouched.
    """
    monetary_keys = (
        'verot', 'etuustulo_brutto', 'valtionvero', 'kunnallisvero',
        'asumistuki', 'elake_maksussa', 'kokoelake', 'perustulo',
        'palkkatulot', 'kateen',
    )
    for key in monetary_keys:
        benq[key] *= npv
    return benq
# WITH RANDOMNESS
def log_utility_randomness(self, income, employment_state, age, g=0, pinkslip=0, prefnoise=0):
    '''
    Log-utility function adapted from Määttänen, 2013 & Hakola & Määttänen, 2005.
    Used when the simulation includes randomness.

    income is at a yearly level so that changing the timestep does not
    affect the free-time constants. Returns utility scaled by 1/10.
    '''
    # kappa tells how much the person values free time
    if g < 3:  # men
        kappa_kokoaika = 0.625
        mu_scale = 0.14  # how much the work penalty grows with age past mu_age
        mu_age = 60
        kappa_osaaika = 0.57 * kappa_kokoaika
    else:  # women
        kappa_kokoaika = 0.570
        mu_scale = 0.25
        mu_age = 61.5
        kappa_osaaika = 0.435 * kappa_kokoaika
    if self.include_preferencenoise:
        kappa_kokoaika += prefnoise
    kappa_ve = 0.15  # retirement
    if pinkslip > 0:  # laid off: no penalty
        kappa_pinkslip = 0
    else:  # quit voluntarily: utility penalty
        kappa_pinkslip = -0.2
    if age > mu_age:
        # working becomes increasingly costly past mu_age
        kappa_kokoaika *= (1 + mu_scale * max(0, age - mu_age))
        kappa_osaaika *= (1 + mu_scale * max(0, age - mu_age))
    if employment_state in set([1, 8]):
        kappa = -kappa_kokoaika
    elif employment_state in set([9, 10]):
        kappa = -kappa_osaaika
    elif employment_state in set([0, 4, 13]):
        kappa = kappa_pinkslip
    elif employment_state == 2:
        kappa = kappa_ve
    elif employment_state == 11:
        kappa = 0  # kappa_outsider
    elif employment_state == 12:
        kappa = 0  # kappa_opiskelija
    else:  # states 3, 5, 6, 7, 14, 15
        kappa = 0
    # utility / score
    u = np.log(income) + kappa
    # BUGFIX: the original check was 'u is np.inf', an object-identity
    # comparison that is never true (np.log returns np.float64, and a tiny
    # income yields -inf, not +inf). Use a value-based finiteness check so
    # degenerate utilities are actually reported.
    if not np.isfinite(u):
        print('inf: state ', employment_state)
    if income < 1:
        print('inf: state ', employment_state)
    return u / 10  # income is yearly; scale the utility back down
# From Määttänen, 2013
def wage_process(self, w, age, ave=3300*12):
    '''
    Wage process from Määttänen, 2013.

    AR(1) process in log-wages around the level 'ave', with a single
    normal shock per call. A non-positive current wage w resets the
    process to the mean level times the shock.
    '''
    shock = np.random.normal(loc=0, scale=0.02, size=1)[0]
    level = ave
    persistence = 0.89
    if w > 0:
        return level * np.exp(persistence * np.log(w / level) + shock)
    return level * np.exp(shock)
def wage_process_simple(self, w, age, ave=3300*12):
    '''
    Debug version of the wage process: the wage is carried over unchanged,
    with no shock and no drift toward 'ave'.
    '''
    return w
def compute_salary(self, group=1, debug=True):
    '''
    Run at start-up: generates the salary path for one whole episode.

    The starting salary is drawn from a normal distribution around the
    group mean (floored at self.min_salary). With debug=True the salary
    stays flat for the entire career; otherwise it evolves year by year
    through wage_process.
    '''
    group_means = 12 * np.array([2000, 3300, 5000, 0.85*2000, 0.85*3300, 0.85*5000])
    mean_wage = group_means[group]
    draw = np.random.normal(loc=mean_wage, scale=12*1000, size=1)[0]
    self.salary[self.min_age] = np.maximum(self.min_salary, draw)  # e/y
    if debug:
        # flat career: copy the starting salary across all ages
        self.salary[self.min_age+1:self.max_age+1] = self.salary[self.min_age]
    else:
        for age in range(self.min_age + 1, self.max_age + 1):
            self.salary[age] = self.wage_process(self.salary[age-1], age, ave=mean_wage)
def get_wage(self, age, reduction, pinkslip=0):
    '''
    Wage for a person of the given age, with the re-employment
    'reduction' factor applied (reduction >= 1 zeroes the wage).
    Ages outside [min_age-1, max_age) earn nothing.
    The pinkslip argument is accepted but unused here.
    '''
    if not (self.min_age - 1 <= age < self.max_age):
        return 0
    factor = max(0, 1 - reduction)
    return self.salary[int(np.floor(age))] * factor
# wage process reparametrized
def wage_process_TK(self, w, age, a0=3300*12, a1=3300*12, g=1):
    '''
    Wage process adapted from Määttänen, 2013.

    a0 is the reference level of the previous wage w and a1 the reference
    level for the new age, so the relative position w/a0 carries over to
    the new wage. The -0.5*sigma^2 term keeps the mean unbiased under
    the log-normal shock.
    '''
    group_sigmas = [0.05, 0.05, 0.05]
    sigma = group_sigmas[g]
    shock = np.random.normal(loc=0, scale=sigma, size=1)[0]
    rho = 0.89
    if w > 0:
        # keep mean/a1 equal to w/a0
        new_wage = a1 * np.exp(rho * np.log(w / a0) + shock - 0.5 * sigma * sigma)
    else:
        new_wage = a1 * np.exp(shock)
    # full-time yearly income is at least self.min_salary
    return np.maximum(self.min_salary, new_wage)
def compute_salary_TK(self,group=1,debug=False):
    '''
    Run at start-up: generates the wage path for one whole episode.

    Wage levels come from Finnish Centre for Pensions (TK) data for 2018,
    ages 20-70, with three relative income groups per gender (groups 0-2
    men, 3-5 women). With debug=True the wage is flat over time and all
    randomness is drawn at initialization; otherwise the wage follows the
    age profile with yearly shocks from wage_process_TK.
    '''
    # TK data from 2018, mean monthly earnings by age (20-70);
    # the 12.5 factor converts monthly pay to yearly earnings
    palkat_ika_miehet=12.5*np.array([2339.01,2489.09,2571.40,2632.58,2718.03,2774.21,2884.89,2987.55,3072.40,3198.48,3283.81,3336.51,3437.30,3483.45,3576.67,3623.00,3731.27,3809.58,3853.66,3995.90,4006.16,4028.60,4104.72,4181.51,4134.13,4157.54,4217.15,4165.21,4141.23,4172.14,4121.26,4127.43,4134.00,4093.10,4065.53,4063.17,4085.31,4071.25,4026.50,4031.17,4047.32,4026.96,4028.39,4163.14,4266.42,4488.40,4201.40,4252.15,4443.96,3316.92,3536.03,3536.03])
    palkat_ika_naiset=12.5*np.array([2223.96,2257.10,2284.57,2365.57,2443.64,2548.35,2648.06,2712.89,2768.83,2831.99,2896.76,2946.37,2963.84,2993.79,3040.83,3090.43,3142.91,3159.91,3226.95,3272.29,3270.97,3297.32,3333.42,3362.99,3381.84,3342.78,3345.25,3360.21,3324.67,3322.28,3326.72,3326.06,3314.82,3303.73,3302.65,3246.03,3244.65,3248.04,3223.94,3211.96,3167.00,3156.29,3175.23,3228.67,3388.39,3457.17,3400.23,3293.52,2967.68,2702.05,2528.84,2528.84])
    # relative earnings level of the three income groups (low, mid, high)
    g_r=[0.77,1.0,1.23]
    if debug: # flat wages, no change in time, all randomness at initialization
        a0=3465.0*12.5 # overall mean in the TK data
        self.salary[self.min_age]=np.maximum(self.min_salary,np.random.normal(loc=a0,scale=12*1000,size=1)[0]) # e/y
        self.salary[self.min_age+1:self.max_age+1]=self.salary[self.min_age]
    else: # randomness and time-development included
        if group>2: # women (groups 3-5)
            r=g_r[group-3]
            a0=palkat_ika_naiset[0]*r
            # initial spread: std is 1/5 of the group's starting mean
            a1=palkat_ika_naiset[0]*r/5
            self.salary[self.min_age]=np.maximum(self.min_salary,np.random.normal(loc=a0,scale=a1,size=1)[0]) # e/y
            for age in range(self.min_age+1,self.max_age+1):
                # reference levels for the previous and current age from the profile
                a0=palkat_ika_naiset[age-1-self.min_age]*r
                a1=palkat_ika_naiset[age-self.min_age]*r
                self.salary[age]=self.wage_process_TK(self.salary[age-1],age,a0,a1)
        else: # men (groups 0-2)
            r=g_r[group]
            a0=palkat_ika_miehet[0]*r
            # initial spread: std is 1/5 of the group's starting mean
            a1=palkat_ika_miehet[0]*r/5
            self.salary[self.min_age]=np.maximum(self.min_salary,np.random.normal(loc=a0,scale=a1,size=1)[0]) # e/y
            for age in range(self.min_age+1,self.max_age+1):
                # reference levels for the previous and current age from the profile
                a0=palkat_ika_miehet[age-1-self.min_age]*r
                a1=palkat_ika_miehet[age-self.min_age]*r
                self.salary[age]=self.wage_process_TK(self.salary[age-1],age,a0,a1)
def state_encode_mort(self, emp, g, pension, old_wage, age, time_in_state, paid_pension, pink,
                      toe, tyohist, next_wage, out_of_work, used_unemp_benefit, wage_reduction,
                      prefnoise=0):
    '''
    Encode the state for the neural network: continuous values are scaled
    and employment state / group are one-hot encoded.
    Used when mortality is included (16 employment states).

    Returns a 1-D float array of length n_empl + n_groups + 13
    (+1 when preference noise is included).
    '''
    if self.include_preferencenoise:
        d = np.zeros(self.n_empl + self.n_groups + 14)
    else:
        d = np.zeros(self.n_empl + self.n_groups + 13)
    states = self.n_empl
    # One-hot employment state via direct indexing. This replaces the
    # original 16-branch if/elif chain of hard-coded arrays and also
    # generalizes to any n_empl; invalid values are reported as before.
    if 0 <= emp < states:
        d[emp] = 1
    else:
        print('state_encode error ' + str(emp))
    states2 = states + self.n_groups
    # one-hot group, same simplification as above
    if 0 <= g < self.n_groups:
        d[states + g] = 1
    else:
        print('state_encode g-error ' + str(g))
    if self.log_transform:
        d[states2] = np.log(pension/20_000 + self.eps)          # future pension
        d[states2+1] = np.log(old_wage/40_000 + self.eps)
        d[states2+4] = np.log(paid_pension/20_000 + self.eps)   # pension in payment
        d[states2+10] = np.log(next_wage/40_000 + self.eps)
    else:
        d[states2] = (pension - 20_000) / 10_000                # future pension
        d[states2+1] = (old_wage - 40_000) / 15_000
        d[states2+4] = (paid_pension - 20_000) / 10_000         # pension in payment
        d[states2+10] = (next_wage - 40_000) / 15_000
    # indicator: work history exceeds the 300/400-day requirement
    hist400 = 1 if tyohist > self.tyohistoria_vaatimus else 0
    d[states2+2] = (age - (self.max_age + self.min_age)/2) / 20
    d[states2+3] = (time_in_state - 3) / 10
    d[states2+5] = pink                 # laid off or not
    d[states2+6] = toe - 14/12          # work requirement (työssäoloehto)
    d[states2+7] = (tyohist - 3) / 10   # work history: 300/400 days
    d[states2+8] = hist400
    # indicator: has reached the minimum retirement age
    retaged = 1 if age >= self.min_retirementage else 0
    d[states2+9] = retaged
    d[states2+11] = used_unemp_benefit
    d[states2+12] = wage_reduction
    if self.include_preferencenoise:
        d[states2+13] = prefnoise
    return d
def state_encode_nomort(self, emp, g, pension, old_wage, age, time_in_state, paid_pension, pink,
                        toe, tyohist, next_wage, out_of_work, used_unemp_benefit, wage_reduction,
                        prefnoise=0):
    '''
    Encode the state for the neural network: continuous values are scaled
    and employment state / group are one-hot encoded.
    Used when mortality is NOT included (15 employment states;
    state 15, death, is invalid here).

    Returns a 1-D float array of length n_empl + n_groups + 13
    (+1 when preference noise is included).
    '''
    if self.include_preferencenoise:
        d = np.zeros(self.n_empl + self.n_groups + 14)
    else:
        d = np.zeros(self.n_empl + self.n_groups + 13)
    states = self.n_empl
    # One-hot employment state via direct indexing. This replaces the
    # original 15-branch if/elif chain of hard-coded arrays and also
    # generalizes to any n_empl; invalid values are reported as before.
    if 0 <= emp < states:
        d[emp] = 1
    elif emp == 15:
        print('no state 15 in state_encode_nomort')
    else:
        print('state_encode error ' + str(emp))
    states2 = states + self.n_groups
    # one-hot group, same simplification as above
    if 0 <= g < self.n_groups:
        d[states + g] = 1
    else:
        print('state_encode g-error ' + str(g))
    if self.log_transform:
        d[states2] = np.log(pension/20_000 + self.eps)          # future pension
        d[states2+1] = np.log(old_wage/40_000 + self.eps)
        d[states2+4] = np.log(paid_pension/20_000 + self.eps)   # pension in payment
    else:
        d[states2] = (pension - 20_000) / 10_000                # future pension
        d[states2+1] = (old_wage - 40_000) / 15_000
        d[states2+4] = (paid_pension - 20_000) / 10_000         # pension in payment
    d[states2+2] = (age - (self.max_age + self.min_age)/2) / 20
    d[states2+3] = (time_in_state - 3) / 10
    # indicator: has reached the minimum retirement age
    retaged = 1 if age >= self.min_retirementage else 0
    d[states2+5] = pink                 # laid off or not
    d[states2+6] = toe - 14/12          # work requirement (työssäoloehto)
    d[states2+7] = (tyohist - 3) / 10   # work history: 300/400 days
    # indicator: work history exceeds the 300/400-day requirement
    hist400 = 1 if tyohist > self.tyohistoria_vaatimus else 0
    d[states2+8] = hist400
    d[states2+9] = retaged
    # NOTE(review): unlike state_encode_mort, next_wage is linearly scaled
    # even when log_transform is True — preserved as-is; confirm whether a
    # log transform was intended before changing.
    d[states2+10] = (next_wage - 40_000) / 15_000
    d[states2+11] = used_unemp_benefit
    d[states2+12] = wage_reduction
    if self.include_preferencenoise:
        d[states2+13] = prefnoise
    return d
def state_decode(self,vec):
'''
Tilan dekoodaus laskentaa varten
Käytetään, jos aina
'''
emp=-1
for k in range(self.n_empl):
if vec[k]>0:
emp=k
break
if emp<0:
print('state error '+str(vec))
g=-1
pos=self.n_empl+self.n_groups
for k in range(self.n_empl,pos):
if vec[k]>0:
g=k-self.n_empl
break
if g<0:
print('state error '+str(vec))
if self.log_transform:
pension=(np.exp(vec[pos])-self.eps)*20_000
wage=(np.exp(vec[pos+1])-self.eps)*40_000
paid_pension=(np.exp(vec[pos+4])-self.eps)*20_000
else:
pension=vec[pos]*10_000+20_000
wage=vec[pos+1]*15_000+40_000
paid_pension=vec[pos+4]*10_000+20_000
age=vec[pos+2]*20+(self.max_age+self.min_age)/2
time_in_state=vec[pos+3]*10+3
#if self.include300:
pink=vec[pos+5] # irtisanottu vai ei
toe=vec[pos+6]+14/12 # työssäoloehto, kesto
tyohist=vec[pos+7]*10+3 # työhistoria
#out_of_work=vec[pos+11]*10+3 # kesto poissa työelämästä
out_of_work=0 # ei tarvita
used_unemp_benefit=vec[pos+11] # käytetty työttömyyspäivärahapäivien määrä
wage_reduction=vec[pos+12] # käytetty työttömyyspäivärahapäivien määrä
if self.include_preferencenoise:
prefnoise=vec[pos+13]
else:
prefnoise=0
return int(emp),int(g),pension,wage,age,time_in_state,paid_pension,int(pink),toe,\
tyohist,out_of_work,used_unemp_benefit,wage_reduction,prefnoise
    def reset(self,init=None):
        '''
        OpenAI-gym-style reset function that returns the computation to its
        initial state.

        Draws a random demographic group and initial employment state,
        generates the salary path for the whole career, and encodes
        everything into the observation vector returned to the caller.
        The `init` argument is accepted but not used here.

        NOTE(review): the exact sequence of random draws below is part of
        the reproducibility contract — do not reorder them.
        '''
        age=int(self.min_age)
        pension=0
        time_in_state=0
        pink=0
        toe=0
        tyohist=0
        # set up salary for the entire career
        g=random.choices(np.array([0,1,2],dtype=int),weights=[0.3,0.5,0.2])[0]
        gender=random.choices(np.array([0,1],dtype=int),weights=[0.5,0.5])[0]
        group=int(g+gender*3)
        self.compute_salary_TK(group=group)
        old_wage=self.salary[self.min_age]
        next_wage=old_wage # timestep < 1.0 year, hence ok
        out_of_w=0
        used_unemp_benefit=0
        wage_reduction=0
        # initial employment-state distribution differs by gender;
        # weights presumably come from observed population shares — TODO confirm source
        if gender==0: # men
            employment_state=random.choices(np.array([13,0,1,10,3,11,12,14],dtype=int),weights=[0.133*3/5,0.133*2/5,0.68*0.374,0.32*0.374,0.014412417,0.151,0.240,0.089])[0]
        else: # women
            employment_state=random.choices(np.array([13,0,1,10,3,11,12,14],dtype=int),weights=[0.073*3/5,0.073*2/5,0.44*0.550,0.56*0.550,0.0121151,0.077,0.283,0.00362])[0]
        # adjust history/requirement fields for the drawn initial state
        if employment_state==0:
            tyohist=1.0
            toe=1.0
            wage_reduction=0.05
        elif employment_state==13:
            tyohist=0.0
            toe=0.0
            wage_reduction=0.05
        elif employment_state==11:
            tyohist=0.0
            toe=0.0
            wage_reduction=0.10
        #elif employment_state==12:
        #    wage_reduction=0.25
        # does the initial state need other fields recomputed? probably not
        if self.plotdebug:
            print('emp {} gender {} g {} old_wage {} next_wage {}'.format(employment_state,gender,g,old_wage,next_wage))
        if self.include_preferencenoise:
            # per-agent preference noise, N(0, 0.1)
            prefnoise=np.random.normal(loc=0,scale=0.1,size=1)[0]
            self.state = self.state_encode(employment_state,group,pension,old_wage,self.min_age,
                            time_in_state,0,pink,toe,tyohist,next_wage,out_of_w,
                            used_unemp_benefit,wage_reduction,prefnoise=prefnoise)
        else:
            self.state = self.state_encode(employment_state,group,pension,old_wage,self.min_age,
                            time_in_state,0,pink,toe,tyohist,next_wage,out_of_w,
                            used_unemp_benefit,wage_reduction)
        self.steps_beyond_done = None
        return np.array(self.state)
def render(self, mode='human', close=False, done=False, reward=None, netto=None):
'''
Tulostus-rutiini
'''
emp,g,pension,wage,age,time_in_state,paid_pension,pink,toe,tyohist,out_of_work,used_unemp_benefit,wage_red,prefnoise=self.state_decode(self.state)
if reward is None:
print('Tila {} ryhmä {} palkka {:.2f} ikä {} t-i-s {} tul.eläke {:.2f} alk.eläke {:.2f} irtisanottu {} toe {:.2f} työhist {:.2f} o-o-w {:.2f} ueb {:.2f} wr {}'.format(\
emp,g,wage,age,time_in_state,pension,paid_pension,pink,toe,tyohist,out_of_work,used_unemp_benefit,wage_red))
elif netto is None:
print('Tila {} ryhmä {} palkka {:.2f} ikä {} t-i-s {} tul.eläke {:.2f} alk.eläke {:.2f} irtisanottu {} toe {:.2f} työhist {:.2f} o-o-w {:.2f} ueb {:.2f} wr {} r {:.4f}'.format(\
emp,g,wage,age,time_in_state,pension,paid_pension,pink,toe,tyohist,out_of_work,used_unemp_benefit,wage_red,reward))
else:
print('Tila {} ryhmä {} palkka {:.2f} ikä {} t-i-s {} tul.eläke {:.2f} alk.eläke {:.2f} irtisanottu {} toe {:.2f} työhist {:.2f} o-o-w {:.2f} ueb {:.2f} wr {} r {:.4f} n {:.2f}'.format(\
emp,g,wage,age,time_in_state,pension,paid_pension,pink,toe,tyohist,out_of_work,used_unemp_benefit,wage_red,reward,netto))
if done:
print('-------------------------------------------------------------------------------------------------------')
def close(self):
'''
Ei käytössä
'''
if self.viewer:
self.viewer.close()
self.viewer = None
def set_state_limits(self,debug=True):
'''
Rajat tiloille
'''
if self.log_transform:
pension_min=np.log(0/20_000+self.eps) # vastainen eläke
pension_max=np.log(200_000/20_000+self.eps) # vastainen eläke
wage_max=np.log(500_000/40_000+self.eps)
wage_min=np.log(0/40_000+self.eps)
paid_pension_max=np.log(200_00/20_000+self.eps) # alkanut eläke
paid_pension_min=np.log(0/20_000+self.eps) # alkanut eläke
else:
pension_max=(200_000-20_000)/10_000 # vastainen eläke
pension_min=(0-20_000)/10_000 # vastainen eläke
wage_max=(500_000-40_000)/15_000
wage_min=(0-40_000)/15_000
paid_pension_min=(0-20_000)/10_000 # alkanut eläke
paid_pension_max=(200_000-20_000)/10_000 # alkanut eläke
age_max=(self.max_age-(self.max_age+self.min_age)/2)/20
age_min=(self.min_age-(self.max_age+self.min_age)/2)/20
tis_max=(self.max_age-self.min_age-3)/10
tis_min=-3/10
pink_min=0 # irtisanottu vai ei
pink_max=1 # irtisanottu vai ei
toe_min=0-self.max_toe*0.5 # työssäoloehto
toe_max=self.max_toe-self.max_toe*0.5 # työssäoloehto
thist_min=-3/10 # tyohistoria: 300/400 pv
thist_max=(self.max_age-self.min_age-3)/10 # tyohistoria: 300/400 pv
out_max=100
out_min=0
group_min=0
group_max=1
state_min=0
state_max=1
ben_min=0
ben_max=3
wr_min=0
wr_max=1
pref_min=-5
pref_max=5
# korjaa
low = [
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
state_min,
group_min,
group_min,
group_min,
group_min,
group_min,
group_min,
pension_min,
wage_min,
age_min,
tis_min,
paid_pension_min,
pink_min,
toe_min,
thist_min,
state_min,
state_min,
wage_min,
#out_min,
ben_min,
wr_min]
high = [
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
state_max,
group_max,
group_max,
group_max,
group_max,
group_max,
group_max,
pension_max,
wage_max,
age_max,
tis_max,
paid_pension_max,
pink_max,
toe_max,
thist_max,
state_max,
state_max,
wage_max,
#out_max,
ben_max,
wr_max]
if self.include_mort: # if mortality is included, add one more state
low.prepend(state_min)
high.prepend(state_max)
if self.include_preferencenoise:
low.append(pref_min)
high.append(pref_max)
self.low=np.array(low)
self.high=np.array(high)
def explain(self):
'''
Tulosta laskennan parametrit
'''
print('Parameters of lifecycle:\ntimestep {}\ngamma {} ({} per anno)\nmin_age {}\nmax_age {}\nmin_retirementage {}'.format(self.timestep,self.gamma,self.gamma**(1.0/self.timestep),self.min_age,self.max_age,self.min_retirementage))
print('max_retirementage {}\nansiopvraha_kesto300 {}\nansiopvraha_kesto400 {}\nansiopvraha_toe {}'.format(self.max_retirementage,self.ansiopvraha_kesto300,self.ansiopvraha_kesto400,self.ansiopvraha_toe))
print('perustulo {}\nkarenssi_kesto {}\nmortality {}\nrandomness {}'.format(self.perustulo,self.karenssi_kesto,self.include_mort,self.randomness))
print('include_putki {}\ninclude_pinkslip {}\n'.format(self.include_putki,self.include_pinkslip))
def unempright_left(self,emp,tis,bu,ika,tyohistoria,oof):
'''
Tilastointia varten lasketaan jäljellä olevat ansiosidonnaiset työttömyysturvapäivät
'''
if ika>=self.minage_500 and tyohistoria>=self.tyohistoria_vaatimus500:
kesto=self.ansiopvraha_kesto500
elif tyohistoria>=self.tyohistoria_vaatimus:
kesto=self.ansiopvraha_kesto400
else:
kesto=self.ansiopvraha_kesto300
kesto=kesto/(12*21.5)
#if irtisanottu<1 and time_in_state<self.karenssi_kesto: # karenssi, jos ei irtisanottu
if emp==13:
return oof
else:
return kesto-bu
| [
"numpy.random.uniform",
"numpy.random.seed",
"numpy.maximum",
"fin_benefits.BasicIncomeBenefits",
"numpy.log",
"gym.logger.warn",
"numpy.floor",
"gym.spaces.Discrete",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"gym.spaces.Box",
"fin_benefits.Benefits",
"numpy.arange",
"numpy.random.nor... | [((8933, 8959), 'numpy.zeros', 'np.zeros', (['(self.max_age + 1)'], {}), '(self.max_age + 1)\n', (8941, 8959), True, 'import numpy as np\n'), ((11745, 11763), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (11760, 11763), False, 'from gym import spaces, logger, utils, error\n'), ((11797, 11846), 'gym.spaces.Box', 'spaces.Box', (['self.low', 'self.high'], {'dtype': 'np.float32'}), '(self.low, self.high, dtype=np.float32)\n', (11807, 11846), False, 'from gym import spaces, logger, utils, error\n'), ((13098, 13121), 'numpy.zeros', 'np.zeros', (['self.n_groups'], {}), '(self.n_groups)\n', (13106, 13121), True, 'import numpy as np\n'), ((13135, 13158), 'numpy.zeros', 'np.zeros', (['self.n_groups'], {}), '(self.n_groups)\n', (13143, 13158), True, 'import numpy as np\n'), ((21221, 21244), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (21238, 21244), False, 'from gym.utils import seeding\n'), ((21380, 21400), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (21394, 21400), True, 'import numpy as np\n'), ((21550, 21580), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (21558, 21580), True, 'import numpy as np\n'), ((23284, 23314), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (23292, 23314), True, 'import numpy as np\n'), ((23332, 23804), 'numpy.array', 'np.array', (['[0.15202, 0.09165, 0.08517, 0.07565, 0.05787, 0.04162, 0.03061, 0.02336, \n 0.01803, 0.01439, 0.03214, 0.02674, 0.02122, 0.02005, 0.01776, 0.0161, \n 0.0149, 0.01433, 0.01307, 0.01175, 0.01081, 0.01069, 0.00921, 0.00832, \n 0.00808, 0.00783, 0.00738, 0.00727, 0.00712, 0.00621, 0.00578, 0.0054, \n 0.00505, 0.00411, 0.00434, 0.00392, 0.00415, 0.00362, 0.00279, 0.00232,\n 0.00184, 0.00196, 0.00126, 0.00239, 0.00402, 0.00587, 0.00587, 0.00754,\n 0, 0]'], {}), '([0.15202, 0.09165, 0.08517, 0.07565, 0.05787, 0.04162, 0.03061, \n 0.02336, 
0.01803, 0.01439, 0.03214, 0.02674, 0.02122, 0.02005, 0.01776,\n 0.0161, 0.0149, 0.01433, 0.01307, 0.01175, 0.01081, 0.01069, 0.00921, \n 0.00832, 0.00808, 0.00783, 0.00738, 0.00727, 0.00712, 0.00621, 0.00578,\n 0.0054, 0.00505, 0.00411, 0.00434, 0.00392, 0.00415, 0.00362, 0.00279, \n 0.00232, 0.00184, 0.00196, 0.00126, 0.00239, 0.00402, 0.00587, 0.00587,\n 0.00754, 0, 0])\n', (23340, 23804), True, 'import numpy as np\n'), ((23800, 24271), 'numpy.array', 'np.array', (['[0.12538, 0.09262, 0.08467, 0.06923, 0.05144, 0.03959, 0.03101, 0.0243, \n 0.02103, 0.01834, 0.03984, 0.03576, 0.033, 0.03115, 0.02934, 0.02777, \n 0.02454, 0.02261, 0.02127, 0.01865, 0.01711, 0.01631, 0.01496, 0.01325,\n 0.01251, 0.01158, 0.01148, 0.01034, 0.00935, 0.00911, 0.00848, 0.00674,\n 0.00636, 0.00642, 0.00605, 0.00517, 0.00501, 0.00392, 0.0033, 0.00291, \n 0.00202, 0.00155, 0.00118, 0.00193, 0.00376, 0.00567, 0.00779, 0.00746,\n 0, 0]'], {}), '([0.12538, 0.09262, 0.08467, 0.06923, 0.05144, 0.03959, 0.03101, \n 0.0243, 0.02103, 0.01834, 0.03984, 0.03576, 0.033, 0.03115, 0.02934, \n 0.02777, 0.02454, 0.02261, 0.02127, 0.01865, 0.01711, 0.01631, 0.01496,\n 0.01325, 0.01251, 0.01158, 0.01148, 0.01034, 0.00935, 0.00911, 0.00848,\n 0.00674, 0.00636, 0.00642, 0.00605, 0.00517, 0.00501, 0.00392, 0.0033, \n 0.00291, 0.00202, 0.00155, 0.00118, 0.00193, 0.00376, 0.00567, 0.00779,\n 0.00746, 0, 0])\n', (23808, 24271), True, 'import numpy as np\n'), ((24476, 24506), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (24484, 24506), True, 'import numpy as np\n'), ((24526, 25002), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.27503, 0.38096, 0.43268, 0.42941, 0.41466, 0.40854, 0.38759, \n 0.30057, 0.66059, 0.69549, 0.55428, 0.61274, 0.58602, 0.57329, 0.53688,\n 0.58737, 0.59576, 0.5819, 0.50682, 0.63749, 0.59542, 0.53201, 0.53429, \n 0.55827, 0.51792, 0.52038, 0.63078, 0.57287, 0.57201, 0.56673, 0.6929, \n 0.44986, 0.60497, 0.4589, 0.64129, 0.73762, 0.68664, 0.73908, 
0.47708, \n 0.92437, 0.27979, 0.54998, 0.60635, 0.72281, 0.45596, 0.4812, 0.41834, \n 0.55567]'], {}), '([0.2, 0.2, 0.27503, 0.38096, 0.43268, 0.42941, 0.41466, 0.40854, \n 0.38759, 0.30057, 0.66059, 0.69549, 0.55428, 0.61274, 0.58602, 0.57329,\n 0.53688, 0.58737, 0.59576, 0.5819, 0.50682, 0.63749, 0.59542, 0.53201, \n 0.53429, 0.55827, 0.51792, 0.52038, 0.63078, 0.57287, 0.57201, 0.56673,\n 0.6929, 0.44986, 0.60497, 0.4589, 0.64129, 0.73762, 0.68664, 0.73908, \n 0.47708, 0.92437, 0.27979, 0.54998, 0.60635, 0.72281, 0.45596, 0.4812, \n 0.41834, 0.55567])\n', (24534, 25002), True, 'import numpy as np\n'), ((24958, 25648), 'numpy.array', 'np.array', (['[0.2, 0.226044511, 0.34859165, 0.404346193, 0.378947854, 0.379027678, \n 0.393658729, 0.312799282, 0.312126148, 0.325150199, 0.5946454, \n 0.564144808, 0.555376244, 0.556615568, 0.545757439, 0.61520002, \n 0.577306728, 0.558805476, 0.618014582, 0.584596312, 0.542579298, \n 0.581755996, 0.612559266, 0.559683811, 0.577041852, 0.51024909, \n 0.602288269, 0.594473782, 0.529303275, 0.573062208, 0.709297989, \n 0.559692954, 0.499632245, 0.560546551, 0.654820741, 0.547514252, \n 0.728319756, 0.668454496, 0.637200351, 0.832907039, 0.763936815, \n 0.823014939, 0.439925972, 0.400593267, 0.57729364, 0.432838681, \n 0.720728303, 0.45569566, 0.756655823, 0.210470698]'], {}), '([0.2, 0.226044511, 0.34859165, 0.404346193, 0.378947854, \n 0.379027678, 0.393658729, 0.312799282, 0.312126148, 0.325150199, \n 0.5946454, 0.564144808, 0.555376244, 0.556615568, 0.545757439, \n 0.61520002, 0.577306728, 0.558805476, 0.618014582, 0.584596312, \n 0.542579298, 0.581755996, 0.612559266, 0.559683811, 0.577041852, \n 0.51024909, 0.602288269, 0.594473782, 0.529303275, 0.573062208, \n 0.709297989, 0.559692954, 0.499632245, 0.560546551, 0.654820741, \n 0.547514252, 0.728319756, 0.668454496, 0.637200351, 0.832907039, \n 0.763936815, 0.823014939, 0.439925972, 0.400593267, 0.57729364, \n 0.432838681, 0.720728303, 0.45569566, 0.756655823, 0.210470698])\n', 
(24966, 25648), True, 'import numpy as np\n'), ((25941, 25971), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (25949, 25971), True, 'import numpy as np\n'), ((26017, 26471), 'numpy.array', 'np.array', (['[0.00598, 0.00236, 0.00195, 0.00179, 0.00222, 0.0015, 0.00363, 0.00142, \n 0.00138, 0.00149, 0.00561, 0.0014, 0.00291, 0.0039, 0.0013, 0.00548, \n 0.0012, 0.00476, 0.00118, 0.00315, 0.00111, 0.00346, 0.00117, 0.00203, \n 0.00105, 0.00189, 0.00154, 0.00104, 0.00488, 0.00103, 0.00273, 0.00104,\n 0.00375, 0.00108, 0.00314, 0.00256, 0.00188, 0.00115, 0.00115, 0.00112,\n 0.00112, 0.00106, 0.00112, 0.0, 0.0, 0.0, 0.00257, 0.00359, 0, 0]'], {}), '([0.00598, 0.00236, 0.00195, 0.00179, 0.00222, 0.0015, 0.00363, \n 0.00142, 0.00138, 0.00149, 0.00561, 0.0014, 0.00291, 0.0039, 0.0013, \n 0.00548, 0.0012, 0.00476, 0.00118, 0.00315, 0.00111, 0.00346, 0.00117, \n 0.00203, 0.00105, 0.00189, 0.00154, 0.00104, 0.00488, 0.00103, 0.00273,\n 0.00104, 0.00375, 0.00108, 0.00314, 0.00256, 0.00188, 0.00115, 0.00115,\n 0.00112, 0.00112, 0.00106, 0.00112, 0.0, 0.0, 0.0, 0.00257, 0.00359, 0, 0])\n', (26025, 26471), True, 'import numpy as np\n'), ((26436, 26883), 'numpy.array', 'np.array', (['[0.00246, 0.0021, 0.00212, 0.00211, 0.00205, 0.00217, 0.00233, 0.00355, \n 0.00246, 0.00247, 0.00248, 0.00239, 0.00238, 0.00225, 0.00209, 0.00194,\n 0.00179, 0.01151, 0.00823, 0.00802, 0.0099, 0.00515, 0.00418, 0.00644, \n 0.00334, 0.00101, 0.00098, 0.00256, 0.00093, 0.00092, 0.00089, 0.00172,\n 0.00089, 0.00248, 0.00107, 0.0017, 0.00105, 0.00143, 0.0014, 0.00233, \n 0.00108, 0.00104, 0.00112, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0]'], {}), '([0.00246, 0.0021, 0.00212, 0.00211, 0.00205, 0.00217, 0.00233, \n 0.00355, 0.00246, 0.00247, 0.00248, 0.00239, 0.00238, 0.00225, 0.00209,\n 0.00194, 0.00179, 0.01151, 0.00823, 0.00802, 0.0099, 0.00515, 0.00418, \n 0.00644, 0.00334, 0.00101, 0.00098, 0.00256, 0.00093, 0.00092, 0.00089,\n 0.00172, 0.00089, 0.00248, 0.00107, 0.0017, 
0.00105, 0.00143, 0.0014, \n 0.00233, 0.00108, 0.00104, 0.00112, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0])\n', (26444, 26883), True, 'import numpy as np\n'), ((27093, 27123), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (27101, 27123), True, 'import numpy as np\n'), ((27143, 27551), 'numpy.array', 'np.array', (['[0.54528, 0.21972, 0.05225, 0.08766, 0.02, 0.07014, 0.02, 0.07964, 0.05357,\n 0.02, 0.02, 0.12421, 0.02, 0.02, 0.09464, 0.02, 0.06655, 0.02, 0.04816,\n 0.02, 0.09763, 0.02, 0.02, 0.02, 0.03777, 0.02, 0.02, 0.10725, 0.02, \n 0.05159, 0.02, 0.04831, 0.02, 0.08232, 0.02, 0.02, 0.02, 0.02931, \n 0.07298, 0.05129, 0.11783, 0.07846, 0.45489, 0.58986, 0.15937, 0.43817,\n 0.0, 0.0, 0.25798, 0.0]'], {}), '([0.54528, 0.21972, 0.05225, 0.08766, 0.02, 0.07014, 0.02, 0.07964,\n 0.05357, 0.02, 0.02, 0.12421, 0.02, 0.02, 0.09464, 0.02, 0.06655, 0.02,\n 0.04816, 0.02, 0.09763, 0.02, 0.02, 0.02, 0.03777, 0.02, 0.02, 0.10725,\n 0.02, 0.05159, 0.02, 0.04831, 0.02, 0.08232, 0.02, 0.02, 0.02, 0.02931,\n 0.07298, 0.05129, 0.11783, 0.07846, 0.45489, 0.58986, 0.15937, 0.43817,\n 0.0, 0.0, 0.25798, 0.0])\n', (27151, 27551), True, 'import numpy as np\n'), ((27575, 28059), 'numpy.array', 'np.array', (['[0.47839484, 0.190435122, 0.12086902, 0.081182033, 0.030748876, 0.184119897,\n 0.075833908, 0.02, 0.029741112, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, \n 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.032506855,\n 0.026333043, 0.02, 0.023692146, 0.050057587, 0.037561449, 0.02, \n 0.024524018, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.092785925, \n 0.054435714, 0.439187202, 0.465046705, 0.39008036, 0.384356347, \n 0.169971142, 0.031645066, 0, 0]'], {}), '([0.47839484, 0.190435122, 0.12086902, 0.081182033, 0.030748876, \n 0.184119897, 0.075833908, 0.02, 0.029741112, 0.02, 0.02, 0.02, 0.02, \n 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02,\n 0.032506855, 0.026333043, 0.02, 0.023692146, 0.050057587, 0.037561449, \n 0.02, 
0.024524018, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, \n 0.092785925, 0.054435714, 0.439187202, 0.465046705, 0.39008036, \n 0.384356347, 0.169971142, 0.031645066, 0, 0])\n', (27583, 28059), True, 'import numpy as np\n'), ((28394, 28424), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (28402, 28424), True, 'import numpy as np\n'), ((29316, 29766), 'numpy.array', 'np.array', (['[0.00578, 0.00226, 0.00187, 0.0017, 0.00153, 0.00143, 0.00137, 0.00134, \n 0.0013, 0.00129, 0.0021, 0.00132, 0.00348, 0.00358, 0.00123, 0.00312, \n 0.00112, 0.00368, 0.00109, 0.00162, 0.00103, 0.00271, 0.00095, 0.00252,\n 0.00095, 0.00094, 0.00093, 0.00093, 0.004, 0.00093, 0.00342, 0.00097, \n 0.0037, 0.00099, 0.00259, 0.00221, 0.00244, 0.00106, 0.00102, 0.00101, \n 0.00099, 0.00098, 0.00095, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.00578, 0.00226, 0.00187, 0.0017, 0.00153, 0.00143, 0.00137, \n 0.00134, 0.0013, 0.00129, 0.0021, 0.00132, 0.00348, 0.00358, 0.00123, \n 0.00312, 0.00112, 0.00368, 0.00109, 0.00162, 0.00103, 0.00271, 0.00095,\n 0.00252, 0.00095, 0.00094, 0.00093, 0.00093, 0.004, 0.00093, 0.00342, \n 0.00097, 0.0037, 0.00099, 0.00259, 0.00221, 0.00244, 0.00106, 0.00102, \n 0.00101, 0.00099, 0.00098, 0.00095, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (29324, 29766), True, 'import numpy as np\n'), ((30583, 31033), 'numpy.array', 'np.array', (['[0.00236, 0.00203, 0.00205, 0.00206, 0.00198, 0.0021, 0.00228, 0.00227, \n 0.00241, 0.00241, 0.00242, 0.00234, 0.00231, 0.00217, 0.00202, 0.00187,\n 0.01046, 0.00997, 0.00293, 0.00918, 0.00231, 0.00401, 0.0085, 0.00266, \n 0.00394, 0.00172, 0.00089, 0.00262, 0.00113, 0.00084, 0.00083, 0.00142,\n 0.0008, 0.00295, 0.00187, 0.00086, 0.00118, 0.00089, 0.00166, 0.001, \n 0.00094, 0.00092, 0.00097, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.00236, 0.00203, 0.00205, 0.00206, 0.00198, 0.0021, 0.00228, \n 0.00227, 0.00241, 0.00241, 0.00242, 0.00234, 0.00231, 0.00217, 0.00202,\n 0.00187, 0.01046, 0.00997, 
0.00293, 0.00918, 0.00231, 0.00401, 0.0085, \n 0.00266, 0.00394, 0.00172, 0.00089, 0.00262, 0.00113, 0.00084, 0.00083,\n 0.00142, 0.0008, 0.00295, 0.00187, 0.00086, 0.00118, 0.00089, 0.00166, \n 0.001, 0.00094, 0.00092, 0.00097, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (30591, 31033), True, 'import numpy as np\n'), ((31244, 31274), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (31252, 31274), True, 'import numpy as np\n'), ((32160, 32595), 'numpy.array', 'np.array', (['[0.54333, 0.18208, 0.09452, 0.06729, 0.05128, 0.03646, 0.02952, 0.04198, \n 0.02505, 0.02711, 0.02, 0.07864, 0.02, 0.02, 0.09971, 0.02, 0.05071, \n 0.02, 0.03236, 0.02, 0.09185, 0.02, 0.03203, 0.02, 0.03167, 0.03064, \n 0.02161, 0.0848, 0.02, 0.04616, 0.02, 0.06061, 0.02, 0.08866, 0.02, \n 0.02, 0.02, 0.07034, 0.04439, 0.08118, 0.06923, 0.16061, 0.51689, \n 0.5598, 0.2331, 0.25554, 0.01519, 0.12491, 0.06625, 0.0]'], {}), '([0.54333, 0.18208, 0.09452, 0.06729, 0.05128, 0.03646, 0.02952, \n 0.04198, 0.02505, 0.02711, 0.02, 0.07864, 0.02, 0.02, 0.09971, 0.02, \n 0.05071, 0.02, 0.03236, 0.02, 0.09185, 0.02, 0.03203, 0.02, 0.03167, \n 0.03064, 0.02161, 0.0848, 0.02, 0.04616, 0.02, 0.06061, 0.02, 0.08866, \n 0.02, 0.02, 0.02, 0.07034, 0.04439, 0.08118, 0.06923, 0.16061, 0.51689,\n 0.5598, 0.2331, 0.25554, 0.01519, 0.12491, 0.06625, 0.0])\n', (32168, 32595), True, 'import numpy as np\n'), ((33456, 33948), 'numpy.array', 'np.array', (['[0.371419539, 0.205661569, 0.135265873, 0.102702654, 0.055240889, \n 0.048992378, 0.107111533, 0.059592465, 0.032056939, 0.02, 0.02, 0.02, \n 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02,\n 0.02, 0.02, 0.06991043, 0.02, 0.02, 0.036157545, 0.070163829, 0.02, \n 0.032241992, 0.02, 0.02, 0.02027967, 0.02, 0.043477638, 0.02, 0.02, \n 0.080038155, 0.071876772, 0.477291934, 0.454819524, 0.428913696, \n 0.287380262, 0.140803001, 0.054164949, 0, 0]'], {}), '([0.371419539, 0.205661569, 0.135265873, 0.102702654, 
0.055240889, \n 0.048992378, 0.107111533, 0.059592465, 0.032056939, 0.02, 0.02, 0.02, \n 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02,\n 0.02, 0.02, 0.06991043, 0.02, 0.02, 0.036157545, 0.070163829, 0.02, \n 0.032241992, 0.02, 0.02, 0.02027967, 0.02, 0.043477638, 0.02, 0.02, \n 0.080038155, 0.071876772, 0.477291934, 0.454819524, 0.428913696, \n 0.287380262, 0.140803001, 0.054164949, 0, 0])\n', (33464, 33948), True, 'import numpy as np\n'), ((34279, 34309), 'numpy.zeros', 'np.zeros', (['(101, self.n_groups)'], {}), '((101, self.n_groups))\n', (34287, 34309), True, 'import numpy as np\n'), ((34329, 34593), 'numpy.array', 'np.array', (['[0.826082957, 0.593698994, 0.366283368, 0.43758429, 0.219910436, \n 0.367689675, 0.111588214, 0.234498521, 0.5, 0.96438943, 1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([0.826082957, 0.593698994, 0.366283368, 0.43758429, 0.219910436, \n 0.367689675, 0.111588214, 0.234498521, 0.5, 0.96438943, 1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (34337, 34593), True, 'import numpy as np\n'), ((34551, 34759), 'numpy.array', 'np.array', (['[0.506854911, 0.619103706, 0.181591468, 0.518294319, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([0.506854911, 0.619103706, 0.181591468, 0.518294319, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (34559, 34759), True, 'import numpy as np\n'), ((35189, 35232), 'numpy.zeros', 'np.zeros', (['(self.max_age + 1, self.n_groups)'], {}), '((self.max_age + 1, self.n_groups))\n', (35197, 35232), True, 'import numpy as np\n'), ((35651, 36304), 'numpy.array', 'np.array', (['[0.004697942, 0.004435302, 0.003631736, 
0.003141361, 0.003457091, \n 0.003005607, 0.002905609, 0.003029283, 0.002289213, 0.002137714, \n 0.001854558, 0.002813517, 0.002607335, 0.00292628, 0.002937462, \n 0.002784612, 0.002846377, 0.002776506, 0.003017675, 0.003129845, \n 0.003349059, 0.002991577, 0.00305634, 0.003446143, 0.003633971, \n 0.004045113, 0.004002001, 0.004517725, 0.005527525, 0.005565513, \n 0.006319492, 0.007399175, 0.00731299, 0.009142823, 0.010254463, \n 0.011784364, 0.013783743, 0.015299156, 0.018282001, 0.024051257, \n 0.032338044, 0.028290544, 0.019444444, 0.00454486, 0.000330718, 0, 0, 0,\n 0, 0, 0]'], {}), '([0.004697942, 0.004435302, 0.003631736, 0.003141361, 0.003457091, \n 0.003005607, 0.002905609, 0.003029283, 0.002289213, 0.002137714, \n 0.001854558, 0.002813517, 0.002607335, 0.00292628, 0.002937462, \n 0.002784612, 0.002846377, 0.002776506, 0.003017675, 0.003129845, \n 0.003349059, 0.002991577, 0.00305634, 0.003446143, 0.003633971, \n 0.004045113, 0.004002001, 0.004517725, 0.005527525, 0.005565513, \n 0.006319492, 0.007399175, 0.00731299, 0.009142823, 0.010254463, \n 0.011784364, 0.013783743, 0.015299156, 0.018282001, 0.024051257, \n 0.032338044, 0.028290544, 0.019444444, 0.00454486, 0.000330718, 0, 0, 0,\n 0, 0, 0])\n', (35659, 36304), True, 'import numpy as np\n'), ((36230, 36884), 'numpy.array', 'np.array', (['[0.00532654, 0.004917401, 0.00453191, 0.003799551, 0.003253733, 0.003092307,\n 0.002822592, 0.003309772, 0.002482279, 0.002615887, 0.002416545, \n 0.003546203, 0.002665276, 0.003095104, 0.003129633, 0.003406418, \n 0.003171677, 0.003320357, 0.003391292, 0.004007371, 0.004310094, \n 0.00438571, 0.004267343, 0.004889399, 0.005043702, 0.005793425, \n 0.005569451, 0.006298434, 0.006363081, 0.007043361, 0.009389811, \n 0.007457667, 0.009251373, 0.011154836, 0.009524088, 0.013689796, \n 0.014658423, 0.017440417, 0.022804727, 0.02677838, 0.037438459, \n 0.034691279, 0.022649573, 0.004414073, 0.000264568, 0, 0, 0, 0, 0, 0]'], {}), '([0.00532654, 0.004917401, 0.00453191, 
0.003799551, 0.003253733, \n 0.003092307, 0.002822592, 0.003309772, 0.002482279, 0.002615887, \n 0.002416545, 0.003546203, 0.002665276, 0.003095104, 0.003129633, \n 0.003406418, 0.003171677, 0.003320357, 0.003391292, 0.004007371, \n 0.004310094, 0.00438571, 0.004267343, 0.004889399, 0.005043702, \n 0.005793425, 0.005569451, 0.006298434, 0.006363081, 0.007043361, \n 0.009389811, 0.007457667, 0.009251373, 0.011154836, 0.009524088, \n 0.013689796, 0.014658423, 0.017440417, 0.022804727, 0.02677838, \n 0.037438459, 0.034691279, 0.022649573, 0.004414073, 0.000264568, 0, 0, \n 0, 0, 0, 0])\n', (36238, 36884), True, 'import numpy as np\n'), ((36899, 36940), 'numpy.maximum', 'np.maximum', (['dis_miehet[41:51]', '(0.02829054)'], {}), '(dis_miehet[41:51], 0.02829054)\n', (36909, 36940), True, 'import numpy as np\n'), ((36966, 37007), 'numpy.maximum', 'np.maximum', (['dis_naiset[41:51]', '(0.03469128)'], {}), '(dis_naiset[41:51], 0.03469128)\n', (36976, 37007), True, 'import numpy as np\n'), ((37554, 37597), 'numpy.zeros', 'np.zeros', (['(self.max_age + 1, self.n_groups)'], {}), '((self.max_age + 1, self.n_groups))\n', (37562, 37597), True, 'import numpy as np\n'), ((38017, 38641), 'numpy.array', 'np.array', (['[0.0068168, 0.003341014, 0, 0.004279685, 0.001118673, 0.001802593, \n 0.00217149, 0, 0, 0.002157641, 0, 0.002545172, 0, 0.002960375, \n 0.000767293, 0, 0.002265829, 0.000286527, 0, 0.004899931, 0, \n 0.000677208, 0.001155069, 0.003796412, 0.004896709, 0.001921327, \n 0.004668376, 0.004630126, 0.002478899, 0.00642266, 0.005795605, \n 0.00558426, 0.008096878, 0.004548654, 0.010179089, 0.016100661, \n 0.015144889, 0.011688053, 0.024563474, 0.036719657, 0.036573355, \n 0.026898066, 0.027508352, 0.024176173, 0.023621633, 0.02058014, \n 0.020290345, 0.0202976, 0.020304995, 0.020282729, 0.020282729]'], {}), '([0.0068168, 0.003341014, 0, 0.004279685, 0.001118673, 0.001802593,\n 0.00217149, 0, 0, 0.002157641, 0, 0.002545172, 0, 0.002960375, \n 0.000767293, 0, 0.002265829, 
0.000286527, 0, 0.004899931, 0, \n 0.000677208, 0.001155069, 0.003796412, 0.004896709, 0.001921327, \n 0.004668376, 0.004630126, 0.002478899, 0.00642266, 0.005795605, \n 0.00558426, 0.008096878, 0.004548654, 0.010179089, 0.016100661, \n 0.015144889, 0.011688053, 0.024563474, 0.036719657, 0.036573355, \n 0.026898066, 0.027508352, 0.024176173, 0.023621633, 0.02058014, \n 0.020290345, 0.0202976, 0.020304995, 0.020282729, 0.020282729])\n', (38025, 38641), True, 'import numpy as np\n'), ((38572, 39235), 'numpy.array', 'np.array', (['[0.004962318, 0.002850008, 0.004703008, 0, 0.001625749, 0.000940874, \n 0.001050232, 0, 0, 4.34852e-05, 0.003516261, 0, 8.21901e-05, \n 0.002276047, 0.000443789, 0.002472653, 0, 0.001866348, 0.002269429, \n 0.001480588, 0.00139571, 0.002185668, 0.002003531, 0.003662852, \n 0.003271301, 0.003629155, 0.002690071, 0.003977974, 0.005051223, \n 0.00303663, 0.008097507, 0.004912787, 0.005008356, 0.007536173, \n 0.007618452, 0.017496524, 0.012431715, 0.020801345, 0.025163258, \n 0.027521298, 0.039852895, 0.023791604, 0.025422742, 0.02230225, \n 0.021684456, 0.01894045, 0.018676988, 0.018654938, 0.01865384, \n 0.018650795, 0.018650795]'], {}), '([0.004962318, 0.002850008, 0.004703008, 0, 0.001625749, \n 0.000940874, 0.001050232, 0, 0, 4.34852e-05, 0.003516261, 0, \n 8.21901e-05, 0.002276047, 0.000443789, 0.002472653, 0, 0.001866348, \n 0.002269429, 0.001480588, 0.00139571, 0.002185668, 0.002003531, \n 0.003662852, 0.003271301, 0.003629155, 0.002690071, 0.003977974, \n 0.005051223, 0.00303663, 0.008097507, 0.004912787, 0.005008356, \n 0.007536173, 0.007618452, 0.017496524, 0.012431715, 0.020801345, \n 0.025163258, 0.027521298, 0.039852895, 0.023791604, 0.025422742, \n 0.02230225, 0.021684456, 0.01894045, 0.018676988, 0.018654938, \n 0.01865384, 0.018650795, 0.018650795])\n', (38580, 39235), True, 'import numpy as np\n'), ((39567, 39610), 'numpy.zeros', 'np.zeros', (['(self.max_age + 1, self.n_groups)'], {}), '((self.max_age + 1, self.n_groups))\n', 
(39575, 39610), True, 'import numpy as np\n'), ((111137, 111168), 'numpy.maximum', 'np.maximum', (['self.min_salary', 'wt'], {}), '(self.min_salary, wt)\n', (111147, 111168), True, 'import numpy as np\n'), ((126083, 126103), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (126091, 126103), True, 'import numpy as np\n'), ((131316, 131329), 'numpy.array', 'np.array', (['low'], {}), '(low)\n', (131324, 131329), True, 'import numpy as np\n'), ((131348, 131362), 'numpy.array', 'np.array', (['high'], {}), '(high)\n', (131356, 131362), True, 'import numpy as np\n'), ((9277, 9288), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (9285, 9288), True, 'import numpy as np\n'), ((12313, 12355), 'fin_benefits.BasicIncomeBenefits', 'fin_benefits.BasicIncomeBenefits', ([], {}), '(**kwargs)\n', (12345, 12355), False, 'import fin_benefits\n'), ((12393, 12424), 'fin_benefits.Benefits', 'fin_benefits.Benefits', ([], {}), '(**kwargs)\n', (12414, 12424), False, 'import fin_benefits\n'), ((13261, 13305), 'numpy.arange', 'np.arange', (['(100)', 'self.max_age', '(-self.timestep)'], {}), '(100, self.max_age, -self.timestep)\n', (13270, 13305), True, 'import numpy as np\n'), ((21618, 21658), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (21626, 21658), True, 'import numpy as np\n'), ((21688, 21730), 'numpy.array', 'np.array', (['[1.3, 1.0, 0.8, 1.15, 1.0, 0.85]'], {}), '([1.3, 1.0, 0.8, 1.15, 1.0, 0.85])\n', (21696, 21730), True, 'import numpy as np\n'), ((21807, 22505), 'numpy.array', 'np.array', (['[2.12, 0.32, 0.17, 0.07, 0.07, 0.1, 0.0, 0.09, 0.03, 0.13, 0.03, 0.07, 0.1,\n 0.1, 0.1, 0.23, 0.5, 0.52, 0.42, 0.87, 0.79, 0.66, 0.71, 0.69, 0.98, \n 0.8, 0.77, 1.07, 0.97, 0.76, 0.83, 1.03, 0.98, 1.2, 1.03, 0.76, 1.22, \n 1.29, 1.1, 1.26, 1.37, 1.43, 1.71, 2.32, 2.22, 1.89, 2.05, 2.15, 2.71, \n 2.96, 3.52, 3.54, 4.3, 4.34, 5.09, 4.75, 6.17, 5.88, 6.67, 8.0, 9.2, \n 10.52, 10.3, 12.26, 12.74, 13.22, 15.03, 17.24, 
18.14, 17.78, 20.35, \n 25.57, 23.53, 26.5, 28.57, 31.87, 34.65, 40.88, 42.43, 52.28, 59.26, \n 62.92, 68.86, 72.7, 94.04, 99.88, 113.11, 128.52, 147.96, 161.89, \n 175.99, 199.39, 212.52, 248.32, 260.47, 284.01, 319.98, 349.28, 301.37,\n 370.17, 370.17]'], {}), '([2.12, 0.32, 0.17, 0.07, 0.07, 0.1, 0.0, 0.09, 0.03, 0.13, 0.03, \n 0.07, 0.1, 0.1, 0.1, 0.23, 0.5, 0.52, 0.42, 0.87, 0.79, 0.66, 0.71, \n 0.69, 0.98, 0.8, 0.77, 1.07, 0.97, 0.76, 0.83, 1.03, 0.98, 1.2, 1.03, \n 0.76, 1.22, 1.29, 1.1, 1.26, 1.37, 1.43, 1.71, 2.32, 2.22, 1.89, 2.05, \n 2.15, 2.71, 2.96, 3.52, 3.54, 4.3, 4.34, 5.09, 4.75, 6.17, 5.88, 6.67, \n 8.0, 9.2, 10.52, 10.3, 12.26, 12.74, 13.22, 15.03, 17.24, 18.14, 17.78,\n 20.35, 25.57, 23.53, 26.5, 28.57, 31.87, 34.65, 40.88, 42.43, 52.28, \n 59.26, 62.92, 68.86, 72.7, 94.04, 99.88, 113.11, 128.52, 147.96, 161.89,\n 175.99, 199.39, 212.52, 248.32, 260.47, 284.01, 319.98, 349.28, 301.37,\n 370.17, 370.17])\n', (21815, 22505), True, 'import numpy as np\n'), ((22482, 23172), 'numpy.array', 'np.array', (['[1.89, 0.3, 0.11, 0.03, 0.14, 0.03, 0.16, 0.07, 0.13, 0.03, 0.0, 0.07, 0.07,\n 0.07, 0.18, 0.14, 0.07, 0.31, 0.31, 0.3, 0.33, 0.26, 0.18, 0.33, 0.56, \n 0.17, 0.32, 0.29, 0.35, 0.24, 0.55, 0.35, 0.23, 0.39, 0.48, 0.38, 0.35,\n 0.8, 0.42, 0.65, 0.5, 0.68, 0.8, 1.12, 0.99, 0.88, 1.13, 1.01, 1.07, \n 1.68, 1.79, 2.16, 1.87, 2.32, 2.67, 2.69, 2.88, 2.86, 3.73, 4.19, 3.66,\n 4.97, 5.2, 5.52, 6.05, 7.17, 7.48, 7.32, 8.88, 10.33, 10.72, 12.77, \n 12.13, 13.3, 16.18, 18.3, 17.5, 24.63, 26.53, 29.88, 32.65, 38.88, \n 46.95, 51.3, 60.0, 64.73, 79.35, 90.94, 105.11, 118.46, 141.44, 155.07,\n 163.11, 198.45, 207.92, 237.21, 254.75, 311.31, 299.59, 356.64, 356.64]'], {}), '([1.89, 0.3, 0.11, 0.03, 0.14, 0.03, 0.16, 0.07, 0.13, 0.03, 0.0, \n 0.07, 0.07, 0.07, 0.18, 0.14, 0.07, 0.31, 0.31, 0.3, 0.33, 0.26, 0.18, \n 0.33, 0.56, 0.17, 0.32, 0.29, 0.35, 0.24, 0.55, 0.35, 0.23, 0.39, 0.48,\n 0.38, 0.35, 0.8, 0.42, 0.65, 0.5, 0.68, 0.8, 1.12, 0.99, 0.88, 1.13, \n 1.01, 
1.07, 1.68, 1.79, 2.16, 1.87, 2.32, 2.67, 2.69, 2.88, 2.86, 3.73,\n 4.19, 3.66, 4.97, 5.2, 5.52, 6.05, 7.17, 7.48, 7.32, 8.88, 10.33, 10.72,\n 12.77, 12.13, 13.3, 16.18, 18.3, 17.5, 24.63, 26.53, 29.88, 32.65, \n 38.88, 46.95, 51.3, 60.0, 64.73, 79.35, 90.94, 105.11, 118.46, 141.44, \n 155.07, 163.11, 198.45, 207.92, 237.21, 254.75, 311.31, 299.59, 356.64,\n 356.64])\n', (22490, 23172), True, 'import numpy as np\n'), ((35345, 35385), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (35353, 35385), True, 'import numpy as np\n'), ((37710, 37750), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (37718, 37750), True, 'import numpy as np\n'), ((37948, 37989), 'numpy.array', 'np.array', (['[1.3, 0.95, 0.6, 1.2, 1.0, 0.9]'], {}), '([1.3, 0.95, 0.6, 1.2, 1.0, 0.9])\n', (37956, 37989), True, 'import numpy as np\n'), ((39646, 39686), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (39654, 39686), True, 'import numpy as np\n'), ((39716, 39758), 'numpy.array', 'np.array', (['[0.75, 1.0, 1.25, 0.5, 1.0, 1.5]'], {}), '([0.75, 1.0, 1.25, 0.5, 1.0, 1.5])\n', (39724, 39758), True, 'import numpy as np\n'), ((42111, 42124), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (42119, 42124), True, 'import numpy as np\n'), ((43052, 43065), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (43060, 43065), True, 'import numpy as np\n'), ((43974, 43987), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (43982, 43987), True, 'import numpy as np\n'), ((44887, 44900), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (44895, 44900), True, 'import numpy as np\n'), ((45588, 45601), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (45596, 45601), True, 'import numpy as np\n'), ((52111, 52124), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (52119, 52124), True, 'import numpy as np\n'), ((62795, 
62808), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (62803, 62808), True, 'import numpy as np\n'), ((62932, 62957), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(7)'}), '(size=7)\n', (62949, 62957), True, 'import numpy as np\n'), ((65608, 65618), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (65615, 65618), True, 'import numpy as np\n'), ((106110, 106130), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (106118, 106130), True, 'import numpy as np\n'), ((108814, 108828), 'numpy.log', 'np.log', (['income'], {}), '(income)\n', (108820, 108828), True, 'import numpy as np\n'), ((109213, 109256), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(0.02)', 'size': '(1)'}), '(loc=0, scale=0.02, size=1)\n', (109229, 109256), True, 'import numpy as np\n'), ((109712, 109779), 'numpy.array', 'np.array', (['[2000, 3300, 5000, 0.85 * 2000, 0.85 * 3300, 0.85 * 5000]'], {}), '([2000, 3300, 5000, 0.85 * 2000, 0.85 * 3300, 0.85 * 5000])\n', (109720, 109779), True, 'import numpy as np\n'), ((110827, 110871), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'sigma', 'size': '(1)'}), '(loc=0, scale=sigma, size=1)\n', (110843, 110871), True, 'import numpy as np\n'), ((111426, 111922), 'numpy.array', 'np.array', (['[2339.01, 2489.09, 2571.4, 2632.58, 2718.03, 2774.21, 2884.89, 2987.55, \n 3072.4, 3198.48, 3283.81, 3336.51, 3437.3, 3483.45, 3576.67, 3623.0, \n 3731.27, 3809.58, 3853.66, 3995.9, 4006.16, 4028.6, 4104.72, 4181.51, \n 4134.13, 4157.54, 4217.15, 4165.21, 4141.23, 4172.14, 4121.26, 4127.43,\n 4134.0, 4093.1, 4065.53, 4063.17, 4085.31, 4071.25, 4026.5, 4031.17, \n 4047.32, 4026.96, 4028.39, 4163.14, 4266.42, 4488.4, 4201.4, 4252.15, \n 4443.96, 3316.92, 3536.03, 3536.03]'], {}), '([2339.01, 2489.09, 2571.4, 2632.58, 2718.03, 2774.21, 2884.89, \n 2987.55, 3072.4, 3198.48, 3283.81, 3336.51, 3437.3, 3483.45, 3576.67, \n 3623.0, 3731.27, 3809.58, 3853.66, 3995.9, 4006.16, 4028.6, 4104.72, \n 
4181.51, 4134.13, 4157.54, 4217.15, 4165.21, 4141.23, 4172.14, 4121.26,\n 4127.43, 4134.0, 4093.1, 4065.53, 4063.17, 4085.31, 4071.25, 4026.5, \n 4031.17, 4047.32, 4026.96, 4028.39, 4163.14, 4266.42, 4488.4, 4201.4, \n 4252.15, 4443.96, 3316.92, 3536.03, 3536.03])\n', (111434, 111922), True, 'import numpy as np\n'), ((111885, 112387), 'numpy.array', 'np.array', (['[2223.96, 2257.1, 2284.57, 2365.57, 2443.64, 2548.35, 2648.06, 2712.89, \n 2768.83, 2831.99, 2896.76, 2946.37, 2963.84, 2993.79, 3040.83, 3090.43,\n 3142.91, 3159.91, 3226.95, 3272.29, 3270.97, 3297.32, 3333.42, 3362.99,\n 3381.84, 3342.78, 3345.25, 3360.21, 3324.67, 3322.28, 3326.72, 3326.06,\n 3314.82, 3303.73, 3302.65, 3246.03, 3244.65, 3248.04, 3223.94, 3211.96,\n 3167.0, 3156.29, 3175.23, 3228.67, 3388.39, 3457.17, 3400.23, 3293.52, \n 2967.68, 2702.05, 2528.84, 2528.84]'], {}), '([2223.96, 2257.1, 2284.57, 2365.57, 2443.64, 2548.35, 2648.06, \n 2712.89, 2768.83, 2831.99, 2896.76, 2946.37, 2963.84, 2993.79, 3040.83,\n 3090.43, 3142.91, 3159.91, 3226.95, 3272.29, 3270.97, 3297.32, 3333.42,\n 3362.99, 3381.84, 3342.78, 3345.25, 3360.21, 3324.67, 3322.28, 3326.72,\n 3326.06, 3314.82, 3303.73, 3302.65, 3246.03, 3244.65, 3248.04, 3223.94,\n 3211.96, 3167.0, 3156.29, 3175.23, 3228.67, 3388.39, 3457.17, 3400.23, \n 3293.52, 2967.68, 2702.05, 2528.84, 2528.84])\n', (111893, 112387), True, 'import numpy as np\n'), ((114347, 114389), 'numpy.zeros', 'np.zeros', (['(self.n_empl + self.n_groups + 14)'], {}), '(self.n_empl + self.n_groups + 14)\n', (114355, 114389), True, 'import numpy as np\n'), ((114414, 114456), 'numpy.zeros', 'np.zeros', (['(self.n_empl + self.n_groups + 13)'], {}), '(self.n_empl + self.n_groups + 13)\n', (114422, 114456), True, 'import numpy as np\n'), ((114524, 114582), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (114532, 114582), True, 'import numpy as np\n'), ((116058, 116086), 'numpy.array', 
'np.array', (['[0, 1, 0, 0, 0, 0]'], {}), '([0, 1, 0, 0, 0, 0])\n', (116066, 116086), True, 'import numpy as np\n'), ((116566, 116600), 'numpy.log', 'np.log', (['(pension / 20000 + self.eps)'], {}), '(pension / 20000 + self.eps)\n', (116572, 116600), True, 'import numpy as np\n'), ((116641, 116676), 'numpy.log', 'np.log', (['(old_wage / 40000 + self.eps)'], {}), '(old_wage / 40000 + self.eps)\n', (116647, 116676), True, 'import numpy as np\n'), ((116699, 116738), 'numpy.log', 'np.log', (['(paid_pension / 20000 + self.eps)'], {}), '(paid_pension / 20000 + self.eps)\n', (116705, 116738), True, 'import numpy as np\n'), ((116778, 116814), 'numpy.log', 'np.log', (['(next_wage / 40000 + self.eps)'], {}), '(next_wage / 40000 + self.eps)\n', (116784, 116814), True, 'import numpy as np\n'), ((118403, 118445), 'numpy.zeros', 'np.zeros', (['(self.n_empl + self.n_groups + 14)'], {}), '(self.n_empl + self.n_groups + 14)\n', (118411, 118445), True, 'import numpy as np\n'), ((118470, 118512), 'numpy.zeros', 'np.zeros', (['(self.n_empl + self.n_groups + 13)'], {}), '(self.n_empl + self.n_groups + 13)\n', (118478, 118512), True, 'import numpy as np\n'), ((118644, 118699), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (118652, 118699), True, 'import numpy as np\n'), ((120136, 120164), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 0]'], {}), '([0, 1, 0, 0, 0, 0])\n', (120144, 120164), True, 'import numpy as np\n'), ((120644, 120678), 'numpy.log', 'np.log', (['(pension / 20000 + self.eps)'], {}), '(pension / 20000 + self.eps)\n', (120650, 120678), True, 'import numpy as np\n'), ((120719, 120754), 'numpy.log', 'np.log', (['(old_wage / 40000 + self.eps)'], {}), '(old_wage / 40000 + self.eps)\n', (120725, 120754), True, 'import numpy as np\n'), ((120777, 120816), 'numpy.log', 'np.log', (['(paid_pension / 20000 + self.eps)'], {}), '(paid_pension / 20000 + self.eps)\n', (120783, 120816), True, 
'import numpy as np\n'), ((127870, 127898), 'numpy.log', 'np.log', (['(0 / 20000 + self.eps)'], {}), '(0 / 20000 + self.eps)\n', (127876, 127898), True, 'import numpy as np\n'), ((127938, 127971), 'numpy.log', 'np.log', (['(200000 / 20000 + self.eps)'], {}), '(200000 / 20000 + self.eps)\n', (127944, 127971), True, 'import numpy as np\n'), ((128009, 128042), 'numpy.log', 'np.log', (['(500000 / 40000 + self.eps)'], {}), '(500000 / 40000 + self.eps)\n', (128015, 128042), True, 'import numpy as np\n'), ((128062, 128090), 'numpy.log', 'np.log', (['(0 / 40000 + self.eps)'], {}), '(0 / 40000 + self.eps)\n', (128068, 128090), True, 'import numpy as np\n'), ((128117, 128149), 'numpy.log', 'np.log', (['(20000 / 20000 + self.eps)'], {}), '(20000 / 20000 + self.eps)\n', (128123, 128149), True, 'import numpy as np\n'), ((128193, 128221), 'numpy.log', 'np.log', (['(0 / 20000 + self.eps)'], {}), '(0 / 20000 + self.eps)\n', (128199, 128221), True, 'import numpy as np\n'), ((35583, 35623), 'numpy.array', 'np.array', (['[1.2, 0.8, 0.4, 1.1, 0.8, 0.5]'], {}), '([1.2, 0.8, 0.4, 1.1, 0.8, 0.5])\n', (35591, 35623), True, 'import numpy as np\n'), ((50762, 50775), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (50770, 50775), True, 'import numpy as np\n'), ((66300, 66320), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (66308, 66320), True, 'import numpy as np\n'), ((105345, 105374), 'numpy.floor', 'np.floor', (['(age + self.timestep)'], {}), '(age + self.timestep)\n', (105353, 105374), True, 'import numpy as np\n'), ((109383, 109394), 'numpy.exp', 'np.exp', (['eps'], {}), '(eps)\n', (109389, 109394), True, 'import numpy as np\n'), ((109863, 109912), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'a0', 'scale': '(12 * 1000)', 'size': '(1)'}), '(loc=a0, scale=12 * 1000, size=1)\n', (109879, 109912), True, 'import numpy as np\n'), ((111053, 111064), 'numpy.exp', 'np.exp', (['eps'], {}), '(eps)\n', (111059, 111064), True, 'import numpy as np\n'), ((114613, 
114671), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (114621, 114671), True, 'import numpy as np\n'), ((116131, 116159), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0])\n', (116139, 116159), True, 'import numpy as np\n'), ((118731, 118786), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (118739, 118786), True, 'import numpy as np\n'), ((120209, 120237), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0])\n', (120217, 120237), True, 'import numpy as np\n'), ((123916, 123946), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'int'}), '([0, 1, 2], dtype=int)\n', (123924, 123946), True, 'import numpy as np\n'), ((124000, 124027), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'int'}), '([0, 1], dtype=int)\n', (124008, 124027), True, 'import numpy as np\n'), ((125405, 125447), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(0.1)', 'size': '(1)'}), '(loc=0, scale=0.1, size=1)\n', (125421, 125447), True, 'import numpy as np\n'), ((13330, 13341), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (13338, 13341), True, 'import numpy as np\n'), ((104967, 105188), 'gym.logger.warn', 'logger.warn', (['"""You are calling \'step()\' even though this environment has already returned done = True. You should always call \'reset()\' once you receive \'done = True\' -- any further steps are undefined behavior."""'], {}), '(\n "You are calling \'step()\' even though this environment has already returned done = True. 
You should always call \'reset()\' once you receive \'done = True\' -- any further steps are undefined behavior."\n )\n', (104978, 105188), False, 'from gym import spaces, logger, utils, error\n'), ((112626, 112675), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'a0', 'scale': '(12 * 1000)', 'size': '(1)'}), '(loc=a0, scale=12 * 1000, size=1)\n', (112642, 112675), True, 'import numpy as np\n'), ((114702, 114760), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (114710, 114760), True, 'import numpy as np\n'), ((116204, 116232), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0, 0]'], {}), '([0, 0, 1, 0, 0, 0])\n', (116212, 116232), True, 'import numpy as np\n'), ((118818, 118873), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (118826, 118873), True, 'import numpy as np\n'), ((120282, 120310), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0, 0]'], {}), '([0, 0, 1, 0, 0, 0])\n', (120290, 120310), True, 'import numpy as np\n'), ((122455, 122471), 'numpy.exp', 'np.exp', (['vec[pos]'], {}), '(vec[pos])\n', (122461, 122471), True, 'import numpy as np\n'), ((122507, 122527), 'numpy.exp', 'np.exp', (['vec[pos + 1]'], {}), '(vec[pos + 1])\n', (122513, 122527), True, 'import numpy as np\n'), ((122569, 122589), 'numpy.exp', 'np.exp', (['vec[pos + 4]'], {}), '(vec[pos + 4])\n', (122575, 122589), True, 'import numpy as np\n'), ((124381, 124431), 'numpy.array', 'np.array', (['[13, 0, 1, 10, 3, 11, 12, 14]'], {'dtype': 'int'}), '([13, 0, 1, 10, 3, 11, 12, 14], dtype=int)\n', (124389, 124431), True, 'import numpy as np\n'), ((124577, 124627), 'numpy.array', 'np.array', (['[13, 0, 1, 10, 3, 11, 12, 14]'], {'dtype': 'int'}), '([13, 0, 1, 10, 3, 11, 12, 14], dtype=int)\n', (124585, 124627), True, 'import numpy as np\n'), ((110416, 110429), 'numpy.floor', 'np.floor', (['age'], {}), '(age)\n', (110424, 
110429), True, 'import numpy as np\n'), ((113039, 113081), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'a0', 'scale': 'a1', 'size': '(1)'}), '(loc=a0, scale=a1, size=1)\n', (113055, 113081), True, 'import numpy as np\n'), ((113579, 113621), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'a0', 'scale': 'a1', 'size': '(1)'}), '(loc=a0, scale=a1, size=1)\n', (113595, 113621), True, 'import numpy as np\n'), ((114791, 114849), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (114799, 114849), True, 'import numpy as np\n'), ((116277, 116305), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 0, 0]'], {}), '([0, 0, 0, 1, 0, 0])\n', (116285, 116305), True, 'import numpy as np\n'), ((118905, 118960), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (118913, 118960), True, 'import numpy as np\n'), ((120355, 120383), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 0, 0]'], {}), '([0, 0, 0, 1, 0, 0])\n', (120363, 120383), True, 'import numpy as np\n'), ((109333, 109347), 'numpy.log', 'np.log', (['(w / a0)'], {}), '(w / a0)\n', (109339, 109347), True, 'import numpy as np\n'), ((114880, 114938), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (114888, 114938), True, 'import numpy as np\n'), ((116350, 116378), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 0, 1, 0])\n', (116358, 116378), True, 'import numpy as np\n'), ((118992, 119047), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (119000, 119047), True, 'import numpy as np\n'), ((120428, 120456), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 0, 1, 0])\n', (120436, 120456), True, 'import numpy as np\n'), ((110987, 
111001), 'numpy.log', 'np.log', (['(w / a0)'], {}), '(w / a0)\n', (110993, 111001), True, 'import numpy as np\n'), ((114969, 115027), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (114977, 115027), True, 'import numpy as np\n'), ((116423, 116451), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 1])\n', (116431, 116451), True, 'import numpy as np\n'), ((119079, 119134), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (119087, 119134), True, 'import numpy as np\n'), ((120501, 120529), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 1])\n', (120509, 120529), True, 'import numpy as np\n'), ((115058, 115116), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (115066, 115116), True, 'import numpy as np\n'), ((119166, 119221), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])\n', (119174, 119221), True, 'import numpy as np\n'), ((115147, 115205), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])\n', (115155, 115205), True, 'import numpy as np\n'), ((119253, 119308), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0])\n', (119261, 119308), True, 'import numpy as np\n'), ((115236, 115294), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0])\n', (115244, 115294), True, 'import numpy as np\n'), ((119340, 119395), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0])\n', (119348, 119395), True, 'import numpy as np\n'), ((115325, 115383), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])\n', (115333, 115383), True, 'import numpy as np\n'), ((119427, 119482), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0])\n', (119435, 119482), True, 'import numpy as np\n'), ((115415, 115473), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0])\n', (115423, 115473), True, 'import numpy as np\n'), ((119515, 119570), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n', (119523, 119570), True, 'import numpy as np\n'), ((115505, 115563), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n', (115513, 115563), True, 'import numpy as np\n'), ((119603, 119658), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0])\n', (119611, 119658), True, 'import numpy as np\n'), ((115595, 115653), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0])\n', (115603, 115653), True, 'import numpy as np\n'), ((119691, 119746), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])\n', (119699, 119746), True, 'import numpy as np\n'), ((115685, 115743), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])\n', (115693, 115743), True, 'import numpy as np\n'), ((119779, 119834), 'numpy.array', 
'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])\n', (119787, 119834), True, 'import numpy as np\n'), ((115775, 115833), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])\n', (115783, 115833), True, 'import numpy as np\n'), ((119867, 119922), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\n', (119875, 119922), True, 'import numpy as np\n'), ((115865, 115923), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\n', (115873, 115923), True, 'import numpy as np\n')] |
"""Utility functions
"""
import math
import os
from functools import singledispatch
import numpy as np
import pandas as pd
__all__ = ["load_dataset"]
ANCHOR_DATETIME = np.datetime64("1970-01-01")  # Unix epoch: offset origin used by to_numeric/to_datetime
def load_dataset(name: str) -> pd.DataFrame:
    """Return one of the example datasets bundled with shellplot.

    Parameters
    ----------
    name : str
        Name of the dataset. Currently, available options are:
        - `penguins`

    Returns
    -------
    pd.DataFrame
        Pandas dataframe of dataset
    """
    # datasets live in a ``datasets`` folder next to this module
    csv_path = os.path.join(os.path.dirname(__file__), "datasets", f"{name}.csv")
    return pd.read_csv(csv_path)
def tolerance_round(x, tol=1e-3):
    """Round ``x`` with the fewest decimals whose relative error is within ``tol``.

    Parameters
    ----------
    x : float
        Value to round.
    tol : float, optional
        Maximum acceptable relative rounding error (default 1e-3).

    Returns
    -------
    tuple
        ``(x_rounded, decimals)`` where ``decimals`` is one past the number
        of decimals actually used (kept as-is for backward compatibility).
    """
    fudge = 1e-9  # protect against zero div
    error = 1.0
    decimals = 0
    x_rounded = round(x)  # sensible result even if tol >= 1 skips the loop
    while error > tol:
        if decimals == 0:
            x_rounded = round(x)
        else:
            x_rounded = round(x, decimals)
        # BUGFIX: use the magnitude of the relative error; the signed error
        # is negative whenever rounding overshoots x, which used to
        # terminate the loop prematurely with a too-coarse result
        error = abs(x - x_rounded) / (abs(x) + fudge)
        decimals += 1
    return x_rounded, decimals
def round_up(n, decimals=0):
    """Round ``n`` towards +inf at ``decimals`` decimal places."""
    if decimals == 0:
        # keep the integer type math.ceil returns
        return math.ceil(n)
    scale = 10 ** decimals
    return math.ceil(n * scale) / scale
def round_down(n, decimals=0):
    """Round ``n`` towards -inf at ``decimals`` decimal places."""
    if decimals == 0:
        # keep the integer type math.floor returns
        return math.floor(n)
    scale = 10 ** decimals
    return math.floor(n * scale) / scale
def _round_to_decimals(n, decimals, round_func):
if decimals == 0: # avoid float div for int rounded value
return round_func(n)
else:
multiplier = 10 ** decimals
return round_func(n * multiplier) / multiplier
def timedelta_round(x):
    """Given a numpy timedelta, find the largest time unit without changing value.

    Iterates from coarse ("Y") to fine ("ns") units and returns the first
    unit whose cast represents ``x`` exactly.

    Parameters
    ----------
    x : np.timedelta64
        Timedelta to inspect.

    Returns
    -------
    str or None
        Unit code usable in ``timedelta64[<unit>]``; None if nothing matched
        (not expected for a valid ``np.timedelta64``).
    """
    units = ["Y", "M", "D", "h", "m", "s", "ms", "us", "ns"]
    for unit in units:
        # BUGFIX: numpy raises TypeError when casting/comparing between
        # calendar units (Y/M) and fixed-size units (D and finer); such
        # incompatible units cannot match, so skip them instead of crashing
        try:
            x_rounded = x.astype(f"timedelta64[{unit}]")
            if x_rounded == x:
                return unit
        except TypeError:
            continue
def remove_any_nan(x, y):
    """Given two np.ndarray, drop every position where either value is NaN."""
    keep = ~(np.isnan(x) | np.isnan(y))
    return x[keep], y[keep]
@singledispatch
def numpy_2d(x):
    """Reshape and transform various array-like inputs to 2d np arrays"""


@numpy_2d.register
def _(x: np.ndarray):
    # promote 1d to a single-row 2d array; reject anything above 2d
    ndim = x.ndim
    if ndim == 1:
        return x[np.newaxis]
    if ndim == 2:
        return x
    raise ValueError("Array dimensions need to be <= 2!")


@numpy_2d.register
def _(x: pd.DataFrame):
    # one row per dataframe column
    return x.to_numpy().transpose()


@numpy_2d.register(pd.Series)
@numpy_2d.register(pd.Index)
def _(x):
    return x.to_numpy()[np.newaxis]


@numpy_2d.register
def _(x: list):
    head = x[0]
    if isinstance(head, np.ndarray):
        return numpy_1d(x)
    if isinstance(head, list):
        return np.array([numpy_1d(row) for row in x])
    return np.array([numpy_1d(x)])
@singledispatch
def numpy_1d(values):
    """Reshape and transform various array-like inputs to 1d np arrays"""


@numpy_1d.register
def _(values: np.ndarray):
    # already an ndarray: returned untouched
    return values


@numpy_1d.register(pd.Index)
@numpy_1d.register(pd.Series)
def _(values):
    return values.to_numpy()


@numpy_1d.register
def _(values: pd.DataFrame):
    # single-column frames collapse to a 1d array
    return values.to_numpy().squeeze()


@numpy_1d.register
def _(values: list):
    return np.array(values)


@numpy_1d.register
def _(values: str):  # TODO: this should be any non-iterable
    return np.array([values])
@singledispatch
def get_label(x):
    """Try to get names out of array-like inputs"""
    # unregistered types carry no label information
    pass


@get_label.register
def _(x: pd.DataFrame):
    # the column names act as labels
    return list(x)


@get_label.register
def _(x: pd.Series):
    return x.name
@singledispatch
def get_index(x):
    """Try to get index out of array-like inputs"""


@get_index.register(pd.DataFrame)
@get_index.register(pd.Series)
def _(x):
    return np.array(x.index)
def to_numeric(x):
    """Convert an array-like input to numeric values.

    Datetime arrays are converted to nanosecond offsets relative to
    ``ANCHOR_DATETIME`` so arithmetic can be performed on them.

    Returns
    -------
    tuple
        ``(values, dtype)`` — the original datetime dtype when a conversion
        happened, otherwise ``(values, False)``.
    """
    # BUGFIX: the docstring used to sit *after* the first statement, where
    # it was a no-op string expression rather than a docstring.
    x = numpy_1d(x)
    if x.dtype.kind in np.typecodes["Datetime"]:
        # np.typecodes["Datetime"] is "Mm", so timedelta64 ('m') also lands
        # here — presumably only datetime64 input is expected; TODO confirm
        return x.astype("datetime64[ns]") - ANCHOR_DATETIME, x.dtype
    return x, False
def to_datetime(x):
    """Invert :func:`to_numeric` for datetimes by adding the anchor back."""
    return ANCHOR_DATETIME + x
| [
"numpy.datetime64",
"pandas.read_csv",
"os.path.dirname",
"numpy.isnan",
"numpy.array",
"os.path.join"
] | [((171, 198), 'numpy.datetime64', 'np.datetime64', (['"""1970-01-01"""'], {}), "('1970-01-01')\n", (184, 198), True, 'import numpy as np\n'), ((555, 580), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (570, 580), False, 'import os\n'), ((600, 652), 'os.path.join', 'os.path.join', (['module_path', '"""datasets"""', 'f"""{name}.csv"""'], {}), "(module_path, 'datasets', f'{name}.csv')\n", (612, 652), False, 'import os\n'), ((665, 690), 'pandas.read_csv', 'pd.read_csv', (['dataset_path'], {}), '(dataset_path)\n', (676, 690), True, 'import pandas as pd\n'), ((3162, 3173), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3170, 3173), True, 'import numpy as np\n'), ((3262, 3275), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (3270, 3275), True, 'import numpy as np\n'), ((3675, 3692), 'numpy.array', 'np.array', (['x.index'], {}), '(x.index)\n', (3683, 3692), True, 'import numpy as np\n'), ((1960, 1971), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1968, 1971), True, 'import numpy as np\n'), ((1974, 1985), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1982, 1985), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 19:55:24 2019
@author: avelinojaver
"""
import sys
from pathlib import Path
dname = Path(__file__).resolve().parents[1]
sys.path.append(str(dname))
import torch
import pandas as pd
import cv2
import numpy as np
import matplotlib.pylab as plt
import tqdm
from cell_localization.models import UNet, UNetv2B
#from from_images import cv2_peak_local_max
from skimage.feature import peak_local_max
from cell_localization.evaluation.localmaxima import evaluate_coordinates
def normalize_softmax(xhat):
    """Softmax each (batch, channel) map over its spatial locations, then
    rescale so every map's maximum equals one.

    ``xhat`` is expected as a (batch, channels, w, h) tensor; the returned
    tensor has the same shape.
    """
    n_batch, n_channels, w, h = xhat.shape
    flat = xhat.view(n_batch, n_channels, -1)
    flat = torch.nn.functional.softmax(flat, dim=2)
    # divide by the per-map peak so values lie in (0, 1] with max == 1
    peak, _ = flat.max(dim=2)
    flat = flat / peak.unsqueeze(2)
    return flat.view(n_batch, n_channels, w, h)
def reshape_norm(xhat):
    """Reduce a (batch, 5*C, w, h) mixture-model output to C normalised maps.

    Each channel is encoded by 5 consecutive output maps; component 0 is the
    heatmap, which is then softmax-normalised via :func:`normalize_softmax`.
    """
    n_batch, n_outs, w, h = xhat.shape
    n_channels = n_outs // 5
    heatmaps = xhat.view(n_batch, n_channels, 5, w, h)[:, :, 0]
    return normalize_softmax(heatmaps)
if __name__ == '__main__':
    # Evaluate a trained egg-localisation model on labelled HDF5 images:
    # predict egg coordinates, match them against the ground truth and
    # report precision / recall / F1.

    # --- experiment selection: basename of the trained run to evaluate.
    # Earlier runs are kept commented out for quick switching.
    #bn = 'eggs-int/eggs-int_unet_hard-neg-freq1_l1smooth_20190513_081902_adam_lr0.000128_batch128'
    #bn = 'eggs-int/eggs-int_unet_l1smooth_20190512_210150_adam_lr0.000128_batch128'
    #bn = 'eggs-only/eggs-only_unet-bn-sigmoid_hard-neg-freq1_l1smooth_20190514_094134_adam_lr0.0008_batch8'
    #bn = 'eggs-only/eggs-only_unet-bn_hard-neg-freq1_l1smooth_20190514_131259_adam_lr8e-05_batch8'
    #bn = 'eggsadam-roi48/eggsadam-roi48_unetv2b_l1smooth_20190605_110638_adam_lr0.000128_wd0.0_batch128'
    #bn = 'eggsadam-roi48/eggsadam-roi48_unetv2b_hard-neg-freq10_l1smooth_20190605_165046_adam_lr0.000128_wd0.0_batch128'
    #bn = 'eggsadamv2/eggsadamv2-roi48_unetv2b-bn-tanh_hard-neg-freq10_l1smooth_20190613_120327_adam_lr0.000128_wd0.0_batch256'
    #bn = 'eggsadam-stacked/eggsadam-stacked-roi48_unetv2b-tanh_hard-neg-freq10_maxlikelihood_20190614_180709_adam_lr3.2e-05_wd0.0_batch32'
    #bn = 'eggsadam-stacked3/eggsadam-stacked3-roi48_unetv2b-tanh_hard-neg-freq10_maxlikelihood_20190614_205856_adam_lr6e-05_wd0.0_batch60'
    #bn = 'eggsadam-stacked/eggsadam-stacked-roi48_unetv2b-tanh_hard-neg-freq10_maxlikelihood_20190614_233317_adam_lr3.2e-05_wd0.0_batch32'
    #bn = 'eggsadamI/eggsadamI-roi48_unetv2b-sigmoid_hard-neg-freq10_maxlikelihoodpooled_20190615_075129_adam_lr0.000128_wd0.0_batch256'
    #bn = 'eggsadamI/eggsadamI-roi96_unetv2b_hard-neg-freq10_maxlikelihoodpooled_20190615_235057_adam_lr3.2e-05_wd0.0_batch64'
    #bn = 'eggsadamI/eggsadamI-roi96_unetv2b_hard-neg-freq10_mixturemodelloss_20190616_190002_adam_lr0.00032_wd0.0_batch64'
    #bn = 'eggsadamI/eggsadamI-roi96_unetv2b_hard-neg-freq10_maxlikelihoodpooled_20190616_170529_adam_lr3.2e-05_wd0.0_batch64'
    #bn = 'eggsadamI/eggsadamI-roi96_unetv2b_hard-neg-freq10_mixturemodelloss_20190618_231245_adam_lr0.00032_wd0.0_batch64'
    #bn = 'eggsadamI/eggsadamI-roi96_unetv2b_hard-neg-freq10_maxlikelihoodpooled_20190618_231245_adam_lr3.2e-05_wd0.0_batch64'
    #bn = 'eggsadamI/eggsadamI-roi96_unetv2b-bn_hard-neg-freq10_mixturemodelloss_20190619_081519_adam_lr0.00032_wd0.0_batch64'
    bn = 'eggsadamI/eggsadamI-roi96_unetv2b-bn_hard-neg-freq10_maxlikelihoodpooled_20190619_081454_adam_lr3.2e-05_wd0.0_batch64'

    # checkpoint to load: None -> the best model, otherwise a given epoch
    n_epoch = None
    if n_epoch is None:
        #check_name = 'checkpoint.pth.tar'
        check_name = 'model_best.pth.tar'
    else:
        check_name = f'checkpoint-{n_epoch}.pth.tar'

    model_path = Path().home() / 'workspace/localization/results/locmax_detection/eggs' / bn / check_name
    print(model_path)
    #%%
    # --- model construction: architecture variants are encoded as flags in
    # the run basename, so they can be parsed back out of `bn`
    n_ch_in, n_ch_out = 1, 1
    batchnorm = '-bn' in bn
    tanh_head = '-tanh' in bn
    sigma_out = '-sigmoid' in bn
    if 'unetv2b' in bn:
        model_func = UNetv2B
    else:
        model_func = UNet
    # choose how the raw network output is turned into a normalised heatmap,
    # depending on the loss used during training; mixturemodelloss predicts
    # 5 maps per channel
    if 'maxlikelihood' in bn:
        preeval_func = normalize_softmax
    elif 'mixturemodelloss' in bn:
        preeval_func = reshape_norm
        n_ch_out = n_ch_out*5
    else:
        preeval_func = lambda x : x
    model = model_func(n_channels = n_ch_in,
                       n_classes = n_ch_out,
                       tanh_head = tanh_head,
                       sigma_out = sigma_out,
                       batchnorm=batchnorm)
    # restore trained weights on CPU and switch to inference mode
    state = torch.load(model_path, map_location = 'cpu')
    model.load_state_dict(state['state_dict'])
    model.eval()
    print(state['epoch'])
    #%%
    # --- evaluation data: HDF5 files holding an image and its ground-truth
    # egg coordinates
    data_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_first/validation'
    #data_root_dir = Path.home() / 'workspace/localization/data/worm_eggs/train'
    data_root_dir = Path(data_root_dir)
    fnames = data_root_dir.rglob('*.hdf5')
    fnames = list(fnames)
    # fnames = []
    # for dd in ['validation', 'test']:
    #     data_root_dir = Path.home() / 'workspace/localization/data/worm_eggs/' / dd
    #     fnames += list(data_root_dir.rglob('*.hdf5'))
    #%%
    norm_args = dict(vmin = 0, vmax=1)
    # per-class accumulator of (TP, FP, FN); seeded with 1e-3 so the
    # precision/recall divisions below cannot hit zero
    metrics = np.full((model.n_classes, 3), 1e-3)
    for fname in tqdm.tqdm(fnames):
        # load the image and the ground-truth coordinates table
        with pd.HDFStore(str(fname), 'r') as fid:
            img = fid.get_node('/img')[:]
            df = fid['/coords']
        #xin = np.rollaxis(img, 2, 0)
        # add channel dim and scale to [0, 1] (assumes 8-bit input)
        xin = img[None]
        xin = xin.astype(np.float32)/255
        with torch.no_grad():
            xin = torch.from_numpy(xin[None])
            xhat = model(xin)
            xhat_n = preeval_func(xhat)
        xout = xhat_n[0].detach().numpy()
        #%%
        # min-max normalise the predicted heatmap before peak detection
        bot, top = xout.min(), xout.max()
        xout = (xout - bot) / (top - bot)
        #%%
        fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
        axs[0].imshow(img, cmap='gray')
        bb = xout[0]
        # if 'maxlikelihood' in bn:
        #     bb = cv2.blur(bb, (3,3))
        axs[1].imshow(bb)#, **norm_args)
        # local maxima of the heatmap are the predicted egg locations
        coords_pred = peak_local_max(bb, min_distance = 2, threshold_abs = 0.05, threshold_rel = 0.05)
        target = np.array((df['cy'], df['cx'])).T
        #%%
        # greedy matching of predictions to ground truth within 5 pixels
        TP, FP, FN, pred_ind, true_ind = evaluate_coordinates(coords_pred, target, max_dist = 5)
        metrics[0] += (TP, FP, FN)
        axs[0].plot(df['cx'], df['cy'], 'xr')
        if coords_pred.size > 0:
            axs[0].plot(coords_pred[...,1], coords_pred[...,0], 'g.')
        #%%
        # extra diagnostic plots for the mixture-model outputs: predicted
        # offsets (mu) and spreads (sigma) per pixel
        if 'mixturemodelloss' in bn:
            fig, axs = plt.subplots(2, 3, sharex=True, sharey=True)
            axs[0][0].imshow(img, cmap='gray')
            axs[0][0].plot(df['cx'], df['cy'], 'xr')
            if coords_pred.size > 0:
                axs[0][0].plot(coords_pred[...,1], coords_pred[...,0], 'g.')
            axs[1][0].imshow(xout[0])#, **norm_args)
            mux = torch.clamp(xhat[:, 1], -3, 3)[0].numpy()
            muy = torch.clamp(xhat[:, 2], -3, 3)[0].numpy()
            sx = torch.clamp(xhat[:, 3], 1, 100)[0].numpy()
            sy = torch.clamp(xhat[:, 4], 1, 100)[0].numpy()
            axs[0][1].imshow(mux)
            axs[0][2].imshow(muy)
            axs[1][1].imshow(sx)
            axs[1][2].imshow(sy)
        #%%
        #break
    #%%
    plt.show()
    # aggregate detection metrics over all evaluated images
    TP, FP, FN = metrics[0]
    P = TP/(TP+FP)
    R = TP/(TP+FN)
    F1 = 2*P*R/(P+R)
    print(f'P={P}, R={R}, F1={F1}')
    #%%
    print(bn)
"numpy.full",
"tqdm.tqdm",
"torch.from_numpy",
"pathlib.Path.home",
"skimage.feature.peak_local_max",
"torch.load",
"cell_localization.evaluation.localmaxima.evaluate_coordinates",
"torch.nn.functional.softmax",
"pathlib.Path",
"torch.clamp",
"numpy.array",
"matplotlib.pylab.subplots",
"torc... | [((672, 710), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['hh'], {'dim': '(2)'}), '(hh, dim=2)\n', (699, 710), False, 'import torch\n'), ((4332, 4374), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (4342, 4374), False, 'import torch\n'), ((4680, 4699), 'pathlib.Path', 'Path', (['data_root_dir'], {}), '(data_root_dir)\n', (4684, 4699), False, 'from pathlib import Path\n'), ((5036, 5072), 'numpy.full', 'np.full', (['(model.n_classes, 3)', '(0.001)'], {}), '((model.n_classes, 3), 0.001)\n', (5043, 5072), True, 'import numpy as np\n'), ((5089, 5106), 'tqdm.tqdm', 'tqdm.tqdm', (['fnames'], {}), '(fnames)\n', (5098, 5106), False, 'import tqdm\n'), ((7301, 7311), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7309, 7311), True, 'import matplotlib.pylab as plt\n'), ((4500, 4511), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (4509, 4511), False, 'from pathlib import Path\n'), ((5718, 5762), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, sharex=True, sharey=True)\n', (5730, 5762), True, 'import matplotlib.pylab as plt\n'), ((6005, 6079), 'skimage.feature.peak_local_max', 'peak_local_max', (['bb'], {'min_distance': '(2)', 'threshold_abs': '(0.05)', 'threshold_rel': '(0.05)'}), '(bb, min_distance=2, threshold_abs=0.05, threshold_rel=0.05)\n', (6019, 6079), False, 'from skimage.feature import peak_local_max\n'), ((6189, 6242), 'cell_localization.evaluation.localmaxima.evaluate_coordinates', 'evaluate_coordinates', (['coords_pred', 'target'], {'max_dist': '(5)'}), '(coords_pred, target, max_dist=5)\n', (6209, 6242), False, 'from cell_localization.evaluation.localmaxima import evaluate_coordinates\n'), ((5366, 5381), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5379, 5381), False, 'import torch\n'), ((5401, 5428), 'torch.from_numpy', 'torch.from_numpy', (['xin[None]'], {}), '(xin[None])\n', 
(5417, 5428), False, 'import torch\n'), ((6103, 6133), 'numpy.array', 'np.array', (["(df['cy'], df['cx'])"], {}), "((df['cy'], df['cx']))\n", (6111, 6133), True, 'import numpy as np\n'), ((6510, 6554), 'matplotlib.pylab.subplots', 'plt.subplots', (['(2)', '(3)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 3, sharex=True, sharey=True)\n', (6522, 6554), True, 'import matplotlib.pylab as plt\n'), ((160, 174), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (164, 174), False, 'from pathlib import Path\n'), ((3505, 3511), 'pathlib.Path', 'Path', ([], {}), '()\n', (3509, 3511), False, 'from pathlib import Path\n'), ((6853, 6883), 'torch.clamp', 'torch.clamp', (['xhat[:, 1]', '(-3)', '(3)'], {}), '(xhat[:, 1], -3, 3)\n', (6864, 6883), False, 'import torch\n'), ((6913, 6943), 'torch.clamp', 'torch.clamp', (['xhat[:, 2]', '(-3)', '(3)'], {}), '(xhat[:, 2], -3, 3)\n', (6924, 6943), False, 'import torch\n'), ((6972, 7003), 'torch.clamp', 'torch.clamp', (['xhat[:, 3]', '(1)', '(100)'], {}), '(xhat[:, 3], 1, 100)\n', (6983, 7003), False, 'import torch\n'), ((7032, 7063), 'torch.clamp', 'torch.clamp', (['xhat[:, 4]', '(1)', '(100)'], {}), '(xhat[:, 4], 1, 100)\n', (7043, 7063), False, 'import torch\n')] |
#!/usr/bin/env python3
"""
A custom gaussian harness for QCEngine which should be registered with qcengine.
"""
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import numpy as np
import cclib
from qcelemental.models import AtomicInput, AtomicResult
from qcelemental.util import which
from qcengine.exceptions import UnknownError
from qcengine.util import execute, get_template, disk_files
from qcengine.units import ureg
from .model import ProgramHarness
if TYPE_CHECKING:
from qcengine.config import TaskConfig
class GaussianHarness(ProgramHarness):
    """
    A very minimal Gaussian wrapper for optimisations and torsiondrives.

    Note: a lot of key information needed for QCArchive is not extracted.
    """

    name = "gaussian"
    scratch = True
    thread_safe = True
    thread_parallel = True
    node_parallel = False
    managed_memory = True

    @staticmethod
    def found(raise_error: bool = False) -> bool:
        """
        Check if gaussian can be found via the command line, check for g09 and g16.

        Parameters
        ----------
        raise_error:
            When True and neither executable is available, raise a detailed
            error instead of returning False.
        """
        gaussian09 = which("g09", return_bool=True)
        gaussian16 = which("g16", return_bool=True)
        is_found = gaussian09 or gaussian16
        # Bug fix: ``raise_error`` used to be accepted but silently ignored.
        if not is_found and raise_error:
            which(
                "g16",
                return_bool=True,
                raise_error=True,
                raise_msg="Gaussian 09/16 can not be found make sure they are available.",
            )
        return is_found

    def get_version(self) -> str:
        """
        Work out which version of gaussian we should be running.

        Returns "g09" when available, otherwise "g16"; raises via ``which``
        when neither executable can be found.
        """
        if which("g09", return_bool=True, raise_error=False):
            return "g09"
        # raises with a helpful message when g16 is missing as well
        which(
            "g16",
            return_bool=True,
            raise_error=True,
            raise_msg="Gaussian 09/16 can not be found make sure they are available.",
        )
        return "g16"

    def compute(self, input_data: AtomicInput, config: "TaskConfig") -> AtomicResult:
        """
        Run the compute job via the gaussian CLI.

        Raises
        ------
        UnknownError
            When the gaussian subprocess fails; the logfile contents are
            used as the error message.
        """
        # we always use an internal template
        job_inputs = self.build_input(input_model=input_data, config=config)
        # check for extra output files requested via the trailing input section
        if "gaussian.wfx" in input_data.keywords.get("add_input", []):
            extra_outfiles = ["gaussian.wfx"]
        else:
            extra_outfiles = None
        exe_success, proc = self.execute(job_inputs, extra_outfiles=extra_outfiles)
        if exe_success:
            return self.parse_output(proc["outfiles"], input_data)
        raise UnknownError(proc["outfiles"]["gaussian.log"])

    def execute(
        self,
        inputs: Dict[str, Any],
        extra_outfiles: Optional[List[str]] = None,
        extra_commands: Optional[List[str]] = None,
        scratch_name: Optional[str] = None,
        timeout: Optional[int] = None,
    ) -> Tuple[bool, Dict[str, Any]]:
        """
        Run the gaussian single point job followed by the fchk conversion.

        Note: ``extra_commands``, ``scratch_name`` and ``timeout`` are part of
        the harness interface but are currently not used.
        """
        # collect together inputs
        infiles = inputs["infiles"]
        outfiles = ["gaussian.log", "lig.chk"]
        if extra_outfiles is not None:
            outfiles.extend(extra_outfiles)
        #FIXME: YG: only for testing, need to solve the gaussian environment
        #gaussian_version = self.get_version()
        # NOTE(review): '&&' normally needs shell interpretation but the
        # command is passed as a list -- confirm qcengine.util.execute handles it.
        commands = 'ml medsci gaussian && g16 gaussian.com'.split()
        scratch_directory = inputs["scratch_directory"]
        # remember before formatting lig.chk is binary, run calculation
        exe_success, proc = execute(
            command=commands,
            infiles=infiles,
            outfiles=outfiles,
            scratch_directory=scratch_directory,
            as_binary=["lig.chk"],
        )
        if exe_success:
            # now we need to run the conversion of the chk file
            # FIXME: YG: only for testing, need to solve the gaussian environment
            commands = 'ml medsci gaussian && formchk lig.chk lig.fchk'.split()
            infiles = {"lig.chk": proc["outfiles"]["lig.chk"]}
            chk_success, proc_chk = execute(
                command=commands,
                infiles=infiles,
                outfiles=["lig.fchk"],
                scratch_directory=scratch_directory,
                as_binary=["lig.chk"],
            )
            # now we want to put this out file into the main proc
            proc["outfiles"]["lig.fchk"] = proc_chk["outfiles"]["lig.fchk"]
            # we can dump the binary chk file, only the fchk is parsed later
            del proc["outfiles"]["lig.chk"]
        return exe_success, proc

    @classmethod
    def functional_converter(cls, method: str) -> str:
        """
        Convert the given functional name to the correct format for gaussian,
        translating any trailing ``-d3``/``-d3bj`` into an
        EmpiricalDispersion keyword.
        """
        method = method.lower()
        functionals = {"pbe": "PBEPBE", "wb97x-d": "wB97XD"}
        # check for dispersion
        if "-d3" in method:
            theory = method.split("-d3")[0]
            dispersion = "GD3"
            if "bj" in method:
                dispersion += "BJ"
        else:
            theory = method
            dispersion = None
        # convert the theory name via the known alias table
        theory = functionals.get(theory, theory)
        if dispersion is not None:
            return f"EmpiricalDispersion={dispersion.upper()} {theory}"
        return theory

    @classmethod
    def driver_conversion(cls, driver: str) -> str:
        """
        Convert the qcengine driver enum to the specific keyword for gaussian.
        """
        drivers = {"energy": "SP", "gradient": "Force=NoStep", "hessian": "FREQ", "optimization": "opt(maxcycles=200) freq"}
        return drivers[driver.lower()]

    @classmethod
    def get_symmetry(cls, driver: str) -> str:
        """
        For gaussian calculations we want to set the use of symmetry depending on the driver.

        Note:
            There is an issue with turning off symmetry for large molecules when using an implicit solvent.
        Important:
            Symmetry must be turned off for gradient calculations so geometric is not confused.
        """
        symmetry = {"energy": "", "gradient": "nosymm", "hessian": "nosymm", "optimization": "nosymm"}
        return symmetry[driver.lower()]

    def build_input(
        self,
        input_model: AtomicInput,
        config: "TaskConfig",
        template: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Use the template files stored in QUBEKit to build a gaussian input file for the given driver.

        Returns a dict with the rendered input file, the scratch directory
        and a copy of the input model.
        """
        from jinja2 import Template

        template_file = get_template('gaussian.com')
        with open(template_file) as file:
            template = Template(file.read())
        template_data = {
            "memory": int(config.memory),
            "threads": config.ncores,
            "driver": self.driver_conversion(driver=input_model.driver),
            "title": "gaussian job",
            "symmetry": self.get_symmetry(driver=input_model.driver),
        }
        molecule = input_model.molecule
        spec = input_model.model
        theory = self.functional_converter(method=spec.method)
        template_data["theory"] = theory
        template_data["basis"] = spec.basis
        template_data["charge"] = int(molecule.molecular_charge)
        template_data["multiplicity"] = molecule.molecular_multiplicity
        template_data["scf_maxiter"] = input_model.extras.get("maxiter", 300)
        # Bug fix: take copies so we never mutate the lists stored in
        # ``input_model.keywords`` in place (the old code extended them directly).
        cmdline_extra = list(input_model.keywords.get("cmdline_extra", []))
        add_input = list(input_model.keywords.get("add_input", []))
        # check for extra scf property settings
        extra_cmdline, extra_input = self.scf_property_conversion(
            input_model.keywords.get("scf_properties", [])
        )
        cmdline_extra.extend(extra_cmdline)
        add_input.extend(extra_input)
        # check for td settings
        template_data["td_settings"] = self.td_settings(keywords=input_model.keywords)
        template_data.update(input_model.keywords)
        # re-assign after the update so the extended copies win over the raw keywords
        template_data["cmdline_extra"] = cmdline_extra
        template_data["add_input"] = add_input
        # now we need to build the coords data
        data = []
        for i, symbol in enumerate(molecule.symbols):
            # we must convert the atomic input back to angstroms
            data.append((symbol, molecule.geometry[i] * ureg.conversion_factor("bohr", "angstrom")))
        template_data["data"] = data
        rendered_template = template.render(**template_data)
        # YG: add a new line to end of the com file
        rendered_template += '\n'
        # also write to file
        with open("gaussian.com", "w") as output:
            output.write(rendered_template)
        return {
            "infiles": {"gaussian.com": rendered_template},
            "scratch_directory": config.scratch_directory,
            "input_result": input_model.copy(deep=True),
        }

    @classmethod
    def check_convergence(cls, logfile: str) -> None:
        """
        Check the gaussian log file to make sure we have normal termination.
        """
        if "Normal termination of Gaussian" not in logfile:
            # raise an error with the log file as output
            raise UnknownError(message=logfile)

    @classmethod
    def td_settings(cls, keywords: Dict[str, str]) -> str:
        """Construct any time-dependent settings from the input keywords."""
        use_tda = keywords.get("tdscf_tda")
        states = keywords.get("tdscf_states")
        if use_tda is None:
            return ""
        return f"TD{'A' if use_tda else ''}=(nstates={states})"

    @classmethod
    def scf_property_conversion(
        cls, scf_properties: List[str]
    ) -> Tuple[List[str], List[str]]:
        """For each of the scf_properties requested convert them to gaussian format."""
        cmdline_extra, add_input = [], []
        if "wiberg_lowdin_indices" in scf_properties:
            cmdline_extra.append("pop=(nboread)")
            add_input.extend(["", "$nbo BNDIDX $end"])
        return cmdline_extra, add_input

    def parse_output(
        self, outfiles: Dict[str, str], input_model: AtomicInput
    ) -> AtomicResult:
        """
        For the set of output files parse them to extract as much info as possible and return the atomic result.

        From the fchk file we get the energy and hessian, the gradient,
        optimised geometry and frequencies are taken from the log file.
        """
        properties = {}
        qcvars = {}
        # make sure we got a valid exit status before parsing anything
        self.check_convergence(logfile=outfiles["gaussian.log"])
        version = self.parse_version(logfile=outfiles["gaussian.log"])
        # build the main data dict
        output_data = input_model.dict()
        provenance = {"version": version, "creator": "gaussian", "routine": "CLI"}
        # collect the total energy from the fchk file
        logfile = outfiles["lig.fchk"]
        # process fchk file (the last "Total Energy" line wins)
        for line in logfile.split("\n"):
            if "Total Energy" in line:
                energy = float(line.split()[3])
                properties["return_energy"] = energy
                properties["scf_total_energy"] = energy
                if input_model.driver == "energy":
                    output_data["return_result"] = energy
        if input_model.driver == "gradient":
            # now we need to parse out the forces
            gradient = self.parse_gradient(fchfile=outfiles["lig.fchk"])
            output_data["return_result"] = gradient
        elif input_model.driver == "hessian":
            hessian = self.parse_hessian(fchkfile=outfiles["lig.fchk"])
            output_data["return_result"] = hessian
        #YG: optimization output
        elif input_model.driver == "optimization":
            coordinates = self.parse_coords(logfile=outfiles["gaussian.log"])
            output_data["return_result"] = {'optimization': coordinates}
            #FIXME: YG: need to separate freq from optimization, but for now, just parse all
            output_data["return_result"]['vibfreq'] = self.parse_vibfreq(logfile=outfiles["gaussian.log"]).tolist()
            output_data["return_result"]['thermo'] = self.parse_thermo(logfile=outfiles["gaussian.log"])
        # parse scf_properties
        if "scf_properties" in input_model.keywords:
            qcvars["WIBERG_LOWDIN_INDICES"] = self.parse_wbo(
                logfile=outfiles["gaussian.log"],
                natoms=len(input_model.molecule.symbols),
            )
        # if there is an extra output file grab it
        if "gaussian.wfx" in outfiles:
            output_data["extras"]["gaussian.wfx"] = outfiles["gaussian.wfx"]
        if qcvars:
            output_data["extras"]["qcvars"] = qcvars
        output_data["properties"] = properties
        output_data["schema_name"] = "qcschema_output"
        output_data["stdout"] = outfiles["gaussian.log"]
        output_data["success"] = True
        output_data["provenance"] = provenance
        return AtomicResult(**output_data)

    @classmethod
    def parse_vibfreq(cls, logfile: str) -> np.array:
        """Extract the vibrational frequencies from the logfile via cclib."""
        with disk_files(infiles={'gaussian.log': logfile}, outfiles={}) as _:
            log_data = cclib.io.ccread('gaussian.log')
        return log_data.vibfreqs

    @classmethod
    def parse_thermo(cls, logfile: str) -> str:
        """Run goodvibes on the logfile and return its report as text."""
        commands = 'python -m goodvibes gaussian.log'.split()
        infiles = {'gaussian.log': logfile}
        outfiles = ["Goodvibes_output.dat"]
        exe_success, proc = execute(
            command=commands,
            infiles=infiles,
            outfiles=outfiles,
            shell=True
        )
        return proc["outfiles"]["Goodvibes_output.dat"]

    @classmethod
    def parse_coords(cls, logfile: str) -> str:
        """
        Parse the optimised coordinates from the logfile by converting it
        to xyz format with openbabel.
        """
        #FIXME: YG: need to resolve the invoking path
        commands = '/gpfs/workspace/users/guany20/.conda/envs/QCdemo/bin/obabel -ilog gaussian.log -O optimization.xyz'.split()
        infiles = {'gaussian.log': logfile}
        outfiles = ['optimization.xyz']
        exe_success, proc = execute(
            command=commands,
            infiles=infiles,
            outfiles=outfiles,
            shell=True,
        )
        return proc["outfiles"]['optimization.xyz']

    @classmethod
    def parse_version(cls, logfile: str) -> str:
        """
        Parse the gaussian version from the logfile.
        """
        # the version is printed on the line after the first ***** banner
        lines = logfile.split("\n")
        star_line = 0
        for i, line in enumerate(lines):
            if "******************************************" in line:
                star_line = i
                break
        # now extract the line
        version = lines[star_line + 1].strip()
        return version

    @classmethod
    def parse_gradient(cls, fchfile: str) -> List[float]:
        """
        Parse the cartesian gradient from a gaussian fchk file and return them as a flat list.

        Note: the parameter name ``fchfile`` (likely a typo for "fchkfile")
        is kept for backwards compatibility with keyword callers.
        """
        gradient = []
        grad_found = False
        for line in fchfile.split("\n"):
            if "Cartesian Gradient" in line:
                grad_found = True
            elif grad_found:
                # the gradient block ends at the next section header;
                # "Nonadiabatic coupling" added for g16 support
                if (
                    "Cartesian Force Constants" in line
                    or "Dipole Moment" in line
                    or "Nonadiabatic coupling" in line
                ):
                    grad_found = False
                else:
                    gradient.extend([float(grad) for grad in line.split()])
        return gradient

    @classmethod
    def parse_hessian(cls, fchkfile: str) -> List[float]:
        """
        Parse the hessian from the fchk file and return as a flat list in atomic units.

        Note gaussian gives us only the lower triangle so the full symmetric
        matrix is reconstructed before flattening.
        #TODO the reshaping of the array could be fragile is there a better way?
        """
        hessian = []
        hessian_found = False
        for line in fchkfile.split("\n"):
            if line.startswith("Cartesian Force Constants"):
                hessian_found = True
            elif hessian_found:
                if line.startswith("Nonadiabatic coupling") or line.startswith(
                    "Dipole Moment"
                ):
                    hessian_found = False
                else:
                    hessian.extend([float(hess) for hess in line.split()])
        # now we can calculate the size of the hessian from the triangle
        # length: (2 * triangle length) + 0.25 = (x+0.5)^2
        hess_size = int(np.sqrt(2 * len(hessian) + 0.25) - 0.5)
        full_hessian = np.zeros((hess_size, hess_size))
        m = 0
        for i in range(hess_size):
            for j in range(i + 1):
                full_hessian[i, j] = hessian[m]
                full_hessian[j, i] = hessian[m]
                m += 1
        return full_hessian.flatten().tolist()

    @classmethod
    def parse_wbo(cls, logfile: str, natoms: int) -> List[float]:
        """
        Parse the WBO matrix from the logfile as a flat list.

        Raises
        ------
        UnknownError
            When the logfile contains no Wiberg bond index matrix.
        """
        wbo_matrix = [[] for _ in range(natoms)]
        matrix_line = None
        lines = logfile.split("\n")
        for i, line in enumerate(lines):
            if "Wiberg bond index matrix" in line:
                matrix_line = i + 1
                break
        # Bug fix: fail with a clear error instead of a TypeError when the
        # matrix header is missing from the logfile.
        if matrix_line is None:
            raise UnknownError(message="Wiberg bond index matrix not found in the Gaussian logfile.")
        # the matrix is printed in blocks of up to 9 columns
        for i in range(int(np.ceil(natoms / 9))):
            starting_line = matrix_line + ((i + 1) * 3) + (i * natoms)
            for line in lines[starting_line : starting_line + natoms]:
                sp = line.split()
                atom_idx = int(float(sp[0])) - 1
                orders = [float(value) for value in sp[2:]]
                wbo_matrix[atom_idx].extend(orders)
        return np.array(wbo_matrix).flatten().tolist()
| [
"qcengine.util.execute",
"qcengine.util.disk_files",
"cclib.io.ccread",
"numpy.ceil",
"qcengine.util.get_template",
"numpy.zeros",
"qcengine.units.ureg.conversion_factor",
"numpy.array",
"qcelemental.models.AtomicResult",
"qcelemental.util.which",
"qcengine.exceptions.UnknownError"
] | [((1084, 1114), 'qcelemental.util.which', 'which', (['"""g09"""'], {'return_bool': '(True)'}), "('g09', return_bool=True)\n", (1089, 1114), False, 'from qcelemental.util import which\n'), ((1136, 1166), 'qcelemental.util.which', 'which', (['"""g16"""'], {'return_bool': '(True)'}), "('g16', return_bool=True)\n", (1141, 1166), False, 'from qcelemental.util import which\n'), ((1342, 1391), 'qcelemental.util.which', 'which', (['"""g09"""'], {'return_bool': '(True)', 'raise_error': '(False)'}), "('g09', return_bool=True, raise_error=False)\n", (1347, 1391), False, 'from qcelemental.util import which\n'), ((3366, 3491), 'qcengine.util.execute', 'execute', ([], {'command': 'commands', 'infiles': 'infiles', 'outfiles': 'outfiles', 'scratch_directory': 'scratch_directory', 'as_binary': "['lig.chk']"}), "(command=commands, infiles=infiles, outfiles=outfiles,\n scratch_directory=scratch_directory, as_binary=['lig.chk'])\n", (3373, 3491), False, 'from qcengine.util import execute, get_template, disk_files\n'), ((6385, 6413), 'qcengine.util.get_template', 'get_template', (['"""gaussian.com"""'], {}), "('gaussian.com')\n", (6397, 6413), False, 'from qcengine.util import execute, get_template, disk_files\n'), ((12860, 12887), 'qcelemental.models.AtomicResult', 'AtomicResult', ([], {}), '(**output_data)\n', (12872, 12887), False, 'from qcelemental.models import AtomicInput, AtomicResult\n'), ((13374, 13447), 'qcengine.util.execute', 'execute', ([], {'command': 'commands', 'infiles': 'infiles', 'outfiles': 'outfiles', 'shell': '(True)'}), '(command=commands, infiles=infiles, outfiles=outfiles, shell=True)\n', (13381, 13447), False, 'from qcengine.util import execute, get_template, disk_files\n'), ((13989, 14062), 'qcengine.util.execute', 'execute', ([], {'command': 'commands', 'infiles': 'infiles', 'outfiles': 'outfiles', 'shell': '(True)'}), '(command=commands, infiles=infiles, outfiles=outfiles, shell=True)\n', (13996, 14062), False, 'from qcengine.util import execute, 
get_template, disk_files\n'), ((16552, 16584), 'numpy.zeros', 'np.zeros', (['(hess_size, hess_size)'], {}), '((hess_size, hess_size))\n', (16560, 16584), True, 'import numpy as np\n'), ((1444, 1572), 'qcelemental.util.which', 'which', (['"""g16"""'], {'return_bool': '(True)', 'raise_error': '(True)', 'raise_msg': '"""Gaussian 09/16 can not be found make sure they are available."""'}), "('g16', return_bool=True, raise_error=True, raise_msg=\n 'Gaussian 09/16 can not be found make sure they are available.')\n", (1449, 1572), False, 'from qcelemental.util import which\n'), ((2394, 2440), 'qcengine.exceptions.UnknownError', 'UnknownError', (["proc['outfiles']['gaussian.log']"], {}), "(proc['outfiles']['gaussian.log'])\n", (2406, 2440), False, 'from qcengine.exceptions import UnknownError\n'), ((3909, 4038), 'qcengine.util.execute', 'execute', ([], {'command': 'commands', 'infiles': 'infiles', 'outfiles': "['lig.fchk']", 'scratch_directory': 'scratch_directory', 'as_binary': "['lig.chk']"}), "(command=commands, infiles=infiles, outfiles=['lig.fchk'],\n scratch_directory=scratch_directory, as_binary=['lig.chk'])\n", (3916, 4038), False, 'from qcengine.util import execute, get_template, disk_files\n'), ((9062, 9091), 'qcengine.exceptions.UnknownError', 'UnknownError', ([], {'message': 'logfile'}), '(message=logfile)\n', (9074, 9091), False, 'from qcengine.exceptions import UnknownError\n'), ((12973, 13031), 'qcengine.util.disk_files', 'disk_files', ([], {'infiles': "{'gaussian.log': logfile}", 'outfiles': '{}'}), "(infiles={'gaussian.log': logfile}, outfiles={})\n", (12983, 13031), False, 'from qcengine.util import execute, get_template, disk_files\n'), ((13061, 13092), 'cclib.io.ccread', 'cclib.io.ccread', (['"""gaussian.log"""'], {}), "('gaussian.log')\n", (13076, 13092), False, 'import cclib\n'), ((17324, 17343), 'numpy.ceil', 'np.ceil', (['(natoms / 9)'], {}), '(natoms / 9)\n', (17331, 17343), True, 'import numpy as np\n'), ((8198, 8240), 
'qcengine.units.ureg.conversion_factor', 'ureg.conversion_factor', (['"""bohr"""', '"""angstrom"""'], {}), "('bohr', 'angstrom')\n", (8220, 8240), False, 'from qcengine.units import ureg\n'), ((17700, 17720), 'numpy.array', 'np.array', (['wbo_matrix'], {}), '(wbo_matrix)\n', (17708, 17720), True, 'import numpy as np\n')] |
#coding:utf-8
from typing import Set
from scipy.optimize.optimize import main
from basic_config import *
import seaborn as sns
import pandas as pd
import numpy as np
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
from patsy import dmatrices
import statsmodels.formula.api as smf
def hist_attrs(data, attr_names, logs, outpath, col=2, indexed=True):
    """Plot a grid of histograms, one per attribute, and save it to *outpath*.

    Parameters
    ----------
    data : DataFrame with the attributes as columns.
    attr_names : list of column names to plot.
    logs : list of bools, one per attribute, log-scale the x axis when True.
    outpath : path of the saved figure.
    col : number of columns in the subplot grid.
    indexed : append '(a)', '(b)', ... to the x labels when True.
    """
    indexes = 'abcdefghijklmnopqrst'
    attr_num = len(attr_names)
    if attr_num == 0:
        logging.error('No attrname stated.')
        return None
    if attr_num != len(logs):
        logging.error('log scale list do not has same length as attr_names.')
        return None
    if attr_num == 1:
        indexed = False
    # Bug fix: ceil division so a non-multiple of ``col`` still gets enough
    # rows (floor division previously caused an IndexError on the overflow).
    row = -(-attr_num // col)
    # squeeze=False keeps ``axes`` 2-D even for a single row, so the
    # axes[r][c] indexing below always works.
    fig, axes = plt.subplots(row, col, figsize=(col * 4.5, row * 3.5), squeeze=False)
    for i, attr_name in enumerate(attr_names):
        r = i // col
        c = i % col
        ax = axes[r][c]
        log = logs[i]
        hist_one_attr(data, attr_name, ax, log=log)
        xlabel = attr_name
        if indexed:
            xlabel += '\n(' + indexes[i] + ')'
        ax.set_xlabel(xlabel)
    plt.tight_layout()
    plt.savefig(outpath, dpi=400)
    logging.info(f'fig saved to {outpath}')
#一个属性的分布
def hist_one_attr(data, attr_name, ax, log=True):
    """Draw the probability histogram (with KDE) of one attribute on *ax*."""
    plot_kwargs = dict(x=attr_name,
                       ax=ax,
                       log_scale=log,
                       kde=True,
                       stat='probability')
    sns.histplot(data, **plot_kwargs)
def relations_maps(data,
                   xs=('UNT', 'diversity'),
                   ys=('productivity', 'hindex'),
                   outpath=None):
    """Draw a grid of pairwise relation plots (via ``rel_two_attrs``) and save it.

    Parameters
    ----------
    data : DataFrame with the attributes as columns.
    xs, ys : sequences of column names; the grid has len(ys) rows and
        len(xs) columns. (Defaults changed from lists to tuples to avoid
        mutable default arguments; iteration/len behaviour is unchanged.)
    outpath : path of the saved figure.
    """
    # indexes = 'abcdefghijklmn'
    indexes = 'abcdefghijklmnopqrst'
    row = len(xs)
    col = len(ys)
    # Bug fix: squeeze=False keeps ``axes`` 2-D even when a single row or
    # column is requested, so axes[i][j] indexing always works.
    fig, axes = plt.subplots(col, row, figsize=(row * 4.5, col * 3.5), squeeze=False)
    figindex = 0
    for i, x in enumerate(ys):
        for j, y in enumerate(xs):
            ax = axes[i][j]
            rel_two_attrs(data, x, y, ax)
            xlabel = x
            ax.set_xlabel(xlabel + '\n(' + indexes[figindex] + ')')
            ax.set_ylabel(y)
            if x == 'productivity':
                ax.set_xscale('log')
            figindex += 1
    plt.tight_layout()
    plt.savefig(outpath, dpi=400)
    logging.info(f'fig saved to {outpath}')
def rel_two_attrs(data, x, y, ax):
    """Plot the mean trend of *y* against *x* plus a LOWESS smooth on *ax*."""
    sns.lineplot(data=data, x=x, y=y, ax=ax, alpha=0.5, ci=None)
    smoothed = lowess(data[y], data[x], frac=1. / 3, it=0)
    lo = np.min(data[y])
    hi = np.max(data[y])
    ax.set_ylim(lo / 2, hi * 1.5)
    xs, ys = zip(*smoothed)
    ax.plot(xs, ys)
def variable_dis():
    """Plot the distributions of the author-topic indicators and the pairwise
    relation maps between them, saving figures fig1-fig5 under fig/."""
    data = pd.read_csv('data/author_topic_indicators.txt')
    sns.set_theme(style='ticks')
    # derived independent variables
    data['NUNT'] = data['UNT'] / data['productivity']
    data['persistence'] = data['MAX PNUOT'] / data['productivity']
    # distributions of the independent then dependent variables
    hist_attrs(data, ['UNT', 'NUNT', 'persistence', 'diversity'],
               [False] * 4, 'fig/fig1.png')
    hist_attrs(data, ['TNC', 'ANC', 'hindex', 'productivity'],
               [False, False, False, True], 'fig/fig2.png')
    # relation maps: (xs, ys, output path)
    map_specs = [
        (['UNT', 'NUNT', 'diversity', 'persistence'],
         ['hindex', 'TNC', 'ANC', 'productivity'], 'fig/fig3.png'),
        (['hindex', 'TNC', 'ANC', 'productivity'],
         ['UNT', 'NUNT', 'diversity', 'persistence'], 'fig/fig4.png'),
        (['hindex', 'TNC', 'ANC'],
         ['productivity', 'UNT'], 'fig/fig5.png'),
    ]
    for xs, ys, path in map_specs:
        relations_maps(data, xs=xs, ys=ys, outpath=path)
def regression_analysis():
    """Fit OLS models of impact indicators on topic indicators.

    Writes the regression summaries to data/result.txt and the spearman
    correlation matrix to data/corr.txt.
    """
    data = pd.read_csv('data/author_topic_indicators.txt')
    data['NUNT'] = data['UNT'] / data['productivity']
    data['consistency'] = data['MAX PNUOT'] / data['productivity']
    rights = ['hindex', 'productivity', 'np.log(TNC + 1)']
    lefts = [[
        'UNT',
        'diversity',
    ], ['diversity', 'consistency'], ['UNT', 'consistency'],
             ['UNT', 'diversity', 'consistency']]
    results = []
    for right in rights:
        for left in lefts:
            formula = right + ' ~ ' + ' + '.join(left)
            # Bug fix: each summary used to be appended twice (a leftover
            # duplicate extend from a commented-out second formula).
            results.extend(formulate_ols(data, formula))
    # context managers make sure the reports are flushed and closed
    with open('data/result.txt', 'w') as fh:
        fh.write('\n'.join(results))
    # NOTE(review): this function defines 'consistency', not 'persistence',
    # so the 'persistence' column below will be all-NaN -- confirm intent.
    df1 = pd.DataFrame(data,
                       columns=[
                           'hindex', 'productivity', 'TNC', 'ANC', 'UNT',
                           'NUNT', 'diversity', 'persistence'
                       ])
    with open('data/corr.txt', 'w') as fh:
        fh.write(str(df1.corr('spearman')))
def formulate_ols(data, formula):
    """Fit an OLS model described by *formula* on *data*.

    Returns the report as a list of lines: a separator, the formula, and
    the fitted-model summary.
    """
    fitted = smf.ols(formula=formula, data=data).fit()
    header = '\n\n----------------------------------------------------'
    return [header, 'formula:' + formula, str(fitted.summary())]
if __name__ == "__main__":
    # variable_dis() generates the distribution/relation figures; currently
    # only the regression step is run.
    # variable_dis()
    regression_analysis()
| [
"pandas.DataFrame",
"seaborn.lineplot",
"seaborn.histplot",
"pandas.read_csv",
"numpy.min",
"numpy.max",
"statsmodels.formula.api.ols",
"seaborn.set_theme"
] | [((1285, 1373), 'seaborn.histplot', 'sns.histplot', (['data'], {'x': 'attr_name', 'ax': 'ax', 'log_scale': 'log', 'kde': '(True)', 'stat': '"""probability"""'}), "(data, x=attr_name, ax=ax, log_scale=log, kde=True, stat=\n 'probability')\n", (1297, 1373), True, 'import seaborn as sns\n'), ((2311, 2371), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'x': 'x', 'y': 'y', 'ax': 'ax', 'alpha': '(0.5)', 'ci': 'None'}), '(data=data, x=x, y=y, ax=ax, alpha=0.5, ci=None)\n', (2323, 2371), True, 'import seaborn as sns\n'), ((2447, 2462), 'numpy.min', 'np.min', (['data[y]'], {}), '(data[y])\n', (2453, 2462), True, 'import numpy as np\n'), ((2474, 2489), 'numpy.max', 'np.max', (['data[y]'], {}), '(data[y])\n', (2480, 2489), True, 'import numpy as np\n'), ((2735, 2782), 'pandas.read_csv', 'pd.read_csv', (['"""data/author_topic_indicators.txt"""'], {}), "('data/author_topic_indicators.txt')\n", (2746, 2782), True, 'import pandas as pd\n'), ((2787, 2815), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (2800, 2815), True, 'import seaborn as sns\n'), ((3817, 3864), 'pandas.read_csv', 'pd.read_csv', (['"""data/author_topic_indicators.txt"""'], {}), "('data/author_topic_indicators.txt')\n", (3828, 3864), True, 'import pandas as pd\n'), ((4797, 4912), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['hindex', 'productivity', 'TNC', 'ANC', 'UNT', 'NUNT', 'diversity',\n 'persistence']"}), "(data, columns=['hindex', 'productivity', 'TNC', 'ANC', 'UNT',\n 'NUNT', 'diversity', 'persistence'])\n", (4809, 4912), True, 'import pandas as pd\n'), ((5256, 5291), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': 'formula', 'data': 'data'}), '(formula=formula, data=data)\n', (5263, 5291), True, 'import statsmodels.formula.api as smf\n')] |
"""Input/output functions (**work in progress**)."""
import numpy as np
import datetime as dt
from .state import State
from .grid import Grid
def to_dataset(states):
    """Convert a sequence of states into an xarray Dataset.

    Not implemented yet; always raises NotImplementedError.
    """
    # TODO
    message = "This feature has not been implemented yet"
    raise NotImplementedError(message)
def from_dataset(dataset, names=None, grid_kwargs=None):
    """Load states from u and v components in an xarray Dataset
    If automatic detection of grid and winds based on the coordinate
    attributes of the dataset fails, the association of `lat`, `lon`,
    `time`, `u` and `v` can be specified explicitly with a dict given to
    names. A `Grid` object shared by the returned states is created based
    on the dataset coordinates. Additional arguments for the `Grid`
    instanciation can be specified with the `grid_kwargs` argument.
    """
    # TODO also accept fields of absolute/relative vorticity
    # TODO write proper error messages
    # Dataset should have 3 dimensions: time, lon, lat
    assert len(dataset.dims) == 3
    # Initialize mapping of coordinate names
    var_map = { "lat": None, "lon": None, "time": None, "u": None, "v": None }
    # Iterate through all coordinates of the dataset and try to detect
    # usable fields based on metadata
    for name, var in dataset.variables.items():
        # Detection relies on the CF-style `long_name`/`units` attributes;
        # variables without them are ignored (can be mapped via `names`).
        lname = var.attrs["long_name"].lower() if "long_name" in var.attrs else None
        units = var.attrs["units"].lower() if "units" in var.attrs else None
        if lname == "time":
            var_map["time"] = name
        # Verify that latitude and longitude is given in degrees
        elif lname == "longitude":
            assert "degree" in units and "east" in units
            var_map["lon"] = name
        elif lname == "latitude":
            assert "degree" in units and "north" in units
            var_map["lat"] = name
        # Verify that wind components are given in m/s
        elif lname is not None and "wind" in lname:
            assert "m s**-1" in units or "m s^-1" in units or "m/s" in units
            if "u component" in lname or "zonal" in lname:
                var_map["u"] = name
            if "v component" in lname or "meridional" in lname:
                var_map["v"] = name
    # Override mapping with given name map argument
    if names is not None:
        var_map.update(names)
    # Verify that every required variable has been found
    assert all(var is not None for var in var_map.values())
    # Extract latitude and longitude coordinates and verify basic
    # properties (non-emptyness, shape)
    lons = dataset[var_map["lon"]].values
    lats = dataset[var_map["lat"]].values
    assert lats.size > 0
    assert lons.size > 0
    # Odd number of latitudes: the grid must include both poles and the
    # equator on a regular spacing.
    assert lats.shape[0] % 2 == 1
    # NOTE(review): this ties the meridional and zonal resolutions together
    # (global lat span is half the lon span) -- assumes a global regular grid.
    assert lats.shape[0] - 1 == lons.shape[-1] // 2
    # Verify that latitudes go from north pole to south pole, longitudes
    # from west to east and grid is regular
    dlats = np.diff(lats, axis= 0).flatten()
    dlons = np.diff(lons, axis=-1).flatten()
    dlat = dlats[0]
    dlon = dlons[0]
    assert dlat < 0
    assert dlon > 0
    assert np.isclose(-dlat, dlon)
    assert np.isclose(dlat, dlats).all()
    assert np.isclose(dlon, dlons).all()
    # Instanciate shared Grid that matches dataset coordinates
    grid_kwargs = {} if grid_kwargs is None else grid_kwargs
    grid = Grid(resolution=dlon, **grid_kwargs)
    # For every time in the dataset, extract the wind fields and create
    # a State from them
    states = []
    for time in dataset[var_map["time"]].values:
        data = dataset.sel({ var_map["time"]: time }, drop=True)
        # Verify that coordinates are in the right order
        assert tuple(data.coords) == (var_map["lon"], var_map["lat"])
        # Convert numpy datetime type into a regular datetime instance
        # https://stackoverflow.com/questions/13703720/
        # NOTE(review): utcfromtimestamp is deprecated since Python 3.12;
        # consider a timezone-aware replacement.
        if isinstance(time, np.datetime64):
            time = dt.datetime.utcfromtimestamp((time - np.datetime64(0, "s")) / np.timedelta64(1, "s"))
        states.append(
            State.from_wind(grid, time, data[var_map["u"]].values, data[var_map["v"]].values)
        )
    return states
| [
"numpy.datetime64",
"numpy.isclose",
"numpy.diff",
"numpy.timedelta64"
] | [((3053, 3076), 'numpy.isclose', 'np.isclose', (['(-dlat)', 'dlon'], {}), '(-dlat, dlon)\n', (3063, 3076), True, 'import numpy as np\n'), ((2884, 2905), 'numpy.diff', 'np.diff', (['lats'], {'axis': '(0)'}), '(lats, axis=0)\n', (2891, 2905), True, 'import numpy as np\n'), ((2929, 2951), 'numpy.diff', 'np.diff', (['lons'], {'axis': '(-1)'}), '(lons, axis=-1)\n', (2936, 2951), True, 'import numpy as np\n'), ((3088, 3111), 'numpy.isclose', 'np.isclose', (['dlat', 'dlats'], {}), '(dlat, dlats)\n', (3098, 3111), True, 'import numpy as np\n'), ((3129, 3152), 'numpy.isclose', 'np.isclose', (['dlon', 'dlons'], {}), '(dlon, dlons)\n', (3139, 3152), True, 'import numpy as np\n'), ((3936, 3958), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (3950, 3958), True, 'import numpy as np\n'), ((3911, 3932), 'numpy.datetime64', 'np.datetime64', (['(0)', '"""s"""'], {}), "(0, 's')\n", (3924, 3932), True, 'import numpy as np\n')] |
import os
import logging
from sklearn.model_selection import train_test_split
# Compute ROC curve and ROC area
from sklearn.metrics import roc_curve, auc
import numpy as np
def split_data(data, y_name, test_size, SEED):
    """Split a dataframe into train/test feature frames and target series.

    Parameters
    ----------
    data : pd.DataFrame
        Full dataset containing both the features and the target column.
    y_name : str
        Name of the target column inside ``data``.
    test_size : float
        Fraction (between 0 and 1) of records assigned to the test set.
    SEED : int
        Random seed that makes the split reproducible.

    Returns
    -------
    X_train : pd.DataFrame
        Feature frame for training.
    X_test : pd.DataFrame
        Feature frame for testing.
    y_train : pd.Series
        Target values for training.
    y_test : pd.Series
        Target values for testing.
    """
    features = data.drop(columns=y_name).copy()
    target = data[y_name]
    X_train, X_test, y_train, y_test = train_test_split(
        features,
        target,
        test_size=test_size,
        random_state=SEED,
    )
    return X_train, X_test, y_train, y_test
# =============================================================
# cast features
# =============================================================
def cast(data, features_type):
X = data.copy()
for xname in features_type['qualitative']:
X[xname] = [str(x) if x is not None else None for x in X[xname]]
for xname in features_type['quantitative']:
X[xname] = [float(x) if x is not None else None for x in X[xname]]
return X
# =============================================================
# Evaluation function
# =============================================================
def compute_metrics(X, y_true, model):
    """Score ``model`` on ``(X, y_true)`` and gather standard binary metrics.

    Parameters
    ----------
    X : pd.DataFrame
        Feature matrix to evaluate on.
    y_true : pd.Series
        Ground-truth binary labels (0/1).
    model : object
        Fitted classifier exposing ``predict`` and ``predict_proba``.

    Returns
    -------
    dict
        Keys ``recall``, ``precision``, ``f1``, ``accuracy`` and ``auc``.
    """
    y_pred = model.predict(X)
    y_proba = model.predict_proba(X)[:, 1]
    # Confusion-matrix cells for the positive class.
    tp = np.sum((y_true == 1) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    accuracy = (tp + tn) / (tp + fn + fp + tn)
    f1 = 2 * precision * recall / (precision + recall)
    # Area under the ROC curve computed from the predicted probabilities.
    fpr, tpr, _ = roc_curve(y_true, y_proba)
    return {
        'recall': recall,
        'precision': precision,
        'f1': f1,
        'accuracy': accuracy,
        'auc': auc(fpr, tpr),
    }
def log(path_output, filelogs):
    """
    This function create a log file to record the logs and
    specify the logger to print in console as well

    Args
    ----
    path_output (str): directory where the log file will be saved
        (created, including parents, if it does not exist)
    filelogs (str): file name where the logs will be recorded

    Return
    ------
    logger (logger): logger object configured to use
    """
    # check if the directories and file exist
    log_file = os.path.join(path_output, filelogs)
    if not os.path.exists(path_output):
        # BUGFIX: previously called `os.mkdir(path)` with an undefined name
        # `path`, raising a NameError whenever the directory was missing.
        # makedirs also covers nested output paths.
        os.makedirs(path_output)
    if not os.path.isfile(log_file):
        open(log_file, "w+").close()
    # set the format of the log records and the logging level to INFO
    logging_format = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(format = logging_format,
                        level = logging.INFO)
    logger = logging.getLogger()
    # create a file handler for output file
    handler = logging.FileHandler(log_file)
    # set the logging level for log file
    handler.setLevel(logging.INFO)
    # create a logging format
    formatter = logging.Formatter(logging_format)
    handler.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(handler)
    return logger
def renderize_html(html, path_html):
    """Append the closing tags to an HTML fragment and write it to disk.

    Parameters
    ----------
    html : str
        Page content in HTML, without the closing body/html tags.
    path_html : str
        Destination file path for the finished page.
    """
    document = html + "<br></body></html>"
    with open(path_html, 'w') as out:
        out.write(document)
"os.mkdir",
"numpy.sum",
"logging.FileHandler",
"sklearn.metrics.roc_curve",
"logging.basicConfig",
"sklearn.model_selection.train_test_split",
"os.path.exists",
"logging.Formatter",
"sklearn.metrics.auc",
"os.path.isfile",
"os.path.join",
"logging.getLogger"
] | [((1258, 1320), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'SEED'}), '(X, y, test_size=test_size, random_state=SEED)\n', (1274, 1320), False, 'from sklearn.model_selection import train_test_split\n'), ((2732, 2769), 'numpy.sum', 'np.sum', (['((y_true == 1) & (y_pred == 1))'], {}), '((y_true == 1) & (y_pred == 1))\n', (2738, 2769), True, 'import numpy as np\n'), ((2779, 2816), 'numpy.sum', 'np.sum', (['((y_true == 1) & (y_pred == 0))'], {}), '((y_true == 1) & (y_pred == 0))\n', (2785, 2816), True, 'import numpy as np\n'), ((2826, 2863), 'numpy.sum', 'np.sum', (['((y_true == 0) & (y_pred == 1))'], {}), '((y_true == 0) & (y_pred == 1))\n', (2832, 2863), True, 'import numpy as np\n'), ((2873, 2910), 'numpy.sum', 'np.sum', (['((y_true == 0) & (y_pred == 0))'], {}), '((y_true == 0) & (y_pred == 0))\n', (2879, 2910), True, 'import numpy as np\n'), ((3205, 3231), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true', 'y_proba'], {}), '(y_true, y_proba)\n', (3214, 3231), False, 'from sklearn.metrics import roc_curve, auc\n'), ((3246, 3259), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (3249, 3259), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4000, 4035), 'os.path.join', 'os.path.join', (['path_output', 'filelogs'], {}), '(path_output, filelogs)\n', (4012, 4035), False, 'import os\n'), ((4394, 4456), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'logging_format', 'level': 'logging.INFO'}), '(format=logging_format, level=logging.INFO)\n', (4413, 4456), False, 'import logging\n'), ((4499, 4518), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4516, 4518), False, 'import logging\n'), ((4582, 4611), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (4601, 4611), False, 'import logging\n'), ((4734, 4767), 'logging.Formatter', 'logging.Formatter', (['logging_format'], {}), '(logging_format)\n', (4751, 
4767), False, 'import logging\n'), ((4052, 4079), 'os.path.exists', 'os.path.exists', (['path_output'], {}), '(path_output)\n', (4066, 4079), False, 'import os\n'), ((4089, 4103), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (4097, 4103), False, 'import os\n'), ((4191, 4215), 'os.path.isfile', 'os.path.isfile', (['log_file'], {}), '(log_file)\n', (4205, 4215), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.single import GlobalBestPSO, LocalBestPSO
from pyswarms.utils.functions.single_obj import rosenbrock_func
def rosenbrock_with_args(x, a, b):
    """Row-wise Rosenbrock function parameterized by (a, b)."""
    first, second = x[:, 0], x[:, 1]
    return (a - first) ** 2 + b * (second - first ** 2) ** 2
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_global_kwargs(func):
    """Extra kwargs (a, b) must be forwarded to the objective by GlobalBestPSO."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = GlobalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                            bounds=(-1 * upper, upper))
    cost, pos = optimizer.optimize(func, 1000, print_step=10, verbose=3, a=1, b=100)
    for actual, target in ((cost, 0), (pos[0], 1.0), (pos[1], 1.0)):
        assert np.isclose(actual, target, rtol=1e-03)
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_global_kwargs_without_named_arguments(func):
    """Kwargs must still reach the objective when optional named arguments
    such as print_step are omitted."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = GlobalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                            bounds=(-1 * upper, upper))
    cost, pos = optimizer.optimize(func, 1000, verbose=3, a=1, b=100)
    for actual, target in ((cost, 0), (pos[0], 1.0), (pos[1], 1.0)):
        assert np.isclose(actual, target, rtol=1e-03)
@pytest.mark.parametrize('func', [
    rosenbrock_func
])
def test_global_no_kwargs(func):
    """GlobalBestPSO must also work for an objective taking no extra args."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = GlobalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                            bounds=(-1 * upper, upper))
    cost, pos = optimizer.optimize(func, 1000, print_step=10, verbose=3)
    for actual, target in ((cost, 0), (pos[0], 1.0), (pos[1], 1.0)):
        assert np.isclose(actual, target, rtol=1e-03)
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_local_kwargs(func):
    """Extra kwargs (a, b) must be forwarded to the objective by LocalBestPSO."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = LocalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                           bounds=(-1 * upper, upper))
    cost, pos = optimizer.optimize(func, 1000, print_step=10, verbose=3, a=1, b=100)
    for actual, target in ((cost, 0), (pos[0], 1.0), (pos[1], 1.0)):
        assert np.isclose(actual, target, rtol=1e-03)
@pytest.mark.parametrize('func', [
    rosenbrock_func
])
def test_local_no_kwargs(func):
    """LocalBestPSO must also work for an objective taking no extra args."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = LocalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                           bounds=(-1 * upper, upper))
    cost, pos = optimizer.optimize(func, iters=1000, print_step=10, verbose=3)
    for actual, target in ((cost, 0), (pos[0], 1.0), (pos[1], 1.0)):
        assert np.isclose(actual, target, rtol=1e-03)
@pytest.mark.parametrize('func', [
    rosenbrock_func
])
def test_global_uneeded_kwargs(func):
    """Passing a kwarg the objective does not accept must raise a TypeError."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = GlobalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                            bounds=(-1 * upper, upper))
    with pytest.raises(TypeError) as excinfo:
        optimizer.optimize(func, 1000, print_step=10, verbose=3, a=1)
    assert 'unexpected keyword' in str(excinfo.value)
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_global_missed_kwargs(func):
    """Omitting a required objective argument must raise a TypeError."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = GlobalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                            bounds=(-1 * upper, upper))
    with pytest.raises(TypeError) as excinfo:
        optimizer.optimize(func, 1000, print_step=10, verbose=3, a=1)
    assert 'missing 1 required positional argument' in str(excinfo.value)
@pytest.mark.parametrize('func', [
    rosenbrock_func
])
def test_local_uneeded_kwargs(func):
    """Passing a kwarg the objective does not accept must raise a TypeError."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = LocalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                           bounds=(-1 * upper, upper))
    with pytest.raises(TypeError) as excinfo:
        optimizer.optimize(func, 1000, print_step=10, verbose=3, a=1)
    assert 'unexpected keyword' in str(excinfo.value)
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_local_missed_kwargs(func):
    """Omitting a required objective argument must raise a TypeError."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = LocalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                           bounds=(-1 * upper, upper))
    with pytest.raises(TypeError) as excinfo:
        optimizer.optimize(func, 1000, print_step=10, verbose=3, a=1)
    assert 'missing 1 required positional argument' in str(excinfo.value)
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_local_wrong_kwargs(func):
    """Passing kwargs with wrong names must raise a TypeError (LocalBestPSO)."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = LocalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                           bounds=(-1 * upper, upper))
    with pytest.raises(TypeError) as excinfo:
        optimizer.optimize(func, 1000, print_step=10, verbose=3, c=1, d=100)
    assert 'unexpected keyword' in str(excinfo.value)
@pytest.mark.parametrize('func', [
    rosenbrock_with_args
])
def test_global_wrong_kwargs(func):
    """Passing kwargs with wrong names must raise a TypeError (GlobalBestPSO)."""
    pso_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9, 'k': 2, 'p': 2}
    upper = 10 * np.ones(2)
    optimizer = GlobalBestPSO(n_particles=100, dimensions=2, options=pso_options,
                            bounds=(-1 * upper, upper))
    with pytest.raises(TypeError) as excinfo:
        optimizer.optimize(func, 1000, print_step=10, verbose=3, c=1, d=100)
    assert 'unexpected keyword' in str(excinfo.value)
| [
"pyswarms.single.GlobalBestPSO",
"numpy.ones",
"numpy.isclose",
"pytest.raises",
"pytest.mark.parametrize",
"pyswarms.single.LocalBestPSO"
] | [((357, 412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (380, 412), False, 'import pytest\n'), ((1043, 1098), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (1066, 1098), False, 'import pytest\n'), ((1803, 1853), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_func]'], {}), "('func', [rosenbrock_func])\n", (1826, 1853), False, 'import pytest\n'), ((2473, 2528), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (2496, 2528), False, 'import pytest\n'), ((3156, 3206), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_func]'], {}), "('func', [rosenbrock_func])\n", (3179, 3206), False, 'import pytest\n'), ((3839, 3889), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_func]'], {}), "('func', [rosenbrock_func])\n", (3862, 3889), False, 'import pytest\n'), ((4476, 4531), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (4499, 4531), False, 'import pytest\n'), ((5137, 5187), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_func]'], {}), "('func', [rosenbrock_func])\n", (5160, 5187), False, 'import pytest\n'), ((5772, 5827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (5795, 5827), False, 'import pytest\n'), ((6431, 6486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (6454, 6486), False, 'import pytest\n'), ((7076, 7131), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""func"""', '[rosenbrock_with_args]'], {}), "('func', [rosenbrock_with_args])\n", (7099, 7131), False, 'import pytest\n'), ((728, 804), 'pyswarms.single.GlobalBestPSO', 'GlobalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (741, 804), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((914, 945), 'numpy.isclose', 'np.isclose', (['cost', '(0)'], {'rtol': '(0.001)'}), '(cost, 0, rtol=0.001)\n', (924, 945), True, 'import numpy as np\n'), ((957, 992), 'numpy.isclose', 'np.isclose', (['pos[0]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[0], 1.0, rtol=0.001)\n', (967, 992), True, 'import numpy as np\n'), ((1004, 1039), 'numpy.isclose', 'np.isclose', (['pos[1]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[1], 1.0, rtol=0.001)\n', (1014, 1039), True, 'import numpy as np\n'), ((1503, 1579), 'pyswarms.single.GlobalBestPSO', 'GlobalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (1516, 1579), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((1674, 1705), 'numpy.isclose', 'np.isclose', (['cost', '(0)'], {'rtol': '(0.001)'}), '(cost, 0, rtol=0.001)\n', (1684, 1705), True, 'import numpy as np\n'), ((1717, 1752), 'numpy.isclose', 'np.isclose', (['pos[0]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[0], 1.0, rtol=0.001)\n', (1727, 1752), True, 'import numpy as np\n'), ((1764, 1799), 'numpy.isclose', 'np.isclose', (['pos[1]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[1], 1.0, rtol=0.001)\n', (1774, 1799), True, 'import numpy as np\n'), ((2171, 2247), 'pyswarms.single.GlobalBestPSO', 'GlobalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (2184, 2247), False, 'from pyswarms.single import 
GlobalBestPSO, LocalBestPSO\n'), ((2344, 2375), 'numpy.isclose', 'np.isclose', (['cost', '(0)'], {'rtol': '(0.001)'}), '(cost, 0, rtol=0.001)\n', (2354, 2375), True, 'import numpy as np\n'), ((2387, 2422), 'numpy.isclose', 'np.isclose', (['pos[0]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[0], 1.0, rtol=0.001)\n', (2397, 2422), True, 'import numpy as np\n'), ((2434, 2469), 'numpy.isclose', 'np.isclose', (['pos[1]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[1], 1.0, rtol=0.001)\n', (2444, 2469), True, 'import numpy as np\n'), ((2843, 2918), 'pyswarms.single.LocalBestPSO', 'LocalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (2855, 2918), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((3027, 3058), 'numpy.isclose', 'np.isclose', (['cost', '(0)'], {'rtol': '(0.001)'}), '(cost, 0, rtol=0.001)\n', (3037, 3058), True, 'import numpy as np\n'), ((3070, 3105), 'numpy.isclose', 'np.isclose', (['pos[0]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[0], 1.0, rtol=0.001)\n', (3080, 3105), True, 'import numpy as np\n'), ((3117, 3152), 'numpy.isclose', 'np.isclose', (['pos[1]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[1], 1.0, rtol=0.001)\n', (3127, 3152), True, 'import numpy as np\n'), ((3532, 3607), 'pyswarms.single.LocalBestPSO', 'LocalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (3544, 3607), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((3710, 3741), 'numpy.isclose', 'np.isclose', (['cost', '(0)'], {'rtol': '(0.001)'}), '(cost, 0, rtol=0.001)\n', (3720, 3741), True, 'import numpy as np\n'), ((3753, 3788), 'numpy.isclose', 'np.isclose', (['pos[0]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[0], 1.0, rtol=0.001)\n', (3763, 3788), True, 'import numpy as np\n'), ((3800, 3835), 'numpy.isclose', 
'np.isclose', (['pos[1]', '(1.0)'], {'rtol': '(0.001)'}), '(pos[1], 1.0, rtol=0.001)\n', (3810, 3835), True, 'import numpy as np\n'), ((4199, 4275), 'pyswarms.single.GlobalBestPSO', 'GlobalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (4212, 4275), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((4840, 4916), 'pyswarms.single.GlobalBestPSO', 'GlobalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (4853, 4916), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((5496, 5571), 'pyswarms.single.LocalBestPSO', 'LocalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (5508, 5571), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((6135, 6210), 'pyswarms.single.LocalBestPSO', 'LocalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (6147, 6210), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((6793, 6868), 'pyswarms.single.LocalBestPSO', 'LocalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (6805, 6868), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), ((7439, 7515), 'pyswarms.single.GlobalBestPSO', 'GlobalBestPSO', ([], {'n_particles': '(100)', 'dimensions': '(2)', 'options': 'options', 'bounds': 'bounds'}), '(n_particles=100, dimensions=2, options=options, bounds=bounds)\n', (7452, 7515), False, 'from pyswarms.single import GlobalBestPSO, LocalBestPSO\n'), 
((653, 663), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (660, 663), True, 'import numpy as np\n'), ((1428, 1438), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1435, 1438), True, 'import numpy as np\n'), ((2096, 2106), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2103, 2106), True, 'import numpy as np\n'), ((2768, 2778), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2775, 2778), True, 'import numpy as np\n'), ((3457, 3467), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3464, 3467), True, 'import numpy as np\n'), ((4124, 4134), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4131, 4134), True, 'import numpy as np\n'), ((4299, 4323), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4312, 4323), False, 'import pytest\n'), ((4765, 4775), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4772, 4775), True, 'import numpy as np\n'), ((4940, 4964), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4953, 4964), False, 'import pytest\n'), ((5421, 5431), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (5428, 5431), True, 'import numpy as np\n'), ((5595, 5619), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5608, 5619), False, 'import pytest\n'), ((6060, 6070), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (6067, 6070), True, 'import numpy as np\n'), ((6234, 6258), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6247, 6258), False, 'import pytest\n'), ((6718, 6728), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (6725, 6728), True, 'import numpy as np\n'), ((6892, 6916), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6905, 6916), False, 'import pytest\n'), ((7364, 7374), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (7371, 7374), True, 'import numpy as np\n'), ((7539, 7563), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7552, 7563), False, 'import pytest\n')] |
import os
import joblib
import numpy as np
from pathlib import Path
from pyxit.estimator import COLORSPACE_RGB, COLORSPACE_TRGB, COLORSPACE_HSV, COLORSPACE_GRAY, _raw_to_trgb, _raw_to_hsv
import shapely
from shapely import wkt
from shapely.affinity import affine_transform, translate
from skimage.util.shape import view_as_windows
from cytomine import CytomineJob, Cytomine
from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, \
AnnotationCollection, Annotation, JobCollection
from cytomine.utilities.software import parse_domain_list, str2bool
from sldc import SemanticSegmenter, SSLWorkflowBuilder, StandardOutputLogger, Logger, ImageWindow, Image
from sldc_cytomine import CytomineTileBuilder
from sldc_cytomine.image_adapter import CytomineDownloadableTile
class CytomineSlide(Image):
    """Read-only SLDC image wrapping a Cytomine image instance."""

    def __init__(self, img_instance, zoom_level=0):
        """Wrap an image instance.

        Parameters
        ----------
        img_instance: ImageInstance
            The image instance to wrap.
        zoom_level: int
            Zoom level at which the slide must be read. Level 0 is the most
            zoomed-in; each additional level halves the resolution.
        """
        self._img_instance = img_instance
        self._zoom_level = zoom_level

    @classmethod
    def from_id(cls, id_img_instance, zoom_level=0):
        """Build a slide by fetching the image instance by its identifier."""
        return cls(ImageInstance.fetch(id_img_instance), zoom_level=zoom_level)

    @property
    def image_instance(self):
        # Underlying Cytomine image instance.
        return self._img_instance

    @property
    def np_image(self):
        # Full-resolution pixel access is deliberately unsupported: whole
        # slides are too large to hold in memory.
        raise NotImplementedError("Disabled due to the too heavy size of the images")

    @property
    def width(self):
        # Width at the requested zoom (halved per zoom level).
        return self._img_instance.width // (2 ** self._zoom_level)

    @property
    def height(self):
        # Height at the requested zoom (halved per zoom level).
        return self._img_instance.height // (2 ** self._zoom_level)

    @property
    def channels(self):
        # Slides are always exposed as RGB.
        return 3

    @property
    def zoom_level(self):
        return self._zoom_level

    @property
    def api_zoom_level(self):
        """The zoom level used by cytomine api uses 0 as lower level of zoom (most zoomed out). This property
        returns a zoom value that can be used to communicate with the backend."""
        return self._img_instance.depth - self._zoom_level

    def __str__(self):
        return "CytomineSlide (#{}) ({} x {}) (zoom: {})".format(self._img_instance.id, self.width, self.height, self.zoom_level)
def extract_windows(image, dims, step):
    """Extract flattened subwindows of shape `dims` from `image`.

    Parameters
    ----------
    image: ndarray
        2D (grayscale) or 3D (multi-channel) tile.
    dims: list
        Window dimensions ([height, width] or [height, width, channels]).
    step: int
        Stride in pixels between successive windows.

    Returns
    -------
    subwindows: ndarray
        One flattened window per row.
    identifiers: ndarray
        For each window, the flat pixel index of its top-left corner
        (row-major over the tile), used to place predictions back.
    """
    # subwindows on input image
    subwindows = view_as_windows(image, dims, step=step)
    # BUGFIX: np.product was deprecated and removed in NumPy 2.0; np.prod is
    # the supported equivalent.
    subwindows = subwindows.reshape([-1, np.prod(dims)])
    # generate tile identifiers: flat index of each pixel, windowed the same
    # way so [0, 0] of every window is its top-left pixel index
    n_pixels = int(np.prod(image.shape[:2]))
    window_ids = np.arange(n_pixels).reshape(image.shape[:2])
    identifiers = view_as_windows(window_ids, dims[:2], step=step)
    identifiers = identifiers[:, :, 0, 0].reshape([-1])
    return subwindows, identifiers
class ExtraTreesSegmenter(SemanticSegmenter):
    """Pixel-wise semantic segmenter backed by a pyxit (subwindows + trees) model.

    Each tile is covered with overlapping subwindows; the per-window class
    probabilities are accumulated per pixel, averaged, and the most probable
    class kept. Tiles that look like plain background or fall entirely
    outside the mask are labelled `background` without running the model.
    """
    def __init__(self, pyxit, classes=None, background=0, min_std=0, max_mean=255, prediction_step=1):
        """
        Parameters
        ----------
        pyxit:
            Trained pyxit estimator (provides `base_estimator`, `colorspace`,
            `target_width`, `target_height`, `classes_`, `n_classes_`).
        classes: sequence, optional
            Classes handled by the segmenter (forwarded to the base class).
        background: int
            Label assigned to skipped pixels and tiles.
        min_std: float
            A tile is processed only if at least one channel has a standard
            deviation above this threshold or a mean below `max_mean`.
        max_mean: float
            See `min_std`.
        prediction_step: int
            Stride (in pixels) between successive subwindows.
        """
        super(ExtraTreesSegmenter, self).__init__(classes=classes)
        self._pyxit = pyxit
        self._prediction_step = prediction_step
        self._min_std = min_std
        self._max_mean = max_mean
        self._background = background

    def _process_tile(self, image):
        # Cheap foreground heuristic: process the tile if any channel shows
        # some contrast (std) or is not uniformly bright (mean).
        channels = [image]
        if image.ndim > 2:
            channels = [image[:, :, i] for i in range(image.shape[2])]
        return np.any([
            np.std(c) > self._min_std or np.mean(c) < self._max_mean
            for c in channels
        ])

    def _convert_colorspace(self, image):
        """Convert an RGB tile into the colorspace the pyxit model was trained on."""
        colorspace = self._pyxit.colorspace
        flattened = image.reshape([-1] if image.ndim == 2 else [-1, image.shape[2]])
        if colorspace == COLORSPACE_RGB:
            return image
        elif colorspace == COLORSPACE_TRGB:
            return _raw_to_trgb(flattened).reshape(image.shape)
        elif colorspace == COLORSPACE_HSV:
            return _raw_to_hsv(flattened).reshape(image.shape)
        elif colorspace == COLORSPACE_GRAY:
            # NOTE(review): the HSV conversion is reused for the gray
            # colorspace (only _raw_to_trgb/_raw_to_hsv are imported) --
            # verify this is intentional.
            return _raw_to_hsv(flattened).reshape(image.shape[:2])
        else:
            raise ValueError("unknown colorspace code '{}'".format(colorspace))

    def segment(self, image):
        """Return a per-pixel class map for `image` (last channel may be a mask)."""
        # extract mask: on 2- or 4-channel tiles the last channel is a binary
        # mask delimiting the region of interest
        mask = np.ones(image.shape[:2], dtype="bool")
        # BUGFIX: was `image.ndim == 3 and image.shape[2] == 2 or
        # image.shape[2] == 4`, whose second clause was evaluated even for 2D
        # images (operator precedence), raising an IndexError.
        if image.ndim == 3 and image.shape[2] in (2, 4):
            mask = image[:, :, -1].astype("bool")
            image = np.copy(image[:, :, :-1])  # remove mask from image
        # skip processing if tile is supposed background (checked via mean & std) or not in the mask
        if not (self._process_tile(image) and np.any(mask)):
            return np.full(image.shape[:2], self._background)
        # change colorspace
        image = self._convert_colorspace(image).reshape(image.shape)
        # prepare windows
        target_height = self._pyxit.target_height
        target_width = self._pyxit.target_width
        w_dims = [target_height, target_width]
        if image.ndim > 2 and image.shape[2] > 1:
            w_dims += [image.shape[2]]
        subwindows, w_identifiers = extract_windows(image, w_dims, self._prediction_step)
        # predict class probabilities for every subwindow
        y = np.array(self._pyxit.base_estimator.predict_proba(subwindows))
        cm_dims = list(image.shape[:2]) + [self._pyxit.n_classes_]
        confidence_map = np.zeros(cm_dims, dtype="float")
        pred_count_map = np.zeros(cm_dims[:2], dtype="int32")
        # accumulate per-pixel probabilities from every overlapping window
        for row, w_index in enumerate(w_identifiers):
            im_width = image.shape[1]
            pred_dims = [target_height, target_width, self._pyxit.n_classes_]
            x_off, y_off = w_index % im_width, w_index // im_width
            confidence_map[y_off:(y_off+target_height), x_off:(x_off+target_width)] += y[:, row, :].reshape(pred_dims)
            pred_count_map[y_off:(y_off+target_height), x_off:(x_off+target_width)] += 1
        # average over multiple predictions
        confidence_map /= np.expand_dims(pred_count_map, axis=2)
        # keep the most probable class per pixel, then force masked-out
        # pixels to the background label
        class_map = np.take(self._pyxit.classes_, np.argmax(confidence_map, axis=2))
        class_map[np.logical_not(mask)] = self._background
        return class_map
class AnnotationAreaChecker(object):
    """Accept annotations whose zoom-corrected area lies within a range.

    Areas are rescaled by ``2 ** level`` to compensate for the zoom level at
    which the annotation polygon was produced. A negative ``max_area``
    disables the upper bound.
    """
    def __init__(self, min_area, max_area, level):
        self._min_area = min_area
        self._max_area = max_area
        self._level = level

    def check(self, annot):
        """Return True when the rescaled area of `annot` is acceptable."""
        scaled_area = annot.area * (2 ** self._level)
        if self._max_area < 0:
            # no upper bound requested
            return self._min_area < scaled_area
        return self._min_area < scaled_area < self._max_area
def change_referential(p, height):
    """Mirror geometry `p` vertically (y -> height - y) to convert between
    the image referential and the annotation referential used by Cytomine."""
    return affine_transform(p, [1, 0, 0, -1, 0, height])
def get_iip_window_from_annotation(slide, annotation, zoom_level):
    """Generate an IIP-compatible ROI window for an annotation at a zoom level.

    The annotation polygon is first moved to the image referential. At zoom
    level 0 the window is taken directly from the polygon; otherwise the
    polygon is downscaled and its bounding box snapped outwards to the
    256x256 IIP tile grid (clipped to the slide size).
    """
    polygon = change_referential(wkt.loads(annotation.location), slide.image_instance.height)
    if zoom_level == 0:
        return slide.window_from_polygon(polygon, mask=True)
    # recompute the roi so that it matches the iip tile topology
    ratio = 1 / (2 ** zoom_level)
    scaled_roi = affine_transform(polygon, [ratio, 0, 0, ratio, 0, 0])
    x0, y0, x1, y1 = [int(b) for b in scaled_roi.bounds]
    # snap the lower corner down, the upper corner up, to 256-pixel multiples
    x0 -= x0 % 256
    y0 -= y0 % 256
    x1 = min(slide.width, x1 + 256 - (x1 % 256))
    y1 = min(slide.height, y1 + 256 - (y1 % 256))
    return slide.window((x0, y0), x1 - x0, y1 - y0, scaled_roi)
def extract_images_or_rois(parameters):
    """Build the list of regions to process: whole slides or ROI windows.

    When ``cytomine_id_roi_term`` is not set, whole images are returned as
    CytomineSlide objects; otherwise one IIP-aligned window per ROI
    annotation carrying that term is returned.
    """
    # work at image level or ROIs by term
    images = ImageInstanceCollection()
    if parameters.cytomine_id_images is None:
        images = images.fetch_with_filter("project", parameters.cytomine_id_project)
    else:
        id_images = parse_domain_list(parameters.cytomine_id_images)
        images.extend([ImageInstance().fetch(_id) for _id in id_images])

    slides = [CytomineSlide(img, parameters.cytomine_zoom_level) for img in images]
    if parameters.cytomine_id_roi_term is None:
        return slides

    # fetch ROI annotations, all users (including reviewed and algo ones)
    roi_annotations = AnnotationCollection(
        terms=[parameters.cytomine_id_roi_term],
        reviewed=parameters.cytomine_reviewed_roi,
        project=parameters.cytomine_id_project,
        showWKT=True,
        includeAlgo=True
    ).fetch()

    slide_by_image = {slide.image_instance.id: slide for slide in slides}
    regions = []
    for annotation in roi_annotations:
        # skip annotations attached to images outside the processed set
        slide = slide_by_image.get(annotation.image)
        if slide is None:
            continue
        regions.append(get_iip_window_from_annotation(slide, annotation, parameters.cytomine_zoom_level))
    return regions
class CytomineOldIIPTile(CytomineDownloadableTile):
    # Downloads a single 256x256 IIP tile from the (old) Cytomine imaging server.
    def download_tile_image(self):
        """Fetch this tile's image from the imaging server into cache_filepath."""
        slide = self.base_image
        # tile grid coordinates (IIP tiles are 256x256)
        col_tile = self.abs_offset_x // 256
        row_tile = self.abs_offset_y // 256
        _slice = slide.image_instance
        # resolve the imaging server URL from the core server
        # NOTE(review): assumes at least one imaging server is registered
        response = Cytomine.get_instance().get('imaging_server.json', None)
        imageServerUrl = response['collection'][0]['url']
        return Cytomine.get_instance().download_file(imageServerUrl + "/image/tile", self.cache_filepath, False, payload={
            "zoomify": _slice.fullPath,
            "mimeType": _slice.mime,
            "x": col_tile,
            "y": row_tile,
            "z": slide.api_zoom_level
        })
def main(argv):
    """Run the Cytomine segmentation job.

    Loads the trained pyxit model produced by a previous training job,
    segments the requested images (or ROI annotations) tile by tile, and
    uploads the resulting polygon annotations back to the Cytomine project.
    """
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(progress=1, statusComment="Preparing execution")
        # extract images to process
        # IIP tiles are fixed at 256x256 with no overlap when zoomed out
        if cj.parameters.cytomine_zoom_level > 0 and (cj.parameters.cytomine_tile_size != 256 or cj.parameters.cytomine_tile_overlap != 0):
            raise ValueError("when using zoom_level > 0, tile size should be 256 "
                             "(given {}) and overlap should be 0 (given {})".format(
                                cj.parameters.cytomine_tile_size, cj.parameters.cytomine_tile_overlap))
        cj.job.update(progress=1, statusComment="Preparing execution (creating folders,...).")
        # working path
        root_path = str(Path.home())
        working_path = os.path.join(root_path, "images")
        os.makedirs(working_path, exist_ok=True)
        # load training information (binary flag and class list were stored as
        # job properties by the training job)
        cj.job.update(progress=5, statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()
        binary = str2bool(properties["binary"].value)
        classes = parse_domain_list(properties["classes"].value)
        cj.job.update(progress=10, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename", "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)
        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs
        # assemble the sldc segmentation workflow
        cj.job.update(progress=45, statusComment="Build workflow.")
        builder = SSLWorkflowBuilder()
        builder.set_tile_size(cj.parameters.cytomine_tile_size, cj.parameters.cytomine_tile_size)
        builder.set_overlap(cj.parameters.cytomine_tile_overlap)
        builder.set_tile_builder(CytomineTileBuilder(working_path, tile_class=CytomineOldIIPTile, n_jobs=cj.parameters.n_jobs))
        builder.set_logger(StandardOutputLogger(level=Logger.INFO))
        builder.set_n_jobs(1)
        builder.set_background_class(0)
        # value 0 will prevent merging but still requires to run the merging check
        # procedure (inefficient)
        builder.set_distance_tolerance(2 if cj.parameters.union_enabled else 0)
        builder.set_segmenter(ExtraTreesSegmenter(
            pyxit=pyxit,
            classes=classes,
            prediction_step=cj.parameters.pyxit_prediction_step,
            background=0,
            min_std=cj.parameters.tile_filter_min_stddev,
            max_mean=cj.parameters.tile_filter_max_mean
        ))
        workflow = builder.get()
        # filter out annotations that are too small/large (zoom-corrected area)
        area_checker = AnnotationAreaChecker(
            min_area=cj.parameters.min_annotation_area,
            max_area=cj.parameters.max_annotation_area,
            level=cj.parameters.cytomine_zoom_level
        )
        def get_term(label):
            # in binary mode all foreground objects get the (optional) predict
            # term; in multi-class mode the predicted label is used directly
            # NOTE(review): the `in cj.parameters` membership test assumes the
            # parameters namespace supports __contains__ — verify.
            if binary:
                if "cytomine_id_predict_term" not in cj.parameters or not cj.parameters.cytomine_id_predict_term:
                    return []
                else:
                    return [int(cj.parameters.cytomine_id_predict_term)]
            # multi-class
            return [label]
        zoom_mult = (2 ** cj.parameters.cytomine_zoom_level)
        zones = extract_images_or_rois(cj.parameters)
        for zone in cj.monitor(zones, start=50, end=90, period=0.05, prefix="Segmenting images/ROIs"):
            results = workflow.process(zone)
            # ROI polygon in full-resolution image coordinates, used to clip
            # the detected objects
            if cj.parameters.cytomine_id_roi_term is not None:
                ROI = change_referential(translate(zone.polygon_mask, zone.offset[0], zone.offset[1]), zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    ROI = affine_transform(ROI, [zoom_mult, 0, 0, zoom_mult, 0, 0])
            annotations = AnnotationCollection()
            for obj in results:
                if not area_checker.check(obj.polygon):
                    continue
                polygon = obj.polygon
                # move the polygon from window to image coordinates, flip the
                # y axis, and upscale back to full resolution
                if isinstance(zone, ImageWindow):
                    polygon = affine_transform(polygon, [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])
                polygon = change_referential(polygon, zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    polygon = affine_transform(polygon, [zoom_mult, 0, 0, zoom_mult, 0, 0])
                if cj.parameters.cytomine_id_roi_term is not None:
                    polygon = polygon.intersection(ROI)
                if not polygon.is_empty:
                    annotations.append(Annotation(
                        location=polygon.wkt,
                        id_terms=get_term(obj.label),
                        id_project=cj.project.id,
                        id_image=zone.base_image.image_instance.id
                    ))
            """
            Some annotations found thanks to the algorithm are Geometry Collections,
            containing more than one element -> keep only polygons from those
            GeometryCollections
            """
            annotations_ok = AnnotationCollection()
            for annotation in annotations:
                if isinstance(shapely.wkt.loads(annotation.location), shapely.geometry.collection.GeometryCollection):
                    # NOTE(review): iterating a GeometryCollection directly was
                    # removed in shapely 2.x (use .geoms) — confirm the pinned
                    # shapely version.
                    for geom in shapely.wkt.loads(annotation.location):
                        if isinstance(geom, shapely.geometry.Polygon):
                            annotations_ok.append(Annotation(
                                location=geom.wkt,
                                id_terms=annotation.term,
                                id_project=annotation.project,
                                id_image=annotation.image
                            ))
                elif isinstance(shapely.wkt.loads(annotation.location), shapely.geometry.Polygon):
                    annotations_ok.append(annotation)
                else:
                    continue
            annotations_ok.save()
        cj.job.update(status=Job.TERMINATED, status_comment="Finish", progress=100)
if __name__ == "__main__":
    import sys
    # forward the CLI arguments (without the program name) to the job entry point
    main(sys.argv[1:])
| [
"pathlib.Path.home",
"numpy.argmax",
"cytomine.models.ImageInstanceCollection",
"numpy.ones",
"cytomine.models.Job",
"pyxit.estimator._raw_to_hsv",
"numpy.product",
"numpy.mean",
"numpy.arange",
"os.path.join",
"sldc.StandardOutputLogger",
"numpy.prod",
"numpy.full",
"numpy.copy",
"skima... | [((2626, 2665), 'skimage.util.shape.view_as_windows', 'view_as_windows', (['image', 'dims'], {'step': 'step'}), '(image, dims, step=step)\n', (2641, 2665), False, 'from skimage.util.shape import view_as_windows\n'), ((2884, 2932), 'skimage.util.shape.view_as_windows', 'view_as_windows', (['window_ids', 'dims[:2]'], {'step': 'step'}), '(window_ids, dims[:2], step=step)\n', (2899, 2932), False, 'from skimage.util.shape import view_as_windows\n'), ((6868, 6913), 'shapely.affinity.affine_transform', 'affine_transform', (['p', '[1, 0, 0, -1, 0, height]'], {}), '(p, [1, 0, 0, -1, 0, height])\n', (6884, 6913), False, 'from shapely.affinity import affine_transform, translate\n'), ((7378, 7445), 'shapely.affinity.affine_transform', 'affine_transform', (['roi_polygon', '[zoom_ratio, 0, 0, zoom_ratio, 0, 0]'], {}), '(roi_polygon, [zoom_ratio, 0, 0, zoom_ratio, 0, 0])\n', (7394, 7445), False, 'from shapely.affinity import affine_transform, translate\n'), ((7961, 7986), 'cytomine.models.ImageInstanceCollection', 'ImageInstanceCollection', ([], {}), '()\n', (7984, 7986), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((2778, 2802), 'numpy.prod', 'np.prod', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (2785, 2802), True, 'import numpy as np\n'), ((4444, 4482), 'numpy.ones', 'np.ones', (['image.shape[:2]'], {'dtype': '"""bool"""'}), "(image.shape[:2], dtype='bool')\n", (4451, 4482), True, 'import numpy as np\n'), ((5541, 5573), 'numpy.zeros', 'np.zeros', (['cm_dims'], {'dtype': '"""float"""'}), "(cm_dims, dtype='float')\n", (5549, 5573), True, 'import numpy as np\n'), ((5599, 5635), 'numpy.zeros', 'np.zeros', (['cm_dims[:2]'], {'dtype': '"""int32"""'}), "(cm_dims[:2], dtype='int32')\n", (5607, 5635), True, 'import numpy as np\n'), ((6153, 6191), 'numpy.expand_dims', 'np.expand_dims', (['pred_count_map'], {'axis': '(2)'}), 
'(pred_count_map, axis=2)\n', (6167, 6191), True, 'import numpy as np\n'), ((7107, 7137), 'shapely.wkt.loads', 'wkt.loads', (['annotation.location'], {}), '(annotation.location)\n', (7116, 7137), False, 'from shapely import wkt\n'), ((8057, 8105), 'cytomine.utilities.software.parse_domain_list', 'parse_domain_list', (['parameters.cytomine_id_images'], {}), '(parameters.cytomine_id_images)\n', (8074, 8105), False, 'from cytomine.utilities.software import parse_domain_list, str2bool\n'), ((9793, 9819), 'cytomine.CytomineJob.from_cli', 'CytomineJob.from_cli', (['argv'], {}), '(argv)\n', (9813, 9819), False, 'from cytomine import CytomineJob, Cytomine\n'), ((10561, 10594), 'os.path.join', 'os.path.join', (['root_path', '"""images"""'], {}), "(root_path, 'images')\n", (10573, 10594), False, 'import os\n'), ((10603, 10643), 'os.makedirs', 'os.makedirs', (['working_path'], {'exist_ok': '(True)'}), '(working_path, exist_ok=True)\n', (10614, 10643), False, 'import os\n'), ((10919, 10955), 'cytomine.utilities.software.str2bool', 'str2bool', (["properties['binary'].value"], {}), "(properties['binary'].value)\n", (10927, 10955), False, 'from cytomine.utilities.software import parse_domain_list, str2bool\n'), ((10974, 11020), 'cytomine.utilities.software.parse_domain_list', 'parse_domain_list', (["properties['classes'].value"], {}), "(properties['classes'].value)\n", (10991, 11020), False, 'from cytomine.utilities.software import parse_domain_list, str2bool\n'), ((11273, 11312), 'os.path.join', 'os.path.join', (['root_path', '"""model.joblib"""'], {}), "(root_path, 'model.joblib')\n", (11285, 11312), False, 'import os\n'), ((11388, 11415), 'joblib.load', 'joblib.load', (['model_filepath'], {}), '(model_filepath)\n', (11399, 11415), False, 'import joblib\n'), ((11628, 11648), 'sldc.SSLWorkflowBuilder', 'SSLWorkflowBuilder', ([], {}), '()\n', (11646, 11648), False, 'from sldc import SemanticSegmenter, SSLWorkflowBuilder, StandardOutputLogger, Logger, ImageWindow, Image\n'), 
((1486, 1522), 'cytomine.models.ImageInstance.fetch', 'ImageInstance.fetch', (['id_img_instance'], {}), '(id_img_instance)\n', (1505, 1522), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((2707, 2723), 'numpy.product', 'np.product', (['dims'], {}), '(dims)\n', (2717, 2723), True, 'import numpy as np\n'), ((2821, 2840), 'numpy.arange', 'np.arange', (['n_pixels'], {}), '(n_pixels)\n', (2830, 2840), True, 'import numpy as np\n'), ((4628, 4653), 'numpy.copy', 'np.copy', (['image[:, :, :-1]'], {}), '(image[:, :, :-1])\n', (4635, 4653), True, 'import numpy as np\n'), ((4862, 4904), 'numpy.full', 'np.full', (['image.shape[:2]', 'self._background'], {}), '(image.shape[:2], self._background)\n', (4869, 4904), True, 'import numpy as np\n'), ((6290, 6323), 'numpy.argmax', 'np.argmax', (['confidence_map'], {'axis': '(2)'}), '(confidence_map, axis=2)\n', (6299, 6323), True, 'import numpy as np\n'), ((6343, 6363), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (6357, 6363), True, 'import numpy as np\n'), ((8486, 8672), 'cytomine.models.AnnotationCollection', 'AnnotationCollection', ([], {'terms': '[parameters.cytomine_id_roi_term]', 'reviewed': 'parameters.cytomine_reviewed_roi', 'project': 'parameters.cytomine_id_project', 'showWKT': '(True)', 'includeAlgo': '(True)'}), '(terms=[parameters.cytomine_id_roi_term], reviewed=\n parameters.cytomine_reviewed_roi, project=parameters.\n cytomine_id_project, showWKT=True, includeAlgo=True)\n', (8506, 8672), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((10525, 10536), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (10534, 10536), False, 'from pathlib import Path\n'), ((11845, 11943), 'sldc_cytomine.CytomineTileBuilder', 'CytomineTileBuilder', 
(['working_path'], {'tile_class': 'CytomineOldIIPTile', 'n_jobs': 'cj.parameters.n_jobs'}), '(working_path, tile_class=CytomineOldIIPTile, n_jobs=cj.\n parameters.n_jobs)\n', (11864, 11943), False, 'from sldc_cytomine import CytomineTileBuilder\n'), ((11967, 12006), 'sldc.StandardOutputLogger', 'StandardOutputLogger', ([], {'level': 'Logger.INFO'}), '(level=Logger.INFO)\n', (11987, 12006), False, 'from sldc import SemanticSegmenter, SSLWorkflowBuilder, StandardOutputLogger, Logger, ImageWindow, Image\n'), ((13819, 13841), 'cytomine.models.AnnotationCollection', 'AnnotationCollection', ([], {}), '()\n', (13839, 13841), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((15108, 15130), 'cytomine.models.AnnotationCollection', 'AnnotationCollection', ([], {}), '()\n', (15128, 15130), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((4828, 4840), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (4834, 4840), True, 'import numpy as np\n'), ((9348, 9371), 'cytomine.Cytomine.get_instance', 'Cytomine.get_instance', ([], {}), '()\n', (9369, 9371), False, 'from cytomine import CytomineJob, Cytomine\n'), ((9478, 9501), 'cytomine.Cytomine.get_instance', 'Cytomine.get_instance', ([], {}), '()\n', (9499, 9501), False, 'from cytomine import CytomineJob, Cytomine\n'), ((10790, 10795), 'cytomine.models.Job', 'Job', ([], {}), '()\n', (10793, 10795), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((11124, 11157), 'cytomine.models.AttachedFileCollection', 'AttachedFileCollection', (['train_job'], {}), '(train_job)\n', (11146, 11157), False, 'from cytomine.models import ImageInstanceCollection, 
ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((13564, 13624), 'shapely.affinity.translate', 'translate', (['zone.polygon_mask', 'zone.offset[0]', 'zone.offset[1]'], {}), '(zone.polygon_mask, zone.offset[0], zone.offset[1])\n', (13573, 13624), False, 'from shapely.affinity import affine_transform, translate\n'), ((13734, 13791), 'shapely.affinity.affine_transform', 'affine_transform', (['ROI', '[zoom_mult, 0, 0, zoom_mult, 0, 0]'], {}), '(ROI, [zoom_mult, 0, 0, zoom_mult, 0, 0])\n', (13750, 13791), False, 'from shapely.affinity import affine_transform, translate\n'), ((14077, 14154), 'shapely.affinity.affine_transform', 'affine_transform', (['polygon', '[1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y]'], {}), '(polygon, [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])\n', (14093, 14154), False, 'from shapely.affinity import affine_transform, translate\n'), ((14323, 14384), 'shapely.affinity.affine_transform', 'affine_transform', (['polygon', '[zoom_mult, 0, 0, zoom_mult, 0, 0]'], {}), '(polygon, [zoom_mult, 0, 0, zoom_mult, 0, 0])\n', (14339, 14384), False, 'from shapely.affinity import affine_transform, translate\n'), ((15204, 15242), 'shapely.wkt.loads', 'shapely.wkt.loads', (['annotation.location'], {}), '(annotation.location)\n', (15221, 15242), False, 'import shapely\n'), ((15325, 15363), 'shapely.wkt.loads', 'shapely.wkt.loads', (['annotation.location'], {}), '(annotation.location)\n', (15342, 15363), False, 'import shapely\n'), ((3620, 3629), 'numpy.std', 'np.std', (['c'], {}), '(c)\n', (3626, 3629), True, 'import numpy as np\n'), ((3649, 3659), 'numpy.mean', 'np.mean', (['c'], {}), '(c)\n', (3656, 3659), True, 'import numpy as np\n'), ((4019, 4042), 'pyxit.estimator._raw_to_trgb', '_raw_to_trgb', (['flattened'], {}), '(flattened)\n', (4031, 4042), False, 'from pyxit.estimator import COLORSPACE_RGB, COLORSPACE_TRGB, COLORSPACE_HSV, COLORSPACE_GRAY, _raw_to_trgb, _raw_to_hsv\n'), 
((8129, 8144), 'cytomine.models.ImageInstance', 'ImageInstance', ([], {}), '()\n', (8142, 8144), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((10854, 10883), 'cytomine.models.PropertyCollection', 'PropertyCollection', (['train_job'], {}), '(train_job)\n', (10872, 10883), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n'), ((15791, 15829), 'shapely.wkt.loads', 'shapely.wkt.loads', (['annotation.location'], {}), '(annotation.location)\n', (15808, 15829), False, 'import shapely\n'), ((4126, 4148), 'pyxit.estimator._raw_to_hsv', '_raw_to_hsv', (['flattened'], {}), '(flattened)\n', (4137, 4148), False, 'from pyxit.estimator import COLORSPACE_RGB, COLORSPACE_TRGB, COLORSPACE_HSV, COLORSPACE_GRAY, _raw_to_trgb, _raw_to_hsv\n'), ((4233, 4255), 'pyxit.estimator._raw_to_hsv', '_raw_to_hsv', (['flattened'], {}), '(flattened)\n', (4244, 4255), False, 'from pyxit.estimator import COLORSPACE_RGB, COLORSPACE_TRGB, COLORSPACE_HSV, COLORSPACE_GRAY, _raw_to_trgb, _raw_to_hsv\n'), ((15486, 15604), 'cytomine.models.Annotation', 'Annotation', ([], {'location': 'geom.wkt', 'id_terms': 'annotation.term', 'id_project': 'annotation.project', 'id_image': 'annotation.image'}), '(location=geom.wkt, id_terms=annotation.term, id_project=\n annotation.project, id_image=annotation.image)\n', (15496, 15604), False, 'from cytomine.models import ImageInstanceCollection, ImageInstance, AttachedFileCollection, Job, PropertyCollection, AnnotationCollection, Annotation, JobCollection\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import json
import os
import six
import numpy as np
import h5py
import codecs
input_txt = '../../../datasets/SocialMedia/captions_1M_alltxt.txt'
output_h5 = '../../../datasets/SocialMedia/captions_1M_alltxt.h5'
output_json ='../../../datasets/SocialMedia/captions_1M_alltxt.json'
val_frac = 0.1
test_frac = 0.1
quiet = False
encoding = 'utf-8'
# Build a character-level dataset: map every character to an integer id,
# split the corpus into train/val/test arrays, and dump HDF5 + JSON vocab.
if __name__ == '__main__':
    # 'bytes' means "no decoding": read raw bytes instead of text
    if encoding == 'bytes': encoding = None
    # First go the file once to see how big it is and to build the vocab
    token_to_idx = {}
    total_size = 0
    with codecs.open(input_txt, 'r', encoding) as f:
        for line in f:
            total_size += len(line)
            for char in line:
                if char not in token_to_idx:
                    # ids start at 1 (0 is left unused)
                    token_to_idx[char] = len(token_to_idx) + 1
    # Now we can figure out the split sizes
    val_size = int(val_frac * total_size)
    test_size = int(test_frac * total_size)
    train_size = total_size - val_size - test_size
    if not quiet:
        print('Total vocabulary size: %d' % len(token_to_idx))
        print('Total tokens in file: %d' % total_size)
        print(' Training size: %d' % train_size)
        print(' Val size: %d' % val_size)
        print(' Test size: %d' % test_size)
    # Choose the datatype based on the vocabulary size
    dtype = np.uint8
    if len(token_to_idx) > 255:
        dtype = np.uint32
    if not quiet:
        print('Using dtype ', dtype)
    # Just load data into memory ... we'll have to do something more clever
    # for huge datasets but this should be fine for now
    train = np.zeros(train_size, dtype=dtype)
    val = np.zeros(val_size, dtype=dtype)
    test = np.zeros(test_size, dtype=dtype)
    splits = [train, val, test]
    # Go through the file again and write data to numpy arrays
    # (splits are filled sequentially, so a boundary may cut a line in half)
    split_idx, cur_idx = 0, 0
    with codecs.open(input_txt, 'r', encoding) as f:
        for line in f:
            for char in line:
                splits[split_idx][cur_idx] = token_to_idx[char]
                cur_idx += 1
                if cur_idx == splits[split_idx].size:
                    split_idx += 1
                    cur_idx = 0
    # Write data to HDF5 file
    with h5py.File(output_h5, 'w') as f:
        f.create_dataset('train', data=train)
        f.create_dataset('val', data=val)
        f.create_dataset('test', data=test)
    # For 'bytes' encoding, replace non-ascii characters so the json dump
    # doesn't crash
    if encoding is None:
        new_token_to_idx = {}
        for token, idx in six.iteritems(token_to_idx):
            if ord(token) > 127:
                new_token_to_idx['[%d]' % ord(token)] = idx
            else:
                new_token_to_idx[token] = idx
        token_to_idx = new_token_to_idx
    # Dump a JSON file for the vocab
    json_data = {
        'token_to_idx': token_to_idx,
        'idx_to_token': {v: k for k, v in six.iteritems(token_to_idx)},
    }
    with open(output_json, 'w') as f:
        json.dump(json_data, f)
| [
"json.dump",
"h5py.File",
"codecs.open",
"numpy.zeros",
"six.iteritems"
] | [((1559, 1592), 'numpy.zeros', 'np.zeros', (['train_size'], {'dtype': 'dtype'}), '(train_size, dtype=dtype)\n', (1567, 1592), True, 'import numpy as np\n'), ((1601, 1632), 'numpy.zeros', 'np.zeros', (['val_size'], {'dtype': 'dtype'}), '(val_size, dtype=dtype)\n', (1609, 1632), True, 'import numpy as np\n'), ((1642, 1674), 'numpy.zeros', 'np.zeros', (['test_size'], {'dtype': 'dtype'}), '(test_size, dtype=dtype)\n', (1650, 1674), True, 'import numpy as np\n'), ((612, 649), 'codecs.open', 'codecs.open', (['input_txt', '"""r"""', 'encoding'], {}), "(input_txt, 'r', encoding)\n", (623, 649), False, 'import codecs\n'), ((1802, 1839), 'codecs.open', 'codecs.open', (['input_txt', '"""r"""', 'encoding'], {}), "(input_txt, 'r', encoding)\n", (1813, 1839), False, 'import codecs\n'), ((2095, 2120), 'h5py.File', 'h5py.File', (['output_h5', '"""w"""'], {}), "(output_h5, 'w')\n", (2104, 2120), False, 'import h5py\n'), ((2409, 2436), 'six.iteritems', 'six.iteritems', (['token_to_idx'], {}), '(token_to_idx)\n', (2422, 2436), False, 'import six\n'), ((2801, 2824), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (2810, 2824), False, 'import json\n'), ((2727, 2754), 'six.iteritems', 'six.iteritems', (['token_to_idx'], {}), '(token_to_idx)\n', (2740, 2754), False, 'import six\n')] |
import os
import numpy as np
from src.modelTraining.base_model import BaseModel
from src.modelTraining.model_factory import model_creator, mse
from keras.callbacks import EarlyStopping
from keras.regularizers import l2, l1
class InDelModel(BaseModel):
    # Binary classifier (2 softmax-style output units) deciding whether
    # insertions or deletions dominate an editing outcome, swept over a
    # range of L1/L2 regularization strengths.
    def __init__(self) -> None:
        super().__init__()
    def prepare_data(self):
        """Build a 90/10 train/test split with binary insertion-vs-deletion labels.

        Returns (x_train, x_test, y_train, y_test); labels are one-hot:
        [1, 0] = deletion-dominated, [0, 1] = insertion-dominated.
        """
        x_t, y_t = self.get_xy_split()
        # keep only the last 384 feature columns
        x_t = x_t[:, -384:]
        # NOTE(review): assumes the last 21 outcome columns are insertions and
        # the remaining columns deletions — confirm with the dataset layout.
        y_ins = np.sum(y_t[:, -21:], axis=1)
        y_del = np.sum(y_t[:, :-21], axis=1)
        # NOTE: the comprehension shadows y_ins/y_del with per-sample scalars
        y_t = np.array([[0, 1] if y_ins > y_del else [1, 0]
                        for y_ins, y_del in zip(y_ins, y_del)]).astype('float32')
        train_size = round(len(x_t) * 0.9)
        x_train, x_test = x_t[:train_size, :], x_t[train_size:, :]
        y_train, y_test = y_t[:train_size], y_t[train_size:]
        return x_train, x_test, y_train, y_test
    def train_model(self):
        """Train one L1- and one L2-regularized model per lambda; keep the best of each.

        Saves the lowest-test-error model of each family under ./models/ and
        returns (errors_l1, errors_l2): the per-lambda test MSE for both sweeps.
        """
        x_train, x_test, y_train, y_test = self.prepare_data()
        np.random.seed(0)
        lambdas = self.get_lambda()
        models_l1, models_l2 = [], []
        errors_l1, errors_l2 = [], []
        for idx, kernel_weight in enumerate(lambdas):
            print(f"Percentage done: ({idx/len(lambdas):.2f})")
            model_l1 = model_creator(
                num_units=2,
                kernel_regularizer=l1,
                kernel_weight=kernel_weight,
                loss="binary_crossentropy",
                input_shape=(384, )
            )
            # early stopping on validation loss with patience 1
            model_l1.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test),
                         callbacks=[EarlyStopping(patience=1)], verbose=0)
            models_l1.append(model_l1)
            y_hat = model_l1.predict(x_test)
            errors_l1.append(mse(y_hat, y_test))
            model_l2 = model_creator(
                num_units=2,
                kernel_regularizer=l2,
                kernel_weight=kernel_weight,
                loss="binary_crossentropy",
                input_shape=(384, )
            )
            model_l2.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test),
                         callbacks=[EarlyStopping(patience=1)], verbose=0)
            models_l2.append(model_l2)
            y_hat = model_l2.predict(x_test)
            errors_l2.append(mse(y_hat, y_test))
        # persist the best (lowest test error) model of each family
        models_l1[np.argmin(errors_l1)].save("./models/indel_l1.h5")
        models_l2[np.argmin(errors_l2)].save("./models/indel_l2.h5")
        return errors_l1, errors_l2
| [
"numpy.random.seed",
"numpy.sum",
"numpy.argmin",
"src.modelTraining.model_factory.model_creator",
"keras.callbacks.EarlyStopping",
"src.modelTraining.model_factory.mse"
] | [((431, 459), 'numpy.sum', 'np.sum', (['y_t[:, -21:]'], {'axis': '(1)'}), '(y_t[:, -21:], axis=1)\n', (437, 459), True, 'import numpy as np\n'), ((476, 504), 'numpy.sum', 'np.sum', (['y_t[:, :-21]'], {'axis': '(1)'}), '(y_t[:, :-21], axis=1)\n', (482, 504), True, 'import numpy as np\n'), ((970, 987), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (984, 987), True, 'import numpy as np\n'), ((1244, 1375), 'src.modelTraining.model_factory.model_creator', 'model_creator', ([], {'num_units': '(2)', 'kernel_regularizer': 'l1', 'kernel_weight': 'kernel_weight', 'loss': '"""binary_crossentropy"""', 'input_shape': '(384,)'}), "(num_units=2, kernel_regularizer=l1, kernel_weight=\n kernel_weight, loss='binary_crossentropy', input_shape=(384,))\n", (1257, 1375), False, 'from src.modelTraining.model_factory import model_creator, mse\n'), ((1787, 1918), 'src.modelTraining.model_factory.model_creator', 'model_creator', ([], {'num_units': '(2)', 'kernel_regularizer': 'l2', 'kernel_weight': 'kernel_weight', 'loss': '"""binary_crossentropy"""', 'input_shape': '(384,)'}), "(num_units=2, kernel_regularizer=l2, kernel_weight=\n kernel_weight, loss='binary_crossentropy', input_shape=(384,))\n", (1800, 1918), False, 'from src.modelTraining.model_factory import model_creator, mse\n'), ((1743, 1761), 'src.modelTraining.model_factory.mse', 'mse', (['y_hat', 'y_test'], {}), '(y_hat, y_test)\n', (1746, 1761), False, 'from src.modelTraining.model_factory import model_creator, mse\n'), ((2286, 2304), 'src.modelTraining.model_factory.mse', 'mse', (['y_hat', 'y_test'], {}), '(y_hat, y_test)\n', (2289, 2304), False, 'from src.modelTraining.model_factory import model_creator, mse\n'), ((2325, 2345), 'numpy.argmin', 'np.argmin', (['errors_l1'], {}), '(errors_l1)\n', (2334, 2345), True, 'import numpy as np\n'), ((2394, 2414), 'numpy.argmin', 'np.argmin', (['errors_l2'], {}), '(errors_l2)\n', (2403, 2414), True, 'import numpy as np\n'), ((1591, 1616), 
'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(1)'}), '(patience=1)\n', (1604, 1616), False, 'from keras.callbacks import EarlyStopping\n'), ((2134, 2159), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(1)'}), '(patience=1)\n', (2147, 2159), False, 'from keras.callbacks import EarlyStopping\n')] |
#!/usr/bin/env python
"""
Differential evolution test program.
Usage:
de.py [options]
Options:
-h, --help Show this message and exit.
-n N Number of generations in DE. [default: 20]
--print-level LEVEL
Print verbose level. [default: 1]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
from numpy import exp, sin, cos
import random
import copy
from multiprocessing import Process, Pool
from time import time
__author__ = "<NAME>"
__version__ = "190904"
_fname_gen = 'out.de.generations'
_fname_ind = 'out.de.individuals'
def test_func(var, vranges, **kwargs):
x,y= var
res= x**2 +y**2 +100.0*exp(-x**2 -y**2)*sin(2.0*(x+y))*cos(2*(x-y)) \
+80.0*exp(-(x-1)**2 -(y-1)**2)*cos(x+4*y)*sin(2*x-y) \
+200.0*sin(x+y)*exp(-(x-3)**2-(y-1)**2)
return res
def test_write_func(vs,vrs,fname,**kwargs):
with open(fname,'w') as f:
for i,v in enumerate(vs):
vr = vrs[i]
f.write(' {0:10.3f} {1:10.3f} {2:10.3f}\n'.format(v,*vr))
return None
def wrap(vs, vrs):
    """Return a shallow copy of `vs` with every element clamped into its
    (min, max) range from `vrs`."""
    clamped = copy.copy(vs)
    for idx in range(len(clamped)):
        lo, hi = vrs[idx]
        clamped[idx] = min(max(clamped[idx], lo), hi)
    return clamped
class Individual:
    """
    Individual of the differential-evolution population.

    Holds a variable vector of length `ndim`, the allowed (min, max) range of
    each variable, and a reference to the loss function to be minimized.
    """
    def __init__(self, iid, ndim, vranges, loss_func):
        """
        Parameters
        ----------
        iid : int
            Unique identifier of this individual.
        ndim : int
            Number of variables.
        vranges : sequence of (min, max)
            Allowed range of each variable.
        loss_func : callable
            Called as loss_func(vector, vranges, **kwargs).
        """
        self.iid = iid
        self.ndim = ndim
        self.loss_func = loss_func
        self.vector = np.zeros(self.ndim)
        self.vranges = vranges
        self.val = None  # cached loss value; None means "not evaluated yet"

    def set_variable(self, variables):
        """Replace the variable vector (clamped into range) and reset the cache."""
        if len(variables) != len(self.vector):
            # fix: the original raised a bare ValueError() with no message
            raise ValueError('expected {0:d} variables, got {1:d}'.format(
                len(self.vector), len(variables)))
        self.vector = variables
        self.wrap_range()
        self.val = None
        return None

    def init_random(self):
        """Draw every variable uniformly within its allowed range."""
        for i in range(self.ndim):
            vmin, vmax = self.vranges[i]
            self.vector[i] = random.random()*(vmax - vmin) + vmin
        self.wrap_range()
        self.val = None
        return None

    def wrap_range(self):
        """Clamp the vector into the allowed ranges (module-level wrap())."""
        self.vector = wrap(self.vector, self.vranges)

    def calc_loss_func(self, kwargs):
        """
        Compute the loss function value of this individual.

        Returns (value, kwargs['index']) so that results evaluated through a
        process pool can be matched back to their population slot.
        """
        val = self.loss_func(self.vector, self.vranges, **kwargs)
        return val, kwargs['index']
class DE:
    """
    Differential evolution minimizer.

    Keeps a population of N Individuals, evolves them with DE/rand/1
    mutation plus binomial cross-over, and adopts candidates with a
    Metropolis-like probability controlled by the temperature T.
    Loss evaluations are farmed out to a multiprocessing pool that is
    created once in the constructor and reused by run().
    """

    def __init__(self, N, F, CR, T, variables, vranges, loss_func, write_func,
                 nproc=0, **kwargs):
        """
        Constructor of DE class.

        N:  population size (must be > 3 for DE/rand/1 mutation).
        F:  differential weight applied to the difference vector.
        CR: cross-over rate.
        T:  temperature (kT) used for the adoption probability.
        variables / vranges: initial guess and per-variable (min, max) limits.
        loss_func:
            Loss function to be minimized with variables and **kwargs.
        write_func: callback used to persist an individual's variables.
        nproc:
            Number of processes used to run N individuals (0 = all cores).
        """
        if N < 4:
            raise ValueError('N must be greater than 3 in DE!')
        self.N = N    # Number of individuals in a generation
        self.F = F    # Fraction of mixing in DE
        self.CR = CR  # Cross-over rate
        self.T = T    # Temperature (kT) to compute adoption probability
        self.nproc = nproc
        self.ndim = len(variables)
        self.vs = variables
        self.vrs = vranges
        self.loss_func = loss_func
        self.write_func = write_func
        self.kwargs = kwargs
        self.bestind = None
        self.print_level = 0
        if 'print_level' in kwargs.keys():
            self.print_level = kwargs['print_level']
        # ...initialize population: keep the supplied guess as individual 0,
        # randomize the rest inside their ranges.
        self.population = []
        self.iidmax = 0
        for i in range(N):
            self.iidmax += 1
            ind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
            if i == 0:
                ind.set_variable(self.vs)
            else:
                ind.init_random()
            self.population.append(ind)
        # Worker pool is stored on self so run() can reuse it.
        # (Bug fix: it used to be a local variable, so run() crashed with
        # NameError at pool.apply_async.)
        if self.nproc > 0:  # use the number of cores specified by nproc
            self.pool = Pool(processes=self.nproc)
        else:
            self.pool = Pool()
        # ...evaluate initial loss function values in parallel
        prcs = []
        for ip, pi in enumerate(self.population):
            kwtmp = copy.copy(self.kwargs)
            kwtmp['index'] = ip
            kwtmp['iid'] = pi.iid
            prcs.append(self.pool.apply_async(pi.calc_loss_func, (kwtmp,)))
        results = [res.get() for res in prcs]
        for res in results:
            val, ip = res
            self.population[ip].val = val
        self.keep_best()
        if self.print_level > 2:
            for pi in self.population:
                self.write_variables(pi,
                                     fname='in.vars.fitpot.{0:d}'.format(pi.iid),
                                     **self.kwargs)
        else:
            self.write_variables(self.bestind,
                                 fname='in.vars.fitpot.{0:d}'.format(self.bestind.iid),
                                 **self.kwargs)
        return None

    def keep_best(self):
        """Remember a deep copy of the best (lowest-loss) individual so far.

        Raises ValueError if any individual has not been evaluated yet.
        """
        vals = []
        for i, pi in enumerate(self.population):
            if pi.val is None:
                raise ValueError('Something went wrong.')
            vals.append(pi.val)
        minval = min(vals)
        if self.bestind is None or minval < self.bestind.val:
            idx = vals.index(minval)
            self.bestind = copy.deepcopy(self.population[idx])
        return None

    def run(self, maxiter=100):
        """
        Perform DE for at most *maxiter* generations.

        Logs every generation to _fname_gen and every adopted individual
        to _fname_ind, and writes the best variables found via write_func.
        """
        if 'start' in self.kwargs.keys():
            start = self.kwargs['start']
        else:
            start = time()
        fgen = open(_fname_gen, 'w')
        find = open(_fname_ind, 'w')
        # ...log the initial population as generation 0
        for i, ind in enumerate(self.population):
            fgen.write(' 0 {0:8d} {1:12.4e}\n'.format(ind.iid, ind.val))
            find.write(' {0:8d} {1:12.4e}'.format(ind.iid, ind.val))
            for j, vj in enumerate(ind.vector):
                find.write(' {0:11.3e}'.format(vj))
            find.write('\n')
        if self.print_level > 0:
            print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(0, time()-start,
                                                                          self.bestind.val), end="")
            for i in range(min(16, self.ndim)):
                print(' {0:6.3f}'.format(self.bestind.vector[i]), end="")
            print('', flush=True)
        for it in range(maxiter):
            candidates = []
            # ...create one trial candidate per population member (DE/rand/1)
            for ip, pi in enumerate(self.population):
                vi = pi.vector
                # ...pick 3 distinct other individuals.
                # (Bug fix: exclude the current index ip; the old code used
                # the stale loop variable i left over from the print block.)
                indices = [j for j in range(self.N) if j != ip]
                irand = int(random.random()*len(indices))
                i1 = indices.pop(irand)
                irand = int(random.random()*len(indices))
                i2 = indices.pop(irand)
                irand = int(random.random()*len(indices))
                i3 = indices.pop(irand)
                v1 = self.population[i1].vector
                v2 = self.population[i2].vector
                v3 = self.population[i3].vector
                vd = v1 + self.F*(v2 - v3)
                # ...binomial cross-over with the parent vector
                vnew = np.array(vd)
                for k in range(len(vi)):
                    r = random.random()
                    if r > self.CR:
                        vnew[k] = vi[k]
                # ...create new individual for trial
                self.iidmax += 1
                newind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
                newind.set_variable(vnew)
                candidates.append(newind)
            # ...evaluate candidate loss values in parallel on the shared pool
            prcs = []
            for ic, ci in enumerate(candidates):
                kwtmp = copy.copy(self.kwargs)
                kwtmp['index'] = ic
                kwtmp['iid'] = ci.iid
                prcs.append(self.pool.apply_async(ci.calc_loss_func, (kwtmp,)))
            results = [res.get() for res in prcs]
            for res in results:
                val, ic = res
                candidates[ic].val = val
            # ...check best
            for ic, ci in enumerate(candidates):
                if ci.val < self.bestind.val:
                    self.bestind = ci
                    self.write_variables(ci,
                                         fname='in.vars.fitpot.{0:d}'.format(ci.iid),
                                         **self.kwargs)
            if self.print_level > 2:
                for ci in candidates:
                    self.write_variables(ci,
                                         fname='in.vars.fitpot.{0:d}'.format(ci.iid),
                                         **self.kwargs)
            # ...decide whether or not to adopt each candidate
            for ic, ci in enumerate(candidates):
                pi = self.population[ic]
                # ...adoption probability: always accept improvements,
                # accept worse candidates with Metropolis probability exp(-d/T)
                dval = ci.val - pi.val
                if dval < 0.0:
                    prob = 1.0
                else:
                    if self.T > 0.0:
                        prob = np.exp(-dval/self.T)
                    else:
                        prob = 0.0
                r = random.random()
                if r < prob:  # replace with new individual
                    self.population[ic] = ci
                    find.write(' {0:8d} {1:12.4e}'.format(ci.iid, ci.val))
                    for k, vk in enumerate(ci.vector):
                        find.write(' {0:11.3e}'.format(vk))
                    find.write('\n')
            if self.print_level > 0:
                print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(it+1, time()-start,
                                                                              self.bestind.val), end="")
                for i in range(min(16, self.ndim)):
                    print(' {0:6.3f}'.format(self.bestind.vector[i]), end="")
                print('', flush=True)
            for i, ind in enumerate(self.population):
                fgen.write(' {0:5d} {1:8d} {2:12.4e}\n'.format(it+1, ind.iid, ind.val))
        fgen.close()
        find.close()
        # ...finally write out the best one
        self.write_variables(self.bestind, fname='in.vars.fitpot.best', **self.kwargs)
        return None

    def write_variables(self, ind, fname='in.vars.fitpot', **kwargs):
        """Persist *ind*'s vector and ranges via the write_func callback."""
        vs = ind.vector
        vrs = ind.vranges
        self.write_func(vs, vrs, fname, **kwargs)
        return None
if __name__ == "__main__":
    # Parse command-line options from the docopt usage string in the
    # module docstring.
    args = docopt(__doc__)
    n = int(args['-n'])  # number of DE generations to run
    kwargs = {}
    kwargs['print_level'] = int(args['--print-level'])
    # Initial guess and per-variable (min, max) search ranges.
    vs = np.array([1.0, -0.5])
    vrs = np.array([[-1.0, 2.0],[-1.0, 1.0]])
    # Population of 10, F=0.8, CR=0.5, kT=1.0 on the 2-D test surface.
    de = DE(10, 0.8, 0.5, 1.0, vs, vrs, test_func, test_write_func, **kwargs)
    de.run(n)
| [
"copy.deepcopy",
"docopt.docopt",
"numpy.zeros",
"copy.copy",
"time.time",
"random.random",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.cos",
"multiprocessing.Pool"
] | [((1115, 1128), 'copy.copy', 'copy.copy', (['vs'], {}), '(vs)\n', (1124, 1128), False, 'import copy\n'), ((12272, 12287), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (12278, 12287), False, 'from docopt import docopt\n'), ((12393, 12414), 'numpy.array', 'np.array', (['[1.0, -0.5]'], {}), '([1.0, -0.5])\n', (12401, 12414), True, 'import numpy as np\n'), ((12425, 12461), 'numpy.array', 'np.array', (['[[-1.0, 2.0], [-1.0, 1.0]]'], {}), '([[-1.0, 2.0], [-1.0, 1.0]])\n', (12433, 12461), True, 'import numpy as np\n'), ((1511, 1530), 'numpy.zeros', 'np.zeros', (['self.ndim'], {}), '(self.ndim)\n', (1519, 1530), True, 'import numpy as np\n'), ((823, 856), 'numpy.exp', 'exp', (['(-(x - 3) ** 2 - (y - 1) ** 2)'], {}), '(-(x - 3) ** 2 - (y - 1) ** 2)\n', (826, 856), False, 'from numpy import exp, sin, cos\n'), ((4689, 4715), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.nproc'}), '(processes=self.nproc)\n', (4693, 4715), False, 'from multiprocessing import Process, Pool\n'), ((4749, 4755), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (4753, 4755), False, 'from multiprocessing import Process, Pool\n'), ((4838, 4860), 'copy.copy', 'copy.copy', (['self.kwargs'], {}), '(self.kwargs)\n', (4847, 4860), False, 'import copy\n'), ((6141, 6176), 'copy.deepcopy', 'copy.deepcopy', (['self.population[idx]'], {}), '(self.population[idx])\n', (6154, 6176), False, 'import copy\n'), ((6406, 6412), 'time.time', 'time', ([], {}), '()\n', (6410, 6412), False, 'from time import time\n'), ((785, 799), 'numpy.sin', 'sin', (['(2 * x - y)'], {}), '(2 * x - y)\n', (788, 799), False, 'from numpy import exp, sin, cos\n'), ((814, 824), 'numpy.sin', 'sin', (['(x + y)'], {}), '(x + y)\n', (817, 824), False, 'from numpy import exp, sin, cos\n'), ((8164, 8176), 'numpy.array', 'np.array', (['vd'], {}), '(vd)\n', (8172, 8176), True, 'import numpy as np\n'), ((9066, 9088), 'copy.copy', 'copy.copy', (['self.kwargs'], {}), '(self.kwargs)\n', (9075, 9088), False, 'import 
copy\n'), ((10901, 10916), 'random.random', 'random.random', ([], {}), '()\n', (10914, 10916), False, 'import random\n'), ((719, 735), 'numpy.cos', 'cos', (['(2 * (x - y))'], {}), '(2 * (x - y))\n', (722, 735), False, 'from numpy import exp, sin, cos\n'), ((774, 788), 'numpy.cos', 'cos', (['(x + 4 * y)'], {}), '(x + 4 * y)\n', (777, 788), False, 'from numpy import exp, sin, cos\n'), ((2160, 2175), 'random.random', 'random.random', ([], {}), '()\n', (2173, 2175), False, 'import random\n'), ((8242, 8257), 'random.random', 'random.random', ([], {}), '()\n', (8255, 8257), False, 'import random\n'), ((704, 722), 'numpy.sin', 'sin', (['(2.0 * (x + y))'], {}), '(2.0 * (x + y))\n', (707, 722), False, 'from numpy import exp, sin, cos\n'), ((749, 782), 'numpy.exp', 'exp', (['(-(x - 1) ** 2 - (y - 1) ** 2)'], {}), '(-(x - 1) ** 2 - (y - 1) ** 2)\n', (752, 782), False, 'from numpy import exp, sin, cos\n'), ((6923, 6929), 'time.time', 'time', ([], {}), '()\n', (6927, 6929), False, 'from time import time\n'), ((7524, 7539), 'random.random', 'random.random', ([], {}), '()\n', (7537, 7539), False, 'import random\n'), ((7622, 7637), 'random.random', 'random.random', ([], {}), '()\n', (7635, 7637), False, 'import random\n'), ((7720, 7735), 'random.random', 'random.random', ([], {}), '()\n', (7733, 7735), False, 'import random\n'), ((10799, 10821), 'numpy.exp', 'np.exp', (['(-dval / self.T)'], {}), '(-dval / self.T)\n', (10805, 10821), True, 'import numpy as np\n'), ((687, 708), 'numpy.exp', 'exp', (['(-x ** 2 - y ** 2)'], {}), '(-x ** 2 - y ** 2)\n', (690, 708), False, 'from numpy import exp, sin, cos\n'), ((11419, 11425), 'time.time', 'time', ([], {}), '()\n', (11423, 11425), False, 'from time import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 20:24:50 2016
@author: alex
"""
from AlexRobotics.dynamic import Manipulator as M
from AlexRobotics.control import ComputedTorque as CTC
import matplotlib.pyplot as plt
import numpy as np
""" Define system """
# Define real dynamic system
R = M.TwoLinkManipulator()
# Constant disturbance applied to the real plant (one entry per joint;
# units per the Manipulator model — not shown here).
R.f_dist_steady = np.array([ 10 , 10 ])
# Define approx dynamic system used by controller
R_hat = M.TwoLinkManipulator()
#R_hat.f_dist_steady = np.array([ 0 , 0 ]) # Model not aware of disturbance
# Define controller
CTC_controller = CTC.ComputedTorqueController( R_hat )
CTC_controller.w0 = 1
#CTC_controller.dist_obs_active = False
# Enable the disturbance observer so the controller can compensate for
# the steady disturbance acting on the real plant.
CTC_controller.dist_obs_active = True
# Assign feedback law to the dynamic system
R.ctl = CTC_controller.ctl
""" Simulation and plotting """
# Animate a closed-loop trajectory from initial state x0 over tf seconds
# using 1001 Euler integration steps.
x0 = [3,-1,0,0]
tf = 10
R.plotAnimation( x0 , tf , n = 1001 , solver='euler' )
R.Sim.plot_CL( 'x' )
R.Sim.plot_CL( 'u' )
# Hold figures alive
plt.show() | [
"matplotlib.pyplot.show",
"numpy.array",
"AlexRobotics.control.ComputedTorque.ComputedTorqueController",
"AlexRobotics.dynamic.Manipulator.TwoLinkManipulator"
] | [((317, 339), 'AlexRobotics.dynamic.Manipulator.TwoLinkManipulator', 'M.TwoLinkManipulator', ([], {}), '()\n', (337, 339), True, 'from AlexRobotics.dynamic import Manipulator as M\n'), ((358, 376), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (366, 376), True, 'import numpy as np\n'), ((453, 475), 'AlexRobotics.dynamic.Manipulator.TwoLinkManipulator', 'M.TwoLinkManipulator', ([], {}), '()\n', (473, 475), True, 'from AlexRobotics.dynamic import Manipulator as M\n'), ((609, 644), 'AlexRobotics.control.ComputedTorque.ComputedTorqueController', 'CTC.ComputedTorqueController', (['R_hat'], {}), '(R_hat)\n', (637, 644), True, 'from AlexRobotics.control import ComputedTorque as CTC\n'), ((1039, 1049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1047, 1049), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""musawenkosi - Tensorflow Multivariate Practical3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1i-6hu3MdyNlaO6CAkM23f5MtCHwyxHja
## Regression as Neural Networks Practical
"""
import numpy as np
np.random.seed(1348) # for reproducibility
import pandas
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import metrics
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
dataframe = pandas.read_csv("https://raw.githubusercontent.com/eijaz1/Deep-Learning-in-Keras-Tutorial/master/data/hourly_wages_data.csv")
dataset = dataframe.values
dataframe.head()
# Features: every column except the regression target `wage_per_hour`.
X = dataframe.drop(columns=['wage_per_hour']).values
Y = dataframe['wage_per_hour'].values
X.shape #This code checks the shape of the data we have.
Y.shape #This code checks the shape of the features.
# Hold out 30% of the rows for testing.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.3) #This splits the data into training and testing.
"""###***Create a neural network model***"""
# define the model
def NueralNetwork():
    """Build and compile a small feed-forward regression network.

    Architecture: 9 input features -> Dense(8, relu) -> Dense(1).
    Compiled with the Adam optimizer against a mean-squared-error loss,
    also tracking MSE as a metric.
    """
    net = Sequential([
        Dense(units=8, input_dim=9, activation='relu'),
        Dense(units=1),
    ])
    net.compile(loss='mse', optimizer='adam', metrics=[metrics.mse])
    return net
model = NueralNetwork() #This loads the model.
"""###***Determine the number of trainable parameters***"""
model.summary() #This code gives the summary
"""### ***Split the data into training and test data***
"""
# Train for 24 epochs with mini-batches of 4 samples.
history = model.fit(X_train, Y_train, epochs=24, batch_size=4, verbose=1)
"""### **Predict on the test data**"""
prediction = model.predict(X_test)
"""### **Compute the mean squared error**
"""
mean_squared_error(Y_test, prediction)
"""###***Plot the error over the epochs***"""
# Plot the training MSE recorded by Keras across epochs.
plt.figure(figsize=(8, 8))
plt.plot(history.history['mean_squared_error'])
plt.title('Model loss')
plt.ylabel('Mean Squared Error')
plt.xlabel('Epoch')
plt.show()
"""1) How many inputs would a neural network have if we tried to solve this problem? \\
##**9 — one input per feature column left after dropping `wage_per_hour`.**
2) How many outputs would the neural network have?
###***1 — a single regression output, the predicted wage.***
3) What is the goal here? What are we trying to achieve with machine learning?
###**The goal is to predict a worker's hourly wage (`wage_per_hour`) from the other features.**
""" | [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.figure",
"tensorflow.keras.Sequential",
"matplotlib.pyplot.ylabel",
"matplotlib.p... | [((299, 319), 'numpy.random.seed', 'np.random.seed', (['(1348)'], {}), '(1348)\n', (313, 319), True, 'import numpy as np\n'), ((759, 894), 'pandas.read_csv', 'pandas.read_csv', (['"""https://raw.githubusercontent.com/eijaz1/Deep-Learning-in-Keras-Tutorial/master/data/hourly_wages_data.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/eijaz1/Deep-Learning-in-Keras-Tutorial/master/data/hourly_wages_data.csv'\n )\n", (774, 894), False, 'import pandas\n'), ((1170, 1207), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.3)'}), '(X, Y, test_size=0.3)\n', (1186, 1207), False, 'from sklearn.model_selection import train_test_split\n'), ((2112, 2150), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['Y_test', 'prediction'], {}), '(Y_test, prediction)\n', (2130, 2150), False, 'from sklearn.metrics import mean_squared_error\n'), ((2199, 2225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2209, 2225), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2273), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['mean_squared_error']"], {}), "(history.history['mean_squared_error'])\n", (2234, 2273), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2297), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (2283, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2330), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Squared Error"""'], {}), "('Mean Squared Error')\n", (2308, 2330), True, 'import matplotlib.pyplot as plt\n'), ((2331, 2350), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (2341, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2359, 2361), True, 'import matplotlib.pyplot as plt\n'), ((1374, 1386), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (1384, 1386), 
False, 'from tensorflow.keras import Sequential\n'), ((1442, 1488), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(8)', 'input_dim': '(9)', 'activation': '"""relu"""'}), "(units=8, input_dim=9, activation='relu')\n", (1447, 1488), False, 'from tensorflow.keras.layers import Dense\n'), ((1560, 1574), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (1565, 1574), False, 'from tensorflow.keras.layers import Dense\n')] |
import tkinter as tk
from tkinter import *
import cv2
import csv
import os
import numpy as np
from PIL import Image,ImageTk
import pandas as pd
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
####GUI for manually fill attendance
def manually_fill():
    """Open the manual-attendance GUI flow.

    A first window asks for a subject name; `fill_attendance` then opens a
    second window where (enrollment, student name) rows are typed in,
    collected into a pandas DataFrame and finally exported to a
    timestamped CSV under Attendance\Manually Attendance\.
    """
    global sb
    sb = tk.Tk()
    sb.iconbitmap('FRAMS.ico')
    sb.title("Enter subject name...")
    sb.geometry('580x320')
    sb.configure(background='snow')
    ##if no subject name is entered call this function
    def err_screen_for_subject():
        def ec_delete():
            ec.destroy()
        global ec
        ec = tk.Tk()
        ec.geometry('300x100')
        ec.iconbitmap('FRAMS.ico')
        ec.title('Warning!!')
        ec.configure(background='snow')
        Label(ec, text='Please enter your subject name!!!', fg='red', bg='white', font=('times', 16, ' bold ')).pack()
        Button(ec, text='OK', command=ec_delete, fg="black", bg="lawn green", width=9, height=1, activebackground="Red",
               font=('times', 15, ' bold ')).place(x=90, y=50)
    ##To enter students manually for a subject
    def fill_attendance():
        global subb
        subb = SUB_ENTRY.get()
        if subb=='':
            err_screen_for_subject()
        else:
            ##time & date for the csv file of subject
            ts = time.time()
            Date = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d')
            timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
            Hour, Minute, Second = timeStamp.split(":")
            fileName = str(subb + "_" + Date + "_Time_" + Hour + "_" + Minute + "_" + Second)
            col_names = ['Enrollment', 'Name', 'Date', 'Time']
            attendance = pd.DataFrame(columns=col_names)
            # Close the subject-name window and open the data-entry window.
            sb.destroy()
            MFW = tk.Tk()
            MFW.iconbitmap('FRAMS.ico')
            MFW.title("Manually attendance of "+ str(subb))
            MFW.geometry('880x470')
            MFW.configure(background='snow')
            def del_errsc2():
                errsc2.destroy()
            def err_screen1():
                # Warning pop-up when the student row is incomplete.
                global errsc2
                errsc2 = tk.Tk()
                errsc2.geometry('330x100')
                errsc2.iconbitmap('FRAMS.ico')
                errsc2.title('Warning!!')
                errsc2.configure(background='snow')
                Label(errsc2, text='Please enter Student & Enrollment!!!', fg='red', bg='white',
                      font=('times', 16, ' bold ')).pack()
                Button(errsc2, text='OK', command=del_errsc2, fg="black", bg="lawn green", width=9, height=1,
                       activebackground="Red", font=('times', 15, ' bold ')).place(x=90, y=50)
            def testVal(inStr, acttyp):
                # Entry validator: only digits may be typed into the enrollment box.
                if acttyp == '1': # insert
                    if not inStr.isdigit():
                        return False
                return True
            ENR = tk.Label(MFW, text="Enter Enrollment", width=15, height=2, fg="white", bg="blue2",
                           font=('times', 15, ' bold '))
            ENR.place(x=30, y=100)
            STU_NAME = tk.Label(MFW, text="Enter Student name", width=15, height=2, fg="white", bg="blue2",
                                font=('times', 15, ' bold '))
            STU_NAME.place(x=30, y=200)
            global ENR_ENTRY
            ENR_ENTRY = tk.Entry(MFW, width=20,validate='key', bg="yellow", fg="red", font=('times', 23, ' bold '))
            ENR_ENTRY['validatecommand'] = (ENR_ENTRY.register(testVal), '%P', '%d')
            ENR_ENTRY.place(x=290, y=105)
            def remove_enr():
                ENR_ENTRY.delete(first=0, last=22)
            STUDENT_ENTRY = tk.Entry(MFW, width=20, bg="yellow", fg="red", font=('times', 23, ' bold '))
            STUDENT_ENTRY.place(x=290, y=205)
            def remove_student():
                STUDENT_ENTRY.delete(first=0, last=22)
            ####get important variable
            def enter_data_DF():
                # Append one (enrollment, student, date, time) row to the DataFrame.
                ENROLLMENT = ENR_ENTRY.get()
                STUDENT = STUDENT_ENTRY.get()
                if ENROLLMENT=='':
                    err_screen1()
                elif STUDENT=='':
                    err_screen1()
                else:
                    #df = pd.read_csv("StudentDetails\StudentDetails.csv")
                    # NOTE(review): reuses `ts` captured when the window was
                    # opened, so every row gets the window-open time — confirm
                    # this is intended rather than time.time() per row.
                    date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                    #aa = df.loc[df['Enrollment'] == ENROLLMENT]['Name'].values
                    attendance.loc[len(attendance)] = [ENROLLMENT, STUDENT, date, timeStamp]
                    ENR_ENTRY.delete(first=0, last=22)
                    STUDENT_ENTRY.delete(first=0, last=22)
            def create_csv():
                # Save as a csv file, then display it in a simple grid window.
                csv_name='Attendance\Manually Attendance\\'+fileName+'.csv'
                print(attendance)
                attendance.to_csv(csv_name, index=False)
                O="CSV created Successfully"
                Notifi.configure(text=O, bg="Green", fg="white", width=33, font=('times', 19, 'bold'))
                Notifi.place(x=180, y=380)
                import csv
                import tkinter
                root = tkinter.Tk()
                root.title("Attendance of " + subb)
                root.configure(background='snow')
                with open(csv_name, newline="") as file:
                    reader = csv.reader(file)
                    r = 0
                    for col in reader:
                        c = 0
                        for row in col:
                            # i've added some styling
                            label = tkinter.Label(root, width=13, height=1, fg="black", font=('times', 13, ' bold '),
                                                  bg="lawn green", text=row, relief=tkinter.RIDGE)
                            label.grid(row=r, column=c)
                            c += 1
                        r += 1
                root.mainloop()
            Notifi = tk.Label(MFW, text="CSV created Successfully", bg="Green", fg="white", width=33,
                              height=2, font=('times', 19, 'bold'))
            c1ear_enroll = tk.Button(MFW, text="Clear", command=remove_enr, fg="black", bg="deep pink", width=10,
                                     height=1,
                                     activebackground="Red", font=('times', 15, ' bold '))
            c1ear_enroll.place(x=690, y=100)
            c1ear_student = tk.Button(MFW, text="Clear", command=remove_student, fg="black", bg="deep pink", width=10,
                                      height=1,
                                      activebackground="Red", font=('times', 15, ' bold '))
            c1ear_student.place(x=690, y=200)
            DATA_SUB = tk.Button(MFW, text="Enter Data",command=enter_data_DF, fg="black", bg="lime green", width=20,
                                 height=2,
                                 activebackground="Red", font=('times', 15, ' bold '))
            DATA_SUB.place(x=170, y=300)
            MAKE_CSV = tk.Button(MFW, text="Convert to CSV",command=create_csv, fg="black", bg="red", width=20,
                                 height=2,
                                 activebackground="Red", font=('times', 15, ' bold '))
            MAKE_CSV.place(x=570, y=300)
            def attf():
                import subprocess
                subprocess.Popen(r'explorer /open,".\Attendance\Manually Attendance\"') #open attendance sheet window
            attf = tk.Button(MFW, text="Check Sheets",command=attf,fg="black" ,bg="lawn green" ,width=12 ,height=1 ,activebackground = "Red" ,font=('times', 14, ' bold '))
            attf.place(x=730, y=410)
            MFW.mainloop()
    SUB = tk.Label(sb, text="Enter Subject", width=15, height=2, fg="white", bg="blue2", font=('times', 15, ' bold '))
    SUB.place(x=30, y=100)
    global SUB_ENTRY
    SUB_ENTRY = tk.Entry(sb, width=20, bg="yellow", fg="red", font=('times', 23, ' bold '))
    SUB_ENTRY.place(x=250, y=105)
    fill_manual_attendance = tk.Button(sb, text="Fill Attendance",command=fill_attendance, fg="white", bg="deep pink", width=20, height=2,
                                       activebackground="Red", font=('times', 15, ' bold '))
    fill_manual_attendance.place(x=250, y=160)
    sb.mainloop()
##For clear textbox
def clear():
    """Erase the contents of the global enrollment entry box `txt`."""
    txt.delete(0, 22)
def clear1():
    """Erase the contents of the global student-name entry box `txt2`."""
    txt2.delete(0, 22)
def del_sc1():
    # Dismiss the warning window created by err_screen().
    sc1.destroy()
def err_screen():
    """Pop up a warning window when enrollment or name is missing.

    The window handle is stored in the global `sc1` so that the OK
    button's callback (del_sc1) can destroy it.
    """
    global sc1
    sc1 = tk.Tk()
    sc1.geometry('300x100')
    sc1.iconbitmap('FRAMS.ico')
    sc1.title('Warning!!')
    sc1.configure(background='snow')
    Label(sc1,text='Enrollment & Name required!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack()
    Button(sc1,text='OK',command=del_sc1,fg="black" ,bg="lawn green" ,width=9 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold ')).place(x=90,y= 50)
##Error screen2
def del_sc2():
    # Dismiss the warning window created by err_screen1().
    sc2.destroy()
def err_screen1():
    """Pop up a warning window when no subject name was entered.

    The window handle is stored in the global `sc2` so that the OK
    button's callback (del_sc2) can destroy it.
    """
    global sc2
    sc2 = tk.Tk()
    sc2.geometry('300x100')
    sc2.iconbitmap('FRAMS.ico')
    sc2.title('Warning!!')
    sc2.configure(background='snow')
    Label(sc2,text='Please enter your subject name!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack()
    Button(sc2,text='OK',command=del_sc2,fg="black" ,bg="lawn green" ,width=9 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold ')).place(x=90,y= 50)
###For take images for datasets
def take_img():
    """Capture up to ~150 face samples from the webcam for one student.

    Reads enrollment/name from the global entry boxes `txt`/`txt2`,
    detects faces with a Haar cascade, saves each grayscale face crop as
    TrainingImage/ <name>.<enrollment>.<n>.jpg, then appends the student
    to StudentDetails\StudentDetails.csv and reports via `Notification`.
    """
    l1 = txt.get()
    l2 = txt2.get()
    if l1 == '':
        err_screen()
    elif l2 == '':
        err_screen()
    else:
        try:
            cam = cv2.VideoCapture(0)
            detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
            Enrollment = txt.get()
            Name = txt2.get()
            sampleNum = 0
            while (True):
                ret, img = cam.read()
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                faces = detector.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    # incrementing sample number
                    sampleNum = sampleNum + 1
                    # saving the captured face in the dataset folder
                    cv2.imwrite("TrainingImage/ " + Name + "." + Enrollment + '.' + str(sampleNum) + ".jpg",
                                gray[y:y + h, x:x + w])
                    cv2.imshow('Frame', img)
                # wait for 100 miliseconds
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                # break if the sample number is more than 70
                elif sampleNum > 150:
                    break
            cam.release()
            cv2.destroyAllWindows()
            ts = time.time()
            Date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
            Time = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
            row = [Enrollment, Name, Date, Time]
            with open('StudentDetails\StudentDetails.csv', 'a+') as csvFile:
                writer = csv.writer(csvFile, delimiter=',')
                writer.writerow(row)
            # NOTE(review): the `with` block above already closed the file;
            # this explicit close is redundant.
            csvFile.close()
            clear()
            clear1()
            res = "Images Saved for Enrollment : " + Enrollment + " Name : " + Name
            Notification.configure(text=res, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
            Notification.place(x=250, y=400)
        except FileExistsError as F:
            f = 'Student Data already exists'
            Notification.configure(text=f, bg="Red", width=21)
            Notification.place(x=450, y=400)
###for choose subject and fill attendance
def subjectchoose():
    """Open the subject-selection window and run automatic attendance.

    `Fillattendances` captures webcam frames for ~20 seconds, recognizes
    faces with the trained LBPH model, records each recognized student
    once, writes a timestamped CSV under Attendance/ and finally shows
    the sheet in a tkinter grid.
    """
    def Fillattendances():
        sub=tx.get()
        now = time.time()  ###For calculate seconds of video
        future = now + 20
        if time.time() < future:
            if sub == '':
                err_screen1()
            else:
                recognizer = cv2.face.LBPHFaceRecognizer_create()  # cv2.createLBPHFaceRecognizer()
                try:
                    recognizer.read("TrainingImageLabel\Trainner.yml")
                except:
                    e = 'Model not found,Please train model'
                    Notifica.configure(text=e, bg="red", fg="black", width=33, font=('times', 15, 'bold'))
                    Notifica.place(x=20, y=250)
                    return
                harcascadePath = "haarcascade_frontalface_default.xml"
                faceCascade = cv2.CascadeClassifier(harcascadePath)
                df = pd.read_csv("StudentDetails\StudentDetails.csv")
                cam = cv2.VideoCapture(0)
                font = cv2.FONT_HERSHEY_SIMPLEX
                col_names = ['Enrollment', 'Name', 'Date', 'Time']
                attendance = pd.DataFrame(columns=col_names)
                # Capture loop: runs until the 20-second window expires or ESC.
                while True:
                    ret, im = cam.read()
                    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
                    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
                    for (x, y, w, h) in faces:
                        global Id
                        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
                        # Lower LBPH confidence means a closer match.
                        if (conf <70):
                            print(conf)
                            global Subject
                            global aa
                            global date
                            global timeStamp
                            Subject = tx.get()
                            ts = time.time()
                            date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                            timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                            aa = df.loc[df['Enrollment'] == Id]['Name'].values
                            global tt
                            tt = str(Id) + "-" + aa
                            En = '15624031' + str(Id)
                            attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
                            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 260, 0), 7)
                            cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0,), 4)
                        else:
                            Id = 'Unknown'
                            tt = str(Id)
                            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7)
                            cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4)
                    if time.time() > future:
                        break
                    # Keep only the first sighting of each enrollment id.
                    attendance = attendance.drop_duplicates(['Enrollment'], keep='first')
                    cv2.imshow('Filling attedance..', im)
                    key = cv2.waitKey(30) & 0xff
                    if key == 27:
                        break
                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                Hour, Minute, Second = timeStamp.split(":")
                fileName = "Attendance/" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv"
                attendance = attendance.drop_duplicates(['Enrollment'], keep='first')
                print(attendance)
                attendance.to_csv(fileName, index=False)
                M = 'Attendance filled Successfully'
                Notifica.configure(text=M, bg="Green", fg="white", width=33, font=('times', 15, 'bold'))
                Notifica.place(x=20, y=250)
                cam.release()
                cv2.destroyAllWindows()
                # Display the freshly written CSV in a simple grid window.
                import csv
                import tkinter
                root = tkinter.Tk()
                root.title("Attendance of " + Subject)
                root.configure(background='snow')
                cs = './' + fileName
                with open(cs, newline="") as file:
                    reader = csv.reader(file)
                    r = 0
                    for col in reader:
                        c = 0
                        for row in col:
                            # i've added some styling
                            label = tkinter.Label(root, width=8, height=1, fg="black", font=('times', 15, ' bold '),
                                                  bg="lawn green", text=row, relief=tkinter.RIDGE)
                            label.grid(row=r, column=c)
                            c += 1
                        r += 1
                    root.mainloop()
        print(attendance)
    ###windo is frame for subject chooser
    windo = tk.Tk()
    windo.iconbitmap('FRAMS.ico')
    windo.title("Enter subject name...")
    windo.geometry('580x320')
    windo.configure(background='snow')
    Notifica = tk.Label(windo, text="Attendance filled Successfully", bg="Green", fg="white", width=33,
                        height=2, font=('times', 15, 'bold'))
    def Attf():
        import subprocess
        subprocess.Popen(r'explorer /select,".\Attendance\Manually Attendance\"') #open attendance sheet window
    attf = tk.Button(windo, text="Check Sheets",command=Attf,fg="black" ,bg="lawn green" ,width=12 ,height=1 ,activebackground = "Red" ,font=('times', 14, ' bold '))
    attf.place(x=430, y=255)
    sub = tk.Label(windo, text="Enter Subject", width=15, height=2, fg="white", bg="blue2", font=('times', 15, ' bold '))
    sub.place(x=30, y=100)
    tx = tk.Entry(windo, width=20, bg="yellow", fg="red", font=('times', 23, ' bold '))
    tx.place(x=250, y=105)
    fill_a = tk.Button(windo, text="Fill Attendance", fg="white",command=Fillattendances, bg="deep pink", width=20, height=2,
                       activebackground="Red", font=('times', 15, ' bold '))
    fill_a.place(x=250, y=160)
    windo.mainloop()
def admin_panel():
    """Show the registered student details CSV in a simple tkinter grid."""
    import csv
    import tkinter
    win = tkinter.Tk()
    win.title("Student Details")
    win.configure(background='snow')
    details_path = './StudentDetails/StudentDetails.csv'
    with open(details_path, newline="") as fh:
        for row_idx, record in enumerate(csv.reader(fh)):
            for col_idx, cell in enumerate(record):
                # One styled Label per CSV cell, laid out on a grid.
                cell_label = tkinter.Label(win, width=8, height=1, fg="black",
                                           font=('times', 15, ' bold '),
                                           bg="lawn green", text=cell,
                                           relief=tkinter.RIDGE)
                cell_label.grid(row=row_idx, column=col_idx)
        win.mainloop()
###For train the model
def trainimg():
    """Train the LBPH face recognizer from images under TrainingImage/.

    Loads every (face crop, id) pair via getImagesAndLabels, trains the
    model, and saves it to TrainingImageLabel\Trainner.yml. Progress and
    error messages are shown through the global `Notification` label;
    the function returns early on any failure.
    """
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    global detector
    detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    try:
        global faces, Id
        faces, Id = getImagesAndLabels("TrainingImage")
    except Exception as e:
        l = 'please make "TrainingImage" folder & put Images'
        Notification.configure(text=l, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
        Notification.place(x=250, y=400)
        return
    try:
        recognizer.train(faces, np.array(Id))
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        Notification.configure(text="No student information found !!!", bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
        Notification.place(x=250, y=400)
        return
    try:
        recognizer.write("TrainingImageLabel\Trainner.yml")
    except Exception as e:
        q = 'Please make "TrainingImageLabel" folder'
        Notification.configure(text=q, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
        Notification.place(x=250, y=400)
        return
    res = "Model Trained"
    Notification.configure(text=res, bg="SpringGreen3", width=50, font=('times', 18, 'bold'))
    Notification.place(x=250, y=400)
def getImagesAndLabels(path):
    """Collect face crops and their enrollment ids from a folder of images.

    The id is taken from the second dot-separated token of each file
    name.  Faces are located by the global `detector` cascade on the
    grayscale image.  Returns (faceSamples, Ids) as parallel lists.
    """
    faceSamples = []
    Ids = []
    for imagePath in (os.path.join(path, entry) for entry in os.listdir(path)):
        # grayscale PIL image -> uint8 numpy array
        grayArray = np.array(Image.open(imagePath).convert('L'), 'uint8')
        # id is encoded as the 2nd dot-separated field of the file name
        enrollId = int(os.path.split(imagePath)[-1].split(".")[1])
        # one sample (and one id) per detected face region
        for (x, y, w, h) in detector.detectMultiScale(grayArray):
            faceSamples.append(grayArray[y:y + h, x:x + w])
            Ids.append(enrollId)
    return faceSamples, Ids
def stats():
    """Show a bar chart of per-student attendance counts.

    Scans every *.csv sheet in the Attendance folder; a student is
    counted as present in a sheet when their name appears in that
    sheet's Name column.
    """
    stud_details = pd.read_csv('StudentDetails/StudentDetails.csv')
    students = list(stud_details.Name.dropna())
    subjects = []
    sub_strength = []
    att = {student: 0 for student in students}
    for file in os.listdir('Attendance'):
        if file.endswith('.csv'):
            sub = file.split('_')[0]
            # Plain statement instead of the old conditional-expression
            # used only for its side effect.
            if sub not in subjects:
                subjects.append(sub)
            attendance_df = pd.read_csv('Attendance/' + file)
            sub_strength.append(attendance_df.shape[0])
            for student in students:
                # NOTE(review): substring match — "Ann" also matches
                # "Annie"; confirm names are distinctive enough.
                for name in attendance_df['Name']:
                    if student in name:
                        att[student] += 1
    print(att)
    plt.bar(range(len(att)), list(att.values()), align='center')
    plt.xticks(range(len(att)), list(att.keys()))
    plt.show()
if __name__ == '__main__':
    #####Window is our Main frame of system
    window = tk.Tk()
    window.title("FRAMS-Face Recognition Based Attendance Management System")
    window.geometry('1280x720')
    window.configure(background='darkblue')
    # Let the single grid cell grow with the window.
    window.grid_rowconfigure(0, weight=1)
    window.grid_columnconfigure(0, weight=1)
    window.iconbitmap('FRAMS.ico')
    # Ask for confirmation before destroying the main window.
    def on_closing():
        from tkinter import messagebox
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            window.destroy()
    window.protocol("WM_DELETE_WINDOW", on_closing)
    message = tk.Label(window, text="Face-Recognition-Based-Attendance-Management-System", bg="dark blue", fg="snow", width=50,
                         height=3, font=('times', 30, 'italic bold '))
    message.place(x=80, y=20)
    # Shared status banner: reconfigured/placed by the action callbacks.
    Notification = tk.Label(window, text="All things good", bg="Green", fg="white", width=15,
                           height=3, font=('times', 17, 'bold'))
    lbl = tk.Label(window, text="Enter Enrollment", width=20, height=2, fg="black", bg="cyan", font=('times', 15, ' bold '))
    lbl.place(x=200, y=200)
    # Key-validator for the enrollment entry: on insert ('1') only digits pass.
    def testVal(inStr,acttyp):
        if acttyp == '1': #insert
            if not inStr.isdigit():
                return False
        return True
    txt = tk.Entry(window, validate="key", width=20, bg="snow", fg="red", font=('times', 25, ' bold '))
    txt['validatecommand'] = (txt.register(testVal),'%P','%d')
    txt.place(x=550, y=210)
    lbl2 = tk.Label(window, text="Enter Name", width=20, fg="black", bg="cyan", height=2, font=('times', 15, ' bold '))
    lbl2.place(x=200, y=300)
    txt2 = tk.Entry(window, width=20, bg="snow", fg="red", font=('times', 25, ' bold '))
    txt2.place(x=550, y=310)
    clearButton = tk.Button(window, text="Clear",command=clear,fg="black" ,bg="cyan" ,width=10 ,height=1 ,activebackground = "Red" ,font=('times', 15, ' bold '))
    clearButton.place(x=950, y=210)
    clearButton1 = tk.Button(window, text="Clear",command=clear1,fg="black" ,bg="cyan" ,width=10 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold '))
    clearButton1.place(x=950, y=310)
    # Action buttons wired to the top-level workflow functions.
    AP = tk.Button(window, text="Check Register students",command=admin_panel,fg="black" ,bg="grey" ,width=19 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold '))
    AP.place(x=990, y=410)
    takeImg = tk.Button(window, text="Take Images",command=take_img,fg="white" ,bg="grey",borderwidth=5 ,width=20 ,height=3, activebackground = "Red" ,font=('times', 15, ' bold '))
    takeImg.place(x=90, y=500)
    trainImg = tk.Button(window, text="Train Images",fg="white",command=trainimg ,bg="grey",borderwidth=5 ,width=20 ,height=3, activebackground = "Red" ,font=('times', 15, ' bold '))
    trainImg.place(x=390, y=500)
    FA = tk.Button(window, text="Automatic Attendance",fg="white",command=subjectchoose ,bg="grey",borderwidth=5 ,width=20 ,height=3, activebackground = "Red" ,font=('times', 15, ' bold '))
    FA.place(x=690, y=500)
    quitWindow = tk.Button(window, text="Manually Fill Attendance", command=manually_fill ,fg="white",borderwidth=5 ,bg="grey" ,width=20 ,height=3, activebackground = "Red" ,font=('times', 15, ' bold '))
    quitWindow.place(x=990, y=500)
    Stats = tk.Button(window, text="Statistics", fg="white", command=stats, bg="grey", borderwidth=5,
                      width=20, height=3, activebackground="Red", font=('times', 15, ' bold '))
    Stats.place(x=590, y=600)
window.mainloop() | [
"csv.reader",
"pandas.read_csv",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"tkinter.Label",
"pandas.DataFrame",
"tkinter.Button",
"cv2.cvtColor",
"tkinter.Entry",
"cv2.destroyAllWindows",
"tkinter.Tk",
"subprocess.Popen",
"matplotlib.pyplot.show",
"csv.writer",
"cv2.waitKey",
"t... | [((351, 358), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (356, 358), True, 'import tkinter as tk\n'), ((7886, 7999), 'tkinter.Label', 'tk.Label', (['sb'], {'text': '"""Enter Subject"""', 'width': '(15)', 'height': '(2)', 'fg': '"""white"""', 'bg': '"""blue2"""', 'font': "('times', 15, ' bold ')"}), "(sb, text='Enter Subject', width=15, height=2, fg='white', bg=\n 'blue2', font=('times', 15, ' bold '))\n", (7894, 7999), True, 'import tkinter as tk\n'), ((8061, 8136), 'tkinter.Entry', 'tk.Entry', (['sb'], {'width': '(20)', 'bg': '"""yellow"""', 'fg': '"""red"""', 'font': "('times', 23, ' bold ')"}), "(sb, width=20, bg='yellow', fg='red', font=('times', 23, ' bold '))\n", (8069, 8136), True, 'import tkinter as tk\n'), ((8203, 8376), 'tkinter.Button', 'tk.Button', (['sb'], {'text': '"""Fill Attendance"""', 'command': 'fill_attendance', 'fg': '"""white"""', 'bg': '"""deep pink"""', 'width': '(20)', 'height': '(2)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(sb, text='Fill Attendance', command=fill_attendance, fg='white',\n bg='deep pink', width=20, height=2, activebackground='Red', font=(\n 'times', 15, ' bold '))\n", (8212, 8376), True, 'import tkinter as tk\n'), ((8647, 8654), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (8652, 8654), True, 'import tkinter as tk\n'), ((9147, 9154), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (9152, 9154), True, 'import tkinter as tk\n'), ((16867, 16874), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (16872, 16874), True, 'import tkinter as tk\n'), ((17034, 17165), 'tkinter.Label', 'tk.Label', (['windo'], {'text': '"""Attendance filled Successfully"""', 'bg': '"""Green"""', 'fg': '"""white"""', 'width': '(33)', 'height': '(2)', 'font': "('times', 15, 'bold')"}), "(windo, text='Attendance filled Successfully', bg='Green', fg=\n 'white', width=33, height=2, font=('times', 15, 'bold'))\n", (17042, 17165), True, 'import tkinter as tk\n'), ((17357, 17520), 'tkinter.Button', 'tk.Button', (['windo'], {'text': '"""Check 
Sheets"""', 'command': 'Attf', 'fg': '"""black"""', 'bg': '"""lawn green"""', 'width': '(12)', 'height': '(1)', 'activebackground': '"""Red"""', 'font': "('times', 14, ' bold ')"}), "(windo, text='Check Sheets', command=Attf, fg='black', bg=\n 'lawn green', width=12, height=1, activebackground='Red', font=('times',\n 14, ' bold '))\n", (17366, 17520), True, 'import tkinter as tk\n'), ((17556, 17672), 'tkinter.Label', 'tk.Label', (['windo'], {'text': '"""Enter Subject"""', 'width': '(15)', 'height': '(2)', 'fg': '"""white"""', 'bg': '"""blue2"""', 'font': "('times', 15, ' bold ')"}), "(windo, text='Enter Subject', width=15, height=2, fg='white', bg=\n 'blue2', font=('times', 15, ' bold '))\n", (17564, 17672), True, 'import tkinter as tk\n'), ((17705, 17783), 'tkinter.Entry', 'tk.Entry', (['windo'], {'width': '(20)', 'bg': '"""yellow"""', 'fg': '"""red"""', 'font': "('times', 23, ' bold ')"}), "(windo, width=20, bg='yellow', fg='red', font=('times', 23, ' bold '))\n", (17713, 17783), True, 'import tkinter as tk\n'), ((17825, 18002), 'tkinter.Button', 'tk.Button', (['windo'], {'text': '"""Fill Attendance"""', 'fg': '"""white"""', 'command': 'Fillattendances', 'bg': '"""deep pink"""', 'width': '(20)', 'height': '(2)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(windo, text='Fill Attendance', fg='white', command=\n Fillattendances, bg='deep pink', width=20, height=2, activebackground=\n 'Red', font=('times', 15, ' bold '))\n", (17834, 18002), True, 'import tkinter as tk\n'), ((18132, 18144), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (18142, 18144), False, 'import tkinter\n'), ((18824, 18860), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (18858, 18860), False, 'import cv2\n'), ((18896, 18956), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (18917, 18956), False, 'import cv2\n'), ((20992, 
21040), 'pandas.read_csv', 'pd.read_csv', (['"""StudentDetails/StudentDetails.csv"""'], {}), "('StudentDetails/StudentDetails.csv')\n", (21003, 21040), True, 'import pandas as pd\n'), ((21190, 21214), 'os.listdir', 'os.listdir', (['"""Attendance"""'], {}), "('Attendance')\n", (21200, 21214), False, 'import os\n'), ((21779, 21789), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21787, 21789), True, 'import matplotlib.pyplot as plt\n'), ((21877, 21884), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (21882, 21884), True, 'import tkinter as tk\n'), ((22388, 22555), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Face-Recognition-Based-Attendance-Management-System"""', 'bg': '"""dark blue"""', 'fg': '"""snow"""', 'width': '(50)', 'height': '(3)', 'font': "('times', 30, 'italic bold ')"}), "(window, text='Face-Recognition-Based-Attendance-Management-System',\n bg='dark blue', fg='snow', width=50, height=3, font=('times', 30,\n 'italic bold '))\n", (22396, 22555), True, 'import tkinter as tk\n'), ((22622, 22738), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""All things good"""', 'bg': '"""Green"""', 'fg': '"""white"""', 'width': '(15)', 'height': '(3)', 'font': "('times', 17, 'bold')"}), "(window, text='All things good', bg='Green', fg='white', width=15,\n height=3, font=('times', 17, 'bold'))\n", (22630, 22738), True, 'import tkinter as tk\n'), ((22772, 22890), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Enter Enrollment"""', 'width': '(20)', 'height': '(2)', 'fg': '"""black"""', 'bg': '"""cyan"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Enter Enrollment', width=20, height=2, fg='black',\n bg='cyan', font=('times', 15, ' bold '))\n", (22780, 22890), True, 'import tkinter as tk\n'), ((23081, 23179), 'tkinter.Entry', 'tk.Entry', (['window'], {'validate': '"""key"""', 'width': '(20)', 'bg': '"""snow"""', 'fg': '"""red"""', 'font': "('times', 25, ' bold ')"}), "(window, validate='key', width=20, bg='snow', fg='red', font=(\n 
'times', 25, ' bold '))\n", (23089, 23179), True, 'import tkinter as tk\n'), ((23278, 23391), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Enter Name"""', 'width': '(20)', 'fg': '"""black"""', 'bg': '"""cyan"""', 'height': '(2)', 'font': "('times', 15, ' bold ')"}), "(window, text='Enter Name', width=20, fg='black', bg='cyan', height\n =2, font=('times', 15, ' bold '))\n", (23286, 23391), True, 'import tkinter as tk\n'), ((23428, 23505), 'tkinter.Entry', 'tk.Entry', (['window'], {'width': '(20)', 'bg': '"""snow"""', 'fg': '"""red"""', 'font': "('times', 25, ' bold ')"}), "(window, width=20, bg='snow', fg='red', font=('times', 25, ' bold '))\n", (23436, 23505), True, 'import tkinter as tk\n'), ((23554, 23702), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Clear"""', 'command': 'clear', 'fg': '"""black"""', 'bg': '"""cyan"""', 'width': '(10)', 'height': '(1)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Clear', command=clear, fg='black', bg='cyan', width\n =10, height=1, activebackground='Red', font=('times', 15, ' bold '))\n", (23563, 23702), True, 'import tkinter as tk\n'), ((23757, 23905), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Clear"""', 'command': 'clear1', 'fg': '"""black"""', 'bg': '"""cyan"""', 'width': '(10)', 'height': '(1)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Clear', command=clear1, fg='black', bg='cyan',\n width=10, height=1, activebackground='Red', font=('times', 15, ' bold '))\n", (23766, 23905), True, 'import tkinter as tk\n'), ((23951, 24128), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Check Register students"""', 'command': 'admin_panel', 'fg': '"""black"""', 'bg': '"""grey"""', 'width': '(19)', 'height': '(1)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Check Register students', command=admin_panel, fg=\n 'black', bg='grey', width=19, height=1, 
activebackground='Red', font=(\n 'times', 15, ' bold '))\n", (23960, 24128), True, 'import tkinter as tk\n'), ((24163, 24340), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Take Images"""', 'command': 'take_img', 'fg': '"""white"""', 'bg': '"""grey"""', 'borderwidth': '(5)', 'width': '(20)', 'height': '(3)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Take Images', command=take_img, fg='white', bg=\n 'grey', borderwidth=5, width=20, height=3, activebackground='Red', font\n =('times', 15, ' bold '))\n", (24172, 24340), True, 'import tkinter as tk\n'), ((24380, 24558), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Train Images"""', 'fg': '"""white"""', 'command': 'trainimg', 'bg': '"""grey"""', 'borderwidth': '(5)', 'width': '(20)', 'height': '(3)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Train Images', fg='white', command=trainimg, bg=\n 'grey', borderwidth=5, width=20, height=3, activebackground='Red', font\n =('times', 15, ' bold '))\n", (24389, 24558), True, 'import tkinter as tk\n'), ((24593, 24783), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Automatic Attendance"""', 'fg': '"""white"""', 'command': 'subjectchoose', 'bg': '"""grey"""', 'borderwidth': '(5)', 'width': '(20)', 'height': '(3)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Automatic Attendance', fg='white', command=\n subjectchoose, bg='grey', borderwidth=5, width=20, height=3,\n activebackground='Red', font=('times', 15, ' bold '))\n", (24602, 24783), True, 'import tkinter as tk\n'), ((24822, 25015), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Manually Fill Attendance"""', 'command': 'manually_fill', 'fg': '"""white"""', 'borderwidth': '(5)', 'bg': '"""grey"""', 'width': '(20)', 'height': '(3)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Manually Fill Attendance', 
command=manually_fill,\n fg='white', borderwidth=5, bg='grey', width=20, height=3,\n activebackground='Red', font=('times', 15, ' bold '))\n", (24831, 25015), True, 'import tkinter as tk\n'), ((25060, 25232), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Statistics"""', 'fg': '"""white"""', 'command': 'stats', 'bg': '"""grey"""', 'borderwidth': '(5)', 'width': '(20)', 'height': '(3)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(window, text='Statistics', fg='white', command=stats, bg='grey',\n borderwidth=5, width=20, height=3, activebackground='Red', font=(\n 'times', 15, ' bold '))\n", (25069, 25232), True, 'import tkinter as tk\n'), ((664, 671), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (669, 671), True, 'import tkinter as tk\n'), ((11980, 11991), 'time.time', 'time.time', ([], {}), '()\n', (11989, 11991), False, 'import time\n'), ((17240, 17315), 'subprocess.Popen', 'subprocess.Popen', (['"""explorer /select,".\\\\Attendance\\\\Manually Attendance\\\\\\""""'], {}), '(\'explorer /select,".\\\\Attendance\\\\Manually Attendance\\\\"\')\n', (17256, 17315), False, 'import subprocess\n'), ((18321, 18337), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (18331, 18337), False, 'import csv\n'), ((20046, 20067), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (20058, 20067), False, 'import os\n'), ((20494, 20521), 'numpy.array', 'np.array', (['pilImage', '"""uint8"""'], {}), "(pilImage, 'uint8')\n", (20502, 20521), True, 'import numpy as np\n'), ((22236, 22290), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""Quit"""', '"""Do you want to quit?"""'], {}), "('Quit', 'Do you want to quit?')\n", (22258, 22290), False, 'from tkinter import messagebox\n'), ((1381, 1392), 'time.time', 'time.time', ([], {}), '()\n', (1390, 1392), False, 'import time\n'), ((1788, 1819), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col_names'}), '(columns=col_names)\n', (1800, 1819), True, 'import pandas 
as pd\n'), ((1864, 1871), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1869, 1871), True, 'import tkinter as tk\n'), ((2972, 3089), 'tkinter.Label', 'tk.Label', (['MFW'], {'text': '"""Enter Enrollment"""', 'width': '(15)', 'height': '(2)', 'fg': '"""white"""', 'bg': '"""blue2"""', 'font': "('times', 15, ' bold ')"}), "(MFW, text='Enter Enrollment', width=15, height=2, fg='white', bg=\n 'blue2', font=('times', 15, ' bold '))\n", (2980, 3089), True, 'import tkinter as tk\n'), ((3171, 3290), 'tkinter.Label', 'tk.Label', (['MFW'], {'text': '"""Enter Student name"""', 'width': '(15)', 'height': '(2)', 'fg': '"""white"""', 'bg': '"""blue2"""', 'font': "('times', 15, ' bold ')"}), "(MFW, text='Enter Student name', width=15, height=2, fg='white', bg\n ='blue2', font=('times', 15, ' bold '))\n", (3179, 3290), True, 'import tkinter as tk\n'), ((3412, 3509), 'tkinter.Entry', 'tk.Entry', (['MFW'], {'width': '(20)', 'validate': '"""key"""', 'bg': '"""yellow"""', 'fg': '"""red"""', 'font': "('times', 23, ' bold ')"}), "(MFW, width=20, validate='key', bg='yellow', fg='red', font=(\n 'times', 23, ' bold '))\n", (3420, 3509), True, 'import tkinter as tk\n'), ((3742, 3818), 'tkinter.Entry', 'tk.Entry', (['MFW'], {'width': '(20)', 'bg': '"""yellow"""', 'fg': '"""red"""', 'font': "('times', 23, ' bold ')"}), "(MFW, width=20, bg='yellow', fg='red', font=('times', 23, ' bold '))\n", (3750, 3818), True, 'import tkinter as tk\n'), ((6123, 6245), 'tkinter.Label', 'tk.Label', (['MFW'], {'text': '"""CSV created Successfully"""', 'bg': '"""Green"""', 'fg': '"""white"""', 'width': '(33)', 'height': '(2)', 'font': "('times', 19, 'bold')"}), "(MFW, text='CSV created Successfully', bg='Green', fg='white',\n width=33, height=2, font=('times', 19, 'bold'))\n", (6131, 6245), True, 'import tkinter as tk\n'), ((6303, 6457), 'tkinter.Button', 'tk.Button', (['MFW'], {'text': '"""Clear"""', 'command': 'remove_enr', 'fg': '"""black"""', 'bg': '"""deep pink"""', 'width': '(10)', 'height': '(1)', 
'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(MFW, text='Clear', command=remove_enr, fg='black', bg='deep pink',\n width=10, height=1, activebackground='Red', font=('times', 15, ' bold '))\n", (6312, 6457), True, 'import tkinter as tk\n'), ((6602, 6765), 'tkinter.Button', 'tk.Button', (['MFW'], {'text': '"""Clear"""', 'command': 'remove_student', 'fg': '"""black"""', 'bg': '"""deep pink"""', 'width': '(10)', 'height': '(1)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(MFW, text='Clear', command=remove_student, fg='black', bg=\n 'deep pink', width=10, height=1, activebackground='Red', font=('times',\n 15, ' bold '))\n", (6611, 6765), True, 'import tkinter as tk\n'), ((6904, 7072), 'tkinter.Button', 'tk.Button', (['MFW'], {'text': '"""Enter Data"""', 'command': 'enter_data_DF', 'fg': '"""black"""', 'bg': '"""lime green"""', 'width': '(20)', 'height': '(2)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(MFW, text='Enter Data', command=enter_data_DF, fg='black', bg=\n 'lime green', width=20, height=2, activebackground='Red', font=('times',\n 15, ' bold '))\n", (6913, 7072), True, 'import tkinter as tk\n'), ((7194, 7356), 'tkinter.Button', 'tk.Button', (['MFW'], {'text': '"""Convert to CSV"""', 'command': 'create_csv', 'fg': '"""black"""', 'bg': '"""red"""', 'width': '(20)', 'height': '(2)', 'activebackground': '"""Red"""', 'font': "('times', 15, ' bold ')"}), "(MFW, text='Convert to CSV', command=create_csv, fg='black', bg=\n 'red', width=20, height=2, activebackground='Red', font=('times', 15,\n ' bold '))\n", (7203, 7356), True, 'import tkinter as tk\n'), ((7652, 7813), 'tkinter.Button', 'tk.Button', (['MFW'], {'text': '"""Check Sheets"""', 'command': 'attf', 'fg': '"""black"""', 'bg': '"""lawn green"""', 'width': '(12)', 'height': '(1)', 'activebackground': '"""Red"""', 'font': "('times', 14, ' bold ')"}), "(MFW, text='Check Sheets', command=attf, fg='black', bg=\n 'lawn green', 
width=12, height=1, activebackground='Red', font=('times',\n 14, ' bold '))\n", (7661, 7813), True, 'import tkinter as tk\n'), ((12064, 12075), 'time.time', 'time.time', ([], {}), '()\n', (12073, 12075), False, 'import time\n'), ((19327, 19339), 'numpy.array', 'np.array', (['Id'], {}), '(Id)\n', (19335, 19339), True, 'import numpy as np\n'), ((20077, 20093), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (20087, 20093), False, 'import os\n'), ((21385, 21418), 'pandas.read_csv', 'pd.read_csv', (["('Attendance/' + file)"], {}), "('Attendance/' + file)\n", (21396, 21418), True, 'import pandas as pd\n'), ((2204, 2211), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2209, 2211), True, 'import tkinter as tk\n'), ((5322, 5334), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (5332, 5334), False, 'import tkinter\n'), ((7529, 7602), 'subprocess.Popen', 'subprocess.Popen', (['"""explorer /open,".\\\\Attendance\\\\Manually Attendance\\\\\\""""'], {}), '(\'explorer /open,".\\\\Attendance\\\\Manually Attendance\\\\"\')\n', (7545, 7602), False, 'import subprocess\n'), ((9764, 9783), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (9780, 9783), False, 'import cv2\n'), ((9807, 9867), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (9828, 9867), False, 'import cv2\n'), ((10930, 10953), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10951, 10953), False, 'import cv2\n'), ((10971, 10982), 'time.time', 'time.time', ([], {}), '()\n', (10980, 10982), False, 'import time\n'), ((12189, 12225), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (12223, 12225), False, 'import cv2\n'), ((12721, 12758), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['harcascadePath'], {}), '(harcascadePath)\n', (12742, 12758), False, 'import cv2\n'), ((12780, 12829), 'pandas.read_csv', 'pd.read_csv', 
(['"""StudentDetails\\\\StudentDetails.csv"""'], {}), "('StudentDetails\\\\StudentDetails.csv')\n", (12791, 12829), True, 'import pandas as pd\n'), ((12851, 12870), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (12867, 12870), False, 'import cv2\n'), ((13015, 13046), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col_names'}), '(columns=col_names)\n', (13027, 13046), True, 'import pandas as pd\n'), ((15079, 15090), 'time.time', 'time.time', ([], {}), '()\n', (15088, 15090), False, 'import time\n'), ((15860, 15883), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15881, 15883), False, 'import cv2\n'), ((15966, 15978), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (15976, 15978), False, 'import tkinter\n'), ((18492, 18625), 'tkinter.Label', 'tkinter.Label', (['root'], {'width': '(8)', 'height': '(1)', 'fg': '"""black"""', 'font': "('times', 15, ' bold ')", 'bg': '"""lawn green"""', 'text': 'row', 'relief': 'tkinter.RIDGE'}), "(root, width=8, height=1, fg='black', font=('times', 15,\n ' bold '), bg='lawn green', text=row, relief=tkinter.RIDGE)\n", (18505, 18625), False, 'import tkinter\n'), ((20378, 20399), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (20388, 20399), False, 'from PIL import Image, ImageTk\n'), ((1412, 1447), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (1443, 1447), False, 'import datetime\n'), ((1493, 1528), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (1524, 1528), False, 'import datetime\n'), ((5523, 5539), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (5533, 5539), False, 'import csv\n'), ((10046, 10083), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (10058, 10083), False, 'import cv2\n'), ((11286, 11320), 'csv.writer', 'csv.writer', (['csvFile'], {'delimiter': '""","""'}), "(csvFile, delimiter=',')\n", (11296, 11320), 
False, 'import csv\n'), ((13143, 13179), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (13155, 13179), False, 'import cv2\n'), ((14906, 14943), 'cv2.imshow', 'cv2.imshow', (['"""Filling attedance.."""', 'im'], {}), "('Filling attedance..', im)\n", (14916, 14943), False, 'import cv2\n'), ((16201, 16217), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (16211, 16217), False, 'import csv\n'), ((10211, 10269), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (10224, 10269), False, 'import cv2\n'), ((10619, 10643), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'img'], {}), "('Frame', img)\n", (10629, 10643), False, 'import cv2\n'), ((11002, 11037), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (11033, 11037), False, 'import datetime\n'), ((11078, 11113), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (11109, 11113), False, 'import datetime\n'), ((14743, 14754), 'time.time', 'time.time', ([], {}), '()\n', (14752, 14754), False, 'import time\n'), ((14970, 14985), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (14981, 14985), False, 'import cv2\n'), ((15114, 15149), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (15145, 15149), False, 'import datetime\n'), ((15199, 15234), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (15230, 15234), False, 'import datetime\n'), ((5766, 5900), 'tkinter.Label', 'tkinter.Label', (['root'], {'width': '(13)', 'height': '(1)', 'fg': '"""black"""', 'font': "('times', 13, ' bold ')", 'bg': '"""lawn green"""', 'text': 'row', 'relief': 'tkinter.RIDGE'}), "(root, width=13, height=1, fg='black', font=('times', 13,\n ' bold '), bg='lawn green', text=row, relief=tkinter.RIDGE)\n", (5779, 5900), 
False, 'import tkinter\n'), ((10706, 10720), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10717, 10720), False, 'import cv2\n'), ((13736, 13747), 'time.time', 'time.time', ([], {}), '()\n', (13745, 13747), False, 'import time\n'), ((14276, 14333), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', '(0, 260, 0)', '(7)'], {}), '(im, (x, y), (x + w, y + h), (0, 260, 0), 7)\n', (14289, 14333), False, 'import cv2\n'), ((14570, 14628), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', '(0, 25, 255)', '(7)'], {}), '(im, (x, y), (x + w, y + h), (0, 25, 255), 7)\n', (14583, 14628), False, 'import cv2\n'), ((16444, 16577), 'tkinter.Label', 'tkinter.Label', (['root'], {'width': '(8)', 'height': '(1)', 'fg': '"""black"""', 'font': "('times', 15, ' bold ')", 'bg': '"""lawn green"""', 'text': 'row', 'relief': 'tkinter.RIDGE'}), "(root, width=8, height=1, fg='black', font=('times', 15,\n ' bold '), bg='lawn green', text=row, relief=tkinter.RIDGE)\n", (16457, 16577), False, 'import tkinter\n'), ((20580, 20604), 'os.path.split', 'os.path.split', (['imagePath'], {}), '(imagePath)\n', (20593, 20604), False, 'import os\n'), ((4381, 4416), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (4412, 4416), False, 'import datetime\n'), ((4470, 4505), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (4501, 4505), False, 'import datetime\n'), ((13783, 13818), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (13814, 13818), False, 'import datetime\n'), ((13880, 13915), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (13911, 13915), False, 'import datetime\n')] |
import fire
import enum
import numpy as np
from collections import defaultdict
from statistics import mean
from utils import read_json
class ArenaEvaluator:
    """Scorer for playlist-continuation submissions.

    A submission is scored with nDCG over the recommended songs (85%)
    and tags (15%); ideal-DCG values for ground-truth lengths 0..100
    are precomputed once in __init__.
    """
    def _idcg(self, l):
        """Ideal DCG for a ground-truth list of length *l*."""
        return sum((1.0 / np.log(i + 2) for i in range(l)))
    def __init__(self):
        # Cache iDCG(0..100); answers never exceed 100 items here.
        self._idcgs = [self._idcg(i) for i in range(101)]
    def _ndcg(self, gt, rec):
        """Normalized DCG of recommendations *rec* against ground truth *gt*.

        Assumes 1 <= len(gt) <= 100 (len(gt)==0 would divide by zero).
        """
        # O(1) membership instead of O(len(gt)) list scans per item;
        # len(gt) is still taken from the original list so duplicate
        # ground-truth entries cannot change the normalization.
        gt_set = set(gt)
        dcg = 0.0
        for i, r in enumerate(rec):
            if r in gt_set:
                dcg += 1.0 / np.log(i + 2)
        return dcg / self._idcgs[len(gt)]
    def _eval(self, results, questions, answers):
        """Score *results* against *answers*, split out per question type.

        Raises Exception for unknown playlist ids and for playlists that
        do not contain exactly 100 unique songs and 10 unique tags.
        Returns (total_music_ndcgs, total_tag_ndcgs, total_scores,
        case_music_ndcgs, case_tag_ndcgs, case_scores).
        """
        if len(results) < len(answers):
            print("[Warning] 제출한 정답이 부족합니다.")
        questions_dict = {p['id']: p for p in questions}
        answers_dict = {p['id']: p for p in answers}
        total_music_ndcgs = list()
        total_tag_ndcgs = list()
        total_scores = list()
        case_music_ndcgs = defaultdict(list)
        case_tag_ndcgs = defaultdict(list)
        case_scores = defaultdict(list)
        for p in results:
            pid = p['id']
            songs = p['songs']
            tags = p['tags']
            if pid not in questions_dict:
                raise Exception(f"questions에 없습니다: {pid}")
            if pid not in answers_dict:
                raise Exception(f"answers 없습니다: {pid}")
            question = questions_dict[pid]
            answer = answers_dict[pid]
            question_type = get_question_type(question)
            # Validate playlist: exactly 100 unique songs, 10 unique tags.
            if len(songs) != 100:
                raise Exception(f"추천 곡 결과의 개수가 맞지 않습니다: {pid}")
            if len(tags) != 10:
                raise Exception(f"추천 태그 결과의 개수가 맞지 않습니다: {pid}")
            if len(set(songs)) != 100:
                raise Exception(f"한 플레이리스트에 중복된 곡 추천은 허용되지 않습니다: {pid}")
            if len(set(tags)) != 10:
                raise Exception(f"한 플레이리스트에 중복된 태그 추천은 허용되지 않습니다: {pid}")
            cur_music_ndcg = self._ndcg(answer['songs'], songs)
            cur_tag_ndcg = self._ndcg(answer['tags'], tags)
            # Combined score weights songs 85% and tags 15%.
            cur_score = cur_music_ndcg * 0.85 + cur_tag_ndcg * 0.15
            # Update total score
            total_music_ndcgs.append(cur_music_ndcg)
            total_tag_ndcgs.append(cur_tag_ndcg)
            total_scores.append(cur_score)
            # Update case score
            case_music_ndcgs[question_type].append(cur_music_ndcg)
            case_tag_ndcgs[question_type].append(cur_tag_ndcg)
            case_scores[question_type].append(cur_score)
        return (
            total_music_ndcgs, total_tag_ndcgs, total_scores,
            case_music_ndcgs, case_tag_ndcgs, case_scores,
        )
    def evaluate(self, gt_fname, rec_fname, question_fname):
        """Load the three JSON files, score the submission and print results.

        Any validation error is caught and printed rather than propagated.
        """
        try:
            questions = read_json(question_fname)
            answers = read_json(gt_fname)
            results = read_json(rec_fname)
            (total_music_ndcgs, total_tag_ndcgs, total_scores,
             case_music_ndcgs, case_tag_ndcgs, case_scores) = self._eval(results, questions, answers)
            print_scores(
                total_music_ndcgs, total_tag_ndcgs, total_scores,
                case_music_ndcgs, case_tag_ndcgs, case_scores,
            )
        except Exception as e:
            print(e)
class QuestionType(enum.Enum):
    """Category of a playlist question, keyed by which fields it provides.

    One member per combination of songs / tags / title presence; see
    QUESTION_TYPE_MAP for the mapping from presence flags to members.
    """
    ALL = enum.auto()
    SONG_TAG = enum.auto()
    SONG_TITLE = enum.auto()
    TAG_TITLE = enum.auto()
    SONG_ONLY = enum.auto()
    TAG_ONLY = enum.auto()
    TITLE_ONLY = enum.auto()
    NOTHING = enum.auto()
# Maps the presence flags of a question's fields to its QuestionType.
QUESTION_TYPE_MAP = {
    # (songs, tags, title): question_type
    (True, True, True): QuestionType.ALL,
    (True, True, False): QuestionType.SONG_TAG,
    (True, False, True): QuestionType.SONG_TITLE,
    (False, True, True): QuestionType.TAG_TITLE,
    (True, False, False): QuestionType.SONG_ONLY,
    (False, True, False): QuestionType.TAG_ONLY,
    (False, False, True): QuestionType.TITLE_ONLY,
    (False, False, False): QuestionType.NOTHING,
}
def get_question_type(question):
    """Classify *question* by which of songs / tags / title are non-empty."""
    presence = (
        len(question['songs']) > 0,
        len(question['tags']) > 0,
        question['plylst_title'] != "",
    )
    return QUESTION_TYPE_MAP[presence]
def print_score(music_ndcgs, tag_ndcgs, scores):
    """Print mean music nDCG, tag nDCG and overall score (6 significant digits)."""
    rows = (
        ("Music nDCG", music_ndcgs),
        ("Tag nDCG", tag_ndcgs),
        ("Score", scores),
    )
    for label, values in rows:
        print(f"{label}: {mean(values):.6}")
def print_scores(
    total_music_ndcgs, total_tag_ndcgs, total_scores,
    case_music_ndcgs, case_tag_ndcgs, case_scores,
):
    """Print the overall score block, then one block per covered question type.

    Question types absent from *case_music_ndcgs* are skipped.
    """
    print("=== Total score ===")
    print_score(total_music_ndcgs, total_tag_ndcgs, total_scores)
    for qtype in QuestionType:
        if qtype not in case_music_ndcgs:
            continue
        print(f"=== {qtype.name} score ===")
        print_score(
            case_music_ndcgs[qtype], case_tag_ndcgs[qtype], case_scores[qtype])
def get_scores(
    total_music_ndcgs, total_tag_ndcgs, total_scores,
    case_music_ndcgs, case_tag_ndcgs, case_scores,
):
    """Return mean metrics as a nested dict.

    The result maps 'total' (and each covered question type's name) to a
    dict with 'music_ndcg', 'tag_ndcg' and 'score' means.
    """
    def _summary(music, tag, score):
        # One row of mean metrics.
        return {
            'music_ndcg': mean(music),
            'tag_ndcg': mean(tag),
            'score': mean(score),
        }

    scores = {'total': _summary(total_music_ndcgs, total_tag_ndcgs, total_scores)}
    for qtype in QuestionType:
        if qtype in case_music_ndcgs:
            scores[qtype.name] = _summary(
                case_music_ndcgs[qtype], case_tag_ndcgs[qtype], case_scores[qtype])
    return scores
if __name__ == "__main__":
    # CLI entry point: expose ArenaEvaluator's public methods via python-fire.
    fire.Fire(ArenaEvaluator)
| [
"fire.Fire",
"numpy.log",
"collections.defaultdict",
"statistics.mean",
"enum.auto",
"utils.read_json"
] | [((3284, 3295), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3293, 3295), False, 'import enum\n'), ((3311, 3322), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3320, 3322), False, 'import enum\n'), ((3340, 3351), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3349, 3351), False, 'import enum\n'), ((3368, 3379), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3377, 3379), False, 'import enum\n'), ((3396, 3407), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3405, 3407), False, 'import enum\n'), ((3423, 3434), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3432, 3434), False, 'import enum\n'), ((3452, 3463), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3461, 3463), False, 'import enum\n'), ((3478, 3489), 'enum.auto', 'enum.auto', ([], {}), '()\n', (3487, 3489), False, 'import enum\n'), ((4295, 4312), 'statistics.mean', 'mean', (['music_ndcgs'], {}), '(music_ndcgs)\n', (4299, 4312), False, 'from statistics import mean\n'), ((4328, 4343), 'statistics.mean', 'mean', (['tag_ndcgs'], {}), '(tag_ndcgs)\n', (4332, 4343), False, 'from statistics import mean\n'), ((4356, 4368), 'statistics.mean', 'mean', (['scores'], {}), '(scores)\n', (4360, 4368), False, 'from statistics import mean\n'), ((5704, 5729), 'fire.Fire', 'fire.Fire', (['ArenaEvaluator'], {}), '(ArenaEvaluator)\n', (5713, 5729), False, 'import fire\n'), ((896, 913), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (907, 913), False, 'from collections import defaultdict\n'), ((939, 956), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (950, 956), False, 'from collections import defaultdict\n'), ((979, 996), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (990, 996), False, 'from collections import defaultdict\n'), ((5199, 5222), 'statistics.mean', 'mean', (['total_music_ndcgs'], {}), '(total_music_ndcgs)\n', (5203, 5222), False, 'from statistics import mean\n'), ((5244, 5265), 'statistics.mean', 'mean', (['total_tag_ndcgs'], {}), '(total_tag_ndcgs)\n', 
(5248, 5265), False, 'from statistics import mean\n'), ((5284, 5302), 'statistics.mean', 'mean', (['total_scores'], {}), '(total_scores)\n', (5288, 5302), False, 'from statistics import mean\n'), ((2738, 2763), 'utils.read_json', 'read_json', (['question_fname'], {}), '(question_fname)\n', (2747, 2763), False, 'from utils import read_json\n'), ((2786, 2805), 'utils.read_json', 'read_json', (['gt_fname'], {}), '(gt_fname)\n', (2795, 2805), False, 'from utils import read_json\n'), ((2828, 2848), 'utils.read_json', 'read_json', (['rec_fname'], {}), '(rec_fname)\n', (2837, 2848), False, 'from utils import read_json\n'), ((5487, 5524), 'statistics.mean', 'mean', (['case_music_ndcgs[question_type]'], {}), '(case_music_ndcgs[question_type])\n', (5491, 5524), False, 'from statistics import mean\n'), ((5550, 5585), 'statistics.mean', 'mean', (['case_tag_ndcgs[question_type]'], {}), '(case_tag_ndcgs[question_type])\n', (5554, 5585), False, 'from statistics import mean\n'), ((5608, 5640), 'statistics.mean', 'mean', (['case_scores[question_type]'], {}), '(case_scores[question_type])\n', (5612, 5640), False, 'from statistics import mean\n'), ((209, 222), 'numpy.log', 'np.log', (['(i + 2)'], {}), '(i + 2)\n', (215, 222), True, 'import numpy as np\n'), ((464, 477), 'numpy.log', 'np.log', (['(i + 2)'], {}), '(i + 2)\n', (470, 477), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from malcolm.modules.pmac import VelocityProfile
class TestPmacStatusPart(unittest.TestCase):
    # NOTE(review): the class name suggests a PMAC *status part* test, but every
    # test exercises pmac.VelocityProfile — presumably a copy/paste name; confirm
    # before renaming (test discovery relies on the Test* prefix only).
    def setUp(self):
        # No shared fixtures needed.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    @staticmethod
    def do_test_distance_range(v1, v2):
        """Sweep target distance d over [-100, 100) and check the profile
        reproduces each d exactly (fixed t=8, accel 2.0, v_max 10000)."""
        ds = np.arange(-100, 100, 0.5)
        for d in ds:
            t = 8
            profile = VelocityProfile(v1, v2, d, t, 2.0, 10000)
            profile.get_profile()
            d_res = profile.calculate_distance()
            assert np.isclose(
                d_res, d
            ), "Wrong d({}). Expected d {}, vm {}, v1 {}, v2 {}, t {}".format(
                d_res, d, profile.vm, v1, v2, t
            )
    @staticmethod
    def do_test_time_range(v1, v2, quantize=False):
        """Sweep profile time t and check distance 100 is preserved; with
        quantize=True, also check all segment times respect the 0.002 interval."""
        if quantize:
            # Deliberately awkward step/offset so quantization is exercised.
            ts = np.arange(0.0105, 20, 1.0007)
        else:
            ts = np.arange(0.01, 20, 0.1)
        for t in ts:
            d = 100
            profile = VelocityProfile(v1, v2, d, t, 2.0, 10, interval=0.002)
            profile.get_profile()
            if quantize:
                profile.quantize()
            d_res = profile.calculate_distance()
            assert np.isclose(
                d_res, 100
            ), "Wrong d({}). Expected d {}, vm {}, v1 {}, v2 {}, t {}".format(
                d_res, d, profile.vm, profile.v1, profile.v2, profile.tv2
            )
            if quantize and profile.t_total >= 0.002:
                # Quantized segments must each exceed the controller interval
                # (tm may legitimately be exactly 0 when there is no plateau).
                assert profile.t1 > 0.002
                assert profile.t2 > 0.002
                assert profile.tm > 0.002 or profile.tm == 0
    @staticmethod
    def do_test_acceleration_range(v1, v2, quantize=False):
        """Sweep acceleration over 10 decades (1e6 down past 1e-2) and check a
        tiny move of d=0.001 is reproduced; optionally check quantization."""
        a = 1000000
        while a > 0.001:
            d = 0.001
            profile = VelocityProfile(v1, v2, d, 0.1, a, 100, 10, interval=0.009)
            profile.get_profile()
            if quantize:
                profile.quantize()
            d_res = profile.calculate_distance()
            assert np.isclose(
                d_res, d
            ), "Wrong d({}). Expected d {}, vm {}, v1 {}, v2 {}, t {}".format(
                d_res, d, profile.vm, profile.v1, profile.v2, profile.tv2
            )
            if quantize and profile.t_total >= 0.009:
                assert profile.t1 > 0.009
                assert profile.t2 > 0.009
                assert profile.tm > 0.009 or profile.tm == 0
            a /= 10
    @staticmethod
    def do_test_v_max_range(v1, v2):
        """Sweep v_max over [4, 100) and check distance 100 is preserved."""
        ms = np.arange(4, 100, 3)
        for v_max in ms:
            d, t = 100, 8
            profile = VelocityProfile(v1, v2, d, t, 2.0, v_max)
            profile.get_profile()
            d_res = profile.calculate_distance()
            assert np.isclose(
                d_res, 100
            ), "Wrong d({}). Expected d {}, vm {}, v1 {}, v2 {}, t {}".format(
                d_res, d, profile.vm, profile.v1, profile.v2, profile.tv2
            )
    # The test_* methods below cover the four sign combinations of the
    # start/end velocities (zero/zero, +/+, -/+, +/-, -/-).
    def test_zero_zero(self):
        self.do_test_acceleration_range(0.0, 0.0, quantize=True)
        self.do_test_distance_range(0.0, 0.0)
        self.do_test_distance_range(0.0, 0.0)
        self.do_test_time_range(0.0, 0.0)
        self.do_test_time_range(0.0, 0.0, quantize=True)
        self.do_test_v_max_range(0.0, 0.0)
    def test_pos_pos(self):
        self.do_test_distance_range(4.0, 2.0)
        self.do_test_distance_range(400.0, 200.0)
        self.do_test_time_range(4.0, 2.0)
        self.do_test_time_range(4.0, 2.0, quantize=True)
        self.do_test_v_max_range(4.0, 2.0)
    def test_neg_pos(self):
        self.do_test_distance_range(-2.0, 2.0)
        self.do_test_distance_range(-200.0, 200.0)
        self.do_test_time_range(-2.0, 2.0)
        self.do_test_time_range(-2.0, 2.0, quantize=True)
        self.do_test_v_max_range(-2.0, 2.0)
    def test_pos_neg(self):
        self.do_test_distance_range(4.0, -4.0)
        self.do_test_distance_range(400.0, -400.0)
        self.do_test_time_range(4.0, -4.0)
        self.do_test_time_range(4.0, -4.0, quantize=True)
        self.do_test_v_max_range(4.0, -4.0)
    def test_neg_neg(self):
        self.do_test_distance_range(-4.0, -4.0)
        self.do_test_distance_range(-400.0, -400.0)
        self.do_test_time_range(-4.0, -4.0)
        self.do_test_time_range(-4.0, -4.0, quantize=True)
        self.do_test_v_max_range(-4.0, -4.0)
| [
"numpy.isclose",
"malcolm.modules.pmac.VelocityProfile",
"numpy.arange"
] | [((277, 302), 'numpy.arange', 'np.arange', (['(-100)', '(100)', '(0.5)'], {}), '(-100, 100, 0.5)\n', (286, 302), True, 'import numpy as np\n'), ((2449, 2469), 'numpy.arange', 'np.arange', (['(4)', '(100)', '(3)'], {}), '(4, 100, 3)\n', (2458, 2469), True, 'import numpy as np\n'), ((364, 405), 'malcolm.modules.pmac.VelocityProfile', 'VelocityProfile', (['v1', 'v2', 'd', 't', '(2.0)', '(10000)'], {}), '(v1, v2, d, t, 2.0, 10000)\n', (379, 405), False, 'from malcolm.modules.pmac import VelocityProfile\n'), ((508, 528), 'numpy.isclose', 'np.isclose', (['d_res', 'd'], {}), '(d_res, d)\n', (518, 528), True, 'import numpy as np\n'), ((795, 824), 'numpy.arange', 'np.arange', (['(0.0105)', '(20)', '(1.0007)'], {}), '(0.0105, 20, 1.0007)\n', (804, 824), True, 'import numpy as np\n'), ((856, 880), 'numpy.arange', 'np.arange', (['(0.01)', '(20)', '(0.1)'], {}), '(0.01, 20, 0.1)\n', (865, 880), True, 'import numpy as np\n'), ((944, 998), 'malcolm.modules.pmac.VelocityProfile', 'VelocityProfile', (['v1', 'v2', 'd', 't', '(2.0)', '(10)'], {'interval': '(0.002)'}), '(v1, v2, d, t, 2.0, 10, interval=0.002)\n', (959, 998), False, 'from malcolm.modules.pmac import VelocityProfile\n'), ((1161, 1183), 'numpy.isclose', 'np.isclose', (['d_res', '(100)'], {}), '(d_res, 100)\n', (1171, 1183), True, 'import numpy as np\n'), ((1734, 1793), 'malcolm.modules.pmac.VelocityProfile', 'VelocityProfile', (['v1', 'v2', 'd', '(0.1)', 'a', '(100)', '(10)'], {'interval': '(0.009)'}), '(v1, v2, d, 0.1, a, 100, 10, interval=0.009)\n', (1749, 1793), False, 'from malcolm.modules.pmac import VelocityProfile\n'), ((1956, 1976), 'numpy.isclose', 'np.isclose', (['d_res', 'd'], {}), '(d_res, d)\n', (1966, 1976), True, 'import numpy as np\n'), ((2543, 2584), 'malcolm.modules.pmac.VelocityProfile', 'VelocityProfile', (['v1', 'v2', 'd', 't', '(2.0)', 'v_max'], {}), '(v1, v2, d, t, 2.0, v_max)\n', (2558, 2584), False, 'from malcolm.modules.pmac import VelocityProfile\n'), ((2687, 2709), 'numpy.isclose', 
'np.isclose', (['d_res', '(100)'], {}), '(d_res, 100)\n', (2697, 2709), True, 'import numpy as np\n')] |
import numpy as np
class Var:
    """Var class is the base class for a variable in this automatic
    differentiation library. Called with a val of type int or float,
    and optional kwargs, namely derivative. Defining derivative
    overwrites the default seed derivative value of 1 when calling a
    new Var type. In order to get the reverse derivative, simply use
    revder.
    :return: Var object with val and der attributes
    :rtype: AD_Object.Var
    :example forward mode:
    >>> from src.autodiff.AD_Object import Var
    >>> x = Var(1, derivative=2)
    >>> print(x)
    Var(val=1, der=2)
    >>> x**2 + 2*x + 1
    Var(val=4, der=6)
    :example reverse mode:
    >>> x = Var(0.8)
    >>> y = Var(2.0)
    >>> a = x * y
    >>> a.rder = 1.0
    >>> print("∂a/∂x = {}".format(x.revder()))
    ∂a/∂x = 2.0
    """

    def __init__(self, val, **kwargs):
        """Store *val* as an array; the forward seed derivative defaults to 1.

        :param val: numeric value (int/float or array-like)
        :param kwargs: optional ``derivative`` (int or float) seed override
        """
        self.val = np.array(val)
        self.children = []  # (weight, child) pairs used by reverse mode
        self.rder = None  # cached reverse-mode derivative (set on the output node)
        if "derivative" in kwargs and (
            isinstance(kwargs["derivative"], int)
            or isinstance(kwargs["derivative"], float)
        ):
            self.der = kwargs["derivative"]
        else:
            self.der = np.ones(np.shape(self.val))
        self.args = kwargs

    def revder(self):
        """Return the reverse-mode derivative, accumulating from children.

        The output node's ``rder`` must be seeded (typically to 1.0) first.
        """
        if self.rder is None:
            self.rder = sum(weight * var.revder() for weight, var in self.children)
        return self.rder

    def __repr__(self):
        return f"Var(val={self.val}, der={self.der})"

    def __add__(self, other):
        """Return self + other (Var or number), propagating derivatives."""
        try:
            z = Var(self.val + other.val, derivative=self.der + other.der)
            self.children.append((1.0, z))
            other.children.append((1.0, z))
        except AttributeError:
            if isinstance(other, int) or isinstance(other, float):
                z = Var(self.val + other, derivative=self.der)
            else:
                raise ValueError(
                    "Please use a Var type or num type for operations on Var"
                )
        return z

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        """Return self - other (Var or number), propagating derivatives."""
        try:
            z = Var(self.val - other.val, derivative=self.der - other.der)
            self.children.append((1.0, z))
            other.children.append((-1.0, z))
        except AttributeError:
            if isinstance(other, int) or isinstance(other, float):
                z = Var(self.val - other, derivative=self.der)
            else:
                raise ValueError(
                    "Please use a Var type or num type for operations on Var"
                )
        return z

    def __rsub__(self, other):
        # other - self, with other promoted to a constant Var.
        if not (isinstance(other, int) or isinstance(other, float)):
            raise ValueError("Please use a Var type or num type for operations on Var")
        return Var(other, derivative=0).__sub__(self)

    def __mul__(self, other):
        """Return self * other using the product rule."""
        try:
            z = Var(
                self.val * other.val,
                derivative=(self.der * other.val + self.val * other.der),
            )
            self.children.append((other.val, z))
            other.children.append((self.val, z))
        except AttributeError:
            if isinstance(other, int) or isinstance(other, float):
                z = Var(self.val * other, derivative=self.der * other)
            else:
                raise ValueError(
                    "Please use a Var type or num type for operations on Var"
                )
        return z

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        """Return self / other using the quotient rule.

        :raises ValueError: when dividing by zero or by a non-Var/non-numeric.
        """
        try:
            if other.val == 0:
                raise ValueError("cannot divide by 0")
            z = Var(
                (self.val / other.val),
                derivative=(
                    (self.der * other.val - self.val * other.der) / other.val ** 2
                ),
            )
            self.children.append((1 / other.val, z))
            other.children.append((-1 * self.val / (other.val ** 2), z))
        except AttributeError:
            if isinstance(other, int) or isinstance(other, float):
                try:
                    z = Var((self.val / other), derivative=(self.der / other))
                except ZeroDivisionError:
                    raise ValueError("Cannot divide by 0")
            else:
                raise ValueError(
                    "Please use a Var type or num type for operations on Var"
                )
        return z

    def __rtruediv__(self, other):
        # other / self, with other promoted to a constant Var.
        if not (isinstance(other, int) or isinstance(other, float)):
            raise ValueError("Please use a Var type or num type for operations on Var")
        return Var(other, derivative=0).__truediv__(self)

    def __neg__(self):
        return self.__mul__(-1)

    def __pow__(self, other):
        """Return self ** other with the forward-mode derivative.

        Uses d(a^b) = b * a^(b-1) * a' + a^b * ln|a| * b', which covers
        constant, negative, and variable exponents (for constant exponents
        the ln-term vanishes because b' == 0).

        BUGFIX: previously any exponent <= 0 produced a derivative of 0
        (e.g. d/dx x**-1 came out as 0 instead of -x**-2); the general
        formula is now applied for all non-zero bases.

        NOTE: reverse-mode children are not recorded for powers, matching
        the original implementation.

        :raises ValueError: if 0 is raised to a non-positive power, or the
            exponent is not a Var/number.
        """
        # Promote a plain number to a constant Var (derivative 0) and retry.
        if isinstance(other, int) or isinstance(other, float):
            return self.__pow__(Var(other, derivative=0))
        try:
            if self.val == 0 and other.val <= 0:
                raise ValueError(f"Cannot get derivative of 0 raised to {other.val}")
            new_val = self.val ** other.val
            if self.val == 0:
                # Here other.val > 0 is guaranteed, so 0**other.val == 0 and
                # the ln-term vanishes; only the power-rule term remains.
                new_der = other.val * (self.val ** (other.val - 1)) * self.der
            else:
                new_der = (
                    other.val * (self.val ** (other.val - 1)) * self.der
                    + (self.val ** other.val) * np.log(np.abs(self.val)) * other.der
                )
        except AttributeError:
            raise ValueError("Please use a numtype or Var type for the power")
        return Var(new_val, derivative=new_der)

    def __rpow__(self, other):
        # other ** self, with other promoted to a constant Var.
        if not (isinstance(other, int) or isinstance(other, float)):
            raise ValueError("Please use a Var type or num type for operations on Var")
        return Var(other, derivative=0).__pow__(self)

    def __eq__(self, other):
        """Compare value and derivative (a number equals a Var with der 0).

        NOTE: defining __eq__ without __hash__ makes Var unhashable, so Var
        instances cannot be set members or dict keys.
        """
        if isinstance(other, int) or isinstance(other, float):
            return self.der == 0 and self.val == other
        elif isinstance(other, Var):
            return self.der == other.der and self.val == other.val
        else:
            raise ValueError("Please use a Var type or num type for operations on Var")

    def __ne__(self, other):
        return not self.__eq__(other)
| [
"numpy.shape",
"numpy.abs",
"numpy.array"
] | [((894, 907), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (902, 907), True, 'import numpy as np\n'), ((1205, 1223), 'numpy.shape', 'np.shape', (['self.val'], {}), '(self.val)\n', (1213, 1223), True, 'import numpy as np\n'), ((5823, 5839), 'numpy.abs', 'np.abs', (['self.val'], {}), '(self.val)\n', (5829, 5839), True, 'import numpy as np\n')] |
"""
@authors: <NAME>, <NAME>, <NAME>
@contact: <EMAIL>
REFERENCES:
[0] <NAME>, <NAME>, <NAME>,
"Mitigation of readout noise in near-term quantum devices
by classical post-processing based on detector tomography",
Quantum 4, 257 (2020)
[0.5] <NAME>, <NAME>, <NAME>, <NAME>,
"Modeling and mitigation of cross-talk effects in readout noise
with applications to the Quantum Approximate Optimization Algorithm",
Quantum 5, 464 (2021).
"""
from functions import ancillary_functions as anf
import numpy as np
from typing import Optional, Dict, List, Union
class GlobalNoiseMatrixCreator:
    """
    This is a class that, given noise matrices on clusters of qubits as a function
    of the input state of their neighbors, constructs the global noise model on
    all qubits.
    """
    def __init__(self,
                 noise_matrices_dictionary: Dict[
                     str, Union[np.ndarray, Dict[str, Dict[str, np.ndarray]]]],
                 clusters_list: List[List[int]],
                 neighborhoods: Dict[str, List[int]]
                 ) -> None:
        """Store cluster noise matrices and precompute label lists.

        :param noise_matrices_dictionary: maps a cluster key ('q0q1'-style)
            to either a noise matrix or a nested dict keyed by neighbor
            label and neighbor input state
        :param clusters_list: qubit indices of each cluster
        :param neighborhoods: maps a cluster key to its neighbor qubits
        """
        self._noise_matrices_dictionary = noise_matrices_dictionary
        self._clusters_list = clusters_list
        # Normalize empty neighbor lists to None.
        # NOTE(review): this mutates the caller's `neighborhoods` dict in place.
        for cluster_key, neighbors_list in neighborhoods.items():
            if neighbors_list is not None:
                if len(neighbors_list) == 0:
                    neighborhoods[cluster_key] = None
        self._neighborhoods = neighborhoods
        # Precompute 'qXqY...' label strings for each cluster and its neighbors.
        clusters_labels_list, neighbors_labels_list = [], []
        for cluster_index in range(len(self._clusters_list)):
            key_cluster = self.get_qubits_key(self._clusters_list[cluster_index])
            clusters_labels_list.append(key_cluster)
            neighbors_labels_list.append(self.get_qubits_key(self._neighborhoods[key_cluster]))
        self._clusters_labels_list = clusters_labels_list
        self._neighbors_labels_list = neighbors_labels_list
        self._matrix_elements_dictionary = {}
        self._number_of_qubits = sum([len(indices) for indices in clusters_list])
        self._global_noise_matrix = np.zeros((self._number_of_qubits, self._number_of_qubits),
                                             dtype=float)
    @staticmethod
    def get_qubits_key(list_of_qubits):
        """Return the canonical string key for a qubit list, e.g. [0, 2] -> 'q0q2'.

        Returns None when *list_of_qubits* is None (no neighbors).
        """
        if list_of_qubits is None:
            return None
        return 'q' + 'q'.join([str(s) for s in list_of_qubits])
    def update_labels_lists(self):
        """Recompute the cached cluster/neighbor label lists from current state.

        Call after `_clusters_list` or `_neighborhoods` changes.
        """
        clusters_labels_list, neighbors_labels_list = [], []
        for cluster_index in range(len(self._clusters_list)):
            key_cluster = self.get_qubits_key(self._clusters_list[cluster_index])
            clusters_labels_list.append(key_cluster)
            neighbors_labels_list.append(self.get_qubits_key(self._neighborhoods[key_cluster]))
        self._clusters_labels_list = clusters_labels_list
        self._neighbors_labels_list = neighbors_labels_list
    def compute_matrix_element(self,
                               input_state: str,
                               output_state: str):
        """
        Function that computes single global noise matrix element.
        The element is the product over clusters of the relevant cluster-matrix
        entries, with each cluster's matrix selected by its neighbors' input state
        (or the 'averaged' matrix when a cluster has no neighbors).
        :param input_state: bitstring denoting INPUT classical state
        :param output_state: bitstring denoting OUTPUT classical state
        """
        matrix_element = 1
        for cluster_index in range(len(self._clusters_list)):
            cluster_label_now = self._clusters_labels_list[cluster_index]
            neighbors_now = self._neighborhoods[cluster_label_now]
            neighbors_label_now = self._neighbors_labels_list[cluster_index]
            qubits_now = self._clusters_list[cluster_index]
            if neighbors_now is None:
                neighbors_input_state_now = 'averaged'
            else:
                neighbors_input_state_now = ''.join([input_state[s] for s in neighbors_now])
            # Restrict the global bitstrings to this cluster's qubits.
            cluster_input_state = ''.join([input_state[s] for s in qubits_now])
            cluster_output_state = ''.join([output_state[s] for s in qubits_now])
            # Select the cluster noise matrix; the dictionary may be nested by
            # neighbor label, keyed directly by neighbor state, or 'averaged'.
            if neighbors_label_now in self._noise_matrices_dictionary[cluster_label_now].keys():
                cluster_matrix = \
                    self._noise_matrices_dictionary[cluster_label_now][neighbors_label_now][
                        neighbors_input_state_now]
            else:
                if neighbors_now is None:
                    try:
                        cluster_matrix = self._noise_matrices_dictionary[cluster_label_now][
                            'averaged']
                    except(KeyError):
                        # Fall back to the empty-string key used by some producers.
                        cluster_matrix = self._noise_matrices_dictionary[cluster_label_now][
                            '']
                else:
                    cluster_matrix = self._noise_matrices_dictionary[cluster_label_now][
                        neighbors_input_state_now]
            # Bitstrings are interpreted big-endian as matrix indices.
            matrix_element *= cluster_matrix[int(cluster_output_state, 2),
                                             int(cluster_input_state, 2)]
        return matrix_element
    def compute_global_noise_matrix(self):
        """
        Construct the full 2^n x 2^n global noise matrix element by element.
        This method_name is faster than other one
        (compute_global_noise_matrix_old below).
        """
        number_of_qubits = self._number_of_qubits
        dimension = int(2 ** number_of_qubits)
        classical_register = anf.register_names_qubits(range(number_of_qubits))
        self._global_noise_matrix = np.zeros((dimension, dimension))
        for input_state_bitstring in classical_register:
            for output_state_bitstring in classical_register:
                self._global_noise_matrix[int(output_state_bitstring, 2),
                                          int(input_state_bitstring, 2)] = \
                    self.compute_matrix_element(input_state_bitstring, output_state_bitstring)
        return self._global_noise_matrix
    def compute_global_noise_matrix_old(self,
                                        clusters_list: Optional[List[List[int]]] = None,
                                        neighbors_of_clusters: Optional[List[List[int]]] = None):
        """Legacy construction of the global noise matrix.

        Accepts optional cluster/neighbor overrides; when omitted, the
        instance's cached lists are used (after refreshing the label caches).
        Kept for comparison with compute_global_noise_matrix.
        """
        updated_lists = False
        if clusters_list is None:
            if self._clusters_list is None:
                raise ValueError('Please provide clusters list.')
            else:
                self.update_labels_lists()
                updated_lists = True
                clusters_list = self._clusters_list
        if neighbors_of_clusters is None:
            if self._neighborhoods is None:
                raise ValueError('Please provide neighbors list')
            else:
                if not updated_lists:
                    self.update_labels_lists()
                neighbors_of_clusters = [self._neighborhoods[self.get_qubits_key(clust)] for clust in
                                         clusters_list]
        lambdas = [self._noise_matrices_dictionary[self.get_qubits_key(clust)] for clust in
                   clusters_list]
        number_of_qubits = sum([len(inds) for inds in clusters_list])
        d = int(2 ** number_of_qubits)
        big_lambda = np.zeros((d, d))
        for input_state_integer in range(d):
            input_state_bitstring = "{0:b}".format(input_state_integer).zfill(number_of_qubits)
            for output_state_integer in range(d):
                output_state_bitstring = "{0:b}".format(output_state_integer).zfill(number_of_qubits)
                element = 1
                for cluster_index in range(len(lambdas)):
                    qubits_now = clusters_list[cluster_index]
                    neighbours_now = neighbors_of_clusters[cluster_index]
                    if neighbours_now is not None:
                        input_state_neighbors = ''.join(
                            [input_state_bitstring[a] for a in neighbours_now])
                        neighbors_string = self.get_qubits_key(neighbours_now)
                        if neighbors_string in lambdas[cluster_index].keys():
                            lambda_of_interest = lambdas[cluster_index][neighbors_string][
                                input_state_neighbors]
                        else:
                            lambda_of_interest = lambdas[cluster_index][
                                input_state_neighbors]
                    else:
                        # No neighbors: use the averaged matrix (or '' fallback key).
                        try:
                            lambda_of_interest = lambdas[cluster_index]['averaged']
                        except(KeyError):
                            try:
                                lambda_of_interest = lambdas[cluster_index]['']
                            except(KeyError):
                                raise KeyError('Something wrong with averaged lambda')
                    small_string_ideal = ''.join(
                        [list(input_state_bitstring)[b] for b in qubits_now])
                    small_string_measured = ''.join(
                        [list(output_state_bitstring)[b] for b in qubits_now])
                    element *= lambda_of_interest[
                        int(small_string_measured, 2), int(small_string_ideal, 2)]
                big_lambda[output_state_integer, input_state_integer] = element
        return big_lambda
| [
"numpy.zeros"
] | [((2055, 2126), 'numpy.zeros', 'np.zeros', (['(self._number_of_qubits, self._number_of_qubits)'], {'dtype': 'float'}), '((self._number_of_qubits, self._number_of_qubits), dtype=float)\n', (2063, 2126), True, 'import numpy as np\n'), ((5306, 5338), 'numpy.zeros', 'np.zeros', (['(dimension, dimension)'], {}), '((dimension, dimension))\n', (5314, 5338), True, 'import numpy as np\n'), ((6976, 6992), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (6984, 6992), True, 'import numpy as np\n')] |
#!env bin
import gevent.monkey
gevent.monkey.patch_socket()
import argparse
import time
import sched
import gevent
import gevent.lock
from gevent import subprocess
from io import BytesIO
import numpy as np
import requests
import json
import sys
# Mine
import os
import common
#logger = common.getLogger(f"{os.path.basename(__file__).replace('.py', '')}")
import logging
# Target service for inference requests (overridden below for local runs).
URL = "http://frontend:5000"
#URL = "http://localhost:32788"
# Latency quantiles reported at the end of a run (median, p90, p95).
quantiles = [0.5, 0.9, 0.95]
response_times = []          # wall-clock duration of each request
responses = []               # parsed JSON response of each request
responses_by_model = {}      # model name -> list of parsed responses
# Serializes writes to the shared output file across greenlets.
output_semaphore = gevent.lock.Semaphore(value=1)
output_fid = sys.stdout     # replaced with a log file by main()
def runInference(id_num, model_name, data):
    """Issue one inference request, record its latency and parsed response.

    Appends to the module-level `responses`, `responses_by_model` and
    `response_times`, writes a progress dot, and logs the response line.
    NOTE(review): a JSON decode failure calls exit(8), killing the whole
    process mid-run — confirm that hard-fail is intended.
    """
    ts = time.time()
    # `data` is currently unused; the payload upload is commented out.
    response = requests.post(f"{URL}/infer/{model_name}", params={"id" : f"{id_num}"}) #, data={"data": data})
    te = time.time()
    try:
        response_json = json.loads(response.text)
        responses.append( response_json )
        sys.stdout.write('.')
        try:
            responses_by_model[model_name].append(response_json)
        except KeyError:
            # First response for this model.
            responses_by_model[model_name] = [response_json]
        recordResponse(response_json)
    except json.decoder.JSONDecodeError:
        logging.error(f"request for {model_name} failed")
        logging.error(response.text)
        sys.stdout.write('X')
        exit(8)
    sys.stdout.flush()
    response_times.append( (te-ts) )
def recordResponse(response):
    """Append one response as a JSON line to the shared output file.

    Guarded by the module-level semaphore so concurrent greenlets do not
    interleave their writes.
    """
    output_semaphore.acquire()
    print(json.dumps(response), file=output_fid, flush=True)
    output_semaphore.release()
def getParser(add_help=True, include_parents=True):
    """Build the CLI argument parser for the load tester.

    :param add_help: forwarded to argparse (disable when used as a parent)
    :param include_parents: inherit the shared options from common.getParser
    """
    parents = [common.getParser(add_help=False)] if include_parents else []
    parser = argparse.ArgumentParser(add_help=add_help, parents=parents)
    parser.add_argument('--restrict_requests', action="store_true")
    parser.add_argument('--identifier', default="test")
    return parser
def main():
    """Replay a workload file against the service and report latency stats.

    Each workload line is `id time model`; requests are either issued
    sequentially (--restrict_requests) or scheduled via gevent at their
    recorded offsets. Prints overall/per-model latency and queue-delay
    quantiles at the end.
    """
    args = getParser().parse_args()
    data = common.getData()
    # NOTE(review): `workload` is never used — candidate for removal.
    workload = []
    with open(args.workload_file, 'r') as workload_fid:
        workload_events = [tuple(s.strip().split(' ')) for s in workload_fid.readlines()]
    # Parse each event into (id, offset_seconds, model_name).
    workload_events = list(map( (lambda e: (e[0], float(e[1]), e[2])), workload_events ))
    global output_fid
    output_fid = open(os.path.join("/etc/results/", f"{args.identifier}.log"), 'w')
    if args.restrict_requests:
        # Sequential mode: event offsets are ignored, one request at a time.
        for id_num, event_time, model_name in workload_events:
            runInference(id_num, model_name, data)
    else:
        threads = []
        for id_num, event_time, model_name in workload_events:
            threads.append(gevent.spawn_later(event_time, runInference, id_num, model_name, data))
        gevent.joinall(threads)
    sys.stdout.write('\n')
    sys.stdout.flush()
    #print(responses)
    #print(np.quantile(np.array(response_times), [0.5, 0.9, 0.95]))
    print("Overall latency")
    print(np.quantile(np.array([r["overall_latency"] for r in responses]), quantiles))
    for model_name, model_responses in responses_by_model.items():
        print(f"{model_name} : {np.quantile(np.array([r['overall_latency'] for r in model_responses]), quantiles)}")
    for q in quantiles:
        # Mean of the per-model q-quantiles (not the global quantile).
        print(f"{q} : {np.mean(np.array([np.quantile(np.array([r['overall_latency'] for r in model_responses]), [q]) for model_responses in responses_by_model.values()]))}")
    print("")
    print("Queue delay")
    print(np.quantile(np.array([r["queue_delay"] for r in responses]), quantiles))
    for model_name, model_responses in responses_by_model.items():
        print(f"{model_name} : {np.quantile(np.array([r['queue_delay'] for r in model_responses]), quantiles)}")
    for q in quantiles:
        print(f"{q} : {np.mean(np.array([np.quantile(np.array([r['queue_delay'] for r in model_responses]), [q]) for model_responses in responses_by_model.values()]))}")
    print(f"Average: {np.average(np.array([r['queue_delay'] for r in responses]))}")
    output_fid.close()
    return
if __name__ == '__main__':
main() | [
"gevent.lock.Semaphore",
"sys.stdout.write",
"logging.error",
"json.loads",
"json.dumps",
"time.time",
"common.getData",
"sys.stdout.flush",
"numpy.array",
"gevent.monkey.patch_socket",
"gevent.spawn_later",
"common.getParser",
"requests.post",
"os.path.join",
"gevent.joinall"
] | [((33, 61), 'gevent.monkey.patch_socket', 'gevent.monkey.patch_socket', ([], {}), '()\n', (59, 61), False, 'import gevent\n'), ((551, 581), 'gevent.lock.Semaphore', 'gevent.lock.Semaphore', ([], {'value': '(1)'}), '(value=1)\n', (572, 581), False, 'import gevent\n'), ((663, 674), 'time.time', 'time.time', ([], {}), '()\n', (672, 674), False, 'import time\n'), ((688, 758), 'requests.post', 'requests.post', (['f"""{URL}/infer/{model_name}"""'], {'params': "{'id': f'{id_num}'}"}), "(f'{URL}/infer/{model_name}', params={'id': f'{id_num}'})\n", (701, 758), False, 'import requests\n'), ((791, 802), 'time.time', 'time.time', ([], {}), '()\n', (800, 802), False, 'import time\n'), ((1264, 1282), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1280, 1282), False, 'import sys\n'), ((1867, 1883), 'common.getData', 'common.getData', ([], {}), '()\n', (1881, 1883), False, 'import common\n'), ((2584, 2606), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2600, 2606), False, 'import sys\n'), ((2609, 2627), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2625, 2627), False, 'import sys\n'), ((830, 855), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (840, 855), False, 'import json\n'), ((898, 919), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (914, 919), False, 'import sys\n'), ((2176, 2231), 'os.path.join', 'os.path.join', (['"""/etc/results/"""', 'f"""{args.identifier}.log"""'], {}), "('/etc/results/', f'{args.identifier}.log')\n", (2188, 2231), False, 'import os\n'), ((2558, 2581), 'gevent.joinall', 'gevent.joinall', (['threads'], {}), '(threads)\n', (2572, 2581), False, 'import gevent\n'), ((1141, 1190), 'logging.error', 'logging.error', (['f"""request for {model_name} failed"""'], {}), "(f'request for {model_name} failed')\n", (1154, 1190), False, 'import logging\n'), ((1195, 1223), 'logging.error', 'logging.error', (['response.text'], {}), '(response.text)\n', (1208, 1223), 
False, 'import logging\n'), ((1228, 1249), 'sys.stdout.write', 'sys.stdout.write', (['"""X"""'], {}), "('X')\n", (1244, 1249), False, 'import sys\n'), ((2767, 2818), 'numpy.array', 'np.array', (["[r['overall_latency'] for r in responses]"], {}), "([r['overall_latency'] for r in responses])\n", (2775, 2818), True, 'import numpy as np\n'), ((3263, 3310), 'numpy.array', 'np.array', (["[r['queue_delay'] for r in responses]"], {}), "([r['queue_delay'] for r in responses])\n", (3271, 3310), True, 'import numpy as np\n'), ((1400, 1420), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1410, 1420), False, 'import json\n'), ((2482, 2552), 'gevent.spawn_later', 'gevent.spawn_later', (['event_time', 'runInference', 'id_num', 'model_name', 'data'], {}), '(event_time, runInference, id_num, model_name, data)\n', (2500, 2552), False, 'import gevent\n'), ((1597, 1629), 'common.getParser', 'common.getParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (1613, 1629), False, 'import common\n'), ((3726, 3773), 'numpy.array', 'np.array', (["[r['queue_delay'] for r in responses]"], {}), "([r['queue_delay'] for r in responses])\n", (3734, 3773), True, 'import numpy as np\n'), ((2937, 2994), 'numpy.array', 'np.array', (["[r['overall_latency'] for r in model_responses]"], {}), "([r['overall_latency'] for r in model_responses])\n", (2945, 2994), True, 'import numpy as np\n'), ((3429, 3482), 'numpy.array', 'np.array', (["[r['queue_delay'] for r in model_responses]"], {}), "([r['queue_delay'] for r in model_responses])\n", (3437, 3482), True, 'import numpy as np\n'), ((3084, 3141), 'numpy.array', 'np.array', (["[r['overall_latency'] for r in model_responses]"], {}), "([r['overall_latency'] for r in model_responses])\n", (3092, 3141), True, 'import numpy as np\n'), ((3572, 3625), 'numpy.array', 'np.array', (["[r['queue_delay'] for r in model_responses]"], {}), "([r['queue_delay'] for r in model_responses])\n", (3580, 3625), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""
import argparse
import os
import numpy as np
import torch
from src.args import ArgumentParserRGBDSegmentation
from src.build_model import build_model
from src.prepare_data import prepare_data
if __name__ == '__main__':
# arguments
parser = ArgumentParserRGBDSegmentation(
description='Efficient RGBD Indoor Sematic Segmentation (ONNX Export)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_common_args()
parser.add_argument('--onnx_opset_version', type=int, default=11,
help='Different versions lead to different results but'
'not all versions are supported for a following'
'TensorRT conversion.')
parser.add_argument('--model_output_name', type=str, default='model',
help='Name for the onnx model that will be saved.')
args = parser.parse_args()
args.pretrained_on_imagenet = False
dataset, _ = prepare_data(args, with_input_orig=True)
model, device = build_model(args, dataset.n_classes_without_void)
os.makedirs('./onnx_models', exist_ok=True)
# load weights
if args.last_ckpt:
checkpoint = torch.load(args.last_ckpt,
map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'], strict=True)
model.eval()
model.to(device)
rgb = np.random.random(size=(1, 3, args.height, args.width))
rgb = rgb.astype(np.float32)
depth = np.random.random(size=(1, 1, args.height, args.width))
depth = depth.astype(np.float32)
onnx_file_path = os.path.join('onnx_models',
f'{args.model_output_name}.onnx')
rgb_torch = torch.from_numpy(rgb)
depth_torch = torch.from_numpy(depth)
rgb_torch = rgb_torch.to(device)
depth_torch = depth_torch.to(device)
if args.modality == 'rgbd':
# rgbd
inp = (rgb_torch, depth_torch)
input_names = ['rgb', 'depth']
elif args.modality == 'rgb':
# rgb
inp = rgb_torch
input_names = ['rgb']
else:
# depth
inp = depth_torch
input_names = ['depth']
torch.onnx.export(model,
inp,
onnx_file_path,
export_params=True,
input_names=input_names,
output_names=['output'],
do_constant_folding=True,
verbose=False,
opset_version=args.onnx_opset_version)
| [
"os.makedirs",
"src.args.ArgumentParserRGBDSegmentation",
"src.build_model.build_model",
"torch.onnx.export",
"torch.load",
"numpy.random.random",
"os.path.join",
"src.prepare_data.prepare_data",
"torch.from_numpy"
] | [((349, 516), 'src.args.ArgumentParserRGBDSegmentation', 'ArgumentParserRGBDSegmentation', ([], {'description': '"""Efficient RGBD Indoor Sematic Segmentation (ONNX Export)"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Efficient RGBD Indoor Sematic Segmentation (ONNX Export)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (379, 516), False, 'from src.args import ArgumentParserRGBDSegmentation\n'), ((1073, 1113), 'src.prepare_data.prepare_data', 'prepare_data', (['args'], {'with_input_orig': '(True)'}), '(args, with_input_orig=True)\n', (1085, 1113), False, 'from src.prepare_data import prepare_data\n'), ((1134, 1183), 'src.build_model.build_model', 'build_model', (['args', 'dataset.n_classes_without_void'], {}), '(args, dataset.n_classes_without_void)\n', (1145, 1183), False, 'from src.build_model import build_model\n'), ((1189, 1232), 'os.makedirs', 'os.makedirs', (['"""./onnx_models"""'], {'exist_ok': '(True)'}), "('./onnx_models', exist_ok=True)\n", (1200, 1232), False, 'import os\n'), ((1518, 1572), 'numpy.random.random', 'np.random.random', ([], {'size': '(1, 3, args.height, args.width)'}), '(size=(1, 3, args.height, args.width))\n', (1534, 1572), True, 'import numpy as np\n'), ((1618, 1672), 'numpy.random.random', 'np.random.random', ([], {'size': '(1, 1, args.height, args.width)'}), '(size=(1, 1, args.height, args.width))\n', (1634, 1672), True, 'import numpy as np\n'), ((1732, 1793), 'os.path.join', 'os.path.join', (['"""onnx_models"""', 'f"""{args.model_output_name}.onnx"""'], {}), "('onnx_models', f'{args.model_output_name}.onnx')\n", (1744, 1793), False, 'import os\n'), ((1844, 1865), 'torch.from_numpy', 'torch.from_numpy', (['rgb'], {}), '(rgb)\n', (1860, 1865), False, 'import torch\n'), ((1884, 1907), 'torch.from_numpy', 'torch.from_numpy', (['depth'], {}), '(depth)\n', (1900, 1907), False, 'import torch\n'), ((2303, 2507), 'torch.onnx.export', 'torch.onnx.export', (['model', 'inp', 
'onnx_file_path'], {'export_params': '(True)', 'input_names': 'input_names', 'output_names': "['output']", 'do_constant_folding': '(True)', 'verbose': '(False)', 'opset_version': 'args.onnx_opset_version'}), "(model, inp, onnx_file_path, export_params=True,\n input_names=input_names, output_names=['output'], do_constant_folding=\n True, verbose=False, opset_version=args.onnx_opset_version)\n", (2320, 2507), False, 'import torch\n'), ((1297, 1366), 'torch.load', 'torch.load', (['args.last_ckpt'], {'map_location': '(lambda storage, loc: storage)'}), '(args.last_ckpt, map_location=lambda storage, loc: storage)\n', (1307, 1366), False, 'import torch\n')] |
"""test_checks.py
This file is part of the test suite for keras2c
Implements tests for the checks run on the model before conversion
"""
#!/usr/bin/env python3
import unittest
import tensorflow.keras as keras
from keras2c import keras2c_main
import subprocess
import time
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>, https://github.com/f0uriest/keras2c"
__email__ = "<EMAIL>"
class TestChecks(unittest.TestCase):
"""tests for model validity checking"""
def test_is_model(self):
model = np.arange(10)
name = 'foo'
self.assertRaises(ValueError, keras2c_main.k2c, model, name)
def test_is_valid_cname(self):
inshp = (10, 8)
name = 'foobar'
a = keras.layers.Input(inshp, name='f/oo')
b = keras.layers.Dense(10)(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
name = '2foobar'
a = keras.layers.Input(inshp)
b = keras.layers.Dense(10)(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
def test_supported_layers(self):
inshp = (10, 8)
name = 'foobar'
a = keras.layers.Input(inshp)
b = keras.layers.Lambda(lambda x: x ** 2)(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
def test_activation_supported(self):
inshp = (10, 8)
name = 'foobar'
a = keras.layers.Input(inshp)
b = keras.layers.LSTM(10, activation='elu',
recurrent_activation='selu')(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
class TestConfigSupported(unittest.TestCase):
def test_rnn_config_supported(self):
inshp = (20, 10, 8)
name = 'foobar'
a = keras.layers.Input(batch_shape=inshp)
b = keras.layers.LSTM(10, return_state=True,
stateful=True)(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
def test_shared_axes(self):
inshp = (10, 8, 12)
name = 'foobar'
a = keras.layers.Input(inshp)
b = keras.layers.PReLU(shared_axes=[1, 2])(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
def test_data_format(self):
inshp = (8, 12)
name = 'foobar'
a = keras.layers.Input(inshp)
b = keras.layers.Conv1D(filters=10, kernel_size=2,
data_format='channels_first')(a)
model = keras.models.Model(inputs=a, outputs=b)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
def test_broadcast_merge(self):
inshp1 = (12,)
inshp2 = (10, 12)
name = 'foobar'
a = keras.layers.Input(inshp1)
b = keras.layers.Input(inshp2)
c = keras.layers.Add()([a, b])
model = keras.models.Model(inputs=[a, b], outputs=c)
self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
# keras itself doesnt support multiple axes for batchnorm, but tensorflow.keras does
# def test_batch_norm_axis(self):
# inshp = (8, 12, 16)
# name = 'foobar'
# axis = (2, 3)
# a = keras.layers.Input(inshp)
# b = keras.layers.BatchNormalization(axis=axis)(a)
# model = keras.models.Model(inputs=a, outputs=b)
# self.assertRaises(AssertionError, keras2c_main.k2c, model, name)
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.PReLU",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.keras.models.Model",
"numpy.arange",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Add",
"tens... | [((317, 355), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (353, 355), True, 'import tensorflow as tf\n'), ((654, 667), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (663, 667), True, 'import numpy as np\n'), ((854, 892), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp'], {'name': '"""f/oo"""'}), "(inshp, name='f/oo')\n", (872, 892), True, 'import tensorflow.keras as keras\n'), ((947, 986), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (965, 986), True, 'import tensorflow.keras as keras\n'), ((1098, 1123), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp'], {}), '(inshp)\n', (1116, 1123), True, 'import tensorflow.keras as keras\n'), ((1178, 1217), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (1196, 1217), True, 'import tensorflow.keras as keras\n'), ((1389, 1414), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp'], {}), '(inshp)\n', (1407, 1414), True, 'import tensorflow.keras as keras\n'), ((1484, 1523), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (1502, 1523), True, 'import tensorflow.keras as keras\n'), ((1699, 1724), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp'], {}), '(inshp)\n', (1717, 1724), True, 'import tensorflow.keras as keras\n'), ((1855, 1894), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (1873, 1894), True, 'import tensorflow.keras as keras\n'), ((2122, 2159), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': 'inshp'}), '(batch_shape=inshp)\n', (2140, 2159), True, 'import tensorflow.keras as keras\n'), ((2277, 2316), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], 
{'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (2295, 2316), True, 'import tensorflow.keras as keras\n'), ((2487, 2512), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp'], {}), '(inshp)\n', (2505, 2512), True, 'import tensorflow.keras as keras\n'), ((2583, 2622), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (2601, 2622), True, 'import tensorflow.keras as keras\n'), ((2789, 2814), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp'], {}), '(inshp)\n', (2807, 2814), True, 'import tensorflow.keras as keras\n'), ((2955, 2994), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'a', 'outputs': 'b'}), '(inputs=a, outputs=b)\n', (2973, 2994), True, 'import tensorflow.keras as keras\n'), ((3190, 3216), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp1'], {}), '(inshp1)\n', (3208, 3216), True, 'import tensorflow.keras as keras\n'), ((3229, 3255), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['inshp2'], {}), '(inshp2)\n', (3247, 3255), True, 'import tensorflow.keras as keras\n'), ((3311, 3355), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': '[a, b]', 'outputs': 'c'}), '(inputs=[a, b], outputs=c)\n', (3329, 3355), True, 'import tensorflow.keras as keras\n'), ((905, 927), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), '(10)\n', (923, 927), True, 'import tensorflow.keras as keras\n'), ((1136, 1158), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), '(10)\n', (1154, 1158), True, 'import tensorflow.keras as keras\n'), ((1427, 1464), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (1446, 1464), True, 'import tensorflow.keras as keras\n'), ((1737, 1805), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['(10)'], {'activation': '"""elu"""', 'recurrent_activation': '"""selu"""'}), "(10, 
activation='elu', recurrent_activation='selu')\n", (1754, 1805), True, 'import tensorflow.keras as keras\n'), ((2172, 2227), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['(10)'], {'return_state': '(True)', 'stateful': '(True)'}), '(10, return_state=True, stateful=True)\n', (2189, 2227), True, 'import tensorflow.keras as keras\n'), ((2525, 2563), 'tensorflow.keras.layers.PReLU', 'keras.layers.PReLU', ([], {'shared_axes': '[1, 2]'}), '(shared_axes=[1, 2])\n', (2543, 2563), True, 'import tensorflow.keras as keras\n'), ((2827, 2903), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(10)', 'kernel_size': '(2)', 'data_format': '"""channels_first"""'}), "(filters=10, kernel_size=2, data_format='channels_first')\n", (2846, 2903), True, 'import tensorflow.keras as keras\n'), ((3268, 3286), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (3284, 3286), True, 'import tensorflow.keras as keras\n')] |
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
def plotFunction(x, y, z, title):
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.suptitle(title)
fig.colorbar(surf, shrink=0.5, aspect=5)
def plotSave(x, y, z, path, title=''):
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.suptitle(title)
fig.colorbar(surf, shrink=0.5, aspect=5)
path = path+'/'+title+'.pdf'
plt.savefig(path)
def FrankeFunctionWithNoise(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
noise = np.random.normal(0, 0.1, len(x)*len(x))
noise = noise.reshape(len(x),len(x))
return term1 + term2 + term3 + term4 + noise
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
def MSE(y_pred, y_model):
return np.mean((y_pred - y_model)**2)
poly = 7
N = 50 #number of data
x = np.linspace(0, 1, N)
y = np.linspace(0, 1, N)
x_mesh, y_mesh = np.meshgrid(x,y)
z = FrankeFunctionWithNoise(x_mesh, y_mesh)
x_y = np.empty((len(x)*len(x), 2))
x_y[:, 0] = x_mesh.ravel()
x_y[:, 1] = y_mesh.ravel()
scaler = StandardScaler()
scaler.fit(x_y)
x_y = scaler.transform(x_y)
x_y_train, x_y_test, z_train, z_test = train_test_split(x_y, z.ravel(), test_size=0.2)
X_train = create_X(x_y_train[:, 0], x_y_train[:, 1], poly )
X_test = create_X(x_y_test[:, 0], x_y_test[:, 1], poly )
X = create_X(x_y[:, 0], x_y[:, 1], poly)
| [
"numpy.meshgrid",
"sklearn.preprocessing.StandardScaler",
"numpy.ravel",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.ticker.LinearLocator",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"numpy.exp",
"matplotlib.pyplot.savefig"
] | [((1869, 1889), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1880, 1889), True, 'import numpy as np\n'), ((1894, 1914), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1905, 1914), True, 'import numpy as np\n'), ((1932, 1949), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1943, 1949), True, 'import numpy as np\n'), ((2094, 2110), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2108, 2110), False, 'from sklearn.preprocessing import StandardScaler\n'), ((288, 300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (298, 300), True, 'import matplotlib.pyplot as plt\n'), ((651, 663), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (661, 663), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1018), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (1012, 1018), True, 'import matplotlib.pyplot as plt\n'), ((1606, 1621), 'numpy.ones', 'np.ones', (['(N, l)'], {}), '((N, l))\n', (1613, 1621), True, 'import numpy as np\n'), ((1799, 1831), 'numpy.mean', 'np.mean', (['((y_pred - y_model) ** 2)'], {}), '((y_pred - y_model) ** 2)\n', (1806, 1831), True, 'import numpy as np\n'), ((451, 468), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (464, 468), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((503, 530), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (521, 530), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((814, 831), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (827, 831), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((866, 893), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (884, 893), False, 'from matplotlib.ticker import LinearLocator, 
FormatStrFormatter\n'), ((1071, 1131), 'numpy.exp', 'np.exp', (['(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)'], {}), '(-(0.25 * (9 * x - 2) ** 2) - 0.25 * (9 * y - 2) ** 2)\n', (1077, 1131), True, 'import numpy as np\n'), ((1135, 1187), 'numpy.exp', 'np.exp', (['(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))'], {}), '(-(9 * x + 1) ** 2 / 49.0 - 0.1 * (9 * y + 1))\n', (1141, 1187), True, 'import numpy as np\n'), ((1192, 1249), 'numpy.exp', 'np.exp', (['(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)'], {}), '(-(9 * x - 7) ** 2 / 4.0 - 0.25 * (9 * y - 3) ** 2)\n', (1198, 1249), True, 'import numpy as np\n'), ((1253, 1297), 'numpy.exp', 'np.exp', (['(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)'], {}), '(-(9 * x - 4) ** 2 - (9 * y - 7) ** 2)\n', (1259, 1297), True, 'import numpy as np\n'), ((1490, 1501), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (1498, 1501), True, 'import numpy as np\n'), ((1514, 1525), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (1522, 1525), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from threading import Lock
import cv_bridge
import glob
import message_filters
import numpy as np
import os.path as osp
import rospy
import skimage.draw
import skimage.morphology
import tf
import yaml
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseArray
from jsk_recognition_msgs.msg import BoundingBox
from jsk_recognition_msgs.msg import BoundingBoxArray
from jsk_topic_tools import ConnectionBasedTransport
from sensor_msgs.msg import Image
from std_srvs.srv import SetBool
from std_srvs.srv import SetBoolResponse
import grasp_fusion_lib
from grasp_fusion_lib.contrib.grasp_fusion.utils import get_primitives_poses
class PrimitiveMatching(ConnectionBasedTransport):
def __init__(self):
super(PrimitiveMatching, self).__init__()
self.br = cv_bridge.CvBridge()
self.instance_bg_label = rospy.get_param('~instance_bg_label')
self.heightmap_frame = rospy.get_param('~heightmap_frame')
# Size[m] of each height map pixel
self.voxel_size = rospy.get_param('~voxel_size')
self.cluster_tolerance = rospy.get_param('~cluster_tolerance', 0.02)
self.cluster_max_size = rospy.get_param('~cluster_max_size')
self.cluster_min_size = rospy.get_param('~cluster_min_size')
self.prob_threshold = rospy.get_param('~prob_threshold', 0.5)
self.reliable_pts_ratio = rospy.get_param('~reliable_pts_ratio', 0.25)
# Directory of grasp primitives
self.primitive_dir = rospy.get_param('~primitive_dir')
self.primitives = []
if not osp.isdir(self.primitive_dir):
err = 'Input primitive_dir is not directory: %s' \
% self.primitive_dir
rospy.logfatal(err)
rospy.signal_shutdown(err)
return
filenames = sorted(glob.glob(self.primitive_dir + "/*"))
for fname in filenames:
with open(fname) as f:
self.primitives.append(yaml.load(f))
# ROS publishers
self.pubs_poses = []
self.pubs_boxes = []
for prim in self.primitives:
self.pubs_poses.append(
self.advertise('~output/' + prim['label'] + '/poses',
PoseArray, queue_size=1))
self.pubs_boxes.append(
self.advertise('~output/' + prim['label'] + '/boxes',
BoundingBoxArray, queue_size=1))
self.pub_debug = self.advertise('~output/debug', Image, queue_size=1)
self.lock = Lock()
self.ignore_ins = False
self.srv_ignore_ins = rospy.Service(
'~ignore_instance', SetBool, self.ignore_ins_cb)
def subscribe(self):
self.sub_rgb = message_filters.Subscriber(
'~input/rgb', Image, queue_size=1, buff_size=2**24
)
self.sub_depth = message_filters.Subscriber(
'~input/depth', Image, queue_size=1, buff_size=2**24
)
self.sub_lbl_ins = message_filters.Subscriber(
'~input/label_instance', Image, queue_size=1, buff_size=2**24
)
self.sub_prob_pinch_aff = message_filters.Subscriber(
'~input/prob_pinch_affordance', Image,
queue_size=1, buff_size=2**24
)
self.sub_prob_pinch_sole_aff = message_filters.Subscriber(
'~input/prob_pinch_sole_affordance', Image,
queue_size=1, buff_size=2**24
)
self.sub_prob_suc_aff = message_filters.Subscriber(
'~input/prob_suction_affordance', Image,
queue_size=1, buff_size=2**24
)
sync = message_filters.TimeSynchronizer([
self.sub_rgb,
self.sub_depth,
self.sub_lbl_ins,
self.sub_prob_pinch_aff,
self.sub_prob_pinch_sole_aff,
self.sub_prob_suc_aff
], queue_size=100)
sync.registerCallback(self._cb)
def unsubscribe(self):
self.sub_depth.unregister()
self.sub_lbl_ins.unregister()
self.sub_prob_pinch_aff.unregister()
self.sub_prob_pinch_sole_aff.unregister()
self.sub_prob_suc_aff.unregister()
def _cb(
self,
imgmsg,
depthmsg,
lbl_ins_msg,
prob_pinch_aff_msg,
prob_pinch_sole_aff_msg,
prob_suc_aff_msg,
):
img = self.br.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')
depth = self.br.imgmsg_to_cv2(depthmsg, desired_encoding='32FC1')
lbl_ins = self.br.imgmsg_to_cv2(
lbl_ins_msg, desired_encoding='passthrough'
)
prob_pinch_aff = self.br.imgmsg_to_cv2(
prob_pinch_aff_msg, desired_encoding='passthrough'
)
prob_pinch_sole_aff = self.br.imgmsg_to_cv2(
prob_pinch_sole_aff_msg, desired_encoding='passthrough'
)
prob_suc_aff = self.br.imgmsg_to_cv2(
prob_suc_aff_msg, desired_encoding='passthrough'
)
with self.lock:
if self.ignore_ins:
lbl_ins = np.ones((lbl_ins.shape[0], lbl_ins.shape[1]),
dtype=lbl_ins.dtype)
prim_posess = get_primitives_poses(
self.primitives,
depth,
[prob_pinch_aff, prob_pinch_sole_aff, prob_suc_aff],
['pinch', 'pinch_sole', 'suction'],
self.cluster_tolerance,
self.cluster_max_size,
self.cluster_min_size,
voxel_size=self.voxel_size,
instance_label=lbl_ins,
instance_bg_label=self.instance_bg_label,
prob_threshold=self.prob_threshold,
reliable_pts_ratio=self.reliable_pts_ratio,
)
# Correction values for padding in get_heightmap
corr_x_m = 10 * self.voxel_size
corr_y_m = 12 * self.voxel_size
for i, poses in enumerate(prim_posess):
poses_msg = PoseArray()
poses_msg.header.stamp = depthmsg.header.stamp
poses_msg.header.frame_id = self.heightmap_frame
bboxes_msg = BoundingBoxArray()
bboxes_msg.header.stamp = depthmsg.header.stamp
bboxes_msg.header.frame_id = self.heightmap_frame
for pose in poses:
# Pose
pos_xy_pix = np.round(pose[1]).astype(int)
pos_xy_m = pose[1] * self.voxel_size
rad = np.radians(pose[2])
quat = tf.transformations.quaternion_about_axis(
rad, (0, 0, 1)
)
pos_z_m = depth[pos_xy_pix[1], pos_xy_pix[0]]
pose_msg = Pose()
pose_msg.position.x = pos_xy_m[0] - corr_x_m
pose_msg.position.y = pos_xy_m[1] - corr_y_m
pose_msg.position.z = pos_z_m
pose_msg.orientation.x = quat[0]
pose_msg.orientation.y = quat[1]
pose_msg.orientation.z = quat[2]
pose_msg.orientation.w = quat[3]
poses_msg.poses.append(pose_msg)
# Bounding box of instance
ins_mask = (lbl_ins == pose[0]) * (depth > 0)
# Denoise mask
skimage.morphology.remove_small_objects(
ins_mask, min_size=50, connectivity=1, in_place=True)
# array([[y, x], [y, x], ...])
ins_pts = np.array(np.where(ins_mask)).T
# array([[x, y], [x, y], ...])
pts_xy = ins_pts[:, [1, 0]]
rot = np.array([[np.cos(rad), -np.sin(rad)],
[np.sin(rad), np.cos(rad)]])
pts_aligned = np.dot(pts_xy, rot)
pts_center = np.mean(pts_xy, axis=0) * self.voxel_size
ins_depth = depth[ins_mask]
pts_center_z \
= (np.max(ins_depth) + np.min(ins_depth)) / 2
bbox_msg = BoundingBox()
bbox_msg.header.stamp = depthmsg.header.stamp
bbox_msg.header.frame_id = self.heightmap_frame
xs = pts_aligned[:, 0]
bbox_msg.dimensions.x \
= (np.max(xs) - np.min(xs)) * self.voxel_size
ys = pts_aligned[:, 1]
bbox_msg.dimensions.y \
= (np.max(ys) - np.min(ys)) * self.voxel_size
bbox_msg.dimensions.z \
= np.max(depth[ins_mask]) - np.min(depth[ins_mask])
bbox_msg.pose.position.x = pts_center[0] - corr_x_m
bbox_msg.pose.position.y = pts_center[1] - corr_y_m
bbox_msg.pose.position.z = pts_center_z
bbox_msg.pose.orientation = pose_msg.orientation
bboxes_msg.boxes.append(bbox_msg)
self.pubs_poses[i].publish(poses_msg)
self.pubs_boxes[i].publish(bboxes_msg)
# Publish image for debug
vizs = []
vizs.append(img)
vizs.append(grasp_fusion_lib.image.colorize_depth(
depth, min_value=0, max_value=0.3))
vizs.append(
grasp_fusion_lib.image.label2rgb(lbl_ins + 1, img, alpha=0.7)
)
viz = grasp_fusion_lib.image.colorize_heatmap(prob_suc_aff)
viz = grasp_fusion_lib.image.overlay_color_on_mono(
img_color=viz, img_mono=img, alpha=0.7
)
vizs.append(viz)
for c in range(prob_pinch_aff.shape[2]):
prob_c = prob_pinch_aff[:, :, c]
viz = grasp_fusion_lib.image.colorize_heatmap(prob_c)
viz = grasp_fusion_lib.image.overlay_color_on_mono(
img_color=viz, img_mono=img, alpha=0.7
)
vizs.append(viz)
for c in range(prob_pinch_sole_aff.shape[2]):
prob_c = prob_pinch_sole_aff[:, :, c]
viz = grasp_fusion_lib.image.colorize_heatmap(prob_c)
viz = grasp_fusion_lib.image.overlay_color_on_mono(
img_color=viz, img_mono=img, alpha=0.7
)
vizs.append(viz)
# vizs.extend([np.zeros_like(img)] * 2)
for poses in prim_posess:
vizs.append(self._primitive_poses2rgb(poses, img))
viz = grasp_fusion_lib.image.tile(
vizs, (-(-len(vizs) // 4), 4), boundary=True
)
debug_msg = self.br.cv2_to_imgmsg(viz, encoding='rgb8')
debug_msg.header.stamp = depthmsg.header.stamp
debug_msg.header.frame_id = self.heightmap_frame
self.pub_debug.publish(debug_msg)
def _primitive_poses2rgb(self, poses, img):
lbl = np.zeros(img.shape[:2], dtype=int)
for pose in poses:
rr, cc = skimage.draw.circle(
int(round(pose[1][1])), int(round(pose[1][0])), 5)
# Bug of skimage?
rr = np.where(rr < 0, 0, rr)
rr = np.where(rr >= lbl.shape[0], lbl.shape[0] - 1, rr)
cc = np.where(cc < 0, 0, cc)
cc = np.where(cc >= lbl.shape[1], lbl.shape[1] - 1, cc)
lbl[rr, cc] = pose[0] + 1
return grasp_fusion_lib.image.label2rgb(lbl, img, alpha=0.7)
def ignore_ins_cb(self, req):
with self.lock:
self.ignore_ins = req.data
return SetBoolResponse(success=True)
if __name__ == '__main__':
rospy.init_node('primitive_matching')
node = PrimitiveMatching()
rospy.spin()
| [
"jsk_recognition_msgs.msg.BoundingBox",
"yaml.load",
"numpy.ones",
"jsk_recognition_msgs.msg.BoundingBoxArray",
"numpy.mean",
"numpy.sin",
"glob.glob",
"numpy.round",
"message_filters.TimeSynchronizer",
"tf.transformations.quaternion_about_axis",
"grasp_fusion_lib.image.label2rgb",
"rospy.sign... | [((11267, 11304), 'rospy.init_node', 'rospy.init_node', (['"""primitive_matching"""'], {}), "('primitive_matching')\n", (11282, 11304), False, 'import rospy\n'), ((11340, 11352), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (11350, 11352), False, 'import rospy\n'), ((814, 834), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (832, 834), False, 'import cv_bridge\n'), ((869, 906), 'rospy.get_param', 'rospy.get_param', (['"""~instance_bg_label"""'], {}), "('~instance_bg_label')\n", (884, 906), False, 'import rospy\n'), ((938, 973), 'rospy.get_param', 'rospy.get_param', (['"""~heightmap_frame"""'], {}), "('~heightmap_frame')\n", (953, 973), False, 'import rospy\n'), ((1043, 1073), 'rospy.get_param', 'rospy.get_param', (['"""~voxel_size"""'], {}), "('~voxel_size')\n", (1058, 1073), False, 'import rospy\n'), ((1107, 1150), 'rospy.get_param', 'rospy.get_param', (['"""~cluster_tolerance"""', '(0.02)'], {}), "('~cluster_tolerance', 0.02)\n", (1122, 1150), False, 'import rospy\n'), ((1183, 1219), 'rospy.get_param', 'rospy.get_param', (['"""~cluster_max_size"""'], {}), "('~cluster_max_size')\n", (1198, 1219), False, 'import rospy\n'), ((1252, 1288), 'rospy.get_param', 'rospy.get_param', (['"""~cluster_min_size"""'], {}), "('~cluster_min_size')\n", (1267, 1288), False, 'import rospy\n'), ((1319, 1358), 'rospy.get_param', 'rospy.get_param', (['"""~prob_threshold"""', '(0.5)'], {}), "('~prob_threshold', 0.5)\n", (1334, 1358), False, 'import rospy\n'), ((1393, 1437), 'rospy.get_param', 'rospy.get_param', (['"""~reliable_pts_ratio"""', '(0.25)'], {}), "('~reliable_pts_ratio', 0.25)\n", (1408, 1437), False, 'import rospy\n'), ((1507, 1540), 'rospy.get_param', 'rospy.get_param', (['"""~primitive_dir"""'], {}), "('~primitive_dir')\n", (1522, 1540), False, 'import rospy\n'), ((2545, 2551), 'threading.Lock', 'Lock', ([], {}), '()\n', (2549, 2551), False, 'from threading import Lock\n'), ((2614, 2676), 'rospy.Service', 'rospy.Service', 
(['"""~ignore_instance"""', 'SetBool', 'self.ignore_ins_cb'], {}), "('~ignore_instance', SetBool, self.ignore_ins_cb)\n", (2627, 2676), False, 'import rospy\n'), ((2739, 2824), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/rgb"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/rgb', Image, queue_size=1, buff_size=2 ** 24\n )\n", (2765, 2824), False, 'import message_filters\n'), ((2865, 2951), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/depth"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/depth', Image, queue_size=1, buff_size=2 **\n 24)\n", (2891, 2951), False, 'import message_filters\n'), ((2995, 3090), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/label_instance"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/label_instance', Image, queue_size=1,\n buff_size=2 ** 24)\n", (3021, 3090), False, 'import message_filters\n'), ((3141, 3243), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/prob_pinch_affordance"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/prob_pinch_affordance', Image,\n queue_size=1, buff_size=2 ** 24)\n", (3167, 3243), False, 'import message_filters\n'), ((3311, 3418), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/prob_pinch_sole_affordance"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/prob_pinch_sole_affordance', Image,\n queue_size=1, buff_size=2 ** 24)\n", (3337, 3418), False, 'import message_filters\n'), ((3479, 3583), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/prob_suction_affordance"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/prob_suction_affordance', Image,\n queue_size=1, buff_size=2 ** 24)\n", (3505, 3583), False, 'import message_filters\n'), ((3627, 3812), 'message_filters.TimeSynchronizer', 
'message_filters.TimeSynchronizer', (['[self.sub_rgb, self.sub_depth, self.sub_lbl_ins, self.sub_prob_pinch_aff,\n self.sub_prob_pinch_sole_aff, self.sub_prob_suc_aff]'], {'queue_size': '(100)'}), '([self.sub_rgb, self.sub_depth, self.\n sub_lbl_ins, self.sub_prob_pinch_aff, self.sub_prob_pinch_sole_aff,\n self.sub_prob_suc_aff], queue_size=100)\n', (3659, 3812), False, 'import message_filters\n'), ((5169, 5567), 'grasp_fusion_lib.contrib.grasp_fusion.utils.get_primitives_poses', 'get_primitives_poses', (['self.primitives', 'depth', '[prob_pinch_aff, prob_pinch_sole_aff, prob_suc_aff]', "['pinch', 'pinch_sole', 'suction']", 'self.cluster_tolerance', 'self.cluster_max_size', 'self.cluster_min_size'], {'voxel_size': 'self.voxel_size', 'instance_label': 'lbl_ins', 'instance_bg_label': 'self.instance_bg_label', 'prob_threshold': 'self.prob_threshold', 'reliable_pts_ratio': 'self.reliable_pts_ratio'}), "(self.primitives, depth, [prob_pinch_aff,\n prob_pinch_sole_aff, prob_suc_aff], ['pinch', 'pinch_sole', 'suction'],\n self.cluster_tolerance, self.cluster_max_size, self.cluster_min_size,\n voxel_size=self.voxel_size, instance_label=lbl_ins, instance_bg_label=\n self.instance_bg_label, prob_threshold=self.prob_threshold,\n reliable_pts_ratio=self.reliable_pts_ratio)\n", (5189, 5567), False, 'from grasp_fusion_lib.contrib.grasp_fusion.utils import get_primitives_poses\n'), ((9175, 9228), 'grasp_fusion_lib.image.colorize_heatmap', 'grasp_fusion_lib.image.colorize_heatmap', (['prob_suc_aff'], {}), '(prob_suc_aff)\n', (9214, 9228), False, 'import grasp_fusion_lib\n'), ((9243, 9331), 'grasp_fusion_lib.image.overlay_color_on_mono', 'grasp_fusion_lib.image.overlay_color_on_mono', ([], {'img_color': 'viz', 'img_mono': 'img', 'alpha': '(0.7)'}), '(img_color=viz, img_mono=img,\n alpha=0.7)\n', (9287, 9331), False, 'import grasp_fusion_lib\n'), ((10565, 10599), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': 'int'}), '(img.shape[:2], dtype=int)\n', (10573, 10599), True, 
'import numpy as np\n'), ((11037, 11090), 'grasp_fusion_lib.image.label2rgb', 'grasp_fusion_lib.image.label2rgb', (['lbl', 'img'], {'alpha': '(0.7)'}), '(lbl, img, alpha=0.7)\n', (11069, 11090), False, 'import grasp_fusion_lib\n'), ((11204, 11233), 'std_srvs.srv.SetBoolResponse', 'SetBoolResponse', ([], {'success': '(True)'}), '(success=True)\n', (11219, 11233), False, 'from std_srvs.srv import SetBoolResponse\n'), ((1586, 1615), 'os.path.isdir', 'osp.isdir', (['self.primitive_dir'], {}), '(self.primitive_dir)\n', (1595, 1615), True, 'import os.path as osp\n'), ((1729, 1748), 'rospy.logfatal', 'rospy.logfatal', (['err'], {}), '(err)\n', (1743, 1748), False, 'import rospy\n'), ((1761, 1787), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['err'], {}), '(err)\n', (1782, 1787), False, 'import rospy\n'), ((1834, 1870), 'glob.glob', 'glob.glob', (["(self.primitive_dir + '/*')"], {}), "(self.primitive_dir + '/*')\n", (1843, 1870), False, 'import glob\n'), ((5913, 5924), 'geometry_msgs.msg.PoseArray', 'PoseArray', ([], {}), '()\n', (5922, 5924), False, 'from geometry_msgs.msg import PoseArray\n'), ((6070, 6088), 'jsk_recognition_msgs.msg.BoundingBoxArray', 'BoundingBoxArray', ([], {}), '()\n', (6086, 6088), False, 'from jsk_recognition_msgs.msg import BoundingBoxArray\n'), ((8969, 9041), 'grasp_fusion_lib.image.colorize_depth', 'grasp_fusion_lib.image.colorize_depth', (['depth'], {'min_value': '(0)', 'max_value': '(0.3)'}), '(depth, min_value=0, max_value=0.3)\n', (9006, 9041), False, 'import grasp_fusion_lib\n'), ((9089, 9150), 'grasp_fusion_lib.image.label2rgb', 'grasp_fusion_lib.image.label2rgb', (['(lbl_ins + 1)', 'img'], {'alpha': '(0.7)'}), '(lbl_ins + 1, img, alpha=0.7)\n', (9121, 9150), False, 'import grasp_fusion_lib\n'), ((9487, 9534), 'grasp_fusion_lib.image.colorize_heatmap', 'grasp_fusion_lib.image.colorize_heatmap', (['prob_c'], {}), '(prob_c)\n', (9526, 9534), False, 'import grasp_fusion_lib\n'), ((9553, 9641), 
'grasp_fusion_lib.image.overlay_color_on_mono', 'grasp_fusion_lib.image.overlay_color_on_mono', ([], {'img_color': 'viz', 'img_mono': 'img', 'alpha': '(0.7)'}), '(img_color=viz, img_mono=img,\n alpha=0.7)\n', (9597, 9641), False, 'import grasp_fusion_lib\n'), ((9819, 9866), 'grasp_fusion_lib.image.colorize_heatmap', 'grasp_fusion_lib.image.colorize_heatmap', (['prob_c'], {}), '(prob_c)\n', (9858, 9866), False, 'import grasp_fusion_lib\n'), ((9885, 9973), 'grasp_fusion_lib.image.overlay_color_on_mono', 'grasp_fusion_lib.image.overlay_color_on_mono', ([], {'img_color': 'viz', 'img_mono': 'img', 'alpha': '(0.7)'}), '(img_color=viz, img_mono=img,\n alpha=0.7)\n', (9929, 9973), False, 'import grasp_fusion_lib\n'), ((10783, 10806), 'numpy.where', 'np.where', (['(rr < 0)', '(0)', 'rr'], {}), '(rr < 0, 0, rr)\n', (10791, 10806), True, 'import numpy as np\n'), ((10824, 10874), 'numpy.where', 'np.where', (['(rr >= lbl.shape[0])', '(lbl.shape[0] - 1)', 'rr'], {}), '(rr >= lbl.shape[0], lbl.shape[0] - 1, rr)\n', (10832, 10874), True, 'import numpy as np\n'), ((10892, 10915), 'numpy.where', 'np.where', (['(cc < 0)', '(0)', 'cc'], {}), '(cc < 0, 0, cc)\n', (10900, 10915), True, 'import numpy as np\n'), ((10933, 10983), 'numpy.where', 'np.where', (['(cc >= lbl.shape[1])', '(lbl.shape[1] - 1)', 'cc'], {}), '(cc >= lbl.shape[1], lbl.shape[1] - 1, cc)\n', (10941, 10983), True, 'import numpy as np\n'), ((5045, 5111), 'numpy.ones', 'np.ones', (['(lbl_ins.shape[0], lbl_ins.shape[1])'], {'dtype': 'lbl_ins.dtype'}), '((lbl_ins.shape[0], lbl_ins.shape[1]), dtype=lbl_ins.dtype)\n', (5052, 5111), True, 'import numpy as np\n'), ((6399, 6418), 'numpy.radians', 'np.radians', (['pose[2]'], {}), '(pose[2])\n', (6409, 6418), True, 'import numpy as np\n'), ((6442, 6498), 'tf.transformations.quaternion_about_axis', 'tf.transformations.quaternion_about_axis', (['rad', '(0, 0, 1)'], {}), '(rad, (0, 0, 1))\n', (6482, 6498), False, 'import tf\n'), ((6626, 6632), 'geometry_msgs.msg.Pose', 'Pose', ([], 
{}), '()\n', (6630, 6632), False, 'from geometry_msgs.msg import Pose\n'), ((7661, 7680), 'numpy.dot', 'np.dot', (['pts_xy', 'rot'], {}), '(pts_xy, rot)\n', (7667, 7680), True, 'import numpy as np\n'), ((7920, 7933), 'jsk_recognition_msgs.msg.BoundingBox', 'BoundingBox', ([], {}), '()\n', (7931, 7933), False, 'from jsk_recognition_msgs.msg import BoundingBox\n'), ((1978, 1990), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1987, 1990), False, 'import yaml\n'), ((7710, 7733), 'numpy.mean', 'np.mean', (['pts_xy'], {'axis': '(0)'}), '(pts_xy, axis=0)\n', (7717, 7733), True, 'import numpy as np\n'), ((8412, 8435), 'numpy.max', 'np.max', (['depth[ins_mask]'], {}), '(depth[ins_mask])\n', (8418, 8435), True, 'import numpy as np\n'), ((8438, 8461), 'numpy.min', 'np.min', (['depth[ins_mask]'], {}), '(depth[ins_mask])\n', (8444, 8461), True, 'import numpy as np\n'), ((6294, 6311), 'numpy.round', 'np.round', (['pose[1]'], {}), '(pose[1])\n', (6302, 6311), True, 'import numpy as np\n'), ((7396, 7414), 'numpy.where', 'np.where', (['ins_mask'], {}), '(ins_mask)\n', (7404, 7414), True, 'import numpy as np\n'), ((7850, 7867), 'numpy.max', 'np.max', (['ins_depth'], {}), '(ins_depth)\n', (7856, 7867), True, 'import numpy as np\n'), ((7870, 7887), 'numpy.min', 'np.min', (['ins_depth'], {}), '(ins_depth)\n', (7876, 7887), True, 'import numpy as np\n'), ((8162, 8172), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (8168, 8172), True, 'import numpy as np\n'), ((8175, 8185), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (8181, 8185), True, 'import numpy as np\n'), ((8307, 8317), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (8313, 8317), True, 'import numpy as np\n'), ((8320, 8330), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (8326, 8330), True, 'import numpy as np\n'), ((7542, 7553), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (7548, 7553), True, 'import numpy as np\n'), ((7603, 7614), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (7609, 7614), True, 'import numpy as 
np\n'), ((7616, 7627), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (7622, 7627), True, 'import numpy as np\n'), ((7556, 7567), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (7562, 7567), True, 'import numpy as np\n')] |
import copy
import torch
import datajoint as dj
import numpy as np
import matplotlib.pyplot as plt
from orbit_transfer.configs.dataset.mnist_1d import MNIST1DDatasetConfig
from nntransfer.tables import nnfabrik
import pickle as pkl
import tempfile
import requests
@nnfabrik.schema
class DataStorage(dj.Manual):
    """Manual DataJoint table holding externally-attached data blobs."""

    # Name of the external store the attachment is written to.
    storage = "minio"

    @property
    def definition(self):
        # Build the table definition, substituting the configured store name
        # into the attach@<store> clause.
        return f"""
        # Contains the data generated by the transfer step, stored externally.
        id: varchar(128)
        ---
        data: attach@{self.storage}
        """
def plot_signals(
    xs,
    t,
    labels=None,
    args=None,
    ratio=2.6,
    do_transform=False,
    dark_mode=False,
    zoom=1,
):
    """Plot the first 10 signals of *xs* against *t* in a 1x10 panel grid.

    Parameters
    ----------
    xs : sequence of 1-d arrays, the signals (at least 10 entries are read)
    t : 1-d array, the common "time" axis plotted on the vertical axis
    labels : optional sequence used for per-panel titles
    args, do_transform : accepted for API compatibility, currently unused
    ratio : height/width aspect of each panel
    dark_mode : white dots on black background instead of black lines
    zoom : half-width of the symmetric axis limits

    Returns
    -------
    matplotlib.figure.Figure
    """
    n_rows, n_cols = 1, 10
    fig = plt.figure(figsize=[n_cols * 1.5, n_rows * 1.5 * ratio], dpi=60)
    for panel in range(n_rows * n_cols):
        signal = xs[panel]
        ax = plt.subplot(n_rows, n_cols, panel + 1)
        if dark_mode:
            plt.plot(signal, t, "wo", linewidth=6)
            ax.set_facecolor("k")
        else:
            plt.plot(signal, t, "k-", linewidth=2)
        if labels is not None:
            plt.title("label=" + str(labels[panel]), fontsize=22)
        plt.xlim(-zoom, zoom)
        plt.ylim(-zoom, zoom)
        # Signals are conventionally drawn top-to-bottom.
        plt.gca().invert_yaxis()
        plt.xticks([], [])
        plt.yticks([], [])
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.tight_layout()
    plt.show()
    return fig
def dataset_fn(seed, **config):
    """Load the MNIST-1D dataset, split off a validation set, and build
    (optionally shifted) train/test variants.

    Parameters
    ----------
    seed : int
        Seed for both torch and numpy RNGs (controls the split and shifts).
    **config : dict
        Keyword arguments consumed by ``MNIST1DDatasetConfig.from_dict``.
        Relevant fields: ``orignial`` (sic — the config attribute is spelled
        with this typo), ``valid_size``, ``train_shift``.

    Returns
    -------
    dict with keys ``train`` (shifts in [0, train_shift)), ``test``
    (shifts in [train_shift, 40)) and ``test_all`` (shifts in [0, 40)).
    """
    config = MNIST1DDatasetConfig.from_dict(config)
    print("Loading dataset: {}".format(config.dataset_cls))
    torch.manual_seed(seed)
    np.random.seed(seed)
    if config.orignial:
        # Download the reference pickle from the upstream repository.
        url = 'https://github.com/greydanus/mnist1d/raw/master/mnist1d_data.pkl'
        r = requests.get(url, allow_redirects=True)
        open('./mnist1d_data.pkl', 'wb').write(r.content)
        with open('./mnist1d_data.pkl', "rb") as handle:
            data = pkl.load(handle)
    else:
        # Fetch the cached copy from the DataJoint external store.
        with tempfile.TemporaryDirectory() as temp_dir:
            data_path = (DataStorage & {"id": "mnist1d_raw"}).fetch1("data", download_path=temp_dir)
            with open(data_path, 'rb') as f:
                data = pkl.load(f)

    # Split training into validation and training.
    val_size = int(len(data["x"]) * config.valid_size)
    indices = np.random.permutation(len(data["x"]))
    val_idx, training_idx = indices[:val_size], indices[val_size:]
    data["x"], data["x_validation"] = data["x"][training_idx, :], data["x"][val_idx, :]
    data["y"], data["y_validation"] = data["y"][training_idx], data["y"][val_idx]

    train_data = copy.deepcopy(data)
    test_data = copy.deepcopy(data)
    test_all_data = copy.deepcopy(data)
    if config.train_shift is not None:
        def shift_data(data, min_shift, max_shift):
            # Circularly shift every signal by a per-sample amount drawn
            # from [min_shift, max_shift); a degenerate range applies the
            # fixed shift min_shift everywhere.
            for key in ["x", "x_test", "x_validation"]:
                n_samples = data[key].shape[0]
                if min_shift >= max_shift:
                    # BUG FIX: the original built a float array via
                    # int(min_shift) * np.ones(...); np.roll requires
                    # integer shifts and raises TypeError on floats.
                    shift = np.full(n_samples, int(min_shift), dtype=int)
                else:
                    np.random.seed(seed)
                    shift = np.random.randint(min_shift, max_shift, n_samples)
                for i in range(n_samples):
                    data[key][i] = np.roll(data[key][i], shift[i], axis=-1)

        shift_data(train_data, 0, config.train_shift)
        shift_data(test_data, config.train_shift, 40)
        shift_data(test_all_data, 0, 40)
    return {"train": train_data, "test": test_data, "test_all": test_all_data}
| [
"numpy.random.seed",
"numpy.ones",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.random.randint",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"tempfile.TemporaryDirectory",
"matplotlib.pyplot.yticks",
"requests.get",
"matplotlib.pyplot.xticks",
"copy.deepcopy",
"matplotlib... | [((824, 884), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[cols * 1.5, rows * 1.5 * ratio]', 'dpi': '(60)'}), '(figsize=[cols * 1.5, rows * 1.5 * ratio], dpi=60)\n', (834, 884), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1818), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0)'}), '(wspace=0, hspace=0)\n', (1798, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1841), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1839, 1841), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1854, 1856), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1959), 'orbit_transfer.configs.dataset.mnist_1d.MNIST1DDatasetConfig.from_dict', 'MNIST1DDatasetConfig.from_dict', (['config'], {}), '(config)\n', (1951, 1959), False, 'from orbit_transfer.configs.dataset.mnist_1d import MNIST1DDatasetConfig\n'), ((2024, 2047), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2041, 2047), False, 'import torch\n'), ((2052, 2072), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2066, 2072), True, 'import numpy as np\n'), ((3042, 3061), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3055, 3061), False, 'import copy\n'), ((3078, 3097), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3091, 3097), False, 'import copy\n'), ((3117, 3136), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3130, 3136), False, 'import copy\n'), ((2191, 2230), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)'}), '(url, allow_redirects=True)\n', (2203, 2230), False, 'import requests\n'), ((1017, 1048), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', '(ix + 1)'], {}), '(rows, cols, ix + 1)\n', (1028, 1048), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1652), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-zoom)', 
'zoom'], {}), '(-zoom, zoom)\n', (1639, 1652), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1686), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-zoom)', 'zoom'], {}), '(-zoom, zoom)\n', (1673, 1686), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2381), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (2373, 2381), True, 'import pickle as pkl\n'), ((2405, 2434), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2432, 2434), False, 'import tempfile\n'), ((1376, 1409), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""wo"""'], {'linewidth': '(6)'}), "(x, t, 'wo', linewidth=6)\n", (1384, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1515), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""k-"""'], {'linewidth': '(2)'}), "(x, t, 'k-', linewidth=2)\n", (1490, 1515), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1754), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '[]'], {}), '([], [])\n', (1746, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1774), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]', '[]'], {}), '([], [])\n', (1766, 1774), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2628), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (2625, 2628), True, 'import pickle as pkl\n'), ((1699, 1708), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1706, 1708), True, 'import matplotlib.pyplot as plt\n'), ((3441, 3461), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3455, 3461), True, 'import numpy as np\n'), ((3490, 3547), 'numpy.random.randint', 'np.random.randint', (['min_shift', 'max_shift', 'data[l].shape[0]'], {}), '(min_shift, max_shift, data[l].shape[0])\n', (3507, 3547), True, 'import numpy as np\n'), ((3631, 3669), 'numpy.roll', 'np.roll', (['data[l][i]', 'shift[i]'], {'axis': '(-1)'}), '(data[l][i], shift[i], axis=-1)\n', (3638, 3669), True, 'import numpy as np\n'), ((3372, 3397), 'numpy.ones', 'np.ones', (['data[l].shape[0]'], {}), 
'(data[l].shape[0])\n', (3379, 3397), True, 'import numpy as np\n')] |
from rubin_sim.maf.stackers import BaseStacker
import numpy as np
import numpy.lib.recfunctions as rf
__all__ = ['CoaddStacker']
class CoaddStacker(BaseStacker):
    """
    Stacker to estimate m5 "coadded" per band and per night.

    Visits sharing the same (RA, Dec, filter) pointing and the same night are
    collapsed into a single record: m5 becomes the coadded depth, exposure
    counters are summed, and the remaining fields take median values.

    Parameters
    ----------
    list : str, optional
        Name of the columns used.
        Default : 'observationStartMJD', 'fieldRA', 'fieldDec','filter','fiveSigmaDepth','visitExposureTime','night','observationId', 'numExposures','visitTime'
    """
    # Extra output column flagging coadded records.
    colsAdded = ['coadd']

    def __init__(self, mjdCol='observationStartMJD', RaCol='fieldRA', DecCol='fieldDec', m5Col='fiveSigmaDepth', nightcol='night', filterCol='filter', nightCol='night', numExposuresCol='numExposures', visitTimeCol='visitTime', visitExposureTimeCol='visitExposureTime'):
        # NOTE(review): 'nightcol' (lowercase c) duplicates 'nightCol' and is
        # never used here — presumably kept for call compatibility; confirm
        # before removing it from the signature.
        self.colsReq = [mjdCol, RaCol, DecCol, m5Col, filterCol, nightCol,
                        numExposuresCol, visitTimeCol, visitExposureTimeCol]
        self.RaCol = RaCol
        self.DecCol = DecCol
        self.nightCol = nightCol
        self.filterCol = filterCol
        self.m5Col = m5Col
        self.numExposuresCol = numExposuresCol
        self.visitTimeCol = visitTimeCol
        self.visitExposureTimeCol = visitExposureTimeCol

        # Units for the added 'coadd' column.
        self.units = ['int']

    def _run(self, simData, cols_present=False):
        """
        Parameters
        ---------------
        simData : simulation data
        cols_present: to check whether the field has already been estimated

        Returns
        -----------
        numpy array of initial fields plus modified fields:
        - m5Col: "coadded" m5
        - numExposuresCol: sum of numExposuresCol
        - visitTimeCol: sum of visitTimeCol
        - visitExposureTimeCol: sum of visitExposureTimeCol
        - all other input fields except band (Ra, Dec, night) : median(field)
        """
        if cols_present:
            # Column already present in data; assume it is correct and does not need recalculating.
            return simData
        self.dtype = simData.dtype
        r = []
        # Group visits by unique (RA, Dec, filter) pointing.
        for ra, dec, band in np.unique(simData[[self.RaCol, self.DecCol, self.filterCol]]):
            # Positional match within a small tolerance (1e-5 deg).
            idx = np.abs(simData[self.RaCol]-ra) < 1.e-5
            idx &= np.abs(simData[self.DecCol]-dec) < 1.e-5
            idx &= simData[self.filterCol] == band

            sel = simData[idx]
            # Within a pointing, coadd each night separately.
            for night in np.unique(sel[self.nightCol]):
                idxb = sel[self.nightCol] == night
                r.append(tuple(self.fill(sel[idxb])))

        myarray = np.array(r, dtype=self.dtype)
        return myarray

    def fill(self, tab):
        """
        Estimation of new fields (m5 "coadded" values, ...)

        Parameters
        ---------------
        tab : array of (initial) data

        Returns
        -----------
        tuple with modified field values:
        - m5Col: "coadded" m5
        - numExposuresCol: sum of numExposuresCol
        - visitTimeCol: sum of visitTimeCol
        - visitExposureTimeCol: sum of visitExposureTimeCol
        - all other input fields except band (Ra, Dec, night) : median(field)
        """
        r = []
        # Exactly one value is appended per dtype field, preserving the
        # original field order so the tuple matches self.dtype in _run.
        for colname in self.dtype.names:
            if colname not in [self.m5Col, self.numExposuresCol, self.visitTimeCol, self.visitExposureTimeCol, self.filterCol]:
                if colname == 'coadd':
                    # Flag this record as a coadded one.
                    r.append(1)
                else:
                    r.append(np.median(tab[colname]))
            if colname == self.m5Col:
                r.append(self.m5_coadd(tab[self.m5Col]))
            if colname in [self.numExposuresCol, self.visitTimeCol, self.visitExposureTimeCol]:
                r.append(np.sum(tab[colname]))
            if colname == self.filterCol:
                # All rows in tab share the same band by construction.
                r.append(np.unique(tab[self.filterCol])[0])

        return r

    def m5_coadd(self, m5):
        """
        Estimation of "coadded" m5 values based on:

        flux_5sigma = 10**(-0.4*m5)
        sigmas = flux_5sigma/5.
        sigma_tot = 1./sqrt(np.sum(1/sigmas**2))
        flux_tot = 5.*sigma_tot

        Parameters
        ---------------
        m5 : set of m5 (five-sigma depths) values

        Returns
        -----------
        "coadded" m5 value
        """
        # Convert depths to fluxes, combine noise in inverse variance,
        # and convert the combined 5-sigma flux back to a magnitude.
        fluxes = 10**(-0.4*m5)
        sigmas = fluxes/5.
        sigma_tot = 1./np.sqrt(np.sum(1./sigmas**2))
        flux_tot = 5.*sigma_tot

        return -2.5*np.log10(flux_tot)
| [
"numpy.abs",
"numpy.sum",
"numpy.median",
"numpy.array",
"numpy.log10",
"numpy.unique"
] | [((2089, 2150), 'numpy.unique', 'np.unique', (['simData[[self.RaCol, self.DecCol, self.filterCol]]'], {}), '(simData[[self.RaCol, self.DecCol, self.filterCol]])\n', (2098, 2150), True, 'import numpy as np\n'), ((2532, 2561), 'numpy.array', 'np.array', (['r'], {'dtype': 'self.dtype'}), '(r, dtype=self.dtype)\n', (2540, 2561), True, 'import numpy as np\n'), ((2377, 2406), 'numpy.unique', 'np.unique', (['sel[self.nightCol]'], {}), '(sel[self.nightCol])\n', (2386, 2406), True, 'import numpy as np\n'), ((4390, 4408), 'numpy.log10', 'np.log10', (['flux_tot'], {}), '(flux_tot)\n', (4398, 4408), True, 'import numpy as np\n'), ((2170, 2202), 'numpy.abs', 'np.abs', (['(simData[self.RaCol] - ra)'], {}), '(simData[self.RaCol] - ra)\n', (2176, 2202), True, 'import numpy as np\n'), ((2228, 2262), 'numpy.abs', 'np.abs', (['(simData[self.DecCol] - dec)'], {}), '(simData[self.DecCol] - dec)\n', (2234, 2262), True, 'import numpy as np\n'), ((4315, 4340), 'numpy.sum', 'np.sum', (['(1.0 / sigmas ** 2)'], {}), '(1.0 / sigmas ** 2)\n', (4321, 4340), True, 'import numpy as np\n'), ((3671, 3691), 'numpy.sum', 'np.sum', (['tab[colname]'], {}), '(tab[colname])\n', (3677, 3691), True, 'import numpy as np\n'), ((3430, 3453), 'numpy.median', 'np.median', (['tab[colname]'], {}), '(tab[colname])\n', (3439, 3453), True, 'import numpy as np\n'), ((3760, 3790), 'numpy.unique', 'np.unique', (['tab[self.filterCol]'], {}), '(tab[self.filterCol])\n', (3769, 3790), True, 'import numpy as np\n')] |
import argparse
import pandas as pd
import logging as log
import numpy as np
import os
import re
import threading
def sample_edgelist_job( dataset, df, n, k, sem ):
    """Write a random k-fraction sample of the n edges in df.

    The sample is saved as '<dataset-dir>-sampled-<k>/data.edgelist.csv',
    space-separated, without header or index. The semaphore `sem` bounds
    the number of concurrently running jobs.

    Parameters
    ----------
    dataset : str  path to the source data.edgelist.csv (only its directory
                   name is used to derive the output directory)
    df : pandas.DataFrame  the full edgelist
    n : int  number of rows in df
    k : float  fraction of edges to keep (0 < k <= 1)
    sem : threading.Semaphore  concurrency limiter
    """
    with sem:
        if log:
            log.info( 'Sampling edges ...' )

        # Vectorized mask construction: pick int(n*k) distinct row indices
        # and flag them, instead of looping in Python over the choices.
        v_filt = np.zeros( n, dtype=bool )
        v_rand = np.random.choice( np.arange( n ), size=int( n*k ), replace=False )
        v_filt[v_rand] = True

        sample_dir = os.path.dirname( dataset ) + '-sampled-%s' % k

        # e.g. dumps/core-sampled-0.25 gets created if not present
        if not os.path.isdir( sample_dir ):
            if log:
                log.info( 'Creating directory ..' )
            os.mkdir( sample_dir )

        df.iloc[v_filt].to_csv( '%s/data.edgelist.csv' % sample_dir, sep=' ', header=False, index=False )

        if log:
            log.info( 'Done' )
def sample_edgelist( paths, log=None ):
    """Down-sample the edgelist of each dataset in `paths` at 10 fractions.

    Each entry may be a dataset directory (e.g. 'dumps/core') or a bare
    name looked up under 'dumps/'. For every valid dataset the edgelist is
    read once and sampled at fractions linspace(0.05, 0.5, 10), one worker
    thread per fraction (bounded by a semaphore of 10).

    Parameters
    ----------
    paths : str or list of str  dataset directories or bare names
    log : logging module or logger, optional  used for progress/errors
    """
    # Ensure we always iterate over a list.
    if not isinstance(paths, list):
        paths = [paths]

    for dataset in paths:
        # BUG FIX: the original tested os.path.isfile here, so an existing
        # dataset *directory* (e.g. 'dumps/core') wrongly got 'dumps/'
        # prepended again and failed. Test for a directory instead and only
        # fall back to the dumps/ prefix when needed.
        if not os.path.isdir( dataset ):
            dataset = 'dumps/'+ dataset

            if not os.path.isdir( dataset ):
                if log:
                    log.error( '%s is not a directory', dataset )
                continue

        dataset = dataset + '/data.edgelist.csv'
        if not os.path.isfile( dataset ):
            if log:
                log.error( 'Edgelist file does not exit (was looking in %s). this is a requirement', dataset )
            continue

        if log:
            log.info( 'Reading lines ..' )
        df = pd.read_csv( dataset, delim_whitespace=True, header=None )
        n = df.shape[0]

        # Prepare bounded concurrency: at most 10 sampling jobs at once.
        sem = threading.Semaphore( 10 )
        threads = []

        for k in np.linspace(0.05, 0.5, num=10):  # e.g. [ 0.25, 0.5, 0.75 ]
            t = threading.Thread( target = sample_edgelist_job, name = '%s[%s]' % ( os.path.dirname(dataset), k ), args = ( dataset, df, n, k, sem ) )
            t.start()

            threads.append( t )

        # wait for all threads to finish
        for t in threads:
            t.join()
#
if __name__ == '__main__':
    # Command-line interface: one or more dataset paths to down-sample.
    arg_parser = argparse.ArgumentParser(description='lodcc - sample edgelist')
    arg_parser.add_argument('--paths', '-p', nargs='*', required=True, help='')

    # Thread-aware log format so concurrent sampling jobs are attributable.
    log.basicConfig(
        level=log.INFO,
        format='[%(asctime)s] - %(levelname)-8s : %(threadName)s: %(message)s')

    cli_args = vars(arg_parser.parse_args())

    sample_edgelist(cli_args['paths'], log)

    log.info('done')
| [
"os.mkdir",
"logging.error",
"argparse.ArgumentParser",
"logging.basicConfig",
"pandas.read_csv",
"os.path.isdir",
"os.path.dirname",
"logging.info",
"os.path.isfile",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"threading.Semaphore"
] | [((2256, 2318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""lodcc - sample edgelist"""'}), "(description='lodcc - sample edgelist')\n", (2279, 2318), False, 'import argparse\n'), ((2410, 2518), 'logging.basicConfig', 'log.basicConfig', ([], {'level': 'log.INFO', 'format': '"""[%(asctime)s] - %(levelname)-8s : %(threadName)s: %(message)s"""'}), "(level=log.INFO, format=\n '[%(asctime)s] - %(levelname)-8s : %(threadName)s: %(message)s')\n", (2425, 2518), True, 'import logging as log\n'), ((2652, 2668), 'logging.info', 'log.info', (['"""done"""'], {}), "('done')\n", (2660, 2668), True, 'import logging as log\n'), ((210, 231), 'numpy.array', 'np.array', (['([False] * n)'], {}), '([False] * n)\n', (218, 231), True, 'import numpy as np\n'), ((1673, 1729), 'pandas.read_csv', 'pd.read_csv', (['dataset'], {'delim_whitespace': '(True)', 'header': 'None'}), '(dataset, delim_whitespace=True, header=None)\n', (1684, 1729), True, 'import pandas as pd\n'), ((1791, 1814), 'threading.Semaphore', 'threading.Semaphore', (['(10)'], {}), '(10)\n', (1810, 1814), False, 'import threading\n'), ((1856, 1886), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.5)'], {'num': '(10)'}), '(0.05, 0.5, num=10)\n', (1867, 1886), True, 'import numpy as np\n'), ((268, 298), 'logging.info', 'log.info', (['"""Sampling edges ..."""'], {}), "('Sampling edges ...')\n", (276, 298), True, 'import logging as log\n'), ((338, 350), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (347, 350), True, 'import numpy as np\n'), ((472, 496), 'os.path.dirname', 'os.path.dirname', (['dataset'], {}), '(dataset)\n', (487, 496), False, 'import os\n'), ((601, 626), 'os.path.isdir', 'os.path.isdir', (['sample_dir'], {}), '(sample_dir)\n', (614, 626), False, 'import os\n'), ((715, 735), 'os.mkdir', 'os.mkdir', (['sample_dir'], {}), '(sample_dir)\n', (723, 735), False, 'import os\n'), ((881, 897), 'logging.info', 'log.info', (['"""Done"""'], {}), "('Done')\n", (889, 897), True, 'import 
logging as log\n'), ((1093, 1116), 'os.path.isfile', 'os.path.isfile', (['dataset'], {}), '(dataset)\n', (1107, 1116), False, 'import os\n'), ((1615, 1643), 'logging.info', 'log.info', (['"""Reading lines .."""'], {}), "('Reading lines ..')\n", (1623, 1643), True, 'import logging as log\n'), ((666, 699), 'logging.info', 'log.info', (['"""Creating directory .."""'], {}), "('Creating directory ..')\n", (674, 699), True, 'import logging as log\n'), ((1180, 1202), 'os.path.isdir', 'os.path.isdir', (['dataset'], {}), '(dataset)\n', (1193, 1202), False, 'import os\n'), ((1395, 1418), 'os.path.isfile', 'os.path.isfile', (['dataset'], {}), '(dataset)\n', (1409, 1418), False, 'import os\n'), ((1250, 1293), 'logging.error', 'log.error', (['"""%s is not a directory"""', 'dataset'], {}), "('%s is not a directory', dataset)\n", (1259, 1293), True, 'import logging as log\n'), ((1466, 1567), 'logging.error', 'log.error', (['"""Edgelist file does not exit (was looking in %s). this is a requirement"""', 'dataset'], {}), "(\n 'Edgelist file does not exit (was looking in %s). this is a requirement',\n dataset)\n", (1475, 1567), True, 'import logging as log\n'), ((2001, 2025), 'os.path.dirname', 'os.path.dirname', (['dataset'], {}), '(dataset)\n', (2016, 2025), False, 'import os\n')] |
"""
Sintef
======
Evaluate initial bubble and droplet sizes from the SINTEF model
This module is deprecated and has been replaced by the `particle_size_models`
and `psf` modules. In order to retain compatibility with previous versions,
we retain the original API of the `sintef` model, but replace all calculations
with calls to the appropriate functions in `particle_size_models` and `psf`.
"""
# <NAME>, September 2013, Texas A&M University <<EMAIL>>.
from __future__ import (absolute_import, division, print_function)
from tamoc import psf
import numpy as np
from copy import copy
from scipy.optimize import fsolve
def modified_We_model(D, rho_gas, m_gas, mu_gas, sigma_gas, rho_oil, m_oil,
                      mu_oil, sigma_oil, rho):
    """
    This function is deprecated: Use psf.sintef() instead.

    Compute the initial oil droplet and gas bubble sizes from the SINTEF model

    Apply the SINTEF modified Weber number model to estimate the initial
    oil and gas particle sizes. This function calculates the adjusted exit
    velocity based on a void fraction and buoyancy adjustment per the method
    suggested by SINTEF.

    Parameters
    ----------
    D : float
        Diameter of the release point (m)
    rho_gas : float
        In-situ density of the gas (kg/m^3)
    m_gas : ndarray
        Array of mass fluxes for each component of the gas object (kg/s)
    mu_gas : float
        Dynamic viscosity of gas (Pa s)
    sigma_gas : float
        Interfacial tension between gas and seawater (N/m)
    rho_oil : float
        In-situ density of the oil
    m_oil : ndarray
        Array of mass fluxes for each component of the oil object (kg/s)
    mu_oil : float
        Dynamic viscosity of oil (Pa s)
    sigma_oil : float
        Interfacial tension between oil and seawater (N/m)
    rho : float
        Density of the continuous phase fluid (kg/m^3)

    Returns
    -------
    A tuple containing:
        de_gas : float
            The volume mean diameter of the gas bubbles (m)
        de_oil : float
            The volume mean diameter of the oil droplets (m)

    """
    # Coerce scalar or list mass-flux inputs to 1-d ndarrays. np.atleast_1d
    # reproduces the original hand-rolled isinstance checks exactly:
    # scalar -> array([scalar]), list -> array(list), ndarray -> unchanged.
    m_gas = np.atleast_1d(m_gas)
    m_oil = np.atleast_1d(m_oil)

    # Call the psf functions
    mu = 0.0012  # pass mu of seawater (not used)
    de_gas, de_max, k, alpha = psf.sintef(D, m_gas, rho_gas, m_oil, rho_oil,
                                          mu_gas, sigma_gas, rho, mu,
                                          fp_type=0, use_d95=False)
    de_oil, de_max, k, alpha = psf.sintef(D, m_gas, rho_gas, m_oil, rho_oil,
                                          mu_oil, sigma_oil, rho, mu,
                                          fp_type=1, use_d95=False)

    # Return the bubble and droplet sizes
    return (de_gas, de_oil)
# Provide tool to estimate the maximum stable particle size
def de_max(sigma, rho_p, rho):
    """
    This function is deprecated: Use psf.de_max_oil() instead.

    Calculate the maximum stable particle size

    Predicts the maximum stable particle size per Clift et al. (1978) via
    the equation:

    d_max = 4. * np.sqrt(sigma / (g (rho - rho_p)))

    Parameters
    ----------
    sigma : float
        Interfacial tension between the phase undergoing breakup and the
        ambient receiving continuous phase (N/m)
    rho_p : float
        Density of the phase undergoing breakup (kg/m^3)
    rho : float
        Density of the ambient receiving continuous phase (kg/m^3)

    Returns
    -------
    d_max : float
        Maximum stable particle size (m)

    """
    # Delegate to the psf module; note that psf.de_max_oil takes
    # (rho_p, sigma, rho) -- the first two arguments swap order
    # relative to this wrapper's signature.
    return psf.de_max_oil(rho_p, sigma, rho)
def de_50(U, D, rho_p, mu_p, sigma, rho):
    """
    This function is deprecated: Use psf.sintef_d50() instead.

    Predict the volume mean diameter from the SINTEF modified Weber number
    model, capped at the maximum stable particle size.

    Parameters
    ----------
    U : float
        Effective exit velocity after void fraction and buoyancy correction
        of the phase undergoing breakup (m/s)
    D : float
        Diameter of the discharge (m)
    rho_p : float
        Density of the phase undergoing breakup (kg/m^3)
    mu_p : float
        Dynamic viscosity of the phase undergoing breakup (Pa s)
    sigma : float
        Interfacial tension between the phase undergoing breakup and the
        ambient receiving continuous phase (N/m)
    rho : float
        Density of the ambient receiving continuous phase (kg/m^3)

    Returns
    -------
    de_50 : float
        The volume mean diameter of the phase undergoing breakup (m)

    Notes
    -----
    psf.sintef_d50 falls back to 1.2 D when the Weber number is below the
    critical atomization value of 350; in either case the result returned
    here is capped at the maximum stable size of Clift et al. (1978),
    d_max = 4 (sigma / (g (rho - rho_p)))^(1/2).

    """
    # Modified We number prediction, then clamp to the stable-size ceiling.
    d50 = psf.sintef_d50(U, D, rho_p, mu_p, sigma, rho)
    return min(d50, de_max(sigma, rho_p, rho))
def rosin_rammler(nbins, d50, md_total, sigma, rho_p, rho):
    """
    This function is deprecated: Use psf.rosin_rammler() instead.

    Return the volume size distribution from the Rosin Rammler distribution

    Computes fluid particle diameters in `nbins` size bins on a volume basis
    from the Rosin-Rammler distribution (k = ln(0.5), alpha = 1.8), then
    redistributes the mass of any bin larger than the maximum stable size
    into the first bin at that maximum size.

    Parameters
    ----------
    nbins : int
        Desired number of size bins in the particle volume size distribution
    d50 : float
        Volume mean particle diameter (m)
    md_total : float
        Total particle mass flux (kg/s)
    sigma : float
        Interfacial tension between the phase undergoing breakup and the
        ambient receiving continuous phase (N/m)
    rho_p : float
        Density of the phase undergoing breakup (kg/m^3)
    rho : float
        Density of the ambient receiving continuous phase (kg/m^3)

    Returns
    -------
    de : ndarray
        Array of particle sizes at the center of each bin (m)
    md : ndarray
        Total mass flux of particles corresponding to each bin (kg/s)

    References
    ----------
    Johansen, Brandvik, and Farooq (2013), "Droplet breakup in subsea oil
    releases - Part 2: Predictions of droplet size distributions with and
    without injection of chemical dispersants." Marine Pollution Bulletin,
    73: 327-335. doi:10.1016/j.marpolbul.2013.04.012.

    """
    # Ceiling imposed by the maximum stable droplet size.
    dmax = de_max(sigma, rho_p, rho)

    # Un-truncated Rosin-Rammler volume distribution.
    k = np.log(0.5)
    alpha = 1.8
    de, V_frac = psf.rosin_rammler(nbins, d50, k, alpha)

    # Mass flux carried by each bin.
    md = V_frac * md_total

    # Truncate: the first oversized bin is pinned at dmax and absorbs the
    # mass of every subsequent oversized bin.
    imax = -1
    for i, diameter in enumerate(de):
        if diameter > dmax:
            if imax < 0:
                imax = i
                de[i] = dmax
            else:
                md[imax] += md[i]
                md[i] = 0.

    return (de, md)
| [
"tamoc.psf.sintef_d50",
"numpy.log",
"tamoc.psf.rosin_rammler",
"tamoc.psf.sintef",
"numpy.array",
"tamoc.psf.de_max_oil"
] | [((2632, 2735), 'tamoc.psf.sintef', 'psf.sintef', (['D', 'm_gas', 'rho_gas', 'm_oil', 'rho_oil', 'mu_gas', 'sigma_gas', 'rho', 'mu'], {'fp_type': '(0)', 'use_d95': '(False)'}), '(D, m_gas, rho_gas, m_oil, rho_oil, mu_gas, sigma_gas, rho, mu,\n fp_type=0, use_d95=False)\n', (2642, 2735), False, 'from tamoc import psf\n'), ((2854, 2957), 'tamoc.psf.sintef', 'psf.sintef', (['D', 'm_gas', 'rho_gas', 'm_oil', 'rho_oil', 'mu_oil', 'sigma_oil', 'rho', 'mu'], {'fp_type': '(1)', 'use_d95': '(False)'}), '(D, m_gas, rho_gas, m_oil, rho_oil, mu_oil, sigma_oil, rho, mu,\n fp_type=1, use_d95=False)\n', (2864, 2957), False, 'from tamoc import psf\n'), ((3934, 3967), 'tamoc.psf.de_max_oil', 'psf.de_max_oil', (['rho_p', 'sigma', 'rho'], {}), '(rho_p, sigma, rho)\n', (3948, 3967), False, 'from tamoc import psf\n'), ((5542, 5587), 'tamoc.psf.sintef_d50', 'psf.sintef_d50', (['U', 'D', 'rho_p', 'mu_p', 'sigma', 'rho'], {}), '(U, D, rho_p, mu_p, sigma, rho)\n', (5556, 5587), False, 'from tamoc import psf\n'), ((7695, 7706), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (7701, 7706), True, 'import numpy as np\n'), ((7745, 7784), 'tamoc.psf.rosin_rammler', 'psf.rosin_rammler', (['nbins', 'd50', 'k', 'alpha'], {}), '(nbins, d50, k, alpha)\n', (7762, 7784), False, 'from tamoc import psf\n'), ((2278, 2295), 'numpy.array', 'np.array', (['[m_gas]'], {}), '([m_gas])\n', (2286, 2295), True, 'import numpy as np\n'), ((2330, 2345), 'numpy.array', 'np.array', (['m_gas'], {}), '(m_gas)\n', (2338, 2345), True, 'import numpy as np\n'), ((2448, 2465), 'numpy.array', 'np.array', (['[m_oil]'], {}), '([m_oil])\n', (2456, 2465), True, 'import numpy as np\n'), ((2500, 2515), 'numpy.array', 'np.array', (['m_oil'], {}), '(m_oil)\n', (2508, 2515), True, 'import numpy as np\n')] |
import numpy as np
class FillRaster():
def __init__(self):
self.name = "Fill Raster Function"
self.description = ("")
self.fillValue = 0.
def getParameterInfo(self):
return [
{
'name': 'raster',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Input Raster",
'description': ""
},
{
'name': 'value',
'dataType': 'numeric',
'value': 0,
'required': True,
'displayName': "Fill Value",
'description': ("")
},
]
def updateRasterInfo(self, **kwargs):
b = kwargs['raster_info']['bandCount']
self.fillValue = kwargs.get('value', 0.)
kwargs['output_info']['statistics'] = b * ({'minimum': self.fillValue, 'maximum': self.fillValue}, )
kwargs['output_info']['histogram'] = ()
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
pixelBlocks['output_pixels'] = np.full(shape, self.fillValue, dtype=props['pixelType'])
return pixelBlocks
| [
"numpy.full"
] | [((1178, 1234), 'numpy.full', 'np.full', (['shape', 'self.fillValue'], {'dtype': "props['pixelType']"}), "(shape, self.fillValue, dtype=props['pixelType'])\n", (1185, 1234), True, 'import numpy as np\n')] |
"""Test derivation of `sispeed`."""
import numpy as np
from iris.cube import Cube, CubeList
from esmvalcore.preprocessor._derive.sithick import DerivedVariable
def test_sispeed_calculation():
"""Test calculation of `sithick`."""
siconc = Cube(np.full((2, 2), 0.5), 'sea_ice_area_fraction', units='1.0')
sivol = Cube(np.full((2, 2), 0.5), 'sea_ice_thickness')
derived_var = DerivedVariable()
sispeed = derived_var.calculate(CubeList((siconc, sivol)))
assert np.all(
sispeed.data == np.ones_like(sispeed.data)
)
def test_sispeed_calculation_percent():
"""Test calculation of `sithick` with sit in %."""
siconc = Cube(np.full((2, 2), 50.), 'sea_ice_area_fraction', units='%')
sivol = Cube(np.full((2, 2), 0.5), 'sea_ice_thickness')
derived_var = DerivedVariable()
sispeed = derived_var.calculate(CubeList((siconc, sivol)))
assert np.all(
sispeed.data == np.ones_like(sispeed.data)
)
| [
"numpy.full",
"iris.cube.CubeList",
"esmvalcore.preprocessor._derive.sithick.DerivedVariable",
"numpy.ones_like"
] | [((395, 412), 'esmvalcore.preprocessor._derive.sithick.DerivedVariable', 'DerivedVariable', ([], {}), '()\n', (410, 412), False, 'from esmvalcore.preprocessor._derive.sithick import DerivedVariable\n'), ((806, 823), 'esmvalcore.preprocessor._derive.sithick.DerivedVariable', 'DerivedVariable', ([], {}), '()\n', (821, 823), False, 'from esmvalcore.preprocessor._derive.sithick import DerivedVariable\n'), ((256, 276), 'numpy.full', 'np.full', (['(2, 2)', '(0.5)'], {}), '((2, 2), 0.5)\n', (263, 276), True, 'import numpy as np\n'), ((333, 353), 'numpy.full', 'np.full', (['(2, 2)', '(0.5)'], {}), '((2, 2), 0.5)\n', (340, 353), True, 'import numpy as np\n'), ((449, 474), 'iris.cube.CubeList', 'CubeList', (['(siconc, sivol)'], {}), '((siconc, sivol))\n', (457, 474), False, 'from iris.cube import Cube, CubeList\n'), ((669, 690), 'numpy.full', 'np.full', (['(2, 2)', '(50.0)'], {}), '((2, 2), 50.0)\n', (676, 690), True, 'import numpy as np\n'), ((744, 764), 'numpy.full', 'np.full', (['(2, 2)', '(0.5)'], {}), '((2, 2), 0.5)\n', (751, 764), True, 'import numpy as np\n'), ((860, 885), 'iris.cube.CubeList', 'CubeList', (['(siconc, sivol)'], {}), '((siconc, sivol))\n', (868, 885), False, 'from iris.cube import Cube, CubeList\n'), ((520, 546), 'numpy.ones_like', 'np.ones_like', (['sispeed.data'], {}), '(sispeed.data)\n', (532, 546), True, 'import numpy as np\n'), ((931, 957), 'numpy.ones_like', 'np.ones_like', (['sispeed.data'], {}), '(sispeed.data)\n', (943, 957), True, 'import numpy as np\n')] |
import json
from collections import Counter
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
def read_json(filename):
with open(filename, 'r') as load_f:
file_dict = json.load(load_f)
return file_dict
def trp(l, n):
""" Truncate or pad a list """
r = l[:n]
if len(r) < n:
r.extend(list([0]) * (n - len(r)))
return r
def down_sample(logs, labels, sample_ratio):
print('sampling...')
total_num = len(labels)
all_index = list(range(total_num))
sample_logs = {}
for key in logs.keys():
sample_logs[key] = []
sample_labels = []
sample_num = int(total_num * sample_ratio)
for i in tqdm(range(sample_num)):
random_index = int(np.random.uniform(0, len(all_index)))
for key in logs.keys():
sample_logs[key].append(logs[key][random_index])
sample_labels.append(labels[random_index])
del all_index[random_index]
return sample_logs, sample_labels
# https://stackoverflow.com/questions/15357422/python-determine-if-a-string-should-be-converted-into-int-or-float
def isfloat(x):
try:
a = float(x)
except ValueError:
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except ValueError:
return False
else:
return a == b
def split_features(data_path, train_ratio=1, min_len=0):
with open(data_path, 'r') as f:
data = f.readlines()
sample_size = int(len(data) * train_ratio)
data = data[:sample_size]
logkeys = []
for line in data:
line = [ln.split(",") for ln in line.split()]
if len(line) < min_len:
continue
line = np.array(line)
logkey = line.squeeze()
logkeys.append(logkey.tolist())
return logkeys
def sliding_window(data_iter, vocab, window_size, is_train=True):
'''
dataset structure
result_logs(dict):
result_logs['feature0'] = list()
result_logs['feature1'] = list()
...
labels(list)
'''
#event2semantic_vec = read_json(data_dir + 'hdfs/event2semantic_vec.json')
result_logs = {}
result_logs['Sequentials'] = []
result_logs['Quantitatives'] = []
result_logs['Semantics'] = []
result_logs['Parameters'] = []
labels = []
num_sessions = 0
num_classes = len(vocab)
for line in zip(*data_iter):
if num_sessions % 1000 == 0:
print("processed %s lines"%num_sessions, end='\r')
num_sessions += 1
line = [vocab.stoi.get(ln, vocab.unk_index) for ln in line]
session_len = max(len(line), window_size) + 1# predict the next one
padding_size = session_len - len(line)
line = line + [vocab.pad_index] * padding_size
for i in range(session_len - window_size):
Sequential_pattern = line[i:i + window_size]
Semantic_pattern = []
Quantitative_pattern = [0] * num_classes
log_counter = Counter(Sequential_pattern)
for key in log_counter:
Quantitative_pattern[key] = log_counter[key]
Sequential_pattern = np.array(Sequential_pattern)
Quantitative_pattern = np.array(Quantitative_pattern)[:, np.newaxis]
result_logs['Sequentials'].append(Sequential_pattern)
result_logs['Quantitatives'].append(Quantitative_pattern)
result_logs['Semantics'].append(Semantic_pattern)
labels.append(line[i + window_size])
if is_train:
print('number of sessions {}'.format(num_sessions))
print('number of seqs {}'.format(len(result_logs['Sequentials'])))
return result_logs, labels
def session_window(data_dir, datatype, sample_ratio=1):
event2semantic_vec = read_json(data_dir + 'hdfs/event2semantic_vec.json')
result_logs = {}
result_logs['Sequentials'] = []
result_logs['Quantitatives'] = []
result_logs['Semantics'] = []
labels = []
if datatype == 'train':
data_dir += 'hdfs/robust_log_train.csv'
elif datatype == 'val':
data_dir += 'hdfs/robust_log_valid.csv'
elif datatype == 'test':
data_dir += 'hdfs/robust_log_test.csv'
train_df = pd.read_csv(data_dir)
for i in tqdm(range(len(train_df))):
ori_seq = [
int(eventid) for eventid in train_df["Sequence"][i].split(' ')
]
Sequential_pattern = trp(ori_seq, 50)
Semantic_pattern = []
for event in Sequential_pattern:
if event == 0:
Semantic_pattern.append([-1] * 300)
else:
Semantic_pattern.append(event2semantic_vec[str(event - 1)])
Quantitative_pattern = [0] * 29
log_counter = Counter(Sequential_pattern)
for key in log_counter:
Quantitative_pattern[key] = log_counter[key]
Sequential_pattern = np.array(Sequential_pattern) # [:, np.newaxis]
Quantitative_pattern = np.array(Quantitative_pattern)[:, np.newaxis]
result_logs['Sequentials'].append(Sequential_pattern)
result_logs['Quantitatives'].append(Quantitative_pattern)
result_logs['Semantics'].append(Semantic_pattern)
labels.append(int(train_df["label"][i]))
if sample_ratio != 1:
result_logs, labels = down_sample(result_logs, labels, sample_ratio)
# result_logs, labels = up_sample(result_logs, labels)
print('Number of sessions({}): {}'.format(data_dir,
len(result_logs['Semantics'])))
return result_logs, labels
| [
"pandas.read_csv",
"collections.Counter",
"json.load",
"numpy.array"
] | [((4423, 4444), 'pandas.read_csv', 'pd.read_csv', (['data_dir'], {}), '(data_dir)\n', (4434, 4444), True, 'import pandas as pd\n'), ((214, 231), 'json.load', 'json.load', (['load_f'], {}), '(load_f)\n', (223, 231), False, 'import json\n'), ((1808, 1822), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (1816, 1822), True, 'import numpy as np\n'), ((4956, 4983), 'collections.Counter', 'Counter', (['Sequential_pattern'], {}), '(Sequential_pattern)\n', (4963, 4983), False, 'from collections import Counter\n'), ((5109, 5137), 'numpy.array', 'np.array', (['Sequential_pattern'], {}), '(Sequential_pattern)\n', (5117, 5137), True, 'import numpy as np\n'), ((3159, 3186), 'collections.Counter', 'Counter', (['Sequential_pattern'], {}), '(Sequential_pattern)\n', (3166, 3186), False, 'from collections import Counter\n'), ((3324, 3352), 'numpy.array', 'np.array', (['Sequential_pattern'], {}), '(Sequential_pattern)\n', (3332, 3352), True, 'import numpy as np\n'), ((5188, 5218), 'numpy.array', 'np.array', (['Quantitative_pattern'], {}), '(Quantitative_pattern)\n', (5196, 5218), True, 'import numpy as np\n'), ((3389, 3419), 'numpy.array', 'np.array', (['Quantitative_pattern'], {}), '(Quantitative_pattern)\n', (3397, 3419), True, 'import numpy as np\n')] |
import math
import random
import WilliamsDivergenceMaker as wdm
import BioPythonMaker as bpm
import GeometryMaker as dfm
import HtmlReportMaker as hrm
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
############### REAL data #######################
rep = hrm.HtmlReportMaker("WCCC","Results/kullback_leibler.html",cols=6)
rep.addLineComment('Real pdb data')
strucs = bpm.loadPdbStructures([],'PdbData/',extension='ent',prefix='pdb')
geo_mak = dfm.GeometryMaker(strucs,log=0)
geos = ['N:CA','CA:C','C:O','C:N+1','C-1:N','N:N+1','N:CA:C:N+1']
data = geo_mak.calculateGeometry(geos)
cm = wdm.WilliamsDivergenceMaker(data,geos,bins=10,log=1,norm=False,p_resample=True,pval_iters=1000)
rep.addLineComment('Scatters')
rep.changeColNumber(6)
print('Creating scatters')
for i in range(0,len(geos)):
geoA = geos[i]
for j in range(i+1,len(geos)):
geoB = geos[j]
if geoA != geoB:
print('Scatter',geoA,geoB)
df_rand = cm.randomiseData(data[[geoA,geoB]])
div = cm.getCorrelation([geoA, geoB])
tpl = cm.compareToObserved(data, geoA, geoB)
stat, p_value, A, D, B,phist = div.stat,div.p_value,div.histAB,div.diffAB,div.convAB,div.p_hist
maxV = max(np.max(A),np.max(B))
rep.addLineComment(geoA + ' ' + geoB + ' stat=' + str(round(stat,3)) + ' p_value=' + str(round(p_value,3)))
rep.addPlot2d(data, 'scatter', geo_x=geoA, geo_y=geoB, hue=geoA)
rep.addPlot2d(df_rand, 'scatter', geo_x=geoA, geo_y=geoB, hue=geoA)
if len(phist['divergence_shuffled']) > 0:
A_B, B_A = cm.calculateKullbackLeibler(phist['divergence_shuffled'],phist['divergence_resampled'])
#print(A_B,B_A)
print(str(round(A_B, 4)),str(round(B_A, 4)))
title = 'Kullback-Leibler Divergence=[' + str(round(A_B,4)) + ',' +str(round(B_A,4)) + ']'
rep.addPlot1d(phist,'histogram',geo_x='divergence_shuffled',title='',overlay=True,alpha=0.5)
rep.addPlot1d(phist, 'histogram', geo_x='divergence_resampled', title=title,alpha=0.5,palette='mediumseagreen')
rep.addSurface(A,'Original Data',cmin=0,cmax=maxV,palette='Blues')
rep.addSurface(D, 'Difference Data', cmin=-1*maxV, cmax=maxV, palette='RdBu')
rep.addSurface(B, 'Convolved Data', cmin=0, cmax=maxV, palette='Reds')
rep.printReport()
| [
"WilliamsDivergenceMaker.WilliamsDivergenceMaker",
"HtmlReportMaker.HtmlReportMaker",
"numpy.max",
"BioPythonMaker.loadPdbStructures",
"GeometryMaker.GeometryMaker"
] | [((300, 368), 'HtmlReportMaker.HtmlReportMaker', 'hrm.HtmlReportMaker', (['"""WCCC"""', '"""Results/kullback_leibler.html"""'], {'cols': '(6)'}), "('WCCC', 'Results/kullback_leibler.html', cols=6)\n", (319, 368), True, 'import HtmlReportMaker as hrm\n'), ((412, 480), 'BioPythonMaker.loadPdbStructures', 'bpm.loadPdbStructures', (['[]', '"""PdbData/"""'], {'extension': '"""ent"""', 'prefix': '"""pdb"""'}), "([], 'PdbData/', extension='ent', prefix='pdb')\n", (433, 480), True, 'import BioPythonMaker as bpm\n'), ((488, 520), 'GeometryMaker.GeometryMaker', 'dfm.GeometryMaker', (['strucs'], {'log': '(0)'}), '(strucs, log=0)\n', (505, 520), True, 'import GeometryMaker as dfm\n'), ((630, 735), 'WilliamsDivergenceMaker.WilliamsDivergenceMaker', 'wdm.WilliamsDivergenceMaker', (['data', 'geos'], {'bins': '(10)', 'log': '(1)', 'norm': '(False)', 'p_resample': '(True)', 'pval_iters': '(1000)'}), '(data, geos, bins=10, log=1, norm=False,\n p_resample=True, pval_iters=1000)\n', (657, 735), True, 'import WilliamsDivergenceMaker as wdm\n'), ((1276, 1285), 'numpy.max', 'np.max', (['A'], {}), '(A)\n', (1282, 1285), True, 'import numpy as np\n'), ((1286, 1295), 'numpy.max', 'np.max', (['B'], {}), '(B)\n', (1292, 1295), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import numpy as np
import paddle
import pgl
import paddle.nn as nn
import pgl.nn as gnn
import pgl.nn.functional as F
import paddle.static as static
class GNNModel(nn.Layer):
def __init__(self, input_size, output_size, num_layers=3):
super(GNNModel, self).__init__()
self.conv_fn = nn.LayerList()
self.conv_fn.append(gnn.GCNConv(input_size, output_size))
for i in range(num_layers - 1):
self.conv_fn.append(gnn.GCNConv(output_size, output_size))
self.pool_fn = gnn.GraphPool("sum")
def forward(self, num_nodes, edges, feature):
graph = pgl.Graph(num_nodes=num_nodes, edges=edges)
for fn in self.conv_fn:
feature = fn(graph, feature)
output = self.pool_fn(graph, feature)
return output
class StaticGraphOpTest(unittest.TestCase):
def test_static_graph(self):
path = './tmp'
dim = 100
# Load DyGraph Model
paddle.disable_static()
num_nodes = 5
edges = [(0, 1), (1, 2), (3, 4)]
nfeat = np.random.randn(num_nodes, dim).astype("float32")
model = GNNModel(dim, 10)
out = model(
paddle.to_tensor(num_nodes),
paddle.to_tensor(edges), paddle.to_tensor(nfeat))
out = out.numpy()
paddle.save(model.state_dict(),
os.path.join(path, "static_gnn.pdparam"))
paddle.enable_static()
# Run Static Fisrt
model2 = GNNModel(dim, 10)
input_num_nodes = static.data(
name='num_nodes', shape=[-1], dtype='int32')
input_edges = static.data(name='edges', shape=[-1, 2], dtype='int32')
input_feature = static.data(
name="feature", shape=[-1, dim], dtype="float32")
output = model2(input_num_nodes, input_edges, input_feature)
place = paddle.CPUPlace()
exe = static.Executor(place)
exe.run(static.default_startup_program())
prog = static.default_main_program()
state_dict = paddle.load(os.path.join(path, "static_gnn.pdparam"))
model2.set_state_dict(state_dict)
feed_dict = {
"num_nodes": num_nodes,
"edges": np.array(
edges, dtype="int32"),
"feature": nfeat.astype("float32"),
}
out2 = exe.run(prog, feed=feed_dict, fetch_list=[output])[0]
eps = np.sum((out2 - out)**2)
self.assertTrue(eps < 1e-5)
import shutil
shutil.rmtree(path)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"paddle.static.data",
"paddle.static.default_main_program",
"numpy.sum",
"paddle.CPUPlace",
"os.path.join",
"paddle.nn.LayerList",
"paddle.enable_static",
"pgl.nn.GCNConv",
"numpy.random.randn",
"pgl.nn.GraphPool",
"numpy.array",
"paddle.disable_static",
"pgl.Graph",
"pa... | [((3168, 3183), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3181, 3183), False, 'import unittest\n'), ((942, 956), 'paddle.nn.LayerList', 'nn.LayerList', ([], {}), '()\n', (954, 956), True, 'import paddle.nn as nn\n'), ((1157, 1177), 'pgl.nn.GraphPool', 'gnn.GraphPool', (['"""sum"""'], {}), "('sum')\n", (1170, 1177), True, 'import pgl.nn as gnn\n'), ((1245, 1288), 'pgl.Graph', 'pgl.Graph', ([], {'num_nodes': 'num_nodes', 'edges': 'edges'}), '(num_nodes=num_nodes, edges=edges)\n', (1254, 1288), False, 'import pgl\n'), ((1589, 1612), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (1610, 1612), False, 'import paddle\n'), ((2039, 2061), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (2059, 2061), False, 'import paddle\n'), ((2152, 2208), 'paddle.static.data', 'static.data', ([], {'name': '"""num_nodes"""', 'shape': '[-1]', 'dtype': '"""int32"""'}), "(name='num_nodes', shape=[-1], dtype='int32')\n", (2163, 2208), True, 'import paddle.static as static\n'), ((2244, 2299), 'paddle.static.data', 'static.data', ([], {'name': '"""edges"""', 'shape': '[-1, 2]', 'dtype': '"""int32"""'}), "(name='edges', shape=[-1, 2], dtype='int32')\n", (2255, 2299), True, 'import paddle.static as static\n'), ((2324, 2385), 'paddle.static.data', 'static.data', ([], {'name': '"""feature"""', 'shape': '[-1, dim]', 'dtype': '"""float32"""'}), "(name='feature', shape=[-1, dim], dtype='float32')\n", (2335, 2385), True, 'import paddle.static as static\n'), ((2485, 2502), 'paddle.CPUPlace', 'paddle.CPUPlace', ([], {}), '()\n', (2500, 2502), False, 'import paddle\n'), ((2517, 2539), 'paddle.static.Executor', 'static.Executor', (['place'], {}), '(place)\n', (2532, 2539), True, 'import paddle.static as static\n'), ((2605, 2634), 'paddle.static.default_main_program', 'static.default_main_program', ([], {}), '()\n', (2632, 2634), True, 'import paddle.static as static\n'), ((3024, 3049), 'numpy.sum', 'np.sum', (['((out2 - out) ** 2)'], {}), '((out2 - 
out) ** 2)\n', (3030, 3049), True, 'import numpy as np\n'), ((3115, 3134), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (3128, 3134), False, 'import shutil\n'), ((985, 1021), 'pgl.nn.GCNConv', 'gnn.GCNConv', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (996, 1021), True, 'import pgl.nn as gnn\n'), ((1810, 1837), 'paddle.to_tensor', 'paddle.to_tensor', (['num_nodes'], {}), '(num_nodes)\n', (1826, 1837), False, 'import paddle\n'), ((1851, 1874), 'paddle.to_tensor', 'paddle.to_tensor', (['edges'], {}), '(edges)\n', (1867, 1874), False, 'import paddle\n'), ((1876, 1899), 'paddle.to_tensor', 'paddle.to_tensor', (['nfeat'], {}), '(nfeat)\n', (1892, 1899), False, 'import paddle\n'), ((1988, 2028), 'os.path.join', 'os.path.join', (['path', '"""static_gnn.pdparam"""'], {}), "(path, 'static_gnn.pdparam')\n", (2000, 2028), False, 'import os\n'), ((2556, 2588), 'paddle.static.default_startup_program', 'static.default_startup_program', ([], {}), '()\n', (2586, 2588), True, 'import paddle.static as static\n'), ((2669, 2709), 'os.path.join', 'os.path.join', (['path', '"""static_gnn.pdparam"""'], {}), "(path, 'static_gnn.pdparam')\n", (2681, 2709), False, 'import os\n'), ((2833, 2863), 'numpy.array', 'np.array', (['edges'], {'dtype': '"""int32"""'}), "(edges, dtype='int32')\n", (2841, 2863), True, 'import numpy as np\n'), ((1095, 1132), 'pgl.nn.GCNConv', 'gnn.GCNConv', (['output_size', 'output_size'], {}), '(output_size, output_size)\n', (1106, 1132), True, 'import pgl.nn as gnn\n'), ((1692, 1723), 'numpy.random.randn', 'np.random.randn', (['num_nodes', 'dim'], {}), '(num_nodes, dim)\n', (1707, 1723), True, 'import numpy as np\n')] |
#!/usr/bin/python
from astropy.io import fits
import ccdproc
import numpy as np
def merge_fitsfd(hdulist):
data1 = ccdproc.CCDData(hdulist[1].data, unit="adu")
data1.header = hdulist[1].header
data2 = ccdproc.CCDData(hdulist[2].data, unit="adu")
merged = np.concatenate( (data1, np.fliplr(data2) ), axis=1)
# assume we don't have to change any WCS parameters from ext 1
hdu_new = fits.PrimaryHDU( merged )
hdu_new.header = hdulist[0].header
hdulist_new = fits.HDUList( [hdu_new] )
# Use basename to keep from breaking if the names in imagelist are
# full path rather than just a file
return hdulist_new
| [
"numpy.fliplr",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.HDUList",
"ccdproc.CCDData"
] | [((120, 164), 'ccdproc.CCDData', 'ccdproc.CCDData', (['hdulist[1].data'], {'unit': '"""adu"""'}), "(hdulist[1].data, unit='adu')\n", (135, 164), False, 'import ccdproc\n'), ((208, 252), 'ccdproc.CCDData', 'ccdproc.CCDData', (['hdulist[2].data'], {'unit': '"""adu"""'}), "(hdulist[2].data, unit='adu')\n", (223, 252), False, 'import ccdproc\n'), ((390, 413), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['merged'], {}), '(merged)\n', (405, 413), False, 'from astropy.io import fits\n'), ((467, 490), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu_new]'], {}), '([hdu_new])\n', (479, 490), False, 'from astropy.io import fits\n'), ((287, 303), 'numpy.fliplr', 'np.fliplr', (['data2'], {}), '(data2)\n', (296, 303), True, 'import numpy as np\n')] |
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import os
import glob
import cv2
import numpy as np
batch_size = 400
num_classes = 35
epochs = 2000
# input image dimensions
img_rows, img_cols = 28, 28
#ALL DATA
images = []
fileList = []
for filename in glob.glob('/home/issd/AI/sources/user-space/Utku/PLATE_CHARS/*.jpg'):
fileList.append(filename)
fileList.sort()
fileList.sort(key=len)
for i in fileList:
#print(i)
img = cv2.imread(i)
img = np.resize(img, (28,28))
images.append(img)
#print(images)
#print(images[0].shape)
images = np.asarray(images)
#print(images.shape)
labels = np.empty((0,35))
word = ""
with open("text.txt", "r") as file:
file.read(1)
for line in file:
for char in line:
if char != " ":
word += char
if char == " ":
word = ""
word = word.strip()
if word != "":
labels = np.append(labels, [word])
x_train = images[:10000]
x_test = images[10000:10500]
y_train = labels[:10000]
y_test = labels[10000:10500]
print("ytrain: ", y_train)
print("ytest: ", y_test)
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print("x_train1 shape", x_train[0].shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print("x_train is: ", x_train)
print("len of x_train is: ", len(x_train))
print("x_train[0] is: ", x_train[0])
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=None)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save_weights('mert_cnn.h5')
with open('mert_cnn_architecture.json', 'w') as f:
f.write(model.to_json())
| [
"keras.optimizers.Adadelta",
"numpy.resize",
"keras.backend.image_data_format",
"numpy.empty",
"numpy.asarray",
"keras.layers.Dropout",
"keras.layers.Flatten",
"cv2.imread",
"numpy.append",
"keras.layers.Dense",
"keras.layers.Conv2D",
"glob.glob",
"keras.models.Sequential",
"keras.layers.M... | [((456, 524), 'glob.glob', 'glob.glob', (['"""/home/issd/AI/sources/user-space/Utku/PLATE_CHARS/*.jpg"""'], {}), "('/home/issd/AI/sources/user-space/Utku/PLATE_CHARS/*.jpg')\n", (465, 524), False, 'import glob\n'), ((780, 798), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (790, 798), True, 'import numpy as np\n'), ((834, 851), 'numpy.empty', 'np.empty', (['(0, 35)'], {}), '((0, 35))\n', (842, 851), True, 'import numpy as np\n'), ((2206, 2254), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2232, 2254), False, 'import keras\n'), ((2264, 2311), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2290, 2311), False, 'import keras\n'), ((2321, 2333), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2331, 2333), False, 'from keras.models import Sequential\n'), ((648, 661), 'cv2.imread', 'cv2.imread', (['i'], {}), '(i)\n', (658, 661), False, 'import cv2\n'), ((677, 701), 'numpy.resize', 'np.resize', (['img', '(28, 28)'], {}), '(img, (28, 28))\n', (686, 701), True, 'import numpy as np\n'), ((1361, 1382), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1380, 1382), True, 'from keras import backend as K\n'), ((2344, 2418), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (2350, 2418), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((2464, 2501), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2470, 2501), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((2513, 2543), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2525, 2543), False, 'from keras.layers 
import Conv2D, MaxPooling2D\n'), ((2555, 2568), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2562, 2568), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2580, 2589), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2587, 2589), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2601, 2630), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2606, 2630), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2642, 2654), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2649, 2654), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2666, 2706), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (2671, 2706), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2791, 2818), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (2816, 2818), False, 'import keras\n'), ((1158, 1183), 'numpy.append', 'np.append', (['labels', '[word]'], {}), '(labels, [word])\n', (1167, 1183), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 04:03:50 2021
@author: kavya
"""
import scipy.misc
from skimage.transform import resize
import matplotlib
from keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
import gradio as gr
from sklearn import preprocessing
model = load_model("PDP_NN")
def classify(image):
#print(image.shape)
image = image/255.0
image=image.reshape(28,28)
is_all_zero = np.all((image == 0))
if not is_all_zero:
x, y = np.nonzero(image)
# Using the smallest and largest x and y indices of nonzero elements,
# we can find the desired rectangular bounds.
# And don't forget to add 1 to the top bound to avoid the fencepost problem.
image= image[x.min():x.max()+1, y.min():y.max()+1]
result = np.zeros((image.shape[0]+10,image.shape[1]+10))
result[5:image.shape[0]+5,5:image.shape[1]+5] = image
image = resize(result, (28, 28))
matplotlib.image.imsave('outfile.jpg',image)
image = image.reshape(1,28,28,1)
prediction = model.predict(image).tolist()[0]
print(f"prediction : {prediction}")
return {str(i): prediction[i] for i in range(10)}
sketchpad = gr.inputs.Sketchpad()
label = gr.outputs.Label(num_top_classes=3)
interface = gr.Interface(classify, sketchpad, label, live=True, capture_session=True)
interface.launch(share=True) | [
"keras.models.load_model",
"gradio.Interface",
"gradio.outputs.Label",
"numpy.zeros",
"numpy.nonzero",
"matplotlib.image.imsave",
"skimage.transform.resize",
"gradio.inputs.Sketchpad",
"numpy.all"
] | [((335, 355), 'keras.models.load_model', 'load_model', (['"""PDP_NN"""'], {}), "('PDP_NN')\n", (345, 355), False, 'from keras.models import load_model\n'), ((1233, 1254), 'gradio.inputs.Sketchpad', 'gr.inputs.Sketchpad', ([], {}), '()\n', (1252, 1254), True, 'import gradio as gr\n'), ((1263, 1298), 'gradio.outputs.Label', 'gr.outputs.Label', ([], {'num_top_classes': '(3)'}), '(num_top_classes=3)\n', (1279, 1298), True, 'import gradio as gr\n'), ((1311, 1384), 'gradio.Interface', 'gr.Interface', (['classify', 'sketchpad', 'label'], {'live': '(True)', 'capture_session': '(True)'}), '(classify, sketchpad, label, live=True, capture_session=True)\n', (1323, 1384), True, 'import gradio as gr\n'), ((474, 492), 'numpy.all', 'np.all', (['(image == 0)'], {}), '(image == 0)\n', (480, 492), True, 'import numpy as np\n'), ((534, 551), 'numpy.nonzero', 'np.nonzero', (['image'], {}), '(image)\n', (544, 551), True, 'import numpy as np\n'), ((836, 888), 'numpy.zeros', 'np.zeros', (['(image.shape[0] + 10, image.shape[1] + 10)'], {}), '((image.shape[0] + 10, image.shape[1] + 10))\n', (844, 888), True, 'import numpy as np\n'), ((962, 986), 'skimage.transform.resize', 'resize', (['result', '(28, 28)'], {}), '(result, (28, 28))\n', (968, 986), False, 'from skimage.transform import resize\n'), ((995, 1040), 'matplotlib.image.imsave', 'matplotlib.image.imsave', (['"""outfile.jpg"""', 'image'], {}), "('outfile.jpg', image)\n", (1018, 1040), False, 'import matplotlib\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## File name : detector.py
## Created by: <NAME>
## Date : December 2020
## Purpose : Script contains main functions used in FreeClimber package, as well as added functionality
version = '0.4.0'
publication = False
import os
import sys
import time
import ffmpeg
import numpy as np
import pandas as pd
import trackpy as tp
import subprocess as sp
from scipy.stats import linregress
from scipy.signal import find_peaks,peak_prominences
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.lines import Line2D
## Issue with 'SettingWithCopyWarning' in step_3
pd.options.mode.chained_assignment = None # default='warn'
class detector(object):
'''Particle detection platform for identifying the group climbing velocity of a
group of flies (or particles) in a Drosophila negative geotaxis (climbing) assay.
This platform is designed to take videos of varying background homogeneity and
applying a background subtraction step before extracting the x,y,time coordinates of
spots/flies. Climbing velocities are calculated as the most linear portion of a user-defined
subset of frames by vial (vertical divisions of evenly spaced bins from the min/max
X-range.
'''
    def __init__(self, video_file, config_file = None, gui = False, variables = None, debug = False, **kwargs):
        '''Initializing detector object.
        Loads configuration variables (from file or list), validates them, and reads
        the full video into memory as an nd-array.
        ----
        Inputs:
            video_file (str): Path to video file to process
            config_file (str): Path to configuration (.cfg) file
            gui (bool): GUI-specific functions
            variables: None if importing from a file, or list if doing so manually
            debug (bool): Prints out each function as it runs.
            **kwargs: Keyword arguments that are unspecified but can be passed to various plot functions
        ----
        Returns:
            None -- variables are saved to the detector object
        '''
        self.debug = debug
        if self.debug: print('detector.__init__')
        self.config_file = config_file
        ## Exits via SystemExit if the path is invalid
        self.video_file = self.check_video(video_file)
        ## Load variables -- these loaders exec() config lines, so attributes such as
        ## convert_to_cm_sec, pixel_to_cm, frame_rate, etc. are defined there
        if gui:
            self.load_for_gui(variables = variables)
        elif not gui:
            self.load_for_main(config_file = config_file)
        self.check_variable_formats()
        ## Setting a color map
        self.vial_color_map = cm.jet
        ## Create a conversion factor
        ## convert_to_cm_sec / pixel_to_cm come from the exec'd config (see loaders)
        if self.convert_to_cm_sec: self.conversion_factor = self.pixel_to_cm / self.frame_rate
        else: self.conversion_factor = 1
        print('')
        self.specify_paths_details(video_file)
        ## loglevel='panic' silences ffmpeg output during decode
        self.image_stack = self.video_to_array(video_file,loglevel='panic')
        return
## Loading functions
def load_for_gui(self,variables):
'''Loads experimental and detections variables to the detector object for GUI
----
Inputs:
variables (list): List of variables found in the configuration file, order is irrelevant
----
Returns:
None -- variables are saved to the detector object'''
if self.debug: print('detector.load_for_gui')
self.config = None
self.path_project = None
## Exit program if no variables are passed
if variables == None:
print('\n\nExiting program. No variable list loaded')
raise SystemExit
## Pass imported variables to the detector object
if self.debug: print('detector.load_for_gui: --------variables--------')
for item in variables:
if self.debug: print('detector.load_for_gui:',item)
if ~item.startswith(('\s','\t','\n')):
try: exec('self.'+item)
except: print('detector.load_for_gui: !! Could not import ( %s )' % item)
return
def load_for_main(self, config_file = None):
'''Loads experimental and detections variables to the detector object for command
line interface.
----
Inputs:
config_file (str): Path to configuration (.cfg) file
----
Returns:
None -- variables are saved to the detector object'''
if self.debug: print('detector.load_for_main')
## Read in lines from configuration (.cfg) file
try:
with open(config_file,'r') as f:
variables = f.readlines()
f.close()
## Filter, format, and import variables to detector object
if self.debug: print('detector.load_for_main: --------variables--------')
variables = [item.rstrip() for item in variables if not item.startswith(('#','\s','\t','\n'))]
for item in variables:
if self.debug: print('detector.load_for_main:',item)
if ~item.startswith(('\s','\t','\n')):
try: exec('self.'+item)
except: print('detector.load_for_main: !! Could not import ( %s )' % item)
return
## Exit program if issue with the configuration file
except:
print('\n\nExiting program. Could not read in file.cfg, but path and suffix are good--likely a formatting issue')
raise SystemExit
return
## Checking video path is valid
def check_video(self,video_file=None):
'''Checking video path is valid, exiting if not.
Input:
video_file (str): Video file path
----
Returns:
video_file (str): Video file path'''
if self.debug: print('detector.check_video...', end='')
## Check if file path is valid, exit if not
if os.path.isfile(video_file):
return video_file
else:
print('\n\nExiting program. Invalid path to video file: ',video_file)
raise SystemExit
## Specifying variables
def specify_paths_details(self,video_file):
'''Takes in the video file and other imported variables and parses them as needed.
----
Inputs:
video_file (str): Video file path
----
Returns:
None -- Passes parsed variables back to detector object
'''
if self.debug: print('detector.specify_paths_details')
## Set file and path names
folder,name = os.path.split(video_file)
self.name = name[:-5]
self.name_nosuffix = '.'.join(video_file.split('.')[:-1])
## Defining final file names and destinations
file_names = ['data','filtered','diagnostic','slope']
file_suffixes = ['.raw.csv','.filtered.csv','.diagnostic.png','.slopes.csv']
for item,jtem in zip(file_names,file_suffixes):
var_name = 'self.path_'+item
file_path = ''.join([self.name_nosuffix,jtem])
if self.debug: print('detector.specify_paths_details: ' + var_name+"='"+file_path+"'")
exec(var_name+"='"+file_path+"'")
## Project folder specific paths
if self.path_project == None: self.path_project = os.path.join(folder,self.name + '.cfg')
## For future release
# self.path_review_diagnostic = self.path_project + '/review_at_R_%s/review.log' %self.review_R
## Extracting file details and naming individual vials
self.file_details = dict(zip(self.naming_convention.split('_'),self.name.split('_')))
self.experiment_details = self.name.split('_')
self.experiment_details[-1] = '.'.join(self.experiment_details[-1].split('.')[:-1])
self.vial_ID = self.experiment_details[:self.vial_id_vars]
## Creating a list of colors for plotting
self.color_list = [self.vial_color_map(i) for i in np.linspace(0,1,self.vials)]
return
## Checking to make sure variables are entered properly...still more to include
def check_variable_formats(self):
'''Checks to make sure at least some of the variables input formatted properly'''
if self.debug: print('detector.check_variable_formats')
## Vial number
if self.vials < 1:
print('!! Issue with vials: now = 1')
self.vials = 1
## Spot diameter must be odd number
if ~self.diameter%2:
print('!! Issue with diameter: was %s, now %s' %(self.diameter,self.diameter+1))
self.diameter += 1
## Frame rate must be greater than 0
if self.frame_rate <= 0:
print('!! Issue with frame_rate: was %s, now 1' %(self.frame_rate))
self.frame_rate = 1
## Background blank cannot be greater than the frame
if self.blank_0 < self.crop_0:
self.blank_0 = self.crop_0
if self.blank_n > self.crop_n:
self.blank_n = self.crop_n
## Window size vs. frames to test
if (self.crop_n - self.crop_0) < self.window:
# print('!! Issue with window size (%s) being greater than frames (%s). Window size set to 80 percent of desired frames (%s)' %(self.window,self.crop_n - self.crop_0,(.8 * (self.crop_n - self.crop_0))))
self.window = (self.crop_n - self.crop_0) * 0.8
## blank vs. crop frames
if self.blank_0 < self.crop_0:
print('!! Issue with blank frames vs. crop frames. Setting blank_0 (%s) = crop_n (%s)' % (self.blank_0,self.crop_0))
self.blank_0 = self.crop_0
if self.blank_n > self.crop_n:
print('!! Issue with blank frames vs. crop frames. Setting blank_n (%s) = crop_n (%s)' % (self.blank_n,self.crop_n))
self.blank_n = self.crop_n
## Check frame is still valid
if self.check_frame < self.crop_0:
# print('!! Issue with check_frame < crop_0 (min. cropped frame). Now, check_frame = crop_0 = %s' %self.check_frame)
self.check_frame = self.crop_0
if self.check_frame > self.crop_n:
# print('!! Issue with check_frame > crop_n (max cropped frame). Now, check_frame = crop_n = %s' %self.check_frame)
self.check_frame = self.crop_n
return
## Video processing functions
def video_to_array(self, file, **kwargs):
'''Converts video into an nd-array using ffmpeg-python module.
----
Inputs:
file (str): Path to video file
kwargs: Can be used with the ffmpeg.output() argument.
----
Returns:
image_stack (nd-array): nd-array of the video
'''
if self.debug: print('detector.video_to_array')
## Extracting video meta-data
try:
try:
probe = ffmpeg.probe(file)
except:
print('!! Could not read %s into FreeClimber. Likely due to unacceptable video file or FFmpeg not installed' % file)
raise SystemExit
video_info = next(x for x in probe['streams'] if x['codec_type'] == 'video')
self.width = int(video_info['width'])
self.height = int(video_info['height'])
except:
print('!! Could not read in video file metadata')
## Converting video to nd-array
try:
out,err = (ffmpeg
.input(file)
.output('pipe:',format='rawvideo', pix_fmt='rgb24',**kwargs)
.run(capture_stdout=True))
self.n_frames = int(len(out)/self.height/self.width/3)
image_stack = np.frombuffer(out, np.uint8).reshape([-1, self.height, self.width, 3])
except:
print('!! Could not read in video file to an array. Error message (if any):', err)
return image_stack
def crop_and_grayscale(self,video_array,
x = 0 ,x_max = None,
y = 0 ,y_max = None,
first_frame = None,
last_frame = None,
grayscale = True):
'''Crops imported video array to region of interest and converts it to grayscale
----
Inputs:
video_array (nd-array): image_stack generated from video_to_array function
x (int): left-most x-position
x_max (int): right-most x-position
y (int): lowest y-position
y_max (int): highest y-position
first_frame (int): first frame to include
last_frame (int): last frame to include
grayscale (bool): True to convert to gray, False to leave in color.
NOTE: Must be in grayscale for FreeClimber, option is
available for functionality beyond FreeClimber
----
Returns:
clean_stack (nd-array): Cropped and grayscaled (if indicated) video as nd-array'''
if self.debug: print('detector.crop_and_grayscale')
## Conditionals for cropping frames and video length
if first_frame == None: first_frame = self.crop_0
if last_frame == None: last_frame = self.crop_n
if y_max == None: y_max = video_array.shape[2]
if x_max == None: x_max = video_array.shape[1]
if self.debug: print('detector.crop_and_grayscale: Cropping from frame %s to %s' % (first_frame,last_frame))
## Setting only frames and ROI to grayscale
if grayscale:
if self.debug: print('detector.crop_and_grayscale: Converting to grayscale & cropping ROI to (%s x %s)' % (x_max-x,y_max-y))
ch_1 = 0.2989 * video_array[first_frame:last_frame,y : y_max,x : x_max,0]
ch_2 = 0.5870 * video_array[first_frame:last_frame,y : y_max,x : x_max,1]
ch_3 = 0.1140 * video_array[first_frame:last_frame,y : y_max,x : x_max,2]
clean_stack = ch_1.astype(float) + ch_2.astype(float) + ch_3.astype(float)
## Only cropping, no grayscaling
else:
clean_stack = video_array[first_frame:last_frame,y : y_max,x : x_max,:]
if self.debug: print('detector.crop_and_grayscale: Final video array dimensions:',clean_stack.shape)
return clean_stack
## Subtract background
def subtract_background(self,video_array=None):
'''Generate a null background image and subtract that from each frame
----
Inputs:
video_array (nd-array): clean_stack generated from crop_and_grayscale
first_frame (int): First frame to consider for background subtraction
last_frame (int): Last frame to consider for background subtraction
----
Returns:
spot_stack (nd-array): Background-subtracted image stack
background (array): Array containing the pixel intensities for each x,y-coordinate'''
if self.debug: print('detector.subtract_background')
## Setting the last frame to the end if None provided
first_frame = self.blank_0
last_frame = self.blank_n
## Generating a null background image as the median pixel intensity across frames
background = np.median(video_array[first_frame:last_frame,:,:].astype(float), axis=0).astype(int)
if self.debug: print('detector.subtract_background: dimensions:', background.shape)
## Subtracting the null background image from each individual frame
spot_stack = np.subtract(video_array,background)
return spot_stack, background
## Plots and views
def view_ROI(self,image = None, border = True, x0 = 0,x1 = None, y0 = 0,y1 = None,
color = 'r', bin_lines = None, **kwargs):
'''Generates image of the first frame w/a rectangle for the region of interest
----
Inputs:
image (array): Slice (single frame) of the image_stack nd-array.
border (bool): True draws a rectangle over the region of interest
x0 (int): Left-most coordinate
x1 (int): Right-most coordinate
y0 (int): Top-most coordinate (should also be the lowest y-value)
y1 (int): Bottom-most coordinate (should also be the highest y-value)
color (str): Color corresponding with rectangle color for region of interest
bin_lines (bool): True if drawing lines between calculated vials
**kwargs: Arguments for plt.imshow
----
Returns:
None -- Generates an image saved in step_3()
'''
if self.debug: print('detector.view_ROI')
## Defaults to first frame of image stack if None specified
if self.debug: print('detector.view_ROI :: Setting frame')
if image == None:
image = self.image_stack[0]
## Plots the slice of nd-array
if self.debug: print('detector.view_ROI :: Plotting image')
plt.imshow(image,cmap=cm.Greys_r, **kwargs)
## Draws a red rectangle over the region of interest
if self.debug: print('detector.view_ROI :: Draw ROI')
if border:
if x1 == None: x1 = self.image_stack[0].shape[1]
if y1 == None: y1 = self.image_stack[0].shape[0]
plt.hlines(y0,x0,x1, color = color, alpha = .7)
plt.hlines(y1,x0,x1, color = color, alpha = .7)
plt.vlines(x0,y0,y1, color = color, alpha = .7)
plt.vlines(x1,y0,y1, color = color, alpha = .7, label='ROI')
## Draws box to denote where outlier trim lines are
if self.debug: print('detector.view_ROI :: Trim outlier lines (if selected)')
if self.trim_outliers:
lc,rc,tc,bc = self.left_crop,self.right_crop, self.top_crop,self.bottom_crop
plt.vlines(x0+lc,y0+bc,y0+tc,color='c',alpha=.7,linewidth=.5)
plt.vlines(x0+rc,y0+bc,y0+tc,color='c',alpha=.7,linewidth=.5)
plt.hlines(y0+bc,x0+lc,x0+rc,color='c',alpha=.7,linewidth=.5)
plt.hlines(y0+tc,x0+lc,x0+rc,color='c',alpha=.7,linewidth=.5,label='Outlier trim')
## Draws lines between vials
if self.debug: print('detector.view_ROI :: Drawing vial/bin lines')
if bin_lines:
for item in self.bin_lines[:-1]:
plt.vlines(self.x + item, y0, y1, color = 'w', alpha = .8, linewidth = 1)
plt.vlines(self.x + self.bin_lines[-1], y0, y1, color = 'w', alpha = .8,
linewidth = 1, label='Vial boundaries')
plt.legend()
plt.tight_layout()
return
def display_images(self, cropped_converted, background, subtracted, frame=0,**kwargs):
'''Generates a three-part figure for manipulated video frames:
1. Cropped, converted, and grayscaled frame
2. Null background (median pixel intensity across all indicated (blank_) frames)
3. Result of subplots 1 - 2 (background subtracted frame)
----
Inputs:
cropped_converted (nd-array): Cropped and converted nd-array
background (array): Null background array
subtracted (nd-array): Background subtracted nd-array
frame (int): Specific frame/slice of cropped_converted and subtracted
**kwargs: Arguments for plt.imshow
----
Returns:
None -- Generates a plot saved in step_3()
'''
if self.debug: print('detector.displaying_images: ',end='')
plt.figure(figsize = (6,8))
## Displaying the test frame image
plt.subplot(311)
if self.debug: print('| Cropped and converted |',end='')
plt.title('Cropped and converted, frame: %s' % str(frame))
plt.imshow(cropped_converted[frame], cmap = cm.Greys_r, **kwargs)
plt.ylabel('Pixels')
## Displaying the background image
plt.subplot(312)
if self.debug: print(' Background image |',end='')
plt.title('Background image')
plt.imshow(background, cmap = cm.Greys_r, **kwargs)
plt.ylabel('Pixels')
## Displaying the background subtracted, test frame image
plt.subplot(313)
if self.debug: print(' Subtracted background')
plt.title('Subtracted background')
plt.imshow(subtracted[frame], cmap = cm.Greys_r, **kwargs)
plt.xlabel('Pixels')
plt.ylabel('Pixels')
plt.tight_layout()
return
def image_metrics(self, spots, image, metric, colorbar=False, **kwargs):
'''Creates a plot with spot metrics placed over the video image
----
Inputs:
spots (DataFrame): DataFrame (df_big) containing the spots and their metrics
image (array): Image background for scatter point plot
metric (str): Spot metric to filter for
colorbar (bool): Include a color bar legend
**kwargs: Keyword arguments to use with plt.scatter
----
Returns:
None'''
if self.debug: print('detector.image_metrics')
## Create plot
plt.title(metric)
plt.imshow(image, cmap = cm.Greys_r)
plt.scatter(spots.x,spots.y, c = spots[metric], cmap = cm.coolwarm, **kwargs)
## Add in colorbar
if colorbar: plt.colorbar()
## Format plot
plt.ylim(self.h,0)
plt.xlim(0,self.w)
plt.tight_layout()
return
    def colored_hist(self, spots, metric, bins=40, predict_threshold=False, threshold=None):
        '''Creates a colored histogram to go with image_metrics, with optional
        threshold lines (fixed ecc/mass bounds, auto-predicted, and/or user-defined).
        ----
        Inputs:
            spots (DataFrame): DataFrame with spot metrics (df_big)
            metric (str): Spot metric to evaluate
            bins (int): Number of bins to include for histogram. Default = 40
            predict_threshold (bool): Will predict a threshold for 'signal' metric
            threshold (int): Filtering threshold
        ----
        Returns:
            None'''
        if self.debug: print('detector.colored_hist')
        ## Testing threshold input value -- non-numeric input is silently ignored
        try: threshold = int(threshold)
        except: pass
        ## Assembling histogram parameters
        set_cm = plt.cm.get_cmap('coolwarm')
        n, bin_assignments, patches = plt.hist(spots[metric],bins = bins)
        ## Color each bar by its bin center, normalized to [0, 1]
        bin_centers = 0.5 * (bin_assignments[:-1] + bin_assignments[1:])
        col = bin_centers - min(bin_centers)
        col /= max(col)
        ## Plotting by color
        for c, p in zip(col, patches):
            plt.setp(p, 'facecolor', set_cm(c))
        ## Getting height of vertical line
        ## NOTE(review): this recomputes the histogram np-side; the counts in `n`
        ## above could serve the same purpose
        y_max = np.histogram(spots[metric],bins=bins)[0].max()
        ## Plotting vertical line for eccentricity and mass filter bounds
        if metric == 'ecc': x_pos = [self.ecc_low,self.ecc_high]
        if metric == 'mass': x_pos = [self.minmass]
        if metric == 'ecc' or metric == 'mass': plt.vlines(x_pos,0,y_max,color = 'gray')
        ## Estimate auto-threshold (used for the 'signal' metric)
        if predict_threshold:
            _threshold = self.find_threshold(spots[metric],bins = bins)
            plt.vlines(_threshold,0,y_max,color = 'gray',label='Auto')
        ## Add in user-defined threshold vs. auto
        if isinstance(threshold, int) or isinstance(threshold, float):
            plt.vlines(threshold,0,y_max,color = 'k', label='User-defined')
            ## Legend only drawn when both auto and user thresholds are present
            if predict_threshold:
                plt.legend(frameon=False)
        ## Adding y-axis labels
        plt.ylabel("Counts")
        return
def spot_checker(self, spots, metrics=['signal'], image=None, **kwargs):
'''Generates figure containing subplots for image_metrics and colored_hist for
different spot metrics
----
Inputs:
spots (DataFrame): DataFrame with spot metrics (df_big)
metrics (list): List of the metrics to include when generating the plots
image(array): Background image for plots, default = clean_stack[0]
**kwargs: Keyword arguments to use with plt.imshow in image_metrics function
----
Returns:
None'''
if self.debug: print('detector.spot_checker')
## Setting up figure parameters
subplot = int(str(len(metrics)) + str(2) + str(0))
if image==None: image = self.clean_stack[0]
count = 0
plt.figure(figsize=(4+image.shape[1]/150,len(metrics)*2))
## Plotting each of the detector spot results over the image
for i in range(0,len(metrics)):
## Defining spot metric and whether to auto-threshold
col = metrics[i]
if col=='signal': predict = True
else: predict = False
## Drawing histogram, showing distribution of spots by metric
count += 1
plt.subplot(len(metrics),2,count)
plt.title('Histogram for: %s' % col)
plt.xlabel(col)
self.colored_hist(spots,metric=col, bins = 40,predict_threshold=predict)
## Drawing image plot, colored by metric
count += 1
plt.subplot(len(metrics),2,count)
plt.title('Spot overlay: %s' % col)
self.image_metrics(spots,image, metric=col,**kwargs)
plt.ylabel('Pixels')
if col=='signal':
plt.xlabel('Pixels')
plt.tight_layout()
return
def find_spots(self, stack, diameter = 3, quiet=True,**kwargs):
'''Locates the x,y-coordinates for all spots across frames
----
Inputs:
stack (nd-array): cropped, grayscaled, and background subtracted nd-array
diameter (int): Estimated diameter of a spot, odds only
quiet (bool): True silences the output
**kwargs: Keyword arguments to use with trackpy.batch
----
Returns:
spots (DataFrame): DataFrame containing all the spots from the TrackPy output
'''
if self.debug: print('detector.find_spots')
## Check diameter
diameter = int(diameter)
if ~diameter % 2: diameter = diameter + 1
## Option to silence output
if quiet: tp.quiet()
## Detect spots
spots = tp.batch(stack,diameter = diameter, **kwargs)
## Sorting DataFrame
spots = spots[spots.raw_mass > 0].sort_values(by='frame')
return spots
def particle_finder(self, invert=True, **kwargs):
'''Finds spots and formats the resulting DataFrame. Output can be used with TrackPy.
----
Inputs:
invert (bool): True if light background, False if dark background
**kwargs: Keyword arguments to use with trackpy.batch
----
Returns:
df (DataFrame): DataFrame containing spots and their metrics, becomes df_big'''
if self.debug: print('detector.particle_finder')
## Main spot detection function
df = self.find_spots(stack = self.spot_stack,
quiet=True,invert=True,
diameter=self.diameter,
minmass=self.minmass,
maxsize=self.maxsize)
## Catch if there are no spots detected
if df.shape[0] == 0:
print('!! Skipping video: No spots detected. Try modifying diameter, maxsize, or minmass parameters')
raise SystemExit
## Rounding detector outputs
df['x'] = df.x.round(2)
df['y'] = df.y.round(2)
df['t'] = [round(item/self.frame_rate,3) for item in df.frame]
df['mass'] = df['mass'].astype(int)
df['size'] = df.size.round(3)
df['ecc'] = df.ecc.round(3)
df['signal'] = df.signal.round(2)
df['raw_mass'] = df['mass'].astype(int)
df['ep'] = df.ep.round(1)
df['True_particle'] = np.repeat(True,df.shape[0])
return df
def find_threshold(self,x_array,bins=40):
'''Auto-generates a signal threshold by finding the local minimum between two local
maxima, or takes the average between 0 and the global maximum
----
Inputs:
x_array (list): list of all metric (signal) points to find a threshold for
bins (int): Number of bins to search across, default = 40.
----
Returns:
threshold (int): Auto-generated threshold
'''
if self.debug: print('detector.find_threshold')
## Looking at the distribution of spot metrics as a histogram
x_array = np.histogram(x_array,bins = bins)[0]
## Peak finding with SciPy.signal module
peaks = find_peaks(x_array)[0]
prominences = peak_prominences(x_array,peaks)
threshold = find_peaks(x_array, prominence=np.max(prominences))[0][0]
if self.debug: print(' Threshold =',threshold)
return threshold
def invert_y(self,spots):
'''Inverts spots along the y-axis. Important for converting spots indexed for an image to a plot.
----
Inputs:
spots (DataFrame): DataFrame containing a 'y' column
----
Returns:
inv_y (list): In-place list of the y-coordinates inverted'''
if self.debug: print('detector.invert_y')
## Inverts y-axis
inv_y = abs(spots.y - spots.y.max())
return inv_y
    def get_slopes(self):
        '''Creates a dictionary with keys for vials and values for the DataFrame sliced by
        vial. It will also calculate the local linear regression for each vial and
        returns the DataFrame containing all the slopes and linear regression statistics
        for that vial.
        ----
        Inputs (Imported from the detector object):
            df (DataFrame) : DataFrame sliced by vial
            vials (int) : Number of vials in video
            window (int) : Window size to calculate local linear regression
            vial_ID (str) : Vial-specific ID, taken from the first 'vial_id_vars' of naming convention
        ----
        Returns (Exported to the detector object):
            result (dict) : per-vial list [vial_ID, first_frame, last_frame,
                            slope, intercept, r, pval, err] from the local linear regression
            vial (dict) : Dictionary of DataFrames sliced by vial, keys are vials and values
                          are DataFrames'''
        if self.debug: print('detector.get_slopes')
        ## Create empty dictionaries
        self.vial,self.result = dict(),dict()
        ## Slicing DataFrame (df.filtered) by vial and assigning slices to dictionary keys (vials)
        ## Key self.vials + 1 holds the combined ('all') slice spanning every vial
        for i in range(1,self.vials + 2):
            try:
                ## Set dict key to '1' if only 1 vial, otherwise set dict key to vial number
                if self.vials == 1 or i == self.vials + 1: self.vial[i] = self.df_filtered
                else: self.vial[i] = self.df_filtered[self.df_filtered.vial==i]
                ## Setting the result to the result from the local linear regression
                ## (.iloc[0] takes the single best-window row that function returns)
                self.result[i] = self.local_linear_regression(self.vial[i]).iloc[0].tolist()
                ## Add vial_ID to the result
                if i == self.vials + 1: v = 'all'
                else: v = i
                ## Name vial_ID
                vial_ID = ['_'.join(self.vial_ID) + '_'+ str(v)]
                self.result[i] = vial_ID + self.result[i]
                ## Rounding results so they are more manageable and require less space.
                ## Indices 1:3 are the frame bounds (ints); 3: are the regression stats
                self.result[i][1:3] = [int(item) for item in self.result[i][1:3]]
                self.result[i][3:] = [round(item,4) for item in self.result[i][3:]]
            ## NOTE(review): this broad except hides *any* failure (missing column,
            ## empty slice, regression error) behind one generic warning
            except:
                print('Warning:: Could not process vial %s' % i)
        return
    def get_trim_lines(self,df,edge = 'top',sensitivity=1):
        '''Calculates spacial thresholds for cropping outlier points at the edge of window
        ----
        Inputs:
            df (DataFrame): DataFrame of all points to consider
            edge (str) {'top'|'bottom','left','right'}: Which edge to trim
            sensitivity (float): How sensitive to make the thresholding
        ----
        Returns:
            crop (float): Cutoff threshold'''
        if self.debug: print('detector.get_trim_lines ::')
        ## NOTE(review): this loop iterates the *characters* of the edge string, but
        ## every branch below tests 'edge' (not '_edge') and returns on the first
        ## pass for all four documented edge values -- the loop is effectively dead
        for _edge in edge:
            _list,diff_list = [],[]
            # Define axis: top/bottom trim along y, left/right along x
            if edge == 'top' or edge == 'bottom': axis = 'y'
            if edge == 'left' or edge == 'right': axis = 'x'
            # Define quantile starting value: low edges scan upward from the 0th
            # percentile, high edges scan the 96th-100th percentiles
            if edge == 'left' or edge == 'bottom': quant = 0
            if edge == 'right' or edge == 'top': quant = 0.96
            ## Find quantile boundaries (five 1%-spaced quantiles)
            for i in range(5):
                val = df[axis].quantile(quant + i * 0.01)
                _list.append(val)
            ## Get difference between quantile boundaries
            for i in range(4):
                diff_list.append(abs(_list[i]-_list[i+1]))
            ## Calculate boundary, cutoff, and thresholds
            ##   --boundary as most extreme value
            ##   --cutoff as median 0-4 or 95-99 percentiles x scalar (sensitivity)
            ##   --threshold as difference between boundary and cutoff
            ##   --crop as value to crop points at
            if 'right' in edge or 'top' in edge:
                boundary = _list[-1] # Max value
                cutoff = np.median(diff_list[:-1]) * sensitivity
                threshold = boundary - cutoff
                if cutoff < threshold: crop = boundary - cutoff
                else: crop = cutoff
                if self.debug: print('detector.get_trim_lines ::',edge,'@',crop)
                return crop
            if 'left' in edge or 'bottom' in edge:
                boundary = _list[0] # Min value
                cutoff = np.median(diff_list[1:]) * sensitivity
                threshold = boundary + cutoff
                ## NOTE(review): on this branch crop usually equals boundary (no
                ## trim), as the debug message below states
                if cutoff > threshold: crop = boundary + cutoff
                else: crop = boundary
                if self.debug: print('detector.get_trim_lines ::',edge,'@',crop,'(no crop)')
                return crop
def bin_vials(self, df, vials, percentage=1,top=False, bin_lines=None):
'''Bin spots into vials. Function takes into account all points along the x-axis,
and divides them into specified number of bins based on the min and max
points in the array.
----
Inputs:
df (DataFrame):
vials (int): Number of vials in video
Returns:
bin_lines (list): Binning intervals along the x-axis
spot_assignments (pd.Series): '''
if self.debug: print('detector.bin_vials')
## Bin vials, conditional for vial quantity
if vials == 1:
if type(bin_lines) == 'list': bin_lines = bin_lines
else: bin_lines = [df.x.min(),df.x.max()]
spot_assignments = np.repeat(1,df.shape[0])
else: ## More than 1 vial
if type(bin_lines) == 'list': bin_lines = bin_lines
else: bin_lines = pd.cut(df.x,vials,include_lowest=True,retbins=True)[1]
## Assign spots to vials
_labels = range(1,vials+1)
spot_assignments = pd.cut(df.x, bins=bin_lines, labels=_labels)
spot_assignments = pd.Series(spot_assignments).astype('int')
## Checks to make sure all vials have at least one spot. Important if a middle vial is absent or vials binned incorrectly.
counts = np.unique(spot_assignments, return_counts = True)
for v,c in zip(counts[0], counts[1]):
if c == 0:
print('Warning: vial',v,'is empty and cannot be evaluated')
return bin_lines, spot_assignments
    def local_linear_regression(self, df, method = 'max_r'):
        '''Performs a local linear regression, using a user-defined sliding window (self.window)
        ----
        Inputs:
            df (DataFrame): DataFrame containing all formatted points (df_filtered). 'y' needs to be converted from image indexing to plot indexing
            method (str): Two options: greatest regression coefficient (max_r) or lowest error (min_err)
        ----
        Returns:
            result (DataFrame): Single-row slice of a DataFrame corresponding with the results from the local linear regression
        '''
        if self.debug: print('detector.local_linear_regression')
        ## Defining empty variables
        result_list, result = [],pd.DataFrame()
        llr_columns = ['first_frame','last_frame','slope','intercept','r','pval','err']#,'count_llr','count_all']
        _count_all = np.median(df.groupby('frame').frame.count())
        ## Iterating through the window
        ## NOTE(review): check_variable_formats can make self.window a float
        ## (frames * 0.8), which would make range(frames) raise -- confirm upstream
        frames = (self.crop_n - self.crop_0) - self.window
        for i in range(frames):
            ## Defining search parameters for each iteration
            start, stop = int(i),int(i+self.window)
            df_window = df[(df.frame >= start) & (df.frame <= stop)]
            ## Testing if there are enough frames in the slice
            if df_window.groupby('frame').y.count().min() == 0:
                print('Issue with number of frames with flies detected vs. window size')
                print(i, i + self.window, len(df_window.frame.unique()))
                continue
            ## Performing local linear regression
            try:
                ## Grouping points by frame: mean y position per frame vs. frame number
                _frame = df_window.groupby('frame').frame.mean()
                _pos = df_window.groupby('frame').y.mean()
                _count_llr = np.median(df_window.groupby('frame').frame.count())
                ## Performing linear regression on subset and formatting output to list
                _result = linregress(_frame,_pos)
                _result = [start,stop] + np.hstack(_result).tolist() #+ [_count_llr,_count_all]
                ## If slope is not significantly different from 0, then set slope = 0
                ## (_result[-2] is the regression p-value)
                if _result[-2] >= 0.05: _result[2] = 0
            ## Have row of NaN if unable to process
            ## NOTE(review): this fallback row has 6 elements against 7 llr_columns;
            ## a failing window would produce a ragged DataFrame below -- confirm
            except: _result = [start,stop] + [np.nan,np.nan,np.nan,np.nan]
            # except: _result = [start,stop] + [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]
            ## Add results list to a list of lists
            result_list.append(_result)
        ## Assembles the list of lists into a DataFrame
        result = pd.DataFrame(data=result_list,columns=llr_columns)
        ## Filtering method: keep only the best window by the chosen criterion
        if method == 'max_r': result = result[result.r == result.r.max()]
        elif method == 'min_err': result = result[result.err == result.err.min()]
        else: print("Unrecognized method, chose either 'max_r' to select the window with the greatest R or 'min_err' to select the window with the lowest error")
        return result
def step_1(self, gui = False, grayscale = True):
'''Crops and formats the video, previously loaded during detector initialization.
----
Inputs:
gui (bool): True creates plots for detector optimization
grayscale (bool): True converts video array to grayscale
----
Returns:
None'''
print('-- [ Step 1 ] Cleaning and format image stack')
x,y = self.x,self.y
x_max, y_max = int(x + self.w),int(y + self.h)
stack = self.image_stack
if self.debug: print('detector.step_1 cropped and grayscale: grayscale image:', grayscale)
self.clean_stack = self.crop_and_grayscale(stack,
y=y, y_max=y_max,
x=x, x_max=x_max,
first_frame=self.crop_0,
last_frame=self.crop_n,
grayscale=grayscale)
## Confirm frame ranges
self.check_variable_formats()
if self.blank_0 < self.crop_0:
self.blank_0 = self.crop_0
if self.blank_n > self.crop_n:
self.blank_n = self.crop_n
if grayscale:
if self.debug: print('detector.step_1 cropped and grayscale: grayscale image')
self.clean_stack = self.crop_and_grayscale(stack,
y=y, y_max=y_max,
x=x, x_max=x_max,
first_frame=self.crop_0, last_frame=self.crop_n)
else:
if self.debug: print('detector.step_1 cropped and grayscale: no color image')
self.clean_stack = self.crop_and_grayscale(stack,
y=y, y_max=y_max,
x=x, x_max=x_max,
first_frame=self.crop_0, last_frame=self.crop_n, grayscale=False)
if self.debug: print('detector.step_1 cropped and grayscale dimensions: ', self.clean_stack.shape)
## Subtracts background to generate null background image and spot stack
self.spot_stack,self.background = self.subtract_background(video_array=self.clean_stack)
if self.debug: print('detector.step_1 spot_stack and null background created')
return
    def step_2(self):
        '''Performs spot detection and manipulates the resulting DataFrames.
        Populates self.df_big with the full TrackPy spot table for the video.'''
        print('-- [ Step 2 ] Identifying spots')
        ## Particle detection step -- thresholds come from the loaded configuration
        self.df_big = self.particle_finder(minmass=self.minmass,diameter=self.diameter,
                                            maxsize=self.maxsize, invert=True)
        if self.debug: print('    Identified %s spots' % self.df_big.shape[0])
        return
def step_3(self, gui = False):
'''Visualizes spot metrics
----
Inputs:
gui (bool): True creates plots for detector optimization
----
Returns:
None
'''
print('-- [ Step 3 ] Visualize spot metrics ::',gui)
if gui:
## Visualizes spot metrics on plot with accompanying color-coded histogram
self.spot_checker(self.df_big,metrics=['ecc','mass','signal'], alpha=.1)
plot_spot_check = self.name_nosuffix + '.spot_check.png'
plt.savefig(plot_spot_check,dpi=200)
print(' --> Saved:',plot_spot_check.split('/')[-1])
plt.close()
## Creating image plot with rectangle superimposed over first frame
plt.figure()
self.display_images(self.clean_stack,self.background,self.spot_stack,frame=20)
plt.tight_layout()
plot_name = self.name_nosuffix + '.processed.png'
plt.savefig(plot_name, dpi=100)
plt.close()
print(' --> Saved:',plot_name.split('/')[-1])
return
def step_4(self):
'''Filters and processes data detected points'''
## Assigning spots a True/False status based on ecc/eccentricity (circularity)
def ecc_filter(x,low=0,high=1):
'''Simple function for making a vector True/False depending on an upper and lower bound
----
Inputs:
x (numeric): value to perform operation on
low (numeric): Lower bound
high (numeric): Upper bound
Returns:
(bool): True/False'''
if x >= low and x <= high: return True
else: return False
print('-- [ Step 4a ] - Setting spot threshold')
## Auto-detecting threshold
if self.threshold == 'auto': self.threshold = self.find_threshold(self.df_big.signal)
print('-- [ Step 4b ] - Filtering by signal threshold')
## Assigning spots a True/False status based on signal threshold
self.df_big['True_particle'] = [x >= self.threshold for x in self.df_big.signal]
t_or_f = np.unique(self.df_big.True_particle, return_counts=True)
if self.debug: print(' True (%s) and False (%s) spots' % (t_or_f[0],t_or_f[1]))
print('-- [ Step 4c ] - Filtering by eccentricity/circularity')
self.df_big.loc[self.df_big.True_particle == True,'True_particle'] = self.df_big[self.df_big.True_particle==True].ecc.map(lambda x: ecc_filter(x,low=self.ecc_low,high=self.ecc_high))
t_or_f = np.unique(self.df_big.True_particle, return_counts=True)
if self.debug: print(' True (%s) and False (%s) spots'%(t_or_f[0],t_or_f[1]))
## Checking to confirm DataFrame is not empty after filtering
if self.df_big[self.df_big.True_particle].shape[0] == 0:
print('\n\n!! No spots post-filtering, check detector and background subtraction settings for proper optimization')
raise SystemExit
## Pruning errant points on periphery if outliers
print('-- [ Step 4d ] - Trimming outliers (if indicated)')
if self.trim_outliers:
self.left_crop = self.get_trim_lines(self.df_big,edge='left',sensitivity = self.outlier_LR)
self.right_crop = self.get_trim_lines(self.df_big,edge='right',sensitivity = self.outlier_LR)
self.top_crop = self.get_trim_lines(self.df_big,edge='top',sensitivity = self.outlier_TB)
self.bottom_crop = self.get_trim_lines(self.df_big,edge='bottom',sensitivity = self.outlier_TB)
self.df_big = self.df_big[(self.df_big.x >= self.left_crop) & (self.df_big.x <= self.right_crop) &
(self.df_big.y <= self.top_crop) & (self.df_big.y >= self.bottom_crop)]
## Assigning spots to vials, 0 if False AND outside of the True point range
print('-- [ Step 4e ] - Assigning spots to vials')
self.bin_lines, self.df_big.loc[self.df_big['True_particle'],'vial'] = self.bin_vials(self.df_big[self.df_big.True_particle],vials = self.vials)
########################################
## Beginning of publication insert
if publication:
df=self.df_big
self.bin_lines = self.bin_vials(df[df.y > 120], vials= self.vials)[0]
df['vial'] = np.repeat(0,df.shape[0])
vial_assignments = self.bin_vials(df, vials = self.vials, bin_lines = self.bin_lines)[1]
df.loc[(df.x >= self.bin_lines[0]) & (df.x <= self.bin_lines[-1]),'vial'] = vial_assignments
self.df_big = df
## End of publication insert
########################################
self.df_big.loc[self.df_big['True_particle']==False,'vial'] = 0
print('-- [ Step 4f ] - Saving raw data file')
## Saving the TrackPy results, plus filter and vial notations
self.df_big.to_csv(self.path_data, index=None)
print(' --> Saved:',self.path_data.split('/')[-1])
return
def step_5(self):
'''Calculates local linear regressions'''
print('-- [ step 5 ] Setting up DataFrames for local linear regression')
## Filtering spots and pruning unnecessary columns
self.df_filtered = self.df_big[(self.df_big.True_particle) & (self.df_big.vial != 0)]
if self.debug: print('self.df_filtered.shape:',self.df_filtered.shape)
self.df_filtered = self.df_filtered.drop(['ecc','signal','ep','raw_mass','mass','size','True_particle'],axis=1)
self.df_filtered = self.df_filtered.sort_values(by=['vial','frame','y','x'])
## Adding experimental details to DataFrame
self.specify_paths_details(self.video_file)
## Filling in experimental details to DataFrame
for item in self.file_details.keys():
self.df_filtered[item] = np.repeat(self.file_details[item],self.df_filtered.shape[0])
## Invert y-axis -- images indexed upper left to lower right but converting because plots got left left to upper right
self.df_filtered['y'] = self.invert_y(self.df_filtered)
self.df_filtered['y'] = self.df_filtered.y.round(2)
## Convert vial assignments from float to int
self.df_filtered['vial'] = self.df_filtered['vial'].astype('int')
## Save the filtered DataFrame
path_filtered = self.name_nosuffix+'.filtered.csv'
self.df_filtered.to_csv(self.path_filtered, index=False)
print(' --> Saved:',self.path_filtered.split('/')[-1])
return
def step_6(self,gui=False):
'''Creating diagnostic plots to visualize spots at beginning & end of most linear
section, throughout the video, and a vertical velocity plot for each vial.
----
Inputs:
gui (bool): True creates plots for detector optimization
----
Returns:
None'''
print('-- [ step 6a ] Visualize spot metrics ::',gui)
## Check window size is not greater than video length
video_length = self.crop_n - self.crop_0
if self.window > video_length:
print('!! Issue with window size > video length: was %s, now %s' % (self.window, video_length-1))
self.window = video_length - 1
if gui:
## Visualizes the region of interest
plt.figure()
self.view_ROI(border = True,
x0 = self.x, x1 = self.x + self.w,
y0 = self.y, y1 = self.y + self.h,
bin_lines = True)
plot_roi = self.name_nosuffix + '.ROI.png'
plt.savefig(plot_roi,dpi=100)
print(' --> Saved:',plot_roi.split('/')[-1])
plt.close()
print('-- [ step 6b ] Creating diagnostic plot file')
## Set up plots
plt.figure(figsize=(10,8))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
## Finding the frames that flank the most linear portion
## of the y vs. t curve for all points, not just by vials
if self.debug: print('-- [ step 6b ] Plotting data: Re-running local linear regression on all')
_result = self.local_linear_regression(self.df_filtered)
begin = _result.iloc[0].first_frame.astype(int)
end = _result.iloc[0].last_frame.astype(int)
## For future release
# min_R = _result.iloc[0].r_value ##
## Need only True spots, but not inverted -- df_big
spots = self.df_big[self.df_big.True_particle]
## Creating the diagnostic plot
print('-- [ step 6b1] Plotting image plots with overlaying points')
if self.debug: print("-- [ step 6b1] Plotting data: Plot 1 - Frame %s" % begin)
self.image_plot(df = spots,frame = begin, ax=ax1)
if self.debug: print("-- [ step 6b1] Plotting data: Plot 2 - Frame %s" % end)
self.image_plot(df = spots, ax=ax2, frame = int(str(end)))
if self.debug: print("-- [ step 6b1] Plotting data: Plot 3 - Frame ALL")
self.image_plot(df = spots,ax=ax4, frame = None)
print('-- [ step 6b2] Performing local linear regression')
self.get_slopes()
print('-- [ step 6b3] Plotting local linear regression results')
self.loclin_plot(ax=ax3)
## Saving diagnostic plot
fig.tight_layout()
plt.savefig(self.path_diagnostic,dpi=300, transparent = True)
# plt.savefig(self.path_project + '/diagnostic_plots/' + self.name + '.diagnostic.png',dpi=100, transparent = True)
## Future release
# if min_R < self.review_R:
# plt.savefig(self.path_review_diagnostic,dpi=100, transparent = True)
plt.close()
plt.close()
print(' --> Saved:',self.path_diagnostic.split('/')[-1])
return
def step_7(self):
'''Writing the video's slope file'''
print('-- [ step 7 ] Setting up slopes file')
slope_columns = ['vial_ID','first_frame','last_frame','slope','intercept','r_value','p_value','std_err']#,'count_llr','count_all']
## Converting dictionary of local linear regressions into a DataFrame
self.df_slopes = pd.DataFrame.from_dict(self.result,orient='index',columns = slope_columns)
## Applying conversion factor if indicated, if not it will just be '1'
self.df_slopes['slope'] = self.df_slopes.slope.transform(lambda x: x * (self.conversion_factor)).round(4)
## Adding in experimental details from naming convention into the slopes DataFrame
for item in self.file_details.keys():
self.df_slopes[item] = np.repeat(self.file_details[item],self.df_slopes.shape[0])
## Specifying column names
slope_columns = [item for item in self.file_details.keys()] + slope_columns
self.df_slopes = self.df_slopes[slope_columns]
## Saving slope file
self.df_slopes.to_csv(self.path_slope,index=False)
print(' --> Saved: %s \n' % self.path_slope.split('/')[-1])
plt.close('all')
print(self.df_slopes[['vial_ID','slope','r_value']])
print('\n')
return
def image_plot(self,df,frame=None,ax=None,ylim=[0,1000]):
'''Image subplot for the diagnostic plot
----
Inputs:
df (DataFrame): DataFrame containing all spots
frame (int): Frame to slice df
ax (int): plot coordinate
ylim (2-item list): y-limits
----
Returns:
ax (object): matplotlib object containing plot'''
if self.debug: print('detector.image_plot')
## Get frame number
try: frame = int(frame)
except: frame = None
## Assign plotting parameters depending on which frame(s)
if type(frame) == int and frame in df.frame.unique():
df = df[(df.frame == frame)]
alpha = .25
title = "Frame: %s" % frame
ax.set_title(title)
elif frame == None:
frame = 0
alpha = 0.01
title = 'All x,y-points throughout video'
ax.set_title(title)
elif type(frame) == int and not frame in df.frame.unique():
print('Error: input frame is not accounted for in current DataFrame')
else:
print("Chose a frame value in integer form, or 'None'")
## Issue with plotting if last frame in stack
if frame == self.n_frames: image = self.clean_stack[frame-1]
else: image = self.clean_stack[frame]
## Plotting image
ax.imshow(image,cmap=cm.Greys_r,origin='upper')
ax.set_ylim(self.h,0)
ax.set_xlim(0,self.w)
## Plotting vertical bin lines
ax.vlines(self.bin_lines,0,image.shape[0],alpha = .3)
## Coloring spots by vial
df = df.sort_values(by='vial')
df = df[df.vial != 0]
if self.vials >= 1:
ax.scatter(df.x, df.y,
s = 30,
alpha = alpha,
c = df['vial'],
cmap=self.vial_color_map)
## Getting rid of axis labels
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
return ax
def loclin_plot(self,ax = None):
'''Local linear regression plot: mean y-position vs. frame or time. Colored by vial
and bolded for the most linear section
----
Inputs:
ax (int): plot coordinate
----
Returns:
None'''
if self.debug: print('detector.loclin_plot')
def two_plot(df,vial,label,first,last,ax=None):
'''Adds the bolded flair to the local linear regression plot'''
if self.debug: print('detector.two_plot')
## All points
x = df.groupby('frame').frame.mean()
y = df.groupby('frame').y.mean()
## Convert to cm / sec
if self.convert_to_cm_sec:
x = x / self.frame_rate
y = y / self.pixel_to_cm
ax.plot(x,y,
alpha = .35,
color = self.color_list[vial-1],
label='')
## Only points in the most linear segment
df = df[(df.frame >= first) & (df.frame <= last)]
x = df.groupby('frame').frame.mean()
y = df.groupby('frame').y.mean()
## Convert to cm / sec
if self.convert_to_cm_sec:
x = x / self.frame_rate
y = y / self.pixel_to_cm
ax.plot(x,y,
color = self.color_list[vial-1],
label = label)
return
## Plotting multiple vials' data
if len(self.result) > 1:
for V in range(1,self.vials+1):
l = 'Vial '+str(V)
c = self.color_list[V-1]
_details = self.result[V]
frames = _details[1],_details[2]
two_plot(self.vial[V],
vial = V,
label = l,
first = _details[1],
last = _details[2],
ax=ax)
## Add on labels and legends
ax.legend(loc=2, frameon=False, fontsize='x-small')
label_y,label_x = 'pixels','Frames'
if self.convert_to_cm_sec:
label_x = 'Seconds'
label_y = 'cm'
title = 'Cohort climbing kinematics'
label_y = 'Mean y-position (%s)' % label_y
ax.set_ylim(0,ax.get_ylim()[1])
ax.set(title=title,xlabel = label_x,ylabel=label_y)
if ax == None: return
else: return ax
## Parameter testing is only used in the GUI
    def parameter_testing(self, variables, axes):
        '''Parameter testing in the GUI and done separately to account for plots with wx.

        Runs the full pipeline (steps 1-7) with the supplied GUI variables, then
        redraws the six GUI subplots in `axes`: background image, annotated test
        frame, per-vial regression traces, mass/signal histograms, and fly counts.
        ----
        Inputs:
          variables: GUI settings consumed by self.load_for_gui
          axes: indexable collection of at least six matplotlib axes
        ----
        Returns:
          None'''
        if self.debug: print('detector.parameter_testing')
        ## Running through the first few steps
        self.load_for_gui(variables)
        ## Load in video
        self.step_1(gui=True) # Crop and convert video
        ## Detect spots
        self.step_2()
        ## First optimization plot
        self.step_3(gui=True)
        ## Filters DataFrame of detected spots
        self.step_4()
        ## Executing final steps
        self.step_5()
        self.step_6(gui=True)
        self.step_7()
        #### Working through the GUI plots
        ## Setting plot (upper left) for background image
        if self.debug: print('detector.parameter_testing: Subplot 0: Background image')
        axes[0].set_title("Background Image")
        axes[0].imshow(self.background,cmap=cm.Greys_r)
        axes[0].set_xlim(0,self.w)
        axes[0].set_ylim(self.h,0)
        ## Invisible scatter pins the axis limits to the full ROI extent
        axes[0].scatter([0,self.w],[0,self.h],alpha=0,marker='.')
        ## Slice df_big into true vs. false spots
        if self.debug: print('detector.parameter_testing: Slicing DataFrames')
        spots_false = self.df_big[~self.df_big['True_particle']]
        spots_true = self.df_big[self.df_big['True_particle']]
        ## Binning and coloring spots
        ## NOTE(review): spots_true is a slice of df_big; the assignments below
        ## are chained writes on that slice (pandas SettingWithCopy territory) --
        ## they appear intentional here but confirm they need not propagate back
        # bin_lines,spots_true['vial'] = self.bin_vials(spots_true,vials = self.vials)
        # spots_true = spots_true[(spots_true.x >= self.bin_lines.min()) & (spots_true.x <= self.bin_lines.max())]
        spots_true['vial'] = np.repeat(0,spots_true.shape[0])
        vial_assignments = self.bin_vials(spots_true, vials = self.vials, bin_lines = self.bin_lines)[1]
        spots_true.loc[(spots_true.x >= self.bin_lines[0]) & (spots_true.x <= self.bin_lines[-1]),'vial'] = vial_assignments
        spots_true.loc[:,'color'] = spots_true.vial.map(dict(zip(range(1,self.vials+1), self.color_list)))
        bins=40
        ## Setting plots for scatterplot overlay on a selected frame
        if self.debug: print('detector.parameter_testing: Subplot 1: Test frame')
        axes[1].set_title('Frame: '+str(self.check_frame))
        axes[1].imshow(self.clean_stack[self.check_frame], cmap = cm.Greys_r)
        ## Rejected spots as blue '+', accepted spots as vial-colored rings
        axes[1].scatter(spots_false[(spots_false.frame==self.check_frame)].x,
                        spots_false[(spots_false.frame==self.check_frame)].y,
                        color = 'b',marker ='+',alpha = .5)
        a = axes[1].scatter(spots_true[spots_true.frame==self.check_frame].x,
                        spots_true[spots_true.frame==self.check_frame].y,
                        c = spots_true[spots_true.frame==self.check_frame].vial,
                        cmap = self.vial_color_map,
                        marker ='o',alpha = .8)
        a.set_facecolor('none')
        axes[1].vlines(self.bin_lines,0,self.df_big.y.max(),color='w')
        axes[1].set_xlim(0,self.w)
        axes[1].set_ylim(self.h,0)
        ##########
        ## Fly counts
        # axes[5].plot(spots_true.groupby('frame').frame.mean(),
        #                 spots_true.groupby('frame').frame.count(),
        #                 label = 'Fly count', color = 'g',alpha = .5)
        #
        # axes[5].hlines(np.median(spots_true.groupby('frame').frame.count()),
        #                 self.df_big.frame.min(),self.df_big.frame.max(),
        #                 linestyle = '--',alpha = .5,
        #                 color = 'gray', label = 'Median no. flies')
        # axes[5].set(title = 'Flies per frame',
        #             ylabel='Flies detected',
        #             xlabel='Frame')
        # axes[5].legend(frameon=False, fontsize = 'small')
        ##########
        ## Fly-count subplot: flies detected per frame, per vial
        df = self.df_filtered.sort_values(by='frame')
        for V in range(1,self.vials + 1):
            color = self.color_list[V-1]
            _df = df[df.vial == V]
            ## Local linear regression
            begin = self.local_linear_regression(_df).iloc[0].first_frame.astype(int)
            end = begin + self.window
            ## Plotting all points
            axes[5].plot(_df.groupby('frame').frame.unique(),
                        _df.groupby('frame').y.count(),alpha = .3, color = color,label='')
            ## Plotting most linear points
            _df = _df[(_df.frame >= begin) & (_df.frame <= end)]
            axes[5].plot(_df.groupby('frame').frame.unique(),
                        _df.groupby('frame').frame.count() ,color = color, alpha = .5)
            axes[5].hlines(np.median(_df.groupby('frame').frame.count()),
                        df.frame.min(),df.frame.max(),
                        linestyle = '--',alpha = .7,
                        color = color)
        # Deciding number of columns for legend
        if self.vials > 10: ncol = 3
        elif self.vials > 5: ncol = 2
        else: ncol=1
        ## Setting labels
        label_y,label_x = '(pixels)','Frames'
        if self.convert_to_cm_sec:
            label_x,label_y = 'Seconds','(cm)'
        labels = ['Flies detected per frame','Flies detected','Frame']
        axes[5].set(title = labels[0], ylabel=labels[1],xlabel=labels[2])
        ## NOTE(review): _df here is whichever vial the loop above ended on
        axes[5].set_ylim(ymin = 0,ymax = np.max(_df.groupby('frame').frame.count())*1.2)
        # axes[5].legend(frameon=False, fontsize='x-small', ncol=ncol)
        custom_lines = [Line2D([0], [0], color='k', linestyle = '--', alpha = .9),
                        Line2D([0], [0], color='k', linestyle = '-', alpha = .5)]
        custom_labels = ['Median', 'All frames']
        axes[5].legend(custom_lines, custom_labels,frameon=False, fontsize='x-small', ncol=ncol)
        #############
        ## Mass histogram
        axes[3].set_title('Mass Distribution')
        axes[3].hist(self.df_big.mass,bins = bins)
        y_max = np.histogram(self.df_big.mass,bins=bins)[0].max()
        axes[3].vlines(self.minmass,0,y_max)
        ## Signal histogram
        axes[4].set_title('Signal Distribution')
        axes[4].hist(self.df_big.signal,bins = bins)
        y_max = np.histogram(self.df_big.signal,bins=bins)[0].max()
        axes[4].vlines(self.threshold,0,y_max)
        ## Calculating local linear regression
        # self.step_5()
        ## Setting plots for local linear regression
        df = self.df_filtered.sort_values(by='frame')
        ## Converting to cm per sec if specified
        convert_x,convert_y = 1,1
        if self.convert_to_cm_sec:
            convert_x,convert_y = self.frame_rate,self.pixel_to_cm
        ## LocLin plot for each vial
        for V in range(1,self.vials + 1):
            label = 'Vial '+str(V)
            color = self.color_list[V-1]
            _df = df[df.vial == V]
            ## Local linear regression
            begin = self.local_linear_regression(_df).iloc[0].first_frame.astype(int)
            end = begin + self.window
            ## Plotting all points
            axes[2].plot(_df.groupby('frame').frame.mean() / convert_x,
                        _df.groupby('frame').y.mean() / convert_y,alpha = .35, color = color,label='')
            ## Plotting most linear points
            _df = _df[(_df.frame >= begin) & (_df.frame <= end)]
            axes[2].plot(_df.groupby('frame').frame.mean() / convert_x,
                        _df.groupby('frame').y.mean() / convert_y,color = color, label = label)
        # Deciding number of columns for legend
        if self.vials > 10: ncol = 3
        elif self.vials > 5: ncol = 2
        else: ncol=1
        ## Setting labels
        label_y,label_x = '(pixels)','Frames'
        if self.convert_to_cm_sec:
            label_x,label_y = 'Seconds','(cm)'
        labels = ['Mean vertical position over time','Mean y-position %s' % label_y,label_x]
        axes[2].set(title = labels[0], ylabel=labels[1],xlabel=labels[2])
        axes[2].legend(frameon=False, fontsize='x-small', ncol=ncol)
return | [
"matplotlib.pyplot.title",
"trackpy.quiet",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.histogram",
"scipy.signal.peak_prominences",
"scipy.signal.find_peaks",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.unique",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame",
"matplotl... | [((5762, 5788), 'os.path.isfile', 'os.path.isfile', (['video_file'], {}), '(video_file)\n', (5776, 5788), False, 'import os\n'), ((6423, 6448), 'os.path.split', 'os.path.split', (['video_file'], {}), '(video_file)\n', (6436, 6448), False, 'import os\n'), ((15495, 15531), 'numpy.subtract', 'np.subtract', (['video_array', 'background'], {}), '(video_array, background)\n', (15506, 15531), True, 'import numpy as np\n'), ((16929, 16973), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': 'cm.Greys_r'}), '(image, cmap=cm.Greys_r, **kwargs)\n', (16939, 16973), True, 'import matplotlib.pyplot as plt\n'), ((18512, 18524), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18522, 18524), True, 'import matplotlib.pyplot as plt\n'), ((18533, 18551), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18549, 18551), True, 'import matplotlib.pyplot as plt\n'), ((19457, 19483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (19467, 19483), True, 'import matplotlib.pyplot as plt\n'), ((19541, 19557), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (19552, 19557), True, 'import matplotlib.pyplot as plt\n'), ((19698, 19761), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cropped_converted[frame]'], {'cmap': 'cm.Greys_r'}), '(cropped_converted[frame], cmap=cm.Greys_r, **kwargs)\n', (19708, 19761), True, 'import matplotlib.pyplot as plt\n'), ((19772, 19792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixels"""'], {}), "('Pixels')\n", (19782, 19792), True, 'import matplotlib.pyplot as plt\n'), ((19845, 19861), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (19856, 19861), True, 'import matplotlib.pyplot as plt\n'), ((19929, 19958), 'matplotlib.pyplot.title', 'plt.title', (['"""Background image"""'], {}), "('Background image')\n", (19938, 19958), True, 'import matplotlib.pyplot as plt\n'), ((19967, 20016), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['background'], {'cmap': 'cm.Greys_r'}), '(background, cmap=cm.Greys_r, **kwargs)\n', (19977, 20016), True, 'import matplotlib.pyplot as plt\n'), ((20027, 20047), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixels"""'], {}), "('Pixels')\n", (20037, 20047), True, 'import matplotlib.pyplot as plt\n'), ((20123, 20139), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (20134, 20139), True, 'import matplotlib.pyplot as plt\n'), ((20203, 20237), 'matplotlib.pyplot.title', 'plt.title', (['"""Subtracted background"""'], {}), "('Subtracted background')\n", (20212, 20237), True, 'import matplotlib.pyplot as plt\n'), ((20246, 20302), 'matplotlib.pyplot.imshow', 'plt.imshow', (['subtracted[frame]'], {'cmap': 'cm.Greys_r'}), '(subtracted[frame], cmap=cm.Greys_r, **kwargs)\n', (20256, 20302), True, 'import matplotlib.pyplot as plt\n'), ((20313, 20333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixels"""'], {}), "('Pixels')\n", (20323, 20333), True, 'import matplotlib.pyplot as plt\n'), ((20342, 20362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixels"""'], {}), "('Pixels')\n", (20352, 20362), True, 'import matplotlib.pyplot as plt\n'), ((20372, 20390), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20388, 20390), True, 'import matplotlib.pyplot as plt\n'), ((21038, 21055), 'matplotlib.pyplot.title', 'plt.title', (['metric'], {}), '(metric)\n', (21047, 21055), True, 'import matplotlib.pyplot as plt\n'), ((21064, 21098), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': 'cm.Greys_r'}), '(image, cmap=cm.Greys_r)\n', (21074, 21098), True, 'import matplotlib.pyplot as plt\n'), ((21109, 21183), 'matplotlib.pyplot.scatter', 'plt.scatter', (['spots.x', 'spots.y'], {'c': 'spots[metric]', 'cmap': 'cm.coolwarm'}), '(spots.x, spots.y, c=spots[metric], cmap=cm.coolwarm, **kwargs)\n', (21120, 21183), True, 'import matplotlib.pyplot as plt\n'), ((21299, 21318), 'matplotlib.pyplot.ylim', 'plt.ylim', 
(['self.h', '(0)'], {}), '(self.h, 0)\n', (21307, 21318), True, 'import matplotlib.pyplot as plt\n'), ((21326, 21345), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'self.w'], {}), '(0, self.w)\n', (21334, 21345), True, 'import matplotlib.pyplot as plt\n'), ((21353, 21371), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21369, 21371), True, 'import matplotlib.pyplot as plt\n'), ((22179, 22206), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (22194, 22206), True, 'import matplotlib.pyplot as plt\n'), ((22245, 22279), 'matplotlib.pyplot.hist', 'plt.hist', (['spots[metric]'], {'bins': 'bins'}), '(spots[metric], bins=bins)\n', (22253, 22279), True, 'import matplotlib.pyplot as plt\n'), ((23466, 23486), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (23476, 23486), True, 'import matplotlib.pyplot as plt\n'), ((25327, 25345), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25343, 25345), True, 'import matplotlib.pyplot as plt\n'), ((26195, 26239), 'trackpy.batch', 'tp.batch', (['stack'], {'diameter': 'diameter'}), '(stack, diameter=diameter, **kwargs)\n', (26203, 26239), True, 'import trackpy as tp\n'), ((27853, 27881), 'numpy.repeat', 'np.repeat', (['(True)', 'df.shape[0]'], {}), '(True, df.shape[0])\n', (27862, 27881), True, 'import numpy as np\n'), ((28696, 28728), 'scipy.signal.peak_prominences', 'peak_prominences', (['x_array', 'peaks'], {}), '(x_array, peaks)\n', (28712, 28728), False, 'from scipy.signal import find_peaks, peak_prominences\n'), ((38519, 38570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'result_list', 'columns': 'llr_columns'}), '(data=result_list, columns=llr_columns)\n', (38531, 38570), True, 'import pandas as pd\n'), ((43902, 43958), 'numpy.unique', 'np.unique', (['self.df_big.True_particle'], {'return_counts': '(True)'}), '(self.df_big.True_particle, return_counts=True)\n', (43911, 43958), True, 'import 
numpy as np\n'), ((44357, 44413), 'numpy.unique', 'np.unique', (['self.df_big.True_particle'], {'return_counts': '(True)'}), '(self.df_big.True_particle, return_counts=True)\n', (44366, 44413), True, 'import numpy as np\n'), ((49848, 49875), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (49858, 49875), True, 'import matplotlib.pyplot as plt\n'), ((49915, 49945), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (49927, 49945), True, 'import matplotlib.pyplot as plt\n'), ((51403, 51463), 'matplotlib.pyplot.savefig', 'plt.savefig', (['self.path_diagnostic'], {'dpi': '(300)', 'transparent': '(True)'}), '(self.path_diagnostic, dpi=300, transparent=True)\n', (51414, 51463), True, 'import matplotlib.pyplot as plt\n'), ((51764, 51775), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (51773, 51775), True, 'import matplotlib.pyplot as plt\n'), ((51784, 51795), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (51793, 51795), True, 'import matplotlib.pyplot as plt\n'), ((52273, 52347), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['self.result'], {'orient': '"""index"""', 'columns': 'slope_columns'}), "(self.result, orient='index', columns=slope_columns)\n", (52295, 52347), True, 'import pandas as pd\n'), ((53141, 53157), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (53150, 53157), True, 'import matplotlib.pyplot as plt\n'), ((59628, 59661), 'numpy.repeat', 'np.repeat', (['(0)', 'spots_true.shape[0]'], {}), '(0, spots_true.shape[0])\n', (59637, 59661), True, 'import numpy as np\n'), ((7156, 7196), 'os.path.join', 'os.path.join', (['folder', "(self.name + '.cfg')"], {}), "(folder, self.name + '.cfg')\n", (7168, 7196), False, 'import os\n'), ((17258, 17304), 'matplotlib.pyplot.hlines', 'plt.hlines', (['y0', 'x0', 'x1'], {'color': 'color', 'alpha': '(0.7)'}), '(y0, x0, x1, color=color, alpha=0.7)\n', (17268, 
17304), True, 'import matplotlib.pyplot as plt\n'), ((17318, 17364), 'matplotlib.pyplot.hlines', 'plt.hlines', (['y1', 'x0', 'x1'], {'color': 'color', 'alpha': '(0.7)'}), '(y1, x0, x1, color=color, alpha=0.7)\n', (17328, 17364), True, 'import matplotlib.pyplot as plt\n'), ((17378, 17424), 'matplotlib.pyplot.vlines', 'plt.vlines', (['x0', 'y0', 'y1'], {'color': 'color', 'alpha': '(0.7)'}), '(x0, y0, y1, color=color, alpha=0.7)\n', (17388, 17424), True, 'import matplotlib.pyplot as plt\n'), ((17438, 17497), 'matplotlib.pyplot.vlines', 'plt.vlines', (['x1', 'y0', 'y1'], {'color': 'color', 'alpha': '(0.7)', 'label': '"""ROI"""'}), "(x1, y0, y1, color=color, alpha=0.7, label='ROI')\n", (17448, 17497), True, 'import matplotlib.pyplot as plt\n'), ((17778, 17852), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(x0 + lc)', '(y0 + bc)', '(y0 + tc)'], {'color': '"""c"""', 'alpha': '(0.7)', 'linewidth': '(0.5)'}), "(x0 + lc, y0 + bc, y0 + tc, color='c', alpha=0.7, linewidth=0.5)\n", (17788, 17852), True, 'import matplotlib.pyplot as plt\n'), ((17852, 17926), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(x0 + rc)', '(y0 + bc)', '(y0 + tc)'], {'color': '"""c"""', 'alpha': '(0.7)', 'linewidth': '(0.5)'}), "(x0 + rc, y0 + bc, y0 + tc, color='c', alpha=0.7, linewidth=0.5)\n", (17862, 17926), True, 'import matplotlib.pyplot as plt\n'), ((17926, 18000), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(y0 + bc)', '(x0 + lc)', '(x0 + rc)'], {'color': '"""c"""', 'alpha': '(0.7)', 'linewidth': '(0.5)'}), "(y0 + bc, x0 + lc, x0 + rc, color='c', alpha=0.7, linewidth=0.5)\n", (17936, 18000), True, 'import matplotlib.pyplot as plt\n'), ((18000, 18100), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(y0 + tc)', '(x0 + lc)', '(x0 + rc)'], {'color': '"""c"""', 'alpha': '(0.7)', 'linewidth': '(0.5)', 'label': '"""Outlier trim"""'}), "(y0 + tc, x0 + lc, x0 + rc, color='c', alpha=0.7, linewidth=0.5,\n label='Outlier trim')\n", (18010, 18100), True, 'import matplotlib.pyplot as plt\n'), ((18366, 18477), 
'matplotlib.pyplot.vlines', 'plt.vlines', (['(self.x + self.bin_lines[-1])', 'y0', 'y1'], {'color': '"""w"""', 'alpha': '(0.8)', 'linewidth': '(1)', 'label': '"""Vial boundaries"""'}), "(self.x + self.bin_lines[-1], y0, y1, color='w', alpha=0.8,\n linewidth=1, label='Vial boundaries')\n", (18376, 18477), True, 'import matplotlib.pyplot as plt\n'), ((21244, 21258), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (21256, 21258), True, 'import matplotlib.pyplot as plt\n'), ((22881, 22922), 'matplotlib.pyplot.vlines', 'plt.vlines', (['x_pos', '(0)', 'y_max'], {'color': '"""gray"""'}), "(x_pos, 0, y_max, color='gray')\n", (22891, 22922), True, 'import matplotlib.pyplot as plt\n'), ((23084, 23144), 'matplotlib.pyplot.vlines', 'plt.vlines', (['_threshold', '(0)', 'y_max'], {'color': '"""gray"""', 'label': '"""Auto"""'}), "(_threshold, 0, y_max, color='gray', label='Auto')\n", (23094, 23144), True, 'import matplotlib.pyplot as plt\n'), ((23277, 23341), 'matplotlib.pyplot.vlines', 'plt.vlines', (['threshold', '(0)', 'y_max'], {'color': '"""k"""', 'label': '"""User-defined"""'}), "(threshold, 0, y_max, color='k', label='User-defined')\n", (23287, 23341), True, 'import matplotlib.pyplot as plt\n'), ((24828, 24864), 'matplotlib.pyplot.title', 'plt.title', (["('Histogram for: %s' % col)"], {}), "('Histogram for: %s' % col)\n", (24837, 24864), True, 'import matplotlib.pyplot as plt\n'), ((24877, 24892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['col'], {}), '(col)\n', (24887, 24892), True, 'import matplotlib.pyplot as plt\n'), ((25117, 25152), 'matplotlib.pyplot.title', 'plt.title', (["('Spot overlay: %s' % col)"], {}), "('Spot overlay: %s' % col)\n", (25126, 25152), True, 'import matplotlib.pyplot as plt\n'), ((25230, 25250), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixels"""'], {}), "('Pixels')\n", (25240, 25250), True, 'import matplotlib.pyplot as plt\n'), ((26139, 26149), 'trackpy.quiet', 'tp.quiet', ([], {}), '()\n', (26147, 26149), True, 'import 
trackpy as tp\n'), ((28540, 28572), 'numpy.histogram', 'np.histogram', (['x_array'], {'bins': 'bins'}), '(x_array, bins=bins)\n', (28552, 28572), True, 'import numpy as np\n'), ((28651, 28670), 'scipy.signal.find_peaks', 'find_peaks', (['x_array'], {}), '(x_array)\n', (28661, 28670), False, 'from scipy.signal import find_peaks, peak_prominences\n'), ((34973, 34998), 'numpy.repeat', 'np.repeat', (['(1)', 'df.shape[0]'], {}), '(1, df.shape[0])\n', (34982, 34998), True, 'import numpy as np\n'), ((35289, 35333), 'pandas.cut', 'pd.cut', (['df.x'], {'bins': 'bin_lines', 'labels': '_labels'}), '(df.x, bins=bin_lines, labels=_labels)\n', (35295, 35333), True, 'import pandas as pd\n'), ((35572, 35619), 'numpy.unique', 'np.unique', (['spot_assignments'], {'return_counts': '(True)'}), '(spot_assignments, return_counts=True)\n', (35581, 35619), True, 'import numpy as np\n'), ((36564, 36578), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (36576, 36578), True, 'import pandas as pd\n'), ((42238, 42275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_spot_check'], {'dpi': '(200)'}), '(plot_spot_check, dpi=200)\n', (42249, 42275), True, 'import matplotlib.pyplot as plt\n'), ((42366, 42377), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (42375, 42377), True, 'import matplotlib.pyplot as plt\n'), ((42479, 42491), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (42489, 42491), True, 'import matplotlib.pyplot as plt\n'), ((42595, 42613), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (42611, 42613), True, 'import matplotlib.pyplot as plt\n'), ((42688, 42719), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_name'], {'dpi': '(100)'}), '(plot_name, dpi=100)\n', (42699, 42719), True, 'import matplotlib.pyplot as plt\n'), ((42732, 42743), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (42741, 42743), True, 'import matplotlib.pyplot as plt\n'), ((46200, 46225), 'numpy.repeat', 'np.repeat', (['(0)', 'df.shape[0]'], 
{}), '(0, df.shape[0])\n', (46209, 46225), True, 'import numpy as np\n'), ((47756, 47817), 'numpy.repeat', 'np.repeat', (['self.file_details[item]', 'self.df_filtered.shape[0]'], {}), '(self.file_details[item], self.df_filtered.shape[0])\n', (47765, 47817), True, 'import numpy as np\n'), ((49313, 49325), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (49323, 49325), True, 'import matplotlib.pyplot as plt\n'), ((49615, 49645), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_roi'], {'dpi': '(100)'}), '(plot_roi, dpi=100)\n', (49626, 49645), True, 'import matplotlib.pyplot as plt\n'), ((49729, 49740), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (49738, 49740), True, 'import matplotlib.pyplot as plt\n'), ((52723, 52782), 'numpy.repeat', 'np.repeat', (['self.file_details[item]', 'self.df_slopes.shape[0]'], {}), '(self.file_details[item], self.df_slopes.shape[0])\n', (52732, 52782), True, 'import numpy as np\n'), ((63495, 63549), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'linestyle': '"""--"""', 'alpha': '(0.9)'}), "([0], [0], color='k', linestyle='--', alpha=0.9)\n", (63501, 63549), False, 'from matplotlib.lines import Line2D\n'), ((63578, 63631), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'alpha': '(0.5)'}), "([0], [0], color='k', linestyle='-', alpha=0.5)\n", (63584, 63631), False, 'from matplotlib.lines import Line2D\n'), ((7829, 7858), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.vials'], {}), '(0, 1, self.vials)\n', (7840, 7858), True, 'import numpy as np\n'), ((10781, 10799), 'ffmpeg.probe', 'ffmpeg.probe', (['file'], {}), '(file)\n', (10793, 10799), False, 'import ffmpeg\n'), ((18280, 18348), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(self.x + item)', 'y0', 'y1'], {'color': '"""w"""', 'alpha': '(0.8)', 'linewidth': '(1)'}), "(self.x + item, y0, y1, color='w', alpha=0.8, linewidth=1)\n", (18290, 18348), True, 'import 
matplotlib.pyplot as plt\n'), ((23391, 23416), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (23401, 23416), True, 'import matplotlib.pyplot as plt\n'), ((25297, 25317), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixels"""'], {}), "('Pixels')\n", (25307, 25317), True, 'import matplotlib.pyplot as plt\n'), ((37864, 37888), 'scipy.stats.linregress', 'linregress', (['_frame', '_pos'], {}), '(_frame, _pos)\n', (37874, 37888), False, 'from scipy.stats import linregress\n'), ((11614, 11642), 'numpy.frombuffer', 'np.frombuffer', (['out', 'np.uint8'], {}), '(out, np.uint8)\n', (11627, 11642), True, 'import numpy as np\n'), ((22604, 22642), 'numpy.histogram', 'np.histogram', (['spots[metric]'], {'bins': 'bins'}), '(spots[metric], bins=bins)\n', (22616, 22642), True, 'import numpy as np\n'), ((33417, 33442), 'numpy.median', 'np.median', (['diff_list[:-1]'], {}), '(diff_list[:-1])\n', (33426, 33442), True, 'import numpy as np\n'), ((33838, 33862), 'numpy.median', 'np.median', (['diff_list[1:]'], {}), '(diff_list[1:])\n', (33847, 33862), True, 'import numpy as np\n'), ((35126, 35180), 'pandas.cut', 'pd.cut', (['df.x', 'vials'], {'include_lowest': '(True)', 'retbins': '(True)'}), '(df.x, vials, include_lowest=True, retbins=True)\n', (35132, 35180), True, 'import pandas as pd\n'), ((35365, 35392), 'pandas.Series', 'pd.Series', (['spot_assignments'], {}), '(spot_assignments)\n', (35374, 35392), True, 'import pandas as pd\n'), ((63948, 63989), 'numpy.histogram', 'np.histogram', (['self.df_big.mass'], {'bins': 'bins'}), '(self.df_big.mass, bins=bins)\n', (63960, 63989), True, 'import numpy as np\n'), ((64190, 64233), 'numpy.histogram', 'np.histogram', (['self.df_big.signal'], {'bins': 'bins'}), '(self.df_big.signal, bins=bins)\n', (64202, 64233), True, 'import numpy as np\n'), ((28779, 28798), 'numpy.max', 'np.max', (['prominences'], {}), '(prominences)\n', (28785, 28798), True, 'import numpy as np\n'), ((11344, 11362), 
'ffmpeg.input', 'ffmpeg.input', (['file'], {}), '(file)\n', (11356, 11362), False, 'import ffmpeg\n'), ((37929, 37947), 'numpy.hstack', 'np.hstack', (['_result'], {}), '(_result)\n', (37938, 37947), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
" In this pyfile we will give the solution of linear equation system in details. That is, not only the final result will be given, but also the L, U matrix and the determination of A will be listed here for your convinience. "
import numpy as np
""" Input: Ax=b
A = np.array([[1,3,5],
[2,5,2],
[9,3,4]
]) # Here input your coefficient matrix
b = np.array([10,24,31]) # Here input the vector
"""
def check(A):
    """Validate the coefficient matrix A for LU decomposition.

    Prints a diagnostic and returns 0 when A is not square, when its
    determinant is zero, or when it is 1x1.  Otherwise returns the
    dimension of the (square) matrix.
    """
    n_rows = A.shape[0]
    n_cols = A.shape[1]
    # Guard clauses: report the first failing condition and bail out with 0.
    if n_rows != n_cols:
        print("Input error: A is not a square matrix")
        return 0
    if np.linalg.det(A) == 0:
        print("The determination of A is equal to zero")
        return 0
    if n_rows == 1:
        print("The dimension of matrix is 1")
        return 0
    return n_rows
def Decomposition(A):
    """Compute the Doolittle LU decomposition A = L @ U.

    L is unit lower triangular, U is upper triangular.  Prints det(A), L
    and U, and returns the pair (L, U).  If ``check(A)`` rejects the
    matrix (non-square, singular, or 1x1), prints "Error" and returns
    None.
    """
    # Validate once instead of twice: check() computes det(A) internally,
    # so the original double call did that work twice.
    dim = check(A)
    if dim == 0:
        print("Error")
        return None
    print("det(A)=%r"%np.linalg.det(A))
    L = np.eye(dim)
    # Force float storage: np.zeros_like(A) would inherit an integer dtype
    # from an integer input matrix and silently truncate the fractional
    # entries of U, producing a wrong decomposition.
    U = np.zeros_like(A, dtype=float)
    U[0,:]=A[0,:]
    L[1:,0]=A[1:,0]/U[0,0]
    for r in range(1,dim):
        for l in range(1,r):
            # Multipliers below the diagonal from already-computed rows of U
            L[r,l]=1/U[l,l]*(A[r,l]-L[r,:l]@U[:l,l])
        for u in range(r,dim):
            # Remaining entries of row r of U
            U[r,u]=A[r,u]-L[r,:r]@U[:r,u]
    print("L=\n",L,"\n","U=\n",U)
    return L,U
#Decomposition(A)
def Solve(A,b):
    """Solve A x = b using the LU decomposition of A.

    First solves L y = b (forward substitution), then U x = y (back
    substitution).  Prints and returns (y, x).  If the decomposition
    fails (check() rejected A), returns None instead of crashing with a
    confusing TypeError while unpacking None.
    """
    decomposition = Decomposition(A)
    if decomposition is None:
        # Decomposition already printed the reason for the failure.
        return None
    L, U = decomposition
    y = np.linalg.solve(L,b)
    x = np.linalg.solve(U,y)
    print("y=\n",y,"\n","x=\n",x)
    return y,x
#Solve(A,b)
| [
"numpy.eye",
"numpy.zeros_like",
"numpy.linalg.solve",
"numpy.linalg.det"
] | [((1526, 1547), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'b'], {}), '(L, b)\n', (1541, 1547), True, 'import numpy as np\n'), ((1556, 1577), 'numpy.linalg.solve', 'np.linalg.solve', (['U', 'y'], {}), '(U, y)\n', (1571, 1577), True, 'import numpy as np\n'), ((1090, 1101), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1096, 1101), True, 'import numpy as np\n'), ((1119, 1135), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (1132, 1135), True, 'import numpy as np\n'), ((653, 669), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (666, 669), True, 'import numpy as np\n'), ((1035, 1051), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (1048, 1051), True, 'import numpy as np\n')] |
import os
import sys
# Project root used for resource paths and sys.path.
# NOTE(review): '__file__' is a quoted string literal here, so this resolves
# relative to the current working directory (dirname('__file__') == ''),
# not to the script's own location — likely intended to be the bare
# __file__ variable; confirm before running from a different directory.
ROOT_DIR = os.path.dirname(os.path.abspath(os.path.dirname('__file__')))
sys.path.append(ROOT_DIR)
import scipy.io
import numpy as np
import pandas as pd
import imagesize
import requests
import tarfile
# Local path where the downloaded wiki_crop.tar archive is written
local_file_path = os.path.join(ROOT_DIR, 'resources', 'wiki_crop.tar')
# path where all the unpacked image files will be stored
unpacked_path = os.path.join(ROOT_DIR, 'resources', 'wiki_crop')
def get_data():
    """Download the wiki_crop archive to ``local_file_path``.

    Streams the (multi-hundred-MB) tar to disk in 1 MiB chunks instead
    of buffering the entire response body in memory, fails loudly on
    HTTP errors instead of silently writing an error page into the .tar
    file, and bounds the connection with a timeout.
    """
    url = 'https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/wiki_crop.tar'
    with requests.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()  # abort on 4xx/5xx rather than saving the error body
        with open(local_file_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)
get_data()
# Extract the files from the dataset archive
# NOTE(review): extractall trusts archive member paths (no filtering) —
# acceptable only because the archive comes from the fixed URL above.
with tarfile.open(local_file_path) as tf:
    tf.extractall(path=os.path.join(ROOT_DIR, 'resources'))
mat_path = os.path.join(unpacked_path, 'wiki.mat')
dataset_info = scipy.io.loadmat(mat_path)
dataset_info = dataset_info['wiki'].flatten()
# get the image paths
images = np.concatenate(dataset_info[0][2].flatten())
# get gender data, 0 - female, 1 - male, NaN - unknown
gender = dataset_info[0][3].flatten()
# age = photo_taken - date_birth
# originally date_birth comes in matlab serial number format so we need to convert it to datetime64[D]
date_birth = dataset_info[0][0]
# Matlab serial day 1 corresponds to 0000-01-01, so the epoch (day 0) is one
# day before that; adding date_birth days to this origin yields the birth date.
origin = np.datetime64('0000-01-01', 'D') - np.timedelta64(1, 'D')
date_birth = (date_birth * np.timedelta64(1, 'D') + origin).flatten()
photo_taken = dataset_info[0][1].flatten()
# Only the year of the photo is known; use July 1st as a mid-year approximation.
photo_taken_format = np.char.add(photo_taken.astype(str), '-07-01')
photo_taken_format = pd.to_datetime(photo_taken_format).values.astype('datetime64[D]')
# Age in whole years; integer division by 365 ignores leap days (small bias).
age = (photo_taken_format - date_birth).astype('int')//365
dataset_df = pd.DataFrame({'image_path': images, 'gender': gender, 'age': age, })
# Drop nan values
original_len = len(dataset_df)
dataset_df = dataset_df.dropna()
print('{} data entries contaning nan values have been dropped.'.format(original_len - len(dataset_df)))
# Find image instances where size is < 100 px
idx_drop = []
for idx, value in dataset_df['image_path'].items():
    try:
        # imagesize.get returns (width, height); only the width is checked here
        if imagesize.get(os.path.join(unpacked_path, value))[0] < 100:
            idx_drop.append(idx)
    except FileNotFoundError:
        # Missing image files are reported and dropped as well
        print(value)
        idx_drop.append(idx)
    if idx % 5000 == 0:
        print('Iteration {}'.format(idx))
print('Indices to drop: {}'.format(len(idx_drop)))
# Drop instances where size is < 100 px
original_len = len(dataset_df)
dataset_df = dataset_df.drop(idx_drop)
print('{} data entries with incorrect image size have been dropped.'.format(original_len - len(dataset_df)))
# Drop entries with broken age
original_len = len(dataset_df)
# Ages outside (1, 90) are treated as corrupted metadata
mask = (dataset_df['age'] > 90) | (dataset_df['age'] < 1)
dataset_df = dataset_df.drop(labels=dataset_df[mask].index)
print('{} data entries with incorrect age have been dropped.'.format(original_len - len(dataset_df)))
dataset_df['gender'] = dataset_df['gender'].astype(int)
dataset_df['age'] = dataset_df['age'].astype(float)
print('-'*30)
print(dataset_df.head())
print()
print('Dataset size: {}'.format(len(dataset_df)))
# Save path in session for csv file
INFO_SAVE_PATH = os.path.join(unpacked_path, 'dataset_info.csv')
dataset_df.to_csv(INFO_SAVE_PATH)
| [
"sys.path.append",
"pandas.DataFrame",
"numpy.datetime64",
"os.path.dirname",
"numpy.timedelta64",
"pandas.to_datetime",
"requests.get",
"tarfile.open",
"os.path.join"
] | [((94, 119), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (109, 119), False, 'import sys\n'), ((242, 294), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""resources"""', '"""wiki_crop.tar"""'], {}), "(ROOT_DIR, 'resources', 'wiki_crop.tar')\n", (254, 294), False, 'import os\n'), ((368, 416), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""resources"""', '"""wiki_crop"""'], {}), "(ROOT_DIR, 'resources', 'wiki_crop')\n", (380, 416), False, 'import os\n'), ((789, 828), 'os.path.join', 'os.path.join', (['unpacked_path', '"""wiki.mat"""'], {}), "(unpacked_path, 'wiki.mat')\n", (801, 828), False, 'import os\n'), ((1667, 1733), 'pandas.DataFrame', 'pd.DataFrame', (["{'image_path': images, 'gender': gender, 'age': age}"], {}), "({'image_path': images, 'gender': gender, 'age': age})\n", (1679, 1733), True, 'import pandas as pd\n'), ((3110, 3157), 'os.path.join', 'os.path.join', (['unpacked_path', '"""dataset_info.csv"""'], {}), "(unpacked_path, 'dataset_info.csv')\n", (3122, 3157), False, 'import os\n'), ((528, 545), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (540, 545), False, 'import requests\n'), ((680, 709), 'tarfile.open', 'tarfile.open', (['local_file_path'], {}), '(local_file_path)\n', (692, 709), False, 'import tarfile\n'), ((1266, 1298), 'numpy.datetime64', 'np.datetime64', (['"""0000-01-01"""', '"""D"""'], {}), "('0000-01-01', 'D')\n", (1279, 1298), True, 'import numpy as np\n'), ((1301, 1323), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1315, 1323), True, 'import numpy as np\n'), ((64, 91), 'os.path.dirname', 'os.path.dirname', (['"""__file__"""'], {}), "('__file__')\n", (79, 91), False, 'import os\n'), ((740, 775), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""resources"""'], {}), "(ROOT_DIR, 'resources')\n", (752, 775), False, 'import os\n'), ((1527, 1561), 'pandas.to_datetime', 'pd.to_datetime', (['photo_taken_format'], {}), '(photo_taken_format)\n', (1541, 1561), True, 
'import pandas as pd\n'), ((1351, 1373), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1365, 1373), True, 'import numpy as np\n'), ((2070, 2104), 'os.path.join', 'os.path.join', (['unpacked_path', 'value'], {}), '(unpacked_path, value)\n', (2082, 2104), False, 'import os\n')] |
import argparse
import os
import fnet.data
import fnet.fnet_model
from fnet.functions import compute_dataset_min_max_ranges, pearsonr
from fnet.transforms import Propper
import json
import logging
import numpy as np
import pdb
import sys
import time
import torch
import warnings
import optuna
import math
import joblib
from datetime import datetime
class Trainer(object):
    """This class holds all training related functions and parameters
    Parameters
    ----------
    config : dict
        The config dictionary usually loaded from config.yaml holding all training
        and data related parameters
    fine_tune : bool
        Whether we will be fine-tuning an existing model or training a model from scratch
    path_run_dir: str
        The path where training outputs will be stored, either
        output_path/dataset/run/fine_tuned or output_path/dataset/run/train_from_scratch
    path_dataset_train_csv : str
        The path to the csv file listing the images used for the training set
    path_dataset_val_csv: str
        The path to the csv file listing the images used for the validation set
    verbose : bool
        Whether or not to print training updates
    Attributes
    ----------
    config : dict
        The config dictionary usually loaded from config.yaml holding all training
        and data related parameters
    fine_tune : bool
        Whether we will be fine-tuning an existing model or training a model from scratch
    path_run_dir: str
        The path where training outputs will be stored, either
        output_path/dataset/run/fine_tuned or output_path/dataset/run/train_from_scratch
    path_dataset_train_csv : str
        The path to the csv file listing the images used for the training set
    path_dataset_val_csv: str
        The path to the csv file listing the images used for the validation set
    verbose : bool
        Whether or not to print training updates
    device: str
        The current device we are running on, either cpu or gpu0, etc.
    trial_id: int
        The current run of the hyperparameter search
    """
    def __init__(self, config, fine_tune=False, path_run_dir=None, path_dataset_train_csv=None, path_dataset_val_csv=None, verbose=False):
        self.config = config
        self.fine_tune = fine_tune
        self.path_run_dir = path_run_dir
        self.path_dataset_train_csv = path_dataset_train_csv
        self.path_dataset_val_csv = path_dataset_val_csv
        self.verbose = verbose
        # Normalize gpu_ids to a list so indexing [0] below is always valid
        self.config['gpu_ids'] = [self.config['gpu_ids']] if isinstance(self.config['gpu_ids'], int) else self.config['gpu_ids']
        # A negative first gpu id is the convention for "run on CPU"
        self.device = torch.device('cuda', self.config['gpu_ids'][0]) if self.config['gpu_ids'][0] >= 0 else torch.device('cpu')
        self.trial_id = 0
        #Setup logging
        self.setup_logger()
    def reset_trial_id(self):
        '''
        This function resets the trial id to zero - used every time a hyperparameter
        search is completed
        '''
        self.trial_id = 0
    def set_run_dir(self, path_run_dir):
        '''
        This function sets a new path_run_dir
        Parameters
        ----------
        path_run_dir : str
            The new run directory
        '''
        self.path_run_dir = path_run_dir
    def set_train_val_sets(self, path_dataset_train_csv, path_dataset_val_csv):
        '''
        This function sets new csv files for the training and validation set
        Parameters
        ----------
        path_dataset_train_csv : str
            The path to the new csv file listing the images used for the training set
        path_dataset_val_csv: str
            The path to the new csv file listing the images used for the validation set
        '''
        self.path_dataset_train_csv = path_dataset_train_csv
        self.path_dataset_val_csv = path_dataset_val_csv
    def get_dataloader(self, remaining_iterations, validation=False):
        '''
        This function returns the dataloader used during training
        Parameters
        ----------
        remaining_iterations : int
            The number of iterations remaining for training - if training from scratch will
            be equal to self.config['training']['n_iter']
        validation: bool
            Whether to return the training or validation dataloader
        Returns
        -------
        torch.utils.data.DataLoader
            The dataloader - either for training or validation
        '''
        # Intensity ranges are computed over train + val so both loaders normalize consistently
        min_max_bright, min_max_infection, min_max_dapi = compute_dataset_min_max_ranges(self.path_dataset_train_csv, self.path_dataset_val_csv)
        min_max_bright_norm, _, _ = compute_dataset_min_max_ranges(self.path_dataset_train_csv, self.path_dataset_val_csv, norm=True)
        transform_signal=[]
        # Transforms are given as source strings in the config and instantiated
        # via eval(); AdaptRange gets the dataset range baked into its arguments.
        for t in self.config['preprocessing']['transform_signal']:
            if t=='fnet.transforms.AdaptRange':
                t = 'fnet.transforms.AdaptRange({:f},{:f})'.format(min_max_bright_norm[0], min_max_bright_norm[1])
            transform_signal.append(eval(t))
        transform_target = [eval(t) for t in self.config['preprocessing']['transform_target']]
        transform_thresh = []
        if validation:
            # Propper pads/crops validation images so whole images tile cleanly
            propper = Propper(action='+')
            print(propper)
            transform_signal.append(propper)
            transform_target.append(propper)
            transform_thresh.append(propper)
        ds = getattr(fnet.data, self.config['class_dataset'])(
            path_csv = self.path_dataset_train_csv if not validation else self.path_dataset_val_csv,
            transform_source = transform_signal,
            transform_target = transform_target,
            transform_thresh = transform_thresh,
            min_max_bright = min_max_bright,
            min_max_dapi = min_max_dapi,
            min_max_infection = min_max_infection,
            augmentations = self.config['training']['augmentations'] if not validation else False
            )
        #print(ds)
        if not validation:
            assert len(ds)>=self.config['buffer_size'], 'buffer_size cannot be larger than the training data. Please set buffer_size in the config smaller or equal to your training data size and try again. Exiting.'
            # Training: random patches from a rotating buffer of images
            ds_patch = fnet.data.BufferedPatchDataset(
                dataset = ds,
                patch_size = self.config['patch_size'],
                buffer_size = self.config['buffer_size'],
                buffer_switch_frequency = self.config['buffer_switch_frequency'],
                npatches = remaining_iterations*self.config['batch_size'], #None, #
                verbose = False,
                shuffle_images = self.config['shuffle_images'],
                threshold_backround = self.config['threshold_backround'], #0.2
                resampling_probability = self.config['resampling_probability'], #0.7,
                **self.config['bpds_kwargs'],
                )
        else:
            # Validation: deterministic pass over all patches of all images
            ds_patch = fnet.data.AllPatchesDataset(
                dataset = ds,
                patch_size = self.config['patch_size'],
                buffer_size = len(ds),
                buffer_switch_frequency = -1,
                verbose = False
                )
        dataloader = torch.utils.data.DataLoader(
            ds_patch, #ds
            batch_size = self.config['batch_size'],
            )
        return dataloader
    def update_config(self, config):
        '''
        This function will update the config dictionary
        This function will usually be called before train_best to update the config with
        the best hyperparameters found during train_for_search and train a model with these
        Parameters
        ----------
        config : dict
            The new config dictionary used for training
        '''
        self.config = config
    def setup_logger(self):
        '''
        This function sets up a run log
        '''
        # NOTE(review): called once from __init__; calling it again would attach
        # duplicate handlers and double every log line.
        self.logger = logging.getLogger('model training')
        self.logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(os.path.join(self.path_run_dir, 'run.log'), mode='a')
        sh = logging.StreamHandler(sys.stdout)
        fh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
        self.logger.addHandler(fh)
        self.logger.addHandler(sh)
    def train_for_search(self, trial):
        '''
        This function performs a single trial of the hyperparameter search
        The function will select hyperparameters for the trial, train a model,
        stop the training if performance is low and return the best pearson
        correlation coefficient on the validation set
        Parameters
        ----------
        trial : optuna.trial
            The current trial of the hyperparameter search
        Returns
        -------
        float
            The best pearson correlation coefficient achieved on the validation set during
            training - the value we are trying to maximize during our hyperparameter search
        '''
        self.trial_id += 1
        print('Starting trial {}/{}'.format(self.trial_id, self.config['training']['num_trials']))
        # define hyperparameters to tune
        # (trailing comments record the best values found in earlier searches)
        if self.fine_tune:
            self.config['num_freeze_layers'] = trial.suggest_int('freeze_layers', 1,106) #97
        else:
            self.config['depth'] = trial.suggest_int('depth', 2, 6) #3
        patch_l = trial.suggest_categorical('patch', [128,256])
        self.config['patch_size'] = [patch_l, patch_l] #[256, 256]
        self.config['lr'] = trial.suggest_loguniform("lr", 1e-5, 1e-1) #0.00369
        self.config['resampling_probability'] = trial.suggest_float('resample', 0.5, 0.9) #0.6263
        self.config['threshold_backround'] = trial.suggest_float('threshold', 0.01, 0.5) #0.3463
        self.config['dropout'] = trial.suggest_float('dropout', 0.1, 0.5) #0.1078
        self.config['loss_weight'] = trial.suggest_float('loss_weight', 0.5, 0.9)
        #Set random seed
        if self.config['seed'] is not None:
            np.random.seed(self.config['seed'])
            torch.manual_seed(self.config['seed'])
            torch.cuda.manual_seed_all(self.config['seed'])
        #Instantiate Model
        if self.fine_tune:
            saved_model_path = self.config['path_model_dir'][0]
        else:
            saved_model_path = self.path_run_dir
        if os.path.exists(os.path.join(saved_model_path, 'model.p')):
            model = fnet.load_model_from_dir(saved_model_path, gpu_ids=self.config['gpu_ids'], in_channels=self.config['in_channels'], out_channels=self.config['out_channels'])
            self.logger.info('model loaded from: {:s}'.format(saved_model_path))
            # freeze first layers
            for freeze_idx, param in enumerate(model.net.parameters()):
                if freeze_idx<self.config['num_freeze_layers']:
                    param.requires_grad = False
        else:
            model = fnet.fnet_model.Model(
                nn_module=self.config['nn_module'],
                lr=self.config['lr'],
                gpu_ids=self.config['gpu_ids'],
                dropout= self.config['dropout'],
                in_channels=self.config['in_channels'],
                out_channels=self.config['out_channels'],
                depth = self.config['depth'],
                loss_weight=self.config['loss_weight'],
                min_dif=self.config['min_dif'],
                max_dif=self.config['max_dif']
            )
        n_remaining_iterations = max(0, (self.config['training']['n_iter'] - model.count_iter))
        dataloader_train = self.get_dataloader(n_remaining_iterations)
        if self.path_dataset_val_csv is not None:
            dataloader_val = self.get_dataloader(n_remaining_iterations, validation=True)
            # per-element loss so each sample can be reweighted before averaging
            criterion_val = model.criterion_fn(reduction='none')
        loss_train = 0
        pearson_train = 0
        for i, (signal, target, dapi_signal, dif_dapi_inf, _) in enumerate(dataloader_train, model.count_iter): #dna_channel
            if self.config['in_channels']==2:
                # two-channel mode: concatenate the DAPI channel onto the signal
                signal = torch.cat([signal, dapi_signal], dim=1)
            loss_batch, pearson_batch = model.do_train_iter(signal, target, dif_dapi_inf)
            loss_train += loss_batch
            pearson_train += pearson_batch
            if ((i + 1) % self.config['print_iter'] == 0) or ((i + 1) == self.config['training']['n_iter']):
                if self.verbose: print('For {}/{} iterations, average training loss: {:.3f} and average Pearson Correlation Coefficient: {:3f}'.format(i, n_remaining_iterations, loss_train/self.config['print_iter'], pearson_train/self.config['print_iter']))
                loss_train = 0
                pearson_train = 0
                if self.path_dataset_val_csv is not None:
                    loss_val = 0
                    pearson = 0
                    pearson_idx =0
                    for idx_val, sample in enumerate(dataloader_val):
                        patch, is_last = sample['patch'], sample['is_last']
                        signal_val = patch[0].to(device=self.device)
                        target_val = patch[1].to(device=self.device)
                        dif_dapi_inf = patch[-2].to(device=self.device)
                        if self.config['in_channels']==2:
                            signal_val = torch.cat([signal_val, patch[2]], dim=1)
                        pred_val = model.predict(signal_val)
                        # per-sample mean over C,H,W, then mapped to a loss weight:
                        # samples inside (min_dif, max_dif) get loss_weight, others 1-loss_weight
                        dif_mean = torch.mean(dif_dapi_inf, dim=(1,2,3))
                        for it,dif_mean_it in enumerate(dif_mean):
                            if dif_mean_it>self.config['min_dif'] and dif_mean_it<self.config['max_dif']:
                                dif_mean[it] = self.config['loss_weight']
                            else:
                                dif_mean[it] = 1-self.config['loss_weight']
                        #dif_mean = torch.tensor(dif_mean, dtype=torch.float32, device=self.device)
                        dif_mean = dif_mean.to(device=self.device)
                        loss_val_batch = criterion_val(pred_val, target_val)
                        loss_val_batch = torch.mean(loss_val_batch, dim=(1,2,3)) * dif_mean
                        loss_val += torch.mean(loss_val_batch).item()
                        pearson_val = pearsonr(pred_val, target_val)
                        # nan correlations (e.g. constant patches) are excluded from the average
                        if not math.isnan(pearson_val):
                            pearson += pearson_val
                            pearson_idx += 1
                    # NOTE(review): idx_val here is the last enumerate index, i.e.
                    # num_batches - 1, so this divides by one less than the number of
                    # validation batches — confirm whether idx_val + 1 was intended.
                    loss_val = loss_val/idx_val #len(dataloader_val)
                    print('Validation loss: {:.3f}'.format(loss_val))
                    if pearson_idx==0:
                        pearson=0
                        print('Validation Pearson Correlation Coefficient is nan everywhere.')
                    else:
                        pearson=pearson/pearson_idx
                        print('Validation Pearson Correlation Coefficient: {:.3f}'.format(pearson))
                    trial.report(loss_val, i)
                    # Handle pruning based on the intermediate value.
                    if trial.should_prune():
                        raise optuna.exceptions.TrialPruned()
        # NOTE(review): returns the pearson value of the LAST validation pass;
        # if path_dataset_val_csv is None, 'pearson' is never assigned and this
        # raises NameError — confirm validation is always configured for search.
        return pearson
    def train_best(self):
        '''
        This function will train and save a model
        The model will be trained based on the parameters specified in self.config
        '''
        #Set random seed
        if self.config['seed'] is not None:
            np.random.seed(self.config['seed'])
            torch.manual_seed(self.config['seed'])
            torch.cuda.manual_seed_all(self.config['seed'])
        #Instantiate Model
        if self.fine_tune:
            saved_model_path = self.config['path_model_dir'][0]
        else:
            saved_model_path = self.path_run_dir
        path_model = os.path.join(self.path_run_dir, 'model.p')
        if os.path.exists(os.path.join(saved_model_path,'model.p')):
            model = fnet.load_model_from_dir(saved_model_path, gpu_ids=self.config['gpu_ids'], in_channels=self.config['in_channels'], out_channels=self.config['out_channels'])
            self.logger.info('model loaded from: {:s}'.format(saved_model_path))
            # freeze first layers
            for freeze_idx, param in enumerate(model.net.parameters()):
                if freeze_idx<self.config['num_freeze_layers']:
                    param.requires_grad = False
        else:
            model = fnet.fnet_model.Model(
                nn_module=self.config['nn_module'],
                lr=self.config['lr'],
                gpu_ids=self.config['gpu_ids'],
                dropout= self.config['dropout'],
                in_channels=self.config['in_channels'],
                out_channels=self.config['out_channels'],
                depth = self.config['depth'],
                loss_weight=self.config['loss_weight'],
                min_dif=self.config['min_dif'],
                max_dif=self.config['max_dif']
            )
        self.logger.info('Model instianted from: {:s}'.format(self.config['nn_module']))
        self.logger.info(model)
        #Load saved history if it already exists
        path_losses_csv = os.path.join(self.path_run_dir, 'losses.csv')
        if os.path.exists(path_losses_csv):
            fnetlogger = fnet.FnetLogger(path_losses_csv)
            self.logger.info('History loaded from: {:s}'.format(path_losses_csv))
        else:
            fnetlogger = fnet.FnetLogger(columns=['num_iter', 'loss_batch'])
        n_remaining_iterations = max(0, (self.config['training']['n_iter'] - model.count_iter))
        dataloader_train = self.get_dataloader(n_remaining_iterations)
        # Persist the exact config used for this run alongside the model
        with open(os.path.join(self.path_run_dir, 'train_options.json'), 'w') as fo:
            json.dump(self.config, fo, indent=4, sort_keys=True)
        loss_train = 0
        pearson_train = 0
        for i, (signal, target, dapi_signal, dif_dapi_inf, _) in enumerate(dataloader_train, model.count_iter): #dna_channel
            if self.config['in_channels']==2:
                signal = torch.cat([signal, dapi_signal], dim=1)
            loss_batch, pearson_batch = model.do_train_iter(signal, target, dif_dapi_inf)
            fnetlogger.add({'num_iter': i + 1, 'loss_batch': loss_batch})
            loss_train += loss_batch
            pearson_train += pearson_batch
            if ((i + 1) % self.config['print_iter'] == 0) or ((i + 1) == self.config['training']['n_iter']):
                fnetlogger.to_csv(path_losses_csv)
                self.logger.info('loss log saved to: {:s}'.format(path_losses_csv))
                print('For {}/{} iterations, average training loss: {:.3f} and average Pearson Correlation Coefficient: {:3f}'.format(i, n_remaining_iterations, loss_train/self.config['print_iter'], pearson_train/self.config['print_iter']))
                loss_train = 0
                pearson_train = 0
        self.logger.info('model saved to: {:s}'.format(path_model))
        model.save_state(path_model)
"torch.mean",
"json.dump",
"fnet.functions.pearsonr",
"fnet.transforms.Propper",
"numpy.random.seed",
"math.isnan",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"logging.StreamHandler",
"os.path.exists",
"fnet.functions.compute_dataset_min_max_ranges",
"torch.cat",
"optuna.exceptions.... | [((4537, 4628), 'fnet.functions.compute_dataset_min_max_ranges', 'compute_dataset_min_max_ranges', (['self.path_dataset_train_csv', 'self.path_dataset_val_csv'], {}), '(self.path_dataset_train_csv, self.\n path_dataset_val_csv)\n', (4567, 4628), False, 'from fnet.functions import compute_dataset_min_max_ranges, pearsonr\n'), ((4660, 4762), 'fnet.functions.compute_dataset_min_max_ranges', 'compute_dataset_min_max_ranges', (['self.path_dataset_train_csv', 'self.path_dataset_val_csv'], {'norm': '(True)'}), '(self.path_dataset_train_csv, self.\n path_dataset_val_csv, norm=True)\n', (4690, 4762), False, 'from fnet.functions import compute_dataset_min_max_ranges, pearsonr\n'), ((7255, 7330), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['ds_patch'], {'batch_size': "self.config['batch_size']"}), "(ds_patch, batch_size=self.config['batch_size'])\n", (7282, 7330), False, 'import torch\n'), ((7960, 7995), 'logging.getLogger', 'logging.getLogger', (['"""model training"""'], {}), "('model training')\n", (7977, 7995), False, 'import logging\n'), ((8140, 8173), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (8161, 8173), False, 'import logging\n'), ((16211, 16253), 'os.path.join', 'os.path.join', (['self.path_run_dir', '"""model.p"""'], {}), "(self.path_run_dir, 'model.p')\n", (16223, 16253), False, 'import os\n'), ((17607, 17652), 'os.path.join', 'os.path.join', (['self.path_run_dir', '"""losses.csv"""'], {}), "(self.path_run_dir, 'losses.csv')\n", (17619, 17652), False, 'import os\n'), ((17664, 17695), 'os.path.exists', 'os.path.exists', (['path_losses_csv'], {}), '(path_losses_csv)\n', (17678, 17695), False, 'import os\n'), ((2646, 2693), 'torch.device', 'torch.device', (['"""cuda"""', "self.config['gpu_ids'][0]"], {}), "('cuda', self.config['gpu_ids'][0])\n", (2658, 2693), False, 'import torch\n'), ((2733, 2752), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2745, 
2752), False, 'import torch\n'), ((5243, 5262), 'fnet.transforms.Propper', 'Propper', ([], {'action': '"""+"""'}), "(action='+')\n", (5250, 5262), False, 'from fnet.transforms import Propper\n'), ((8073, 8115), 'os.path.join', 'os.path.join', (['self.path_run_dir', '"""run.log"""'], {}), "(self.path_run_dir, 'run.log')\n", (8085, 8115), False, 'import os\n'), ((8198, 8244), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""'], {}), "('%(asctime)s - %(message)s')\n", (8215, 8244), False, 'import logging\n'), ((10064, 10099), 'numpy.random.seed', 'np.random.seed', (["self.config['seed']"], {}), "(self.config['seed'])\n", (10078, 10099), True, 'import numpy as np\n'), ((10112, 10150), 'torch.manual_seed', 'torch.manual_seed', (["self.config['seed']"], {}), "(self.config['seed'])\n", (10129, 10150), False, 'import torch\n'), ((10163, 10210), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["self.config['seed']"], {}), "(self.config['seed'])\n", (10189, 10210), False, 'import torch\n'), ((10432, 10473), 'os.path.join', 'os.path.join', (['saved_model_path', '"""model.p"""'], {}), "(saved_model_path, 'model.p')\n", (10444, 10473), False, 'import os\n'), ((15849, 15884), 'numpy.random.seed', 'np.random.seed', (["self.config['seed']"], {}), "(self.config['seed'])\n", (15863, 15884), True, 'import numpy as np\n'), ((15897, 15935), 'torch.manual_seed', 'torch.manual_seed', (["self.config['seed']"], {}), "(self.config['seed'])\n", (15914, 15935), False, 'import torch\n'), ((15948, 15995), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["self.config['seed']"], {}), "(self.config['seed'])\n", (15974, 15995), False, 'import torch\n'), ((16289, 16330), 'os.path.join', 'os.path.join', (['saved_model_path', '"""model.p"""'], {}), "(saved_model_path, 'model.p')\n", (16301, 16330), False, 'import os\n'), ((18206, 18258), 'json.dump', 'json.dump', (['self.config', 'fo'], {'indent': '(4)', 'sort_keys': '(True)'}), '(self.config, fo, 
indent=4, sort_keys=True)\n', (18215, 18258), False, 'import json\n'), ((12167, 12206), 'torch.cat', 'torch.cat', (['[signal, dapi_signal]'], {'dim': '(1)'}), '([signal, dapi_signal], dim=1)\n', (12176, 12206), False, 'import torch\n'), ((15515, 15546), 'optuna.exceptions.TrialPruned', 'optuna.exceptions.TrialPruned', ([], {}), '()\n', (15544, 15546), False, 'import optuna\n'), ((18127, 18180), 'os.path.join', 'os.path.join', (['self.path_run_dir', '"""train_options.json"""'], {}), "(self.path_run_dir, 'train_options.json')\n", (18139, 18180), False, 'import os\n'), ((18523, 18562), 'torch.cat', 'torch.cat', (['[signal, dapi_signal]'], {'dim': '(1)'}), '([signal, dapi_signal], dim=1)\n', (18532, 18562), False, 'import torch\n'), ((13668, 13707), 'torch.mean', 'torch.mean', (['dif_dapi_inf'], {'dim': '(1, 2, 3)'}), '(dif_dapi_inf, dim=(1, 2, 3))\n', (13678, 13707), False, 'import torch\n'), ((14595, 14625), 'fnet.functions.pearsonr', 'pearsonr', (['pred_val', 'target_val'], {}), '(pred_val, target_val)\n', (14603, 14625), False, 'from fnet.functions import compute_dataset_min_max_ranges, pearsonr\n'), ((13501, 13541), 'torch.cat', 'torch.cat', (['[signal_val, patch[2]]'], {'dim': '(1)'}), '([signal_val, patch[2]], dim=1)\n', (13510, 13541), False, 'import torch\n'), ((14411, 14452), 'torch.mean', 'torch.mean', (['loss_val_batch'], {'dim': '(1, 2, 3)'}), '(loss_val_batch, dim=(1, 2, 3))\n', (14421, 14452), False, 'import torch\n'), ((14657, 14680), 'math.isnan', 'math.isnan', (['pearson_val'], {}), '(pearson_val)\n', (14667, 14680), False, 'import math\n'), ((14498, 14524), 'torch.mean', 'torch.mean', (['loss_val_batch'], {}), '(loss_val_batch)\n', (14508, 14524), False, 'import torch\n')] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
from env_2 import Reacher_for2 as Reacher
from copy import copy
import argparse
# ---- hyper-parameters for PPO + MAML training ----
ITR=1000 # number of tasks
EP_MAX = 20 # number of episodes for single task, generally 2000 steps for 2 joints and 5000 steps for 3 joints have a good performance
EP_LEN = 20 # number of steps for each episode
GAMMA = 0.9  # discount factor for the return
A_LR = 1e-4  # actor learning rate
C_LR = 2e-4  # critic learning rate
BATCH = 64   # minibatch size for a PPO update (note: EP_LEN < BATCH, so updates fire at episode end)
A_UPDATE_STEPS = 3  # actor gradient steps per update
C_UPDATE_STEPS = 3  # critic gradient steps per update
S_DIM, A_DIM = 8,2  # state / action dimensionality of the 2-joint reacher
TRAIN_INTERVAL = 10  # episodes between inner-loop updates
METHOD = [
    dict(name='kl_pen', kl_target=0.01, lam=0.5),   # KL penalty
    dict(name='clip', epsilon=0.2),                 # Clipped surrogate objective, find this is better
][1]        # choose the method for optimization
model_path='./maml_model/maml'
# command-line flags: --train to meta-train, --test to fine-tune/evaluate a saved model
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=True)
args = parser.parse_args()
class PPO(object):
    """TF1 graph-mode PPO agent used as the policy for (FO)MAML meta-training.

    The computation graph is built twice over the same placeholders:

    * ``first_forward_*`` ops operate on the raw weight variables created by
      ``construct_weights`` (inner, task-specific path);
    * ``second_forward_*`` ops are rebuilt in ``update`` on top of the "fast
      weights" (plain tensors) produced by ``sum_gradients_update`` (meta path).

    Construction order matters: ``construct_weights`` must run before any
    forward pass, and the optimizers are created before the losses they
    minimize.  Do not reorder statements in ``__init__``.
    """

    def __init__(self):
        # config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
        # config.gpu_options.per_process_gpu_memory_fraction = 0.6
        # self.sess = tf.Session(config=config)
        # force cpu
        # config = tf.ConfigProto(
        #     device_count = {'GPU': 0}
        # )
        # self.sess = tf.Session(config=config)
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
        # weight dictionaries: critic, current policy 'pi', old policy 'oldpi'
        self.critic_weights = {}
        self.actor_weights = {}
        self.actor_weights_ = {}
        self.construct_weights()
        with tf.variable_scope('adam'):
            self.actor_optimizer = tf.train.AdamOptimizer(A_LR)
            self.critic_optimizer = tf.train.AdamOptimizer(C_LR)
        with tf.variable_scope('actor_inputs/', reuse = tf.AUTO_REUSE): # / force the name to be reused
            self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
            self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
            self.tflam = tf.placeholder(tf.float32, None, 'lambda')
        with tf.variable_scope('critic_inputs/', reuse = tf.AUTO_REUSE):
            self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        # build the inner-loop (task-specific) graph on the raw variables
        self.first_forward_critic(self.critic_weights)
        self.first_forward_actor(self.actor_weights, self.actor_weights_, self.critic_weights)
        # self.sess.run(tf.global_variables_initializer())
        # keep handles on the raw variables so the model can be snapshotted /
        # restored by value in save_model / restore_model
        self.actor_var = [v for v in tf.trainable_variables() if v.name.split('/')[0] == "pi"]
        self.critic_var= [v for v in tf.trainable_variables() if v.name.split('/')[0] == "critic"]
        tf.summary.FileWriter("log/", self.sess.graph)

    def first_forward_critic(self, critic_weights):
        '''
        define operations for the critic part
        for the first time task-specific-update

        Builds self.v / self.advantage / self.closs / self.ctrain_op on the
        given weight dictionary.
        '''
        # critic
        with tf.variable_scope('critic'):
            # l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
            # self.v = tf.layers.dense(l1, 1)
            self.v = self._build_cnet('critic', weights = critic_weights)
            self.advantage = self.tfdc_r - self.v
            self.closs = tf.reduce_mean(tf.square(self.advantage))
            self.ctrain_op = self.critic_optimizer.minimize(self.closs)
        self.critic_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic')

    def second_forward_critic(self, critic_weights):
        '''
        define operations for the critic part
        for the second time meta-update

        Same structure as first_forward_critic but the ops get a trailing
        underscore and are built on the fast weights.
        '''
        # critic
        with tf.variable_scope('critic'):
            # l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
            # self.v = tf.layers.dense(l1, 1)
            self.v_ = self._build_cnet('critic', weights = critic_weights)
            self.advantage_ = self.tfdc_r - self.v_
            self.closs_ = tf.reduce_mean(tf.square(self.advantage_))
            self.ctrain_op_ = self.critic_optimizer.minimize(self.closs_)
        self.critic_params_ = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic')

    def first_forward_actor(self, actor_weights, actor_weights_, critic_weights):
        '''
        define operations for the actor part
        for the first time task-specific-update

        Builds the sampling op, the oldpi-sync op and the surrogate loss on
        the given pi / oldpi weight dictionaries.
        '''
        # actor
        self.pi, self.pi_params = self._build_anet('pi', trainable=True, weights = actor_weights)
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False, weights = actor_weights_)
        with tf.variable_scope('sample_action'):
            self.sample_op = tf.squeeze(self.pi.sample(1), axis=0)  # choosing action
        with tf.variable_scope('update_oldpi'):
            self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(self.pi_params, oldpi_params)]
        with tf.variable_scope('loss'):
            with tf.variable_scope('surrogate'):
                # ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
                ratio = self.pi.prob(self.tfa) / oldpi.prob(self.tfa)
                surr = ratio * self.tfadv
            if METHOD['name'] == 'kl_pen':
                kl = tf.distributions.kl_divergence(oldpi, self.pi)
                self.kl_mean = tf.reduce_mean(kl)
                self.aloss = -(tf.reduce_mean(surr - self.tflam * kl))
            else:   # clipping method, find this is better
                self.aloss = -tf.reduce_mean(tf.minimum(
                    surr,
                    tf.clip_by_value(ratio, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))
                # add entropy to boost exploration
                # entropy=pi.entropy()
                # self.aloss-=0.1*entropy
        with tf.variable_scope('atrain'):
            self.atrain_op = self.actor_optimizer.minimize(self.aloss)

    def second_forward_actor(self, actor_weights, actor_weights_, critic_weights):
        '''
        define operations for the actor part
        for the second time meta-update

        NOTE(review): update() passes the same fast-weight dict for both
        actor_weights and actor_weights_, so pi_ and oldpi_ share weights
        here -- confirm this is the intended (FO)MAML behaviour.
        '''
        # actor
        self.pi_, self.pi_params_ = self._build_anet('pi', trainable=True, weights = actor_weights)
        oldpi_, oldpi_params_ = self._build_anet('oldpi', trainable=False, weights = actor_weights_)
        with tf.variable_scope('sample_action'):
            self.sample_op_ = tf.squeeze(self.pi_.sample(1), axis=0)  # choosing action
        with tf.variable_scope('update_oldpi'):
            self.update_oldpi_op_ = [oldp.assign(p) for p, oldp in zip(self.pi_params_, oldpi_params_)]
        with tf.variable_scope('loss'):
            with tf.variable_scope('surrogate'):
                # ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
                ratio_ = self.pi_.prob(self.tfa) / oldpi_.prob(self.tfa)
                surr_ = ratio_ * self.tfadv
            if METHOD['name'] == 'kl_pen':
                # self.tflam = tf.placeholder(tf.float32, None, 'lambda')
                kl_ = tf.distributions.kl_divergence(oldpi_, self.pi_)
                self.kl_mean_ = tf.reduce_mean(kl_)
                self.aloss_ = -(tf.reduce_mean(surr_ - self.tflam * kl_))
            else:   # clipping method, find this is better
                self.aloss_ = -tf.reduce_mean(tf.minimum(
                    surr_,
                    tf.clip_by_value(ratio_, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))
                # add entropy to boost exploration
                # entropy=pi.entropy()
                # self.aloss-=0.1*entropy
        with tf.variable_scope('atrain'):
            self.atrain_op_ = self.actor_optimizer.minimize(self.aloss_)

    def construct_weights(self, ):
        '''
        define weights

        Fills the three weight dictionaries with tf.Variables: critic
        (w1/b1/w2/b2), pi and oldpi (w1/b1 shared hidden layer, w2/b2 -> mu
        head, w3/b3 -> sigma head).  oldpi variables are non-trainable.
        '''
        c_hidden1 = 100  # critic hidden width
        a_hidden1 = 100  # actor hidden width
        with tf.variable_scope('critic'):
            self.critic_weights['w1'] = tf.Variable(tf.truncated_normal([S_DIM, c_hidden1], stddev = 0.1))
            self.critic_weights['b1'] = tf.Variable(tf.truncated_normal([c_hidden1], stddev = 0.1))
            self.critic_weights['w2'] = tf.Variable(tf.truncated_normal([c_hidden1, 1], stddev = 0.1))
            self.critic_weights['b2'] = tf.Variable(tf.truncated_normal([1], stddev = 0.1))
        with tf.variable_scope('pi'):
            self.actor_weights['w1'] = tf.Variable(tf.truncated_normal([S_DIM, a_hidden1], stddev = 0.1), trainable = True)
            self.actor_weights['b1'] = tf.Variable(tf.truncated_normal([a_hidden1], stddev = 0.1), trainable = True)
            self.actor_weights['w2'] = tf.Variable(tf.truncated_normal([a_hidden1, A_DIM], stddev = 0.1), trainable = True)
            self.actor_weights['b2'] = tf.Variable(tf.truncated_normal([A_DIM], stddev = 0.1), trainable = True)
            self.actor_weights['w3'] = tf.Variable(tf.truncated_normal([a_hidden1, A_DIM], stddev = 0.1), trainable = True)
            self.actor_weights['b3'] = tf.Variable(tf.truncated_normal([A_DIM], stddev = 0.1), trainable = True)
        with tf.variable_scope('oldpi'):
            self.actor_weights_['w1'] = tf.Variable(tf.truncated_normal([S_DIM, a_hidden1], stddev = 0.1), trainable = False)
            self.actor_weights_['b1'] = tf.Variable(tf.truncated_normal([a_hidden1], stddev = 0.1), trainable = False)
            self.actor_weights_['w2'] = tf.Variable(tf.truncated_normal([a_hidden1, A_DIM], stddev = 0.1), trainable = False)
            self.actor_weights_['b2'] = tf.Variable(tf.truncated_normal([A_DIM], stddev = 0.1), trainable = False)
            self.actor_weights_['w3'] = tf.Variable(tf.truncated_normal([a_hidden1, A_DIM], stddev = 0.1), trainable = False)
            self.actor_weights_['b3'] = tf.Variable(tf.truncated_normal([A_DIM], stddev = 0.1), trainable = False)

    def update(self, s, a, r):
        '''
        meta policy update

        s, a, r: stacked states, actions and discounted returns.  Rebuilds the
        second-forward graph on the fast weights computed by the last call to
        sum_gradients_update, then runs the actor/critic train ops.
        '''
        self.second_forward_actor(self.new_a_weights, self.new_a_weights, self.new_c_weights)
        self.second_forward_critic(self.new_c_weights)
        # self.forward(self.actor_weights, self.actor_weights_, self.critic_weights)
        self.sess.run(self.update_oldpi_op)
        adv = self.sess.run(self.advantage_, {self.tfs: s, self.tfdc_r: r})
        print(adv.shape, adv.dtype)
        adv = (adv - adv.mean())/(adv.std()+1e-6)  # sometimes helpful
        print(a.shape, a.dtype)
        # update actor
        if METHOD['name'] == 'kl_pen':
            # NOTE(review): this branch runs the FIRST-forward ops (atrain_op /
            # kl_mean) while the clip branch below runs the SECOND-forward ops
            # (atrain_op_) -- looks inconsistent; confirm which graph the
            # KL-penalty path is meant to train.
            for i in range(A_UPDATE_STEPS):
                # print('updata: ',i)
                _, kl = self.sess.run(
                    [self.atrain_op, self.kl_mean],
                    {self.tfs: s, self.tfa: a, self.tfadv: adv, self.tflam: METHOD['lam']})
                if kl > 4*METHOD['kl_target']:  # this in in google's paper
                    break
            if kl < METHOD['kl_target'] / 1.5:  # adaptive lambda, this is in OpenAI's paper
                METHOD['lam'] /= 2
            elif kl > METHOD['kl_target'] * 1.5:
                METHOD['lam'] *= 2
            METHOD['lam'] = np.clip(METHOD['lam'], 1e-4, 10)    # sometimes explode, this clipping is my solution
        else:   # clipping method, find this is better (OpenAI's paper)
            [self.sess.run(self.atrain_op_, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
        # print([v.name for v in self.critic_var],self.sess.run(self.critic_var))
        # update critic
        [self.sess.run(self.ctrain_op_, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]

    def sum_gradients_update(self, s, a, r, stepsize):
        '''
        task specific policy update, not directly update the weights but derive the new weights variables/dictionary

        Computes symbolic gradients of the first-forward losses w.r.t. the raw
        weight variables and stores one-SGD-step "fast weights" (plain tensors)
        in self.new_a_weights / self.new_c_weights for the later meta update.
        '''
        # self.sess.run(self.update_oldpi_op)
        # self.first_forward_actor(self.actor_weights, self.actor_weights_, self.critic_weights)
        # self.first_forward_critic(self.critic_weights)
        adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
        adv = (adv - adv.mean())/(adv.std()+1e-6)  # sometimes helpful
        # update actor
        if METHOD['name'] == 'kl_pen':
            for i in range(A_UPDATE_STEPS):
                # print('updata: ',i)
                _, kl = self.sess.run(
                    [self.atrain_op, self.kl_mean],
                    {self.tfs: s, self.tfa: a, self.tfadv: adv, self.tflam: METHOD['lam']})
                if kl > 4*METHOD['kl_target']:  # this in in google's paper
                    break
            if kl < METHOD['kl_target'] / 1.5:  # adaptive lambda, this is in OpenAI's paper
                METHOD['lam'] /= 2
            elif kl > METHOD['kl_target'] * 1.5:
                METHOD['lam'] *= 2
            METHOD['lam'] = np.clip(METHOD['lam'], 1e-4, 10)    # sometimes explode, this clipping is my solution
        else:   # clipping method, find this is better (OpenAI's paper)
            # [self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
            # self.tfs = s
            # self.tfa = a
            # self.tfadv = adv
            # symbolic one-step SGD on the actor: w' = w - stepsize * dL/dw
            a_grads_form = tf.gradients(self.aloss, list(self.actor_weights.values()))
            # a_grads_value = a_grads_form.eval(feed_dict={self.tfs: s, self.tfa: a, self.tfadv: adv})
            a_grads = dict(zip(self.actor_weights.keys(), a_grads_form))
            self.new_a_weights = dict(zip(self.actor_weights.keys(), [self.actor_weights[key] - stepsize * a_grads[key]
                                    for key in self.actor_weights.keys()]))
        # self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
        # self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        # self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
        # print(self.critic_var)
        # print(self.actor_var)
        # print([v.name for v in self.critic_var],self.sess.run(self.critic_var))
        # update critic
        # [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
        # self.tfs = s
        # self.tfdc_r = r
        # same symbolic one-step SGD for the critic fast weights
        c_grads_form = tf.gradients(self.closs, list(self.critic_weights.values()))
        # c_grads_value = c_grads_form.eval()
        c_grads = dict(zip(self.critic_weights.keys(), c_grads_form))
        self.new_c_weights = dict(zip(self.critic_weights.keys(), [self.critic_weights[key] - stepsize * c_grads[key]
                                for key in self.critic_weights.keys()]))
        # self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        # self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
        # self.actor_weights = new_a_weights
        # self.critic_weights = new_c_weights
        # return new_a_weights, new_c_weights

    def _build_anet(self, name, trainable, weights):
        '''
        build the actor network with defined weights

        Returns (Normal distribution over actions, variables in scope `name`).
        '''
        with tf.variable_scope(name):
            # l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu, trainable=trainable)
            # # l1 = tf.layers.batch_normalization(tf.layers.dense(self.tfs, 100, tf.nn.relu, trainable=trainable), training=True)
            # '''the action mean mu is set to be scale 10 instead of 360, avoiding useless shaking and one-step to goal!'''
            # mu = 10.*tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
            # sigma = tf.layers.dense(l1, A_DIM, tf.nn.sigmoid, trainable=trainable) # softplus to make it positive
            a_l1 = tf.nn.relu(tf.matmul(self.tfs, weights['w1'])+weights['b1'])
            mu = 10.*tf.nn.tanh(tf.matmul(a_l1, weights['w2'])+weights['b2'])   # mean head, scaled to [-10, 10]
            sigma = tf.nn.sigmoid(tf.matmul(a_l1, weights['w3'])+weights['b3'])  # std head in (0, 1)
            # in case that sigma is 0
            sigma +=1e-4
            self.mu=mu
            self.sigma=sigma
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params

    def _build_cnet(self, name, weights):
        '''
        build the critic network with defined weights

        Returns the state-value tensor (one hidden ReLU layer, linear output).
        '''
        with tf.variable_scope(name):
            c_l1 = tf.nn.relu(tf.matmul(self.tfs, weights['w1']) + weights['b1'])
            output = tf.matmul(c_l1, weights['w2']) + weights['b2']
        return output

    def choose_action(self, s):
        # Sample one action from the current policy for a single state;
        # the result is clipped to the joint-angle range [-360, 360].
        s = s[np.newaxis, :]
        # a ,mu, sigma= self.sess.run([self.sample_op, self.mu, self.sigma], {self.tfs: s})
        a= self.sess.run(self.sample_op, {self.tfs: s})
        # print('s: ',s)
        # print('a: ', a)
        # print('mu, sigma: ', mu,sigma)
        return np.clip(a[0], -360, 360)

    def get_v(self, s):
        # State value from the first-forward critic; accepts a single state
        # or an already-batched array.
        if s.ndim < 2: s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]

    def save_model(self, ):
        # Snapshot the current pi/critic variable values (numpy arrays) so they
        # can be restored after a fine-tuning evaluation.
        actor_weights=self.sess.run(self.actor_var)
        critic_weights=self.sess.run(self.critic_var)
        return actor_weights, critic_weights

    # def value(self, ):
    #     return self.sess.run(self.actor_weights_before)[-1]

    def restore_model(self, a_te, c_te):
        # Write previously snapshotted values back into the pi and critic
        # variables, then re-sync oldpi from pi.
        # restore the actor
        with tf.variable_scope('pi'):
            self.restore_pi = [pi.assign(te) for pi, te in zip(self.pi_params, np.array(a_te))]
            self.sess.run(self.restore_pi)
        self.sess.run(self.update_oldpi_op)
        # restore the critic
        with tf.variable_scope('critic'):
            self.restore_critic =[cri.assign(te) for cri, te in zip(self.critic_params, np.array(c_te))]
            self.sess.run(self.restore_critic)

    def save(self, path):
        # Persist the whole session to a TF checkpoint at `path`.
        saver = tf.train.Saver()
        saver.save(self.sess, path)

    def load(self, path):
        # Restore the whole session from a TF checkpoint at `path`.
        saver=tf.train.Saver()
        saver.restore(self.sess, path)
def sample_task():
    """Sample a random reaching target and wrap it in a rendered Reacher env.

    The target is drawn uniformly from a square of half-width 0.3 centred on
    (0.5, 0.5) in normalised coordinates, then scaled to a 1000-px screen.

    Returns:
        (env, target_pose): the environment and the target in screen pixels.
    """
    half_range = 0.3
    screen_size = 1000
    # uniform offset in [-half_range, half_range]^2 around the screen centre
    centre_offset = (2*np.random.rand(2)-1) * half_range
    target_pose = (centre_offset + [0.5, 0.5]) * screen_size
    env = Reacher(target_pos=target_pose, render=True)
    return env, target_pose
def meta_update(ppo):
    """Roll out TRAIN_INTERVAL episodes on the (module-global) env and run one
    PPO meta update on the pooled data.

    Args:
        ppo: the PPO agent whose fast weights were just set by
             sum_gradients_update.

    Returns:
        The same ppo object after its update() has been applied.
    """
    batch_states, batch_actions, batch_returns = [], [], []
    for episode in range(TRAIN_INTERVAL):
        state = env.reset() / 100.  # scale the inputs
        ep_states, ep_actions, ep_rewards = [], [], []
        episode_reward = 0
        for step in range(EP_LEN):  # steps in one episode
            action = ppo.choose_action(state)
            next_state, reward, done = env.step(action)
            next_state = next_state / 100.
            ep_states.append(state)
            ep_actions.append(action)
            # raw rewards are used directly; normalising them made the
            # reacher rewards nearly identical and did not help
            ep_rewards.append(reward)
            state = next_state
            episode_reward += reward
            if (step+1) % BATCH == 0 or step == EP_LEN-1:
                # bootstrap from the critic's value of the final state and
                # discount backwards through the buffered rewards
                running_return = ppo.get_v(next_state)
                returns = []
                for reward_t in reversed(ep_rewards):
                    running_return = reward_t + GAMMA * running_return
                    returns.insert(0, running_return)
                bs = np.vstack(ep_states)
                ba = np.vstack(ep_actions)
                br = np.array(returns)[:, np.newaxis]
                ep_states, ep_actions, ep_rewards = [], [], []
                batch_states.append(bs)
                batch_actions.append(ba)
                batch_returns.append(br)
    # flatten the collected minibatches into one training set
    flat_a = np.array(batch_actions).reshape(-1, len(ba[-1]))
    flat_s = np.array(batch_states).reshape(-1, len(bs[-1]))
    flat_r = np.array(batch_returns).reshape(-1, len(br[-1]))
    ppo.update(flat_s, flat_a, flat_r)
    return ppo
# Build the PPO graph once and initialise all TF variables before either
# the training or the testing branch runs.
ppo = PPO()
ppo.sess.run(tf.global_variables_initializer())
# Meta-training loop: for each task, collect rollouts, take one inner
# (task-specific) fast-weight step, run a meta update, then fine-tune on a
# fixed held-out task to track progress.
if args.train:
    # env=Reacher(render=True)
    stepsize0=0.01  # inner-loop SGD step size
    test_env, t=sample_task()  # held-out task used for the periodic evaluation
    itr_test_rewards=[]
    for itr in range (ITR):
        # randomly sample a task (different target position)
        np.random.seed(itr)
        env, target_position =sample_task()
        print('Task {}: target position {}'.format(itr+1, target_position))
        all_ep_r = []
        train_set_a = []
        train_set_s = []
        train_set_r = []
        # inner policy update
        for ep in range(EP_MAX): # EP_MAX: how many episodes for training one tasks
            # print(actor_weights_before[-1])
            s = env.reset()
            s=s/100. # scale the inputs
            buffer_s, buffer_a, buffer_r = [], [], []
            ep_r = 0
            for t in range(EP_LEN): # steps in one episode
                # env.render()
                a = ppo.choose_action(s)
                s_, r, done = env.step(a)
                s_=s_/100.
                buffer_s.append(s)
                buffer_a.append(a)
                # print('r, norm_r: ', r, (r+8)/8)
                '''the normalization makes reacher's reward almost same and not work'''
                # buffer_r.append((r+8)/8) # normalize reward, find to be useful
                buffer_r.append(r)
                s = s_
                ep_r += r
                # store samples in memory
                if ((t+1) % BATCH == 0 or t == EP_LEN-1):
                    # compute bootstrapped discounted returns for the buffer
                    v_s_ = ppo.get_v(s_)
                    discounted_r = []
                    for r in buffer_r[::-1]:
                        v_s_ = r + GAMMA * v_s_
                        discounted_r.append(v_s_)
                    discounted_r.reverse()
                    bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
                    buffer_s, buffer_a, buffer_r = [], [], []
                    train_set_a.append(ba)
                    train_set_r.append(br)
                    train_set_s.append(bs)
                    # ppo.update(bs, ba, br)
            if ep%TRAIN_INTERVAL == 0:
                # pool the stored minibatches, take one inner fast-weight
                # step, then a meta update on freshly collected data
                train_set_a = np.array(train_set_a).reshape(-1, len(ba[-1]))
                train_set_s = np.array(train_set_s).reshape(-1, len(bs[-1]))
                train_set_r = np.array(train_set_r).reshape(-1, len(br[-1]))
                print('inner policy update begin')
                ppo.sum_gradients_update(train_set_s, train_set_a, train_set_r, stepsize0)
                print('inner policy update finish')
                print('meta policy update begin')
                ppo = meta_update(ppo)
                print('meta policy update finish')
                train_set_a = []
                train_set_s = []
                train_set_r = []
            if ep == 0: all_ep_r.append(ep_r)
            else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)  # moving average of episode reward
            print(
                'Ep: %i' % ep,
                "|Ep_r: %.2f" % ep_r,
                ("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
            )
            # if ep % 500==0:
            #     plt.plot(np.arange(len(all_ep_r)), all_ep_r)
            #     plt.xlabel('Episode');plt.ylabel('Moving averaged episode reward');plt.savefig('./ppo_reptile.png')
        # meta policy update, same as inner loop for FOMAML
        # stepsize=stepsize0*(1-itr/ITR) # decayed learning rate/step size, so not learn after several steps.
        # test 1 episode on test_env
        actor_weights_test, critic_weights_test= ppo.save_model() # save the model and restore it after test
        all_ep_r = []
        print('-------------- TEST --------------- ')
        # evaluate by fine-tuning on the held-out task (weights restored after)
        for ep in range(EP_MAX):
            s = test_env.reset()
            s=s/100. # scale the inputs
            buffer_s, buffer_a, buffer_r = [], [], []
            ep_r = 0
            for t in range(EP_LEN): # in one episode
                a = ppo.choose_action(s)
                s_, r, done = test_env.step(a)
                s_=s_/100.
                buffer_s.append(s)
                buffer_a.append(a)
                # print('r, norm_r: ', r, (r+8)/8)
                '''the normalization makes reacher's reward almost same and not work'''
                # buffer_r.append((r+8)/8) # normalize reward, find to be useful
                buffer_r.append(r)
                s = s_
                ep_r += r
                # update ppo
                if (t+1) % BATCH == 0 or t == EP_LEN-1:
                    v_s_ = ppo.get_v(s_)
                    discounted_r = []
                    for r in buffer_r[::-1]:
                        v_s_ = r + GAMMA * v_s_
                        discounted_r.append(v_s_)
                    discounted_r.reverse()
                    bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
                    buffer_s, buffer_a, buffer_r = [], [], []
                    ppo.update(bs, ba, br)
            all_ep_r.append(ep_r)
            # if ep == 0: all_ep_r.append(ep_r)
            # else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)
            # print(
            #     'Ep: %i' % ep,
            #     "|Ep_r: %i" % ep_r,
            #     ("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
            # )
            # if ep % 500==0:
            #     plt.plot(np.arange(len(all_ep_r)), all_ep_r)
            #     plt.xlabel('Episode');plt.ylabel('Moving averaged episode reward');plt.savefig('./ppo_reptile.png')
        # restore before test
        ppo.restore_model(actor_weights_test, critic_weights_test)
        itr_test_rewards.append(np.average(np.array(all_ep_r)))
        plt.plot(np.arange(len(itr_test_rewards)), itr_test_rewards)
        plt.savefig('./ppo_maml.png')
        if itr%10 == 0:
            ppo.save(model_path)  # checkpoint every 10 tasks
# Test branch: load the saved meta-model and fine-tune it on one freshly
# sampled task, plotting the per-episode reward curve.
if args.test:
    stepsize0=0.1
    test_env, t=sample_task()
    all_ep_r = []
    ppo.load(model_path)
    print('-------------- TEST --------------- ')
    for ep in range(EP_MAX):
        print('Episode: ', ep)
        s = test_env.reset()
        s=s/100. # scale the inputs
        buffer_s, buffer_a, buffer_r = [], [], []
        ep_r = 0
        for t in range(EP_LEN): # in one episode
            a = ppo.choose_action(s)
            s_, r, done = test_env.step(a)
            s_=s_/100.
            buffer_s.append(s)
            buffer_a.append(a)
            # print('r, norm_r: ', r, (r+8)/8)
            '''the normalization makes reacher's reward almost same and not work'''
            # buffer_r.append((r+8)/8) # normalize reward, find to be useful
            buffer_r.append(r)
            s = s_
            ep_r += r
            # update ppo
            if (t+1) % BATCH == 0 or t == EP_LEN-1:
                # compute bootstrapped discounted returns, then fine-tune
                v_s_ = ppo.get_v(s_)
                discounted_r = []
                for r in buffer_r[::-1]:
                    v_s_ = r + GAMMA * v_s_
                    discounted_r.append(v_s_)
                discounted_r.reverse()
                bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
                buffer_s, buffer_a, buffer_r = [], [], []
                ppo.update(bs, ba, br)
        all_ep_r.append(ep_r)
        # if ep == 0: all_ep_r.append(ep_r)
        # else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)
        # print(
        #     'Ep: %i' % ep,
        #     "|Ep_r: %i" % ep_r,
        #     ("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
        # )
        # if ep % 500==0:
        #     plt.plot(np.arange(len(all_ep_r)), all_ep_r)
        #     plt.xlabel('Episode');plt.ylabel('Moving averaged episode reward');plt.savefig('./ppo_reptile.png')
    # restore before test
    plt.plot(np.arange(len(all_ep_r)), all_ep_r)
plt.savefig('./ppo_maml_test.png') | [
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.clip_by_value",
"tensorflow.get_collection",
"numpy.clip",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"env_2.Reacher_for2",
"tensorflow.... | [((774, 860), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train or test neural net motor controller."""'}), "(description=\n 'Train or test neural net motor controller.')\n", (797, 860), False, 'import argparse\n'), ((18078, 18122), 'env_2.Reacher_for2', 'Reacher', ([], {'target_pos': 'target_pose', 'render': '(True)'}), '(target_pos=target_pose, render=True)\n', (18085, 18122), True, 'from env_2 import Reacher_for2 as Reacher\n'), ((19855, 19888), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (19886, 19888), True, 'import tensorflow as tf\n'), ((27715, 27749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./ppo_maml_test.png"""'], {}), "('./ppo_maml_test.png')\n", (27726, 27749), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1477), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1475, 1477), True, 'import tensorflow as tf\n'), ((1497, 1547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, S_DIM]', '"""state"""'], {}), "(tf.float32, [None, S_DIM], 'state')\n", (1511, 1547), True, 'import tensorflow as tf\n'), ((2750, 2796), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""log/"""', 'self.sess.graph'], {}), "('log/', self.sess.graph)\n", (2771, 2796), True, 'import tensorflow as tf\n'), ((3426, 3490), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""critic"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic')\n", (3443, 3490), True, 'import tensorflow as tf\n'), ((4122, 4186), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""critic"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic')\n", (4139, 4186), True, 'import tensorflow as tf\n'), ((15961, 16021), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)\n', 
(15978, 16021), True, 'import tensorflow as tf\n'), ((16713, 16737), 'numpy.clip', 'np.clip', (['a[0]', '(-360)', '(360)'], {}), '(a[0], -360, 360)\n', (16720, 16737), True, 'import numpy as np\n'), ((17691, 17707), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (17705, 17707), True, 'import tensorflow as tf\n'), ((17793, 17809), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (17807, 17809), True, 'import tensorflow as tf\n'), ((20107, 20126), 'numpy.random.seed', 'np.random.seed', (['itr'], {}), '(itr)\n', (20121, 20126), True, 'import numpy as np\n'), ((25663, 25692), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./ppo_maml.png"""'], {}), "('./ppo_maml.png')\n", (25674, 25692), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1717), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""adam"""'], {}), "('adam')\n", (1709, 1717), True, 'import tensorflow as tf\n'), ((1754, 1782), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['A_LR'], {}), '(A_LR)\n', (1776, 1782), True, 'import tensorflow as tf\n'), ((1819, 1847), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['C_LR'], {}), '(C_LR)\n', (1841, 1847), True, 'import tensorflow as tf\n'), ((1862, 1917), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""actor_inputs/"""'], {'reuse': 'tf.AUTO_REUSE'}), "('actor_inputs/', reuse=tf.AUTO_REUSE)\n", (1879, 1917), True, 'import tensorflow as tf\n'), ((1977, 2028), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, A_DIM]', '"""action"""'], {}), "(tf.float32, [None, A_DIM], 'action')\n", (1991, 2028), True, 'import tensorflow as tf\n'), ((2054, 2104), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""advantage"""'], {}), "(tf.float32, [None, 1], 'advantage')\n", (2068, 2104), True, 'import tensorflow as tf\n'), ((2130, 2172), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None', '"""lambda"""'], {}), "(tf.float32, None, 'lambda')\n", (2144, 
2172), True, 'import tensorflow as tf\n'), ((2188, 2244), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic_inputs/"""'], {'reuse': 'tf.AUTO_REUSE'}), "('critic_inputs/', reuse=tf.AUTO_REUSE)\n", (2205, 2244), True, 'import tensorflow as tf\n'), ((2282, 2335), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""discounted_r"""'], {}), "(tf.float32, [None, 1], 'discounted_r')\n", (2296, 2335), True, 'import tensorflow as tf\n'), ((2997, 3024), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic"""'], {}), "('critic')\n", (3014, 3024), True, 'import tensorflow as tf\n'), ((3685, 3712), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic"""'], {}), "('critic')\n", (3702, 3712), True, 'import tensorflow as tf\n'), ((4615, 4649), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sample_action"""'], {}), "('sample_action')\n", (4632, 4649), True, 'import tensorflow as tf\n'), ((4755, 4788), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""update_oldpi"""'], {}), "('update_oldpi')\n", (4772, 4788), True, 'import tensorflow as tf\n'), ((4905, 4930), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (4922, 4930), True, 'import tensorflow as tf\n'), ((5784, 5811), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""atrain"""'], {}), "('atrain')\n", (5801, 5811), True, 'import tensorflow as tf\n'), ((6308, 6342), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sample_action"""'], {}), "('sample_action')\n", (6325, 6342), True, 'import tensorflow as tf\n'), ((6450, 6483), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""update_oldpi"""'], {}), "('update_oldpi')\n", (6467, 6483), True, 'import tensorflow as tf\n'), ((6603, 6628), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (6620, 6628), True, 'import tensorflow as tf\n'), ((7572, 7599), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""atrain"""'], {}), 
"('atrain')\n", (7589, 7599), True, 'import tensorflow as tf\n'), ((7830, 7857), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic"""'], {}), "('critic')\n", (7847, 7857), True, 'import tensorflow as tf\n'), ((8275, 8298), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pi"""'], {}), "('pi')\n", (8292, 8298), True, 'import tensorflow as tf\n'), ((9029, 9055), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""oldpi"""'], {}), "('oldpi')\n", (9046, 9055), True, 'import tensorflow as tf\n'), ((11049, 11083), 'numpy.clip', 'np.clip', (["METHOD['lam']", '(0.0001)', '(10)'], {}), "(METHOD['lam'], 0.0001, 10)\n", (11056, 11083), True, 'import numpy as np\n'), ((12753, 12787), 'numpy.clip', 'np.clip', (["METHOD['lam']", '(0.0001)', '(10)'], {}), "(METHOD['lam'], 0.0001, 10)\n", (12760, 12787), True, 'import numpy as np\n'), ((14943, 14966), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (14960, 14966), True, 'import tensorflow as tf\n'), ((15898, 15942), 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': 'mu', 'scale': 'sigma'}), '(loc=mu, scale=sigma)\n', (15921, 15942), True, 'import tensorflow as tf\n'), ((16189, 16212), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (16206, 16212), True, 'import tensorflow as tf\n'), ((17214, 17237), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pi"""'], {}), "('pi')\n", (17231, 17237), True, 'import tensorflow as tf\n'), ((17463, 17490), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic"""'], {}), "('critic')\n", (17480, 17490), True, 'import tensorflow as tf\n'), ((19582, 19603), 'numpy.array', 'np.array', (['train_set_a'], {}), '(train_set_a)\n', (19590, 19603), True, 'import numpy as np\n'), ((19647, 19668), 'numpy.array', 'np.array', (['train_set_s'], {}), '(train_set_s)\n', (19655, 19668), True, 'import numpy as np\n'), ((19712, 19733), 'numpy.array', 'np.array', (['train_set_r'], {}), 
'(train_set_r)\n', (19720, 19733), True, 'import numpy as np\n'), ((2583, 2607), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2605, 2607), True, 'import tensorflow as tf\n'), ((2678, 2702), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2700, 2702), True, 'import tensorflow as tf\n'), ((3298, 3323), 'tensorflow.square', 'tf.square', (['self.advantage'], {}), '(self.advantage)\n', (3307, 3323), True, 'import tensorflow as tf\n'), ((3990, 4016), 'tensorflow.square', 'tf.square', (['self.advantage_'], {}), '(self.advantage_)\n', (3999, 4016), True, 'import tensorflow as tf\n'), ((4949, 4979), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""surrogate"""'], {}), "('surrogate')\n", (4966, 4979), True, 'import tensorflow as tf\n'), ((5240, 5286), 'tensorflow.distributions.kl_divergence', 'tf.distributions.kl_divergence', (['oldpi', 'self.pi'], {}), '(oldpi, self.pi)\n', (5270, 5286), True, 'import tensorflow as tf\n'), ((5318, 5336), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl'], {}), '(kl)\n', (5332, 5336), True, 'import tensorflow as tf\n'), ((6647, 6677), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""surrogate"""'], {}), "('surrogate')\n", (6664, 6677), True, 'import tensorflow as tf\n'), ((7018, 7066), 'tensorflow.distributions.kl_divergence', 'tf.distributions.kl_divergence', (['oldpi_', 'self.pi_'], {}), '(oldpi_, self.pi_)\n', (7048, 7066), True, 'import tensorflow as tf\n'), ((7099, 7118), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_'], {}), '(kl_)\n', (7113, 7118), True, 'import tensorflow as tf\n'), ((7911, 7962), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[S_DIM, c_hidden1]'], {'stddev': '(0.1)'}), '([S_DIM, c_hidden1], stddev=0.1)\n', (7930, 7962), True, 'import tensorflow as tf\n'), ((8018, 8062), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[c_hidden1]'], {'stddev': '(0.1)'}), '([c_hidden1], stddev=0.1)\n', (8037, 8062), True, 'import 
tensorflow as tf\n'), ((8118, 8165), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[c_hidden1, 1]'], {'stddev': '(0.1)'}), '([c_hidden1, 1], stddev=0.1)\n', (8137, 8165), True, 'import tensorflow as tf\n'), ((8221, 8257), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[1]'], {'stddev': '(0.1)'}), '([1], stddev=0.1)\n', (8240, 8257), True, 'import tensorflow as tf\n'), ((8351, 8402), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[S_DIM, a_hidden1]'], {'stddev': '(0.1)'}), '([S_DIM, a_hidden1], stddev=0.1)\n', (8370, 8402), True, 'import tensorflow as tf\n'), ((8475, 8519), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[a_hidden1]'], {'stddev': '(0.1)'}), '([a_hidden1], stddev=0.1)\n', (8494, 8519), True, 'import tensorflow as tf\n'), ((8592, 8643), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[a_hidden1, A_DIM]'], {'stddev': '(0.1)'}), '([a_hidden1, A_DIM], stddev=0.1)\n', (8611, 8643), True, 'import tensorflow as tf\n'), ((8716, 8756), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[A_DIM]'], {'stddev': '(0.1)'}), '([A_DIM], stddev=0.1)\n', (8735, 8756), True, 'import tensorflow as tf\n'), ((8829, 8880), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[a_hidden1, A_DIM]'], {'stddev': '(0.1)'}), '([a_hidden1, A_DIM], stddev=0.1)\n', (8848, 8880), True, 'import tensorflow as tf\n'), ((8953, 8993), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[A_DIM]'], {'stddev': '(0.1)'}), '([A_DIM], stddev=0.1)\n', (8972, 8993), True, 'import tensorflow as tf\n'), ((9109, 9160), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[S_DIM, a_hidden1]'], {'stddev': '(0.1)'}), '([S_DIM, a_hidden1], stddev=0.1)\n', (9128, 9160), True, 'import tensorflow as tf\n'), ((9235, 9279), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[a_hidden1]'], {'stddev': '(0.1)'}), '([a_hidden1], stddev=0.1)\n', (9254, 9279), True, 'import tensorflow as tf\n'), ((9354, 9405), 'tensorflow.truncated_normal', 
'tf.truncated_normal', (['[a_hidden1, A_DIM]'], {'stddev': '(0.1)'}), '([a_hidden1, A_DIM], stddev=0.1)\n', (9373, 9405), True, 'import tensorflow as tf\n'), ((9480, 9520), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[A_DIM]'], {'stddev': '(0.1)'}), '([A_DIM], stddev=0.1)\n', (9499, 9520), True, 'import tensorflow as tf\n'), ((9595, 9646), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[a_hidden1, A_DIM]'], {'stddev': '(0.1)'}), '([a_hidden1, A_DIM], stddev=0.1)\n', (9614, 9646), True, 'import tensorflow as tf\n'), ((9721, 9761), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[A_DIM]'], {'stddev': '(0.1)'}), '([A_DIM], stddev=0.1)\n', (9740, 9761), True, 'import tensorflow as tf\n'), ((16317, 16347), 'tensorflow.matmul', 'tf.matmul', (['c_l1', "weights['w2']"], {}), "(c_l1, weights['w2'])\n", (16326, 16347), True, 'import tensorflow as tf\n'), ((25565, 25583), 'numpy.array', 'np.array', (['all_ep_r'], {}), '(all_ep_r)\n', (25573, 25583), True, 'import numpy as np\n'), ((5368, 5406), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(surr - self.tflam * kl)'], {}), '(surr - self.tflam * kl)\n', (5382, 5406), True, 'import tensorflow as tf\n'), ((7151, 7191), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(surr_ - self.tflam * kl_)'], {}), '(surr_ - self.tflam * kl_)\n', (7165, 7191), True, 'import tensorflow as tf\n'), ((15550, 15584), 'tensorflow.matmul', 'tf.matmul', (['self.tfs', "weights['w1']"], {}), "(self.tfs, weights['w1'])\n", (15559, 15584), True, 'import tensorflow as tf\n'), ((15712, 15742), 'tensorflow.matmul', 'tf.matmul', (['a_l1', "weights['w3']"], {}), "(a_l1, weights['w3'])\n", (15721, 15742), True, 'import tensorflow as tf\n'), ((16244, 16278), 'tensorflow.matmul', 'tf.matmul', (['self.tfs', "weights['w1']"], {}), "(self.tfs, weights['w1'])\n", (16253, 16278), True, 'import tensorflow as tf\n'), ((17921, 17938), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (17935, 17938), True, 'import numpy as 
np\n'), ((19281, 19300), 'numpy.vstack', 'np.vstack', (['buffer_s'], {}), '(buffer_s)\n', (19290, 19300), True, 'import numpy as np\n'), ((19302, 19321), 'numpy.vstack', 'np.vstack', (['buffer_a'], {}), '(buffer_a)\n', (19311, 19321), True, 'import numpy as np\n'), ((26948, 26967), 'numpy.vstack', 'np.vstack', (['buffer_s'], {}), '(buffer_s)\n', (26957, 26967), True, 'import numpy as np\n'), ((26969, 26988), 'numpy.vstack', 'np.vstack', (['buffer_a'], {}), '(buffer_a)\n', (26978, 26988), True, 'import numpy as np\n'), ((15632, 15662), 'tensorflow.matmul', 'tf.matmul', (['a_l1', "weights['w2']"], {}), "(a_l1, weights['w2'])\n", (15641, 15662), True, 'import tensorflow as tf\n'), ((17318, 17332), 'numpy.array', 'np.array', (['a_te'], {}), '(a_te)\n', (17326, 17332), True, 'import numpy as np\n'), ((17580, 17594), 'numpy.array', 'np.array', (['c_te'], {}), '(c_te)\n', (17588, 17594), True, 'import numpy as np\n'), ((19323, 19345), 'numpy.array', 'np.array', (['discounted_r'], {}), '(discounted_r)\n', (19331, 19345), True, 'import numpy as np\n'), ((21629, 21648), 'numpy.vstack', 'np.vstack', (['buffer_s'], {}), '(buffer_s)\n', (21638, 21648), True, 'import numpy as np\n'), ((21650, 21669), 'numpy.vstack', 'np.vstack', (['buffer_a'], {}), '(buffer_a)\n', (21659, 21669), True, 'import numpy as np\n'), ((22016, 22037), 'numpy.array', 'np.array', (['train_set_a'], {}), '(train_set_a)\n', (22024, 22037), True, 'import numpy as np\n'), ((22093, 22114), 'numpy.array', 'np.array', (['train_set_s'], {}), '(train_set_s)\n', (22101, 22114), True, 'import numpy as np\n'), ((22170, 22191), 'numpy.array', 'np.array', (['train_set_r'], {}), '(train_set_r)\n', (22178, 22191), True, 'import numpy as np\n'), ((24686, 24705), 'numpy.vstack', 'np.vstack', (['buffer_s'], {}), '(buffer_s)\n', (24695, 24705), True, 'import numpy as np\n'), ((24707, 24726), 'numpy.vstack', 'np.vstack', (['buffer_a'], {}), '(buffer_a)\n', (24716, 24726), True, 'import numpy as np\n'), ((26990, 27012), 
'numpy.array', 'np.array', (['discounted_r'], {}), '(discounted_r)\n', (26998, 27012), True, 'import numpy as np\n'), ((21671, 21693), 'numpy.array', 'np.array', (['discounted_r'], {}), '(discounted_r)\n', (21679, 21693), True, 'import numpy as np\n'), ((24728, 24750), 'numpy.array', 'np.array', (['discounted_r'], {}), '(discounted_r)\n', (24736, 24750), True, 'import numpy as np\n'), ((5570, 5643), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', "(1.0 - METHOD['epsilon'])", "(1.0 + METHOD['epsilon'])"], {}), "(ratio, 1.0 - METHOD['epsilon'], 1.0 + METHOD['epsilon'])\n", (5586, 5643), True, 'import tensorflow as tf\n'), ((7357, 7431), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio_', "(1.0 - METHOD['epsilon'])", "(1.0 + METHOD['epsilon'])"], {}), "(ratio_, 1.0 - METHOD['epsilon'], 1.0 + METHOD['epsilon'])\n", (7373, 7431), True, 'import tensorflow as tf\n')] |
#<NAME>
#<EMAIL>
import pandas as pd
import numpy as np
'''The code is divided by question, answering five questions:
1- Do you want to see rows of city data
2- What are the most popular hours, months, or days (after filtering according to the user's desire)?
3- What are most popular station and trips
4- What are total and average trip duration?
5- What is the user type, gender, and birth years, according to availability '''
#Run-control flag: the main interactive loop repeats while the user answers 'y'.
cont = 'y'
#A while loop to repeat indefinitely
while cont == 'y':
    #The user chooses from menu (by number, validated below)
    menu = input('Please, choose the desired city:\n1- Chicago, 2- New York City, 3- Washington: ')
    #This function deals with invalid inputs since it is repeated a lot in the code
def input_error(valid_list, variable, valid_input):
'''Takes the input and tests if it is correct or mistaken, and
redefines the input variable giving a message to user to give
the correct inputs, then returns the variable
3 Args
1- valid_list: a list with the valid possible input
2- variable: the tested variable
3- valid_input: a string containing the accepted inputs '''
while variable not in valid_list:
variable = input('Error! Enter ' + valid_input + ': ')
return variable
    #Checking erroneous input
    menu = input_error(['1', '2', '3'], menu, 'a valid integer <1-3>')
    #Using simple integer instead of having to enter the city name
    CITY_DATA = {'1': 'chicago.csv', '2': 'new_york_city.csv', '3': 'washington.csv'}
    #Loading data into the dataframe
    city = pd.read_csv(CITY_DATA[menu])
    #Question (Q)1 starts
    #Asking user if they wish to display five rows
    display = input('Would you like to check five rows of the data? y/n: ')
    #Checking erroneous input
    display = input_error(['y', 'n'], display.lower(), 'y/n')
    #This loop continues displaying data rows as user desires
    for row in range(0, len(city), 5):
        if display == 'y':
            print('\nData rows:\n', city.loc[row:row+4])
            #Although highly improbable, in case the user continues until displaying all rows
            #-6 since rows start from zero
            #NOTE(review): `row` advances in steps of 5, so this equality only
            #matches for certain table lengths -- verify the off-by-one here.
            if row == len(city)-6:
                print(city.loc[row:row+4])
                print('You have been shown all the data rows')
                break
        #Asking user if they want to display more rows
        #NOTE(review): these two lines sit outside the `if display == 'y'`
        #block, so the prompt repeats even after the user answers 'n' --
        #confirm this is intended.
        display = input('Continue displaying the next five rows? y/n: ')
        #Checking erroneous input
        display = input_error(['y', 'n'], display.lower(), 'y/n')
    #Q1 ends
    #Q2 Starts
    #Popular times
    #Asking user if they want to filter
    #NOTE(review): `filter` shadows the Python builtin of the same name.
    filter = input('Filter start time data? y/n: ')
    filter = input_error(['y', 'n'], filter.lower(), 'y/n')
    #These two functions help with the next section
    #A function to create month, day, and hour columns since this occurs a lot
def time_columns(column):
#From Start time, month, day, and hour extracted
column = pd.to_datetime(column)
month = column.dt.month
day = column.dt.weekday_name
hour = column.dt.hour
return month, day, hour
#Since it repeats twice, a function to return day list and dictionary
def day_processing():
day_list = ['1- Saturday', '2- Sunday',
'3- Monday', '4- Tuesday',
'5- Wednesday', '6- Thursday',
'7- Friday']
#Creating a dictionary to correspond input integer with day name
day_dic = {'1': 'Saturday', '2': 'Sunday',
'3': 'Monday', '4': 'Tuesday',
'5': 'Wednesday', '6': 'Thursday',
'7': 'Friday'}
return day_list, day_dic
    #Filter is chosen?
    if filter == 'y':
        #Filter by month?
        month_filter = input('Would you like to filter by month? y/n: ')
        month_filter = input_error(['y', 'n'], month_filter.lower(), 'y/n')
        #Filter by month is chosen
        if month_filter == 'y':
            month_list = ['1- January', '2- February',
                          '3- March', '4- April',
                          '5- May', '6- June']
            #Choose a month from list
            month_choose = input('Choose the month {}: '.format(month_list))
            #Using numpy for easier creating of the string list of integers
            month_choose = input_error([str(num) for num in np.arange(1, 7)], month_choose,
                                       'valid integer <1-6>')
            #Creating required columns using the function
            city['month'], city['day'], city['hour'] = time_columns(city['Start Time'])
            city = city[city['month'] == int(month_choose)]
            #Further filter by day?
            day_filter = input('Would you like to filter by day? y/n: ')
            day_filter = input_error(['y', 'n'], day_filter.lower(), 'y/n')
            if day_filter == 'y':
                #Using the function to create the needed list and dictionary
                day_list, day_dic = day_processing()
                #The day choice input 1-7
                day_choose = input('Choose the day {}: '.format(day_list))
                day_choose = input_error([str(num) for num in np.arange(1, 8)], day_choose,
                                         'valid integer <1-7>')
                #Changing dataframe to contain only chosen day along with chosen month
                city = city[city['day'] == day_dic[day_choose]]
                #Most popular hour
                #Dropping NaN
                #NOTE(review): dropna() returns a new Series; without
                #assignment or inplace=True the next line has no effect.
                city['hour'].dropna()
                print('\nMost popular hour in chosen month and day:\n', city['hour'].mode()[0])
            #No day filter
            elif day_filter == 'n':
                #Displaying day and hour
                #Dropping NaN
                #NOTE(review): these dropna() results are discarded as well.
                city['day'].dropna()
                city['hour'].dropna()
                print('\nMost popular day in chosen month:\n', city['day'].mode()[0])
                print('\nMost popular hour in chosen month:\n', city['hour'].mode()[0])
        #No month filter
        elif month_filter == 'n':
            #Ask about day filter
            day_filter = input('Would you like to filter by day? y/n: ')
            day_filter = input_error(['y', 'n'], day_filter.lower(), 'y/n')
            #Day filter chosen
            if day_filter == 'y':
                #Using the function to create the needed list and dictionary
                day_list, day_dic = day_processing()
                #The day choice input 1-7
                day_choose = input('Choose the day {}: '.format(day_list))
                day_choose = input_error([str(num) for num in np.arange(1, 8)], day_choose,
                                         'valid integer <1-7>')
                #Creating required columns using the function
                city['month'], city['day'], city['hour'] = time_columns(city['Start Time'])
                city = city[city['day'] == day_dic[day_choose]]
                #NOTE(review): dropna() results discarded (see note above).
                city['month'].dropna()
                city['hour'].dropna()
                #Printed if no day filtering
                print('\nMost popular month containing chosen day:\n', city['month'].mode()[0])
                print('\nMost popular hour in chosen day:\n', city['hour'].mode()[0])
    #This part is executed if no filter is chosen
    elif filter == 'n':
        #Dropping NaN
        #NOTE(review): dropna() result discarded; no effect on `city`.
        city['Start Time'].dropna()
        #Creating required columns for each time format
        city['month'], city['day'], city['hour'] = time_columns(city['Start Time'])
        #Printing most popular day, month and hour
        print('\nMost popular month:\n', city['month'].mode()[0])
        print('\nMost popular day:\n', city['day'].mode()[0])
        print('\nMost popular hour:\n', city['hour'].mode()[0])
    #Q2 ends
    #Q3 starts
    #Stations and trips
    #Checking for NaN
    #NOTE(review): fillna() returns a copy; without assignment or
    #inplace=True these calls do not modify `city`.
    city['Start Station'].fillna('Unspecified Station')
    city['End Station'].fillna('Unspecified Station')
    #Creating a new column for trips
    city['Trip'] = city['Start Station'] + ' To ' + city['End Station']
    #Displaying most popular start, end, and trip
    print('\nMost popular start station(s):\n', city['Start Station'].mode()[0])
    print('\nMost popular end station(s):\n', city['End Station'].mode()[0])
    print('\nMost popular trip(s):\n', city['Trip'].mode()[0])
    #Q3 ends
    #Q4 starts
    #Trip duration
    #Check for any NaN values and replace
    #NOTE(review): same as above -- this fillna() result is discarded.
    city['Trip Duration'].fillna(0)
    #Store the total travel time in a variable for easiness of use in format function
    total = city['Trip Duration'].sum()
    #Total time is very large in seconds, so it is useful to turn into minutes and hours
    print('\nTotal traveling time is {} seconds,i.e {} minutes, i.e {} hours.'.format(total, total/60, total/3600))
    #Average time is easy to read, so left in seconds
    print('Average traveling time is {} seconds'.format(city['Trip Duration'].mean()))
    #Q4 ends
    #Q5 starts
    #User info
    #Check for any NaN values and replace
    city['User Type'].fillna('Unspecified')
    #Display each user type count
    print('\nUser type counts:\n', city['User Type'].value_counts())
    #Displaying gender and birth years info for chicago and NYC
    #(presumably the Washington file lacks these columns -- confirm)
    if menu in ['1', '2']:
        #Check for NaN
        city['Gender'].fillna('Unspecified')
        #Display each gender count
        print('\nGender counts:\n', city['Gender'].value_counts())
        #Birth years
        #Dropping NaN
        #NOTE(review): dropna() result is discarded here as well.
        city['Birth Year'].dropna(axis = 0)
        #Showing most common, earliest, and latest birth years
        print('\nMost common birth year(s):\n', city['Birth Year'].mode()[0])
        print('Earliest birth year:\n', min(city['Birth Year']))
        print('Most recent birth year:\n', max(city['Birth Year']))
    #Q5 ends
    #The user is asked if they wish to continue:
    cont = input('Continue? y/n: ')
    cont = input_error(['y', 'n'], cont.lower(), 'y/n')
| [
"pandas.read_csv",
"pandas.to_datetime",
"numpy.arange"
] | [((1647, 1675), 'pandas.read_csv', 'pd.read_csv', (['CITY_DATA[menu]'], {}), '(CITY_DATA[menu])\n', (1658, 1675), True, 'import pandas as pd\n'), ((3120, 3142), 'pandas.to_datetime', 'pd.to_datetime', (['column'], {}), '(column)\n', (3134, 3142), True, 'import pandas as pd\n'), ((4683, 4698), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (4692, 4698), True, 'import numpy as np\n'), ((5526, 5541), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (5535, 5541), True, 'import numpy as np\n'), ((7013, 7028), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (7022, 7028), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from mock import patch
from btb.hyper_parameter import HyperParameter, ParamTypes
from btb.tuning.uniform import Uniform
class TestUniform(TestCase):
    """Unit tests for the Uniform tuner's predict()."""

    @patch('btb.tuning.uniform.np.random')
    def test_predict(self, np_random_mock):
        """predict() returns np.random.rand's output and requests a (rows, 1) draw."""
        # Stub the random draw so the prediction is deterministic.
        stubbed_draw = np.array([.4, .6, .8])
        np_random_mock.rand.return_value = stubbed_draw

        hyperparams = (
            ('a_float_param', HyperParameter(ParamTypes.FLOAT, [1., 2.])),
            ('an_int_param', HyperParameter(ParamTypes.INT, [1, 5])),
        )
        tuner = Uniform(hyperparams)

        candidates = np.array([[1., 1], [1.2, 2], [1.4, 3]])

        # Run
        result = tuner.predict(candidates)

        # The stubbed draw must be forwarded unchanged, and exactly one
        # random column must be requested per candidate row.
        np.testing.assert_array_equal(result, np.array([.4, .6, .8]))
        np_random_mock.rand.assert_called_once_with(3, 1)
| [
"numpy.testing.assert_array_equal",
"mock.patch",
"btb.tuning.uniform.Uniform",
"numpy.array",
"btb.hyper_parameter.HyperParameter"
] | [((314, 351), 'mock.patch', 'patch', (['"""btb.tuning.uniform.np.random"""'], {}), "('btb.tuning.uniform.np.random')\n", (319, 351), False, 'from mock import patch\n'), ((457, 482), 'numpy.array', 'np.array', (['[0.4, 0.6, 0.8]'], {}), '([0.4, 0.6, 0.8])\n', (465, 482), True, 'import numpy as np\n'), ((673, 690), 'btb.tuning.uniform.Uniform', 'Uniform', (['tunables'], {}), '(tunables)\n', (680, 690), False, 'from btb.tuning.uniform import Uniform\n'), ((718, 758), 'numpy.array', 'np.array', (['[[1.0, 1], [1.2, 2], [1.4, 3]]'], {}), '([[1.0, 1], [1.2, 2], [1.4, 3]])\n', (726, 758), True, 'import numpy as np\n'), ((879, 904), 'numpy.array', 'np.array', (['[0.4, 0.6, 0.8]'], {}), '([0.4, 0.6, 0.8])\n', (887, 904), True, 'import numpy as np\n'), ((911, 961), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predicted', 'expected'], {}), '(predicted, expected)\n', (940, 961), True, 'import numpy as np\n'), ((532, 576), 'btb.hyper_parameter.HyperParameter', 'HyperParameter', (['ParamTypes.FLOAT', '[1.0, 2.0]'], {}), '(ParamTypes.FLOAT, [1.0, 2.0])\n', (546, 576), False, 'from btb.hyper_parameter import HyperParameter, ParamTypes\n'), ((606, 644), 'btb.hyper_parameter.HyperParameter', 'HyperParameter', (['ParamTypes.INT', '[1, 5]'], {}), '(ParamTypes.INT, [1, 5])\n', (620, 644), False, 'from btb.hyper_parameter import HyperParameter, ParamTypes\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments
"""
Plotting functions
========================================================
**Module name:** :mod:`qmlt.numerical.plot`
.. currentmodule:: qmlt.numerical.plot
.. codeauthor:: <NAME> <<EMAIL>>
This module contains the functions required to plot the parameters
of the numeric learner.
These are auxillary functions, it is recommended you instead use the
plotting method available in the numeric learner, which will provide
live plots of the training progress and monitored parameters. This can be
turned on by passing the ``plot`` key to the hyperparameters dictionary.
For example,
>>> hyperparams = {'circuit': circuit,
'log_every': 10,
'plot': True
}
Here, the integer value of ``log_every`` specifies at how many global steps
the live plots should be updated. When the training is complete, the terminal
will show the message
.. code-block:: console
Training complete. Close the live plot window to exit.
To use auxillary plotting functions on a logfile:
>>> from qmlt.numerical import plot
>>> plot.plot_parameter(numerical, y='loss')
You can also chain together plots by passing through the returned
axes, to display multiple parameters on one plot:
>>> ax = plot.plot_parameter(numerical, y='loss')
>>> ax = plot.plot_parameter(numerical, y='cost', ax=ax)
>>> ax = plot.plot_parameter(numerical, y='regul', ax=ax,
... legend=True, save_filename="test.png")
Finally, you can also automatically plot all parameters against the global
step, on one figure as multiple subplots:
>>> plot.plot_all("numerical/logsNUM/log.csv")
Plotting functions
------------------
.. autosummary::
plot_parameter
plot_all
Auxillary functions
-------------------
.. autosummary::
_squareish
_plot
Code details
------------
"""
import os
from itertools import zip_longest
import numpy as np
try:
    import matplotlib as mpl

    # On headless systems (no X display) switch to the non-interactive Agg
    # backend *before* pyplot is imported, otherwise pyplot locks in a GUI
    # backend that cannot open a window.
    if os.environ.get('DISPLAY', '') == '':
        print('no display found. Using non-interactive Agg backend')
        mpl.use('Agg')
    from matplotlib import pyplot as plt
# Catch only ImportError: the previous bare `except:` also reported
# unrelated failures (even KeyboardInterrupt/SystemExit) as a missing
# dependency. Chaining keeps the original traceback for debugging.
except ImportError as exc:
    raise ImportError("To use the plotting functions, matplotlib must be installed") from exc
def _squareish(n):
"""Factors an integer to two integers that closesly approximates a square
Args:
n (int): integer to factor.
Returns:
tuple(int, int): the squareish integers.
"""
if n == 1:
return (1, 1)
if n == 2:
return (2, 1)
nsqrt = np.ceil(np.sqrt(n))
solution = False
x = int(nsqrt)
while not solution:
y = int(n/x)
if y * x == float(n):
solution = True
else:
x -= 1
if n > 1:
if x == 1 or y == 1:
x = 3
y = int(np.ceil(n/3))
return x, y
def _plot(x, y, ax=None, xlabel=None, ylabel=None, **plot_kw):
    r"""Draw a single line plot onto *ax* (or the current axes).

    Args:
        x (array): the data to plot on the x-axis.
        y (array): the data to plot on the y-axis.
        ax (matplotlib.axes.Axes): target axes; defaults to ``plt.gca()``.
        xlabel (str): the x-axis label.
        ylabel (str): label text; NOTE it is rendered as the axes *title*
            (original behavior, kept intentionally).
        **plot_kw: additional keyword arguments passed to ``Axes.plot``.

    Returns:
        matplotlib.axes.Axes: the axes that were drawn on.
    """
    target = plt.gca() if ax is None else ax
    if xlabel is not None:
        target.set_xlabel(xlabel)
    if ylabel is not None:
        target.set_title(ylabel)
    target.plot(x, y, **plot_kw)
    return target
def plot_parameter(logfile, x='global_step', y='loss', save_filename=None,
                   ax=None, legend=False, style='ggplot', legend_kw=None,
                   fig_kw=None, savefig_kw=None, **plot_kw): # pragma: no cover
    r"""Produces a line plot visualizing settings, training progress, or monitored parameters.
    Args:
        logfile (str): the location of the logfile containing the training progress
            and parameter values for each global step.
        x (str): the parameter to plot on the x-axis. By default the global step.
        y (str): the parameter to plot on the y-axis. By default the loss.
        save_filename (str): string containing the output image filename, including
            all relevant paths and file extensions. All image filetypes supported
            by matplotlib are supported here. By default, the plot is *not* saved.
        ax (matplotlib.axes.Axes): a matplotlib axes object. If none is provided,
            this is created automatically.
        legend (bool): If True, a legend is added containing the y parameter names.
        style (str): a supported matplotlib style sheet. To see the available
            styles on your system, please refer to the output of
            ``matplotlib.pyplot.style.available``.
        legend_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.pyplot.legend``.
        fig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure``.
        savefig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure.savefig``.
        **plot_kw: additional keyword arguments to be passed to ``matplotlib.pyplot.plot``.
    Returns:
        matplotlib.axes.Axes: returns the plotting axes.
    """
    # Mutable defaults are created per call to avoid shared state.
    if fig_kw is None:
        fig_kw = {'figsize': (12, 8)}
    if savefig_kw is None:
        savefig_kw = {}
    if legend_kw is None:
        legend_kw = {}
    data = np.genfromtxt(logfile, dtype=float, delimiter=',', names=True)
    params = data.dtype.names
    if x not in params:
        raise ValueError("The x-axis parameter {} does not exist.".format(x))
    if y not in params:
        raise ValueError("The y-axis parameter {} does not exist.".format(y))
    plt.style.use(style)
    if ax is None:
        _, ax = plt.subplots(1, 1, squeeze=True, **fig_kw)
    if legend:
        # With a legend, the curve is labelled there instead of via the title.
        ax = _plot(data[x], data[y], label=y, ylabel="", ax=ax, **plot_kw)
        # Bug fix: legend_kw was previously accepted and documented but never
        # forwarded to plt.legend().
        plt.legend(**legend_kw)
    else:
        ax = _plot(data[x], data[y], label=y, ylabel=y, xlabel=x, ax=ax, **plot_kw)
    if save_filename is not None:
        plt.savefig(save_filename, **savefig_kw)
    return ax
def plot_all(logfile, x='global_step', y=None, save_filename=None,
             figax=None, style='ggplot', fig_kw=None, savefig_kw=None, **plot_kw): # pragma: no cover
    r"""Produces a figure containing line plots visualizing settings,
    training progress, or monitored parameters.
    Args:
        logfile (str): the location of the logfile containing the training progress
            and parameter values for each global step.
        x (str): the parameter to plot on the x-axes. By default the global step.
        y Sequence[str]: the parameters to plot on the figure. By default, all will be plotted.
        save_filename (str): string containing the output image filename, including
            all relevant paths and file extensions. All image filetypes supported
            by matplotlib are supported here. By default, the plot is *not* saved.
        figax (tuple): a tuple containing the figure and the plotting axes. Created
            by default if not provided.
        style (str): a supported matplotlib style sheet. To see the available
            styles on your system, please refer to the output of
            ``matplotlib.pyplot.style.available``.
        fig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure``.
        savefig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure.savefig``.
        **plot_kw: additional keyword arguments to be passed to ``matplotlib.pyplot.plot``.
    Returns:
        tuple: returns a tuple containing the figure and the plotting axes.
    """
    # Mutable defaults are created per call.
    if fig_kw is None:
        fig_kw = {'figsize': (12, 8)}
    if savefig_kw is None:
        savefig_kw = {}
    data = np.genfromtxt(logfile, dtype=float, delimiter=',', names=True)
    params = data.dtype.names
    if x not in params:
        raise ValueError("The x-axis parameter {} does not exist.".format(x))
    xdata = data[x]
    if y is None:
        # Plot every logged column except the x-axis parameter itself.
        ydata = [data[p] for p in params if p != x]
        ylabels = [p for p in params if p != x]
    else:
        try:
            ydata = [data[p] for p in y]
        except ValueError:
            raise ValueError("parameter name does not exist in logfile.")
        ylabels = y
    # Near-square subplot grid large enough for all requested parameters.
    rows, cols = _squareish(len(ydata))
    plt.style.use(style)
    if figax is None:
        fig, ax = plt.subplots(rows, cols, sharex=True, sharey=False, **fig_kw)
    else:
        fig, ax = figax
    # zip_longest pads with None, so surplus axes are hidden below.
    for idx, (yd, yl, a) in enumerate(zip_longest(ydata, ylabels, ax.ravel())):
        # get 2D grid location
        loc = np.array(np.unravel_index([idx], (rows, cols))).flatten()
        # only label x-axis if on the bottom row
        if yd is not None:
            if loc[0] == rows - 1:
                a = _plot(xdata, yd, xlabel=x, ylabel=yl, ax=a, **plot_kw)
            else:
                a = _plot(xdata, yd, ylabel=yl, ax=a, **plot_kw)
        else:
            a.axis('off')
    plt.tight_layout()
    if save_filename is not None:
        fig.savefig(save_filename, **savefig_kw)
    return fig, ax
| [
"numpy.ceil",
"matplotlib.pyplot.legend",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots",
"numpy.unravel_index",
"os.environ.get",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((6258, 6320), 'numpy.genfromtxt', 'np.genfromtxt', (['logfile'], {'dtype': 'float', 'delimiter': '""","""', 'names': '(True)'}), "(logfile, dtype=float, delimiter=',', names=True)\n", (6271, 6320), True, 'import numpy as np\n'), ((6562, 6582), 'matplotlib.pyplot.style.use', 'plt.style.use', (['style'], {}), '(style)\n', (6575, 6582), True, 'from matplotlib import pyplot as plt\n'), ((8945, 9007), 'numpy.genfromtxt', 'np.genfromtxt', (['logfile'], {'dtype': 'float', 'delimiter': '""","""', 'names': '(True)'}), "(logfile, dtype=float, delimiter=',', names=True)\n", (8958, 9007), True, 'import numpy as np\n'), ((9512, 9532), 'matplotlib.pyplot.style.use', 'plt.style.use', (['style'], {}), '(style)\n', (9525, 9532), True, 'from matplotlib import pyplot as plt\n'), ((10169, 10187), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10185, 10187), True, 'from matplotlib import pyplot as plt\n'), ((2606, 2635), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (2620, 2635), False, 'import os\n'), ((2720, 2734), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (2727, 2734), True, 'import matplotlib as mpl\n'), ((3181, 3191), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (3188, 3191), True, 'import numpy as np\n'), ((4030, 4039), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4037, 4039), True, 'from matplotlib import pyplot as plt\n'), ((6619, 6661), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'squeeze': '(True)'}), '(1, 1, squeeze=True, **fig_kw)\n', (6631, 6661), True, 'from matplotlib import pyplot as plt\n'), ((6761, 6773), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6771, 6773), True, 'from matplotlib import pyplot as plt\n'), ((6911, 6951), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_filename'], {}), '(save_filename, **savefig_kw)\n', (6922, 6951), True, 'from matplotlib import pyplot as plt\n'), ((9573, 9634), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'sharex': '(True)', 'sharey': '(False)'}), '(rows, cols, sharex=True, sharey=False, **fig_kw)\n', (9585, 9634), True, 'from matplotlib import pyplot as plt\n'), ((3451, 3465), 'numpy.ceil', 'np.ceil', (['(n / 3)'], {}), '(n / 3)\n', (3458, 3465), True, 'import numpy as np\n'), ((9804, 9841), 'numpy.unravel_index', 'np.unravel_index', (['[idx]', '(rows, cols)'], {}), '([idx], (rows, cols))\n', (9820, 9841), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
# define target list
# Properties of the whole target compound.
mmol=653.19 # g/mol
Th_length=341. # [Aa] -- presumably Angstrom; confirm units
# target masses in GeV and number of nucleons
# For each element X: m_X = mass [GeV] (per the comment above),
# A_X = abundance-weighted mass number, Z_X = atomic number,
# n_X = atoms of X per formula unit.
# Hydrogen
m_H=0.9389
A_H=1.
Z_H=1.
n_H=7.
# Boron
m_B=10.07
# Mass number averaged over natural abundances (10B: 20%, 11B: 80%).
A_B=(10.*20.+11.*80.)/100.
Z_B=5.
n_B=11.
# Oxygen
m_O=14.903
# Averaged over 16O/17O/18O abundances (99.76% / 0.04% / 0.2%).
A_O=(99.76*16+.04*17+.2*18)/100
Z_O=8.
n_O=22.
# Strontium
m_Sr=81.62
# Averaged over 84/86/87/88-Sr abundances.
A_Sr=(84.*0.56+86.*9.86+87.*7.00+88.*82.58)/100.
Z_Sr=38.
n_Sr=2.
# lists
# Per-element arrays, ordered H, B, O, Sr.
mT_list=[m_H,m_B,m_O,m_Sr]
AT_list=[A_H,A_B,A_O,A_Sr]
ZT_list=[Z_H,Z_B,Z_O,Z_Sr]
nameT_list=['H','B','O','Sr']
# Mass fraction of each element: (m_X * n_X) normalized by the total
# mass per formula unit.
massFrac_list=np.array([m_H*n_H,m_B*n_B,m_O*n_O,m_Sr*n_Sr])/(m_H*n_H+m_B*n_B+m_O*n_O+m_Sr*n_Sr)
| [
"numpy.array"
] | [((553, 609), 'numpy.array', 'np.array', (['[m_H * n_H, m_B * n_B, m_O * n_O, m_Sr * n_Sr]'], {}), '([m_H * n_H, m_B * n_B, m_O * n_O, m_Sr * n_Sr])\n', (561, 609), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# PROJECT : JAS1101 Final Project
#
# ----------------------------------------------------------------------------
# Docstring
"""Get the Proper Motion Scale Size of Each Globular Cluster.
Warnings
--------
SEE normalize_pm notebook for exploration of Gaussianity (assumed here)
because it is NOT true.
Routine Listings
----------------
"""
__author__ = ["<NAME>", "<NAME>", "<NAME>"]
# __all__ = [
# ""
# ]
###############################################################################
# IMPORTS
# GENERAL
import os
import pathlib
import warnings
import argparse
from typing import Optional, Sequence
import tqdm
import numpy as np
import scipy.stats as stats
import scipy.optimize as optimize
import astropy.units as u
from astropy.table import Table, QTable
from astropy.stats import SigmaClip
import matplotlib.pyplot as plt
import seaborn as sns
# PROJECT-SPECIFIC
from .util import gaussfitter
###############################################################################
# PARAMETERS
# Show every UserWarning each time it is raised (not only the first time).
warnings.simplefilter("always", UserWarning)

# Data and output-figure directories, resolved relative to this module.
DATA = str(pathlib.Path(__file__).parent.absolute()) + "/data/"
FIGURES = str(pathlib.Path(__file__).parent.absolute()) + "/figures/"

# Ensure the figures directory exists. makedirs(..., exist_ok=True) replaces
# the previous `if not isdir: mkdir` pattern, which raced if the directory
# was created between the check and the mkdir call.
os.makedirs(FIGURES, exist_ok=True)
###############################################################################
# CODE
###############################################################################
def read_globular_cluster_table(file: str) -> QTable:
    """Load a per-star globular-cluster table and attach units.

    Parameters
    ----------
    file : str
        path to an ascii table with a commented header

    Returns
    -------
    QTable
        the table, with units attached to the positional, kinematic,
        and photometric columns
    """
    table = QTable.read(file, format="ascii.commented_header")

    # column name -> physical unit
    pm_unit = u.mas / u.yr
    column_units = {
        "x": u.deg,
        "y": u.deg,
        "pmx": pm_unit,
        "pmy": pm_unit,
        "pmx_e": pm_unit,
        "pmy_e": pm_unit,
        "g_mag": u.mag,
        "bp_rp": u.mag,
    }

    for column, unit in column_units.items():
        table[column].unit = unit

    return table


# /def
def read_summary_table(file: str) -> QTable:
    """Read the cluster summary table into Astropy format.

    Parameters
    ----------
    file : str
        path to an ascii table with a commented header

    Returns
    -------
    QTable
        indexed by cluster "Name", with units on the physical columns
    """
    # read as a plain Table first: columns are easier to edit that way
    table = Table.read(file, format="ascii.commented_header")
    table.add_index("Name")  # enables table.loc[name] access

    # column name -> physical unit
    vel_unit = u.km / u.s
    pm_unit = u.mas / u.yr
    column_units = {
        "ra": u.deg,
        "dec": u.deg,
        "dist": u.kpc,
        "vlos": vel_unit,
        "vloserr": vel_unit,
        "sigma": vel_unit,
        "rmax": u.arcmin,
        "pmra": pm_unit,
        "pmdec": pm_unit,
        "pmra_e": pm_unit,
        "pmdec_e": pm_unit,
        "rscale": u.arcmin,
        "pmdisp": pm_unit,
        "pmscale": pm_unit,
        "pmscale_e": pm_unit,
    }

    # some columns are only created later in the pipeline, so assign
    # units only to the columns that actually exist
    for column, unit in column_units.items():
        if column in table.columns:
            table[column].unit = unit

    return QTable(table)


# /def
# ------------------------------------------------------------------------
# https://scipy-cookbook.readthedocs.io/items/FittingData.html
def gaussian(
    height: float,
    center_x: float,
    center_y: float,
    width_x: float,
    width_y: float,
):
    """Build a 2D Gaussian with the given amplitude, center, and widths.

    Parameters
    ----------
    height : float
        peak amplitude
    center_x, center_y : float
        coordinates of the Gaussian center
    width_x, width_y : float
        widths (standard deviations) along each axis

    Returns
    -------
    Gaussian : FunctionType
        callable ``(x, y) -> value`` evaluating the Gaussian
    """
    sx = float(width_x)
    sy = float(width_y)

    def Gaussian(x: Sequence, y: Sequence) -> Sequence:
        """Evaluate the preloaded Gaussian at positions x, y.

        Parameters
        ----------
        x, y : array-like
            positions

        Returns
        -------
        array-like
        """
        zx = (center_x - x) / sx
        zy = (center_y - y) / sy
        # /2 and *0.5 are bit-identical for binary floats
        return height * np.exp(-(zx ** 2 + zy ** 2) / 2)

    # /def

    return Gaussian


# /def
def moments(data):
    """Estimate (height, x, y, width_x, width_y) of a 2D distribution.

    The center comes from the first image moments; each width is the
    second moment taken along the column/row passing through the
    (integer-truncated) centroid.
    """
    total = data.sum()
    X, Y = np.indices(data.shape)

    # centroid from first moments
    x = (X * data).sum() / total
    y = (Y * data).sum() / total

    # width_x: second moment of the column through the centroid
    col = data[:, int(y)]
    var_x = np.abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()
    width_x = np.sqrt(var_x)

    # width_y: second moment of the row through the centroid
    row = data[int(x), :]
    var_y = np.abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()
    width_y = np.sqrt(var_y)

    height = data.max()
    return height, x, y, width_x, width_y


# /def
def fitgaussian(data):
    """Fit an axis-aligned 2D Gaussian to ``data`` by least squares.

    The fit is seeded from the image moments.  Returns the fitted
    (height, x, y, width_x, width_y) parameters plus the ``leastsq``
    covariance, info dict, and remaining status/message output.
    """
    initial = moments(data)

    def residuals(p):
        # flattened difference between the Gaussian model and the data
        return np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)

    p, cov, infodict, *errmsg = optimize.leastsq(
        residuals, initial, full_output=True
    )
    return p, cov, infodict, errmsg


# /def
# ------------------------------------------------------------------------
def scale_values_2d(name, df, threshold=0.8, sigma=4):
    """Fit a 2D Gaussian to a cluster's proper-motion histogram.

    Selects probable members, sigma-clips outliers, bins the proper
    motions (Sturges'-style rule for the bin count), fits a 2D Gaussian
    (with rotation only if warranted), and saves QQ-, fit-, and
    covariance-diagnostic figures into ``FIGURES``.

    Parameters
    ----------
    name : str
        cluster name, used in the output figure filenames
    df : QTable
        per-star table with "memberprob", "pmx", "pmy" columns
    threshold : float
        minimum membership probability for a star to be used
    sigma : float
        sigma-clipping level applied to the proper motions

    Returns
    -------
    width_x, width_y : float
        fitted Gaussian widths, in histogram-bin units
    cov : ndarray or None
        covariance of the fitted parameters (None if the fit failed)
    labels : tuple of str
        parameter names matching ``cov``'s rows/columns
    edges : list of ndarray
        the histogram bin edges (x then y)

    TODO
    ----
    don't choose arbitrary threshold
    don't use histogram?
    not arbitrary rotation threshold
    """
    # keep only probable members, stripped to plain mas/yr floats
    ismember = df["memberprob"] > threshold
    pmx = df["pmx"][ismember].to_value("mas / yr")
    pmy = df["pmy"][ismember].to_value("mas / yr")
    # Sigma Clip major outliers
    # (a star is kept only if it survives clipping in BOTH components)
    sigclip = SigmaClip(sigma=sigma, maxiters=1.0)
    resx = sigclip(pmx)
    resy = sigclip(pmy)
    pmx = resx.data[~resx.mask & ~resy.mask]
    pmy = resy.data[~resx.mask & ~resy.mask]
    # -----------
    # plot normality test (QQ plots -- Gaussianity is assumed downstream)
    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(6, 3))
    stats.probplot(pmx, dist="norm", plot=ax0)
    stats.probplot(pmy, dist="norm", plot=ax1)
    plt.tight_layout()
    plt.savefig(FIGURES + f"{name}_QQ.pdf")
    plt.close()
    # -----------
    # Now histogram
    # need equi-spaced bins
    # TODO error estimate from bin size
    # bin count ~ 1 + 3.222*log(N) (Sturges-style rule, natural log here)
    data, *edges = np.histogram2d(
        pmx, pmy, bins=int(1 + 3.222 * np.log(len(pmx))), density=True
    )
    # fit 2D Gaussian, with freedom of rotation
    params, cov, infodict, errmsg = gaussfitter.gaussfit(
        data, circle=0, rotate=1, vheight=1, return_all=1
    )
    height, amp, x, y, width_x, width_y, rota = params
    labels = ("height", "amp", "x", "y", "width_x", "width_y", "rota")
    # Check if need to do a rotated system. get better results if don't.
    # NOTE(review): the "< 2 degrees" rotation cutoff is arbitrary (see TODO)
    if rota < 2:  # not rotated
        amp = None
        rota = 0
        # refit without rotation: moments seed + leastsq (fitgaussian)
        params, cov, infodict, errmsg = fitgaussian(data)
        height, x, y, width_x, width_y = params
        labels = ("height", "x", "y", "width_x", "width_y")
    # -----------
    # plot 2D Gaussian fit contours over the binned data
    plt.matshow(data, cmap=plt.cm.gist_earth_r)
    if rota == 0:
        fit = gaussian(*params)
    else:
        fit = gaussfitter.twodgaussian(params, circle=0, rotate=1, vheight=1)
    plt.contour(fit(*np.indices(data.shape)), cmap=plt.cm.copper)
    ax = plt.gca()
    rota %= 360  # shift back to 0 - 360 degree rotation
    plt.text(
        0.95,
        0.05,
        """
    x : %.1f
    y : %.1f
    rot : %.1f
    width_x : %.1f
    width_y : %.1f"""
        % (x, y, rota, width_x, width_y),
        fontsize=16,
        horizontalalignment="right",
        verticalalignment="bottom",
        transform=ax.transAxes,
    )
    plt.savefig(FIGURES + f"{name}_2D.pdf")
    plt.close()
    # -----------
    # covariance heat map (only when the fit produced a covariance)
    if cov is not None:
        sns.heatmap(np.log10(np.abs(cov)), cmap="viridis_r")
        plt.xticks(plt.xticks()[0], labels)
        plt.yticks(plt.yticks()[0], labels)
        plt.savefig(FIGURES + f"{name}_cov.pdf")
        plt.close()
    # -----------
    return width_x, width_y, cov, labels, edges


# /def
# ------------------------------------------------------------------------
def average_scale_value(width_x, width_y, edges_x, edges_y):
    """Convert Gaussian widths from bin units to a proper-motion scale.

    Each width is scaled by its bin size; the two are averaged into a
    single scale, with half their difference as the error estimate.

    Parameters
    ----------
    width_x, width_y : float
        Gaussian widths in histogram-bin units
    edges_x, edges_y : ndarray
        histogram bin edges along each axis

    Returns
    -------
    pm_scale : Quantity
        mean proper-motion scale [mas / yr]
    pm_scale_err : Quantity
        half the |x - y| scale difference [mas / yr]
    flag : bool
        True when either set of bin edges is not equally spaced
    """
    flag = False

    dx = np.diff(edges_x)
    if not np.allclose(dx[:-1], dx[1:]):
        warnings.warn("x edges are not equally spaced")
        flag = True

    dy = np.diff(edges_y)
    if not np.allclose(dy[:-1], dy[1:]):
        warnings.warn("y edges are not equally spaced")
        flag = True

    # proper motion spanned by one histogram bin in each direction
    pm_per_bin_x = dx[0] * u.mas / u.yr
    pm_per_bin_y = dy[0] * u.mas / u.yr

    scale_x = width_x * pm_per_bin_x
    scale_y = width_y * pm_per_bin_y

    pm_scale = (scale_x + scale_y) / 2
    # error estimate: half the spread between the two directions
    pm_scale_err = np.abs(scale_x - scale_y) / 2

    return pm_scale, pm_scale_err, flag


# /def
###############################################################################
# Command Line
###############################################################################
def make_parser(inheritable=False):
    """Expose parser for ``main``.

    Parameters
    ----------
    inheritable : bool
        whether the parser can be inherited from (default False).
        if True, sets ``add_help=False`` and ``conflict_handler='resolve'``

    Returns
    -------
    parser : ArgumentParser
    """
    # BUG FIX: the original used bitwise ``~inheritable``, which on a bool
    # evaluates to -1 or -2 -- both truthy -- so ``add_help`` was always
    # enabled and the conflict handler was always "resolve", regardless of
    # the argument.  Logical negation restores the documented behavior.
    parser = argparse.ArgumentParser(
        description="fit_pm_scale",
        add_help=not inheritable,
        conflict_handler="resolve" if inheritable else "error",
    )
    # NOTE: figure/data-directory options were sketched here but never
    # wired up; the script currently uses the module-level DATA/FIGURES.
    # parser.add_argument(
    #     "figure_dir",
    #     type=str,
    #     default="figures",
    #     help="The data directory",
    # )
    # parser.add_argument(
    #     "--output_dir",
    #     type=str,
    #     default="../../data",
    #     help="The data directory",
    # )
    # parser.add_argument(
    #     "--data_dir",
    #     type=str,
    #     default="data",
    #     help="The input data directory",
    # )
    return parser


# /def
# ------------------------------------------------------------------------
def main(
    args: Optional[list] = None, opts: Optional[argparse.Namespace] = None
):
    """Fit and store the proper-motion scale of every globular cluster.

    Reads the summary table, fits a PM scale for each per-cluster data
    file in ``DATA/gcts/``, and writes the updated summary back to
    ``DATA/summary.txt`` (overwriting it in place).

    Parameters
    ----------
    args : list, optional
        an optional single argument that holds the sys.argv list,
        except for the script name (e.g., argv[1:])
    opts : Namespace, optional
        pre-constructed results of parsed args
        if not None, used ONLY if args is None
    """
    # deal with arguments: explicit `opts` wins only when no `args` given
    if opts is not None and args is None:
        pass
    else:
        if opts is not None:
            warnings.warn("Not using `opts` because `args` are given")
        parser = make_parser()
        opts = parser.parse_args(args)
    # get options
    # NOTE(review): `opts` is parsed but currently unused below;
    # the directory options were planned but never wired up:
    # data_dir: str = opts.data_dir  # where the data is stored
    # result_dir = str(
    #     pathlib.Path(data_dir).parent
    # )  # where to store the formatted output
    # ensure paths end in '/'
    # data_dir = data_dir if data_dir.endswith("/") else data_dir + "/"
    # result_dir = result_dir if result_dir.endswith("/") else result_dir + "/"
    # read property summary table and seed the output columns with NaN
    # NOTE(review): np.NaN is an alias removed in NumPy 2.0 -- np.nan is
    # the forward-compatible spelling; confirm before upgrading NumPy.
    summary = read_summary_table(DATA + "summary.txt")
    summary["pmscale"] = np.NaN * u.mas / u.yr
    summary["pmscale_e"] = np.NaN * u.mas / u.yr
    # globular clusters: one .txt data file per cluster
    files = os.listdir(DATA + 'gcts')
    files = [f for f in files if f.endswith(".txt")]
    for file in tqdm.tqdm(files):
        name = file[: -len(".txt")]  # GC name (filename minus extension)
        gc = read_globular_cluster_table(DATA + 'gcts/' + file)
        # compute scale parameter (widths in bin units + bin edges)
        width_x, width_y, cov, labels, edges = scale_values_2d(
            name, gc, threshold=0.8, sigma=4
        )
        # convert bin-unit widths to a physical PM scale + error
        pm_scale, pm_scale_err, flag = average_scale_value(
            width_x, width_y, *edges
        )
        if flag:
            warnings.warn(name + " raised the previous warning")
        if np.isnan(pm_scale):
            warnings.warn(name + " has NaN pm scale")
        # write to table (Row from .loc is a view, so this mutates summary)
        summary.loc[name]["pmscale"] = np.round(pm_scale, 3)
        summary.loc[name]["pmscale_e"] = np.round(pm_scale_err, 3)
    # # /for
    # save whole summary table (overwrites the input file)
    summary.write(
        DATA + "summary.txt",
        format="ascii.commented_header",
        overwrite=True,
    )
    return
# /def
# ------------------------------------------------------------------------
# if __name__ == "__main__":
# print("Running fit_pm_scale script.")
# main(args=None, opts=None)
# print("finished.")
# # /if
###############################################################################
# END
| [
"os.mkdir",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.isnan",
"scipy.optimize.leastsq",
"pathlib.Path",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.gca",
"astropy.table.QTable.read",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"warnings.simplefilter",
"matplotlib.pyplot.close... | [((1126, 1170), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'UserWarning'], {}), "('always', UserWarning)\n", (1147, 1170), False, 'import warnings\n'), ((1315, 1337), 'os.path.isdir', 'os.path.isdir', (['FIGURES'], {}), '(FIGURES)\n', (1328, 1337), False, 'import os\n'), ((1343, 1360), 'os.mkdir', 'os.mkdir', (['FIGURES'], {}), '(FIGURES)\n', (1351, 1360), False, 'import os\n'), ((1729, 1779), 'astropy.table.QTable.read', 'QTable.read', (['file'], {'format': '"""ascii.commented_header"""'}), "(file, format='ascii.commented_header')\n", (1740, 1779), False, 'from astropy.table import Table, QTable\n'), ((2451, 2500), 'astropy.table.Table.read', 'Table.read', (['file'], {'format': '"""ascii.commented_header"""'}), "(file, format='ascii.commented_header')\n", (2461, 2500), False, 'from astropy.table import Table, QTable\n'), ((3185, 3195), 'astropy.table.QTable', 'QTable', (['df'], {}), '(df)\n', (3191, 3195), False, 'from astropy.table import Table, QTable\n'), ((4467, 4489), 'numpy.indices', 'np.indices', (['data.shape'], {}), '(data.shape)\n', (4477, 4489), True, 'import numpy as np\n'), ((5181, 5238), 'scipy.optimize.leastsq', 'optimize.leastsq', (['errorfunction', 'params'], {'full_output': '(True)'}), '(errorfunction, params, full_output=True)\n', (5197, 5238), True, 'import scipy.optimize as optimize\n'), ((5829, 5865), 'astropy.stats.SigmaClip', 'SigmaClip', ([], {'sigma': 'sigma', 'maxiters': '(1.0)'}), '(sigma=sigma, maxiters=1.0)\n', (5838, 5865), False, 'from astropy.stats import SigmaClip\n'), ((6073, 6107), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6, 3)'}), '(1, 2, figsize=(6, 3))\n', (6085, 6107), True, 'import matplotlib.pyplot as plt\n'), ((6112, 6154), 'scipy.stats.probplot', 'stats.probplot', (['pmx'], {'dist': '"""norm"""', 'plot': 'ax0'}), "(pmx, dist='norm', plot=ax0)\n", (6126, 6154), True, 'import scipy.stats as stats\n'), ((6159, 6201), 
'scipy.stats.probplot', 'stats.probplot', (['pmy'], {'dist': '"""norm"""', 'plot': 'ax1'}), "(pmy, dist='norm', plot=ax1)\n", (6173, 6201), True, 'import scipy.stats as stats\n'), ((6206, 6224), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6222, 6224), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6268), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(FIGURES + f'{name}_QQ.pdf')"], {}), "(FIGURES + f'{name}_QQ.pdf')\n", (6240, 6268), True, 'import matplotlib.pyplot as plt\n'), ((6273, 6284), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6282, 6284), True, 'import matplotlib.pyplot as plt\n'), ((7161, 7204), 'matplotlib.pyplot.matshow', 'plt.matshow', (['data'], {'cmap': 'plt.cm.gist_earth_r'}), '(data, cmap=plt.cm.gist_earth_r)\n', (7172, 7204), True, 'import matplotlib.pyplot as plt\n'), ((7420, 7429), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7427, 7429), True, 'import matplotlib.pyplot as plt\n'), ((7493, 7742), 'matplotlib.pyplot.text', 'plt.text', (['(0.95)', '(0.05)', '("""\n x : %.1f\n y : %.1f\n rot : %.1f\n width_x : %.1f\n width_y : %.1f"""\n % (x, y, rota, width_x, width_y))'], {'fontsize': '(16)', 'horizontalalignment': '"""right"""', 'verticalalignment': '"""bottom"""', 'transform': 'ax.transAxes'}), '(0.95, 0.05, \n """\n x : %.1f\n y : %.1f\n rot : %.1f\n width_x : %.1f\n width_y : %.1f"""\n % (x, y, rota, width_x, width_y), fontsize=16, horizontalalignment=\n \'right\', verticalalignment=\'bottom\', transform=ax.transAxes)\n', (7501, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7803, 7842), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(FIGURES + f'{name}_2D.pdf')"], {}), "(FIGURES + f'{name}_2D.pdf')\n", (7814, 7842), True, 'import matplotlib.pyplot as plt\n'), ((7847, 7858), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7856, 7858), True, 'import matplotlib.pyplot as plt\n'), ((9495, 9630), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 
'"""fit_pm_scale"""', 'add_help': '(~inheritable)', 'conflict_handler': "('resolve' if ~inheritable else 'error')"}), "(description='fit_pm_scale', add_help=~inheritable,\n conflict_handler='resolve' if ~inheritable else 'error')\n", (9518, 9630), False, 'import argparse\n'), ((11509, 11534), 'os.listdir', 'os.listdir', (["(DATA + 'gcts')"], {}), "(DATA + 'gcts')\n", (11519, 11534), False, 'import os\n'), ((11605, 11621), 'tqdm.tqdm', 'tqdm.tqdm', (['files'], {}), '(files)\n', (11614, 11621), False, 'import tqdm\n'), ((8061, 8101), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(FIGURES + f'{name}_cov.pdf')"], {}), "(FIGURES + f'{name}_cov.pdf')\n", (8072, 8101), True, 'import matplotlib.pyplot as plt\n'), ((8110, 8121), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8119, 8121), True, 'import matplotlib.pyplot as plt\n'), ((8435, 8482), 'warnings.warn', 'warnings.warn', (['"""x edges are not equally spaced"""'], {}), "('x edges are not equally spaced')\n", (8448, 8482), False, 'import warnings\n'), ((8580, 8627), 'warnings.warn', 'warnings.warn', (['"""y edges are not equally spaced"""'], {}), "('y edges are not equally spaced')\n", (8593, 8627), False, 'import warnings\n'), ((8867, 8922), 'numpy.abs', 'np.abs', (['(width_x * pm_per_bin_x - width_y * pm_per_bin_y)'], {}), '(width_x * pm_per_bin_x - width_y * pm_per_bin_y)\n', (8873, 8922), True, 'import numpy as np\n'), ((12091, 12109), 'numpy.isnan', 'np.isnan', (['pm_scale'], {}), '(pm_scale)\n', (12099, 12109), True, 'import numpy as np\n'), ((12230, 12251), 'numpy.round', 'np.round', (['pm_scale', '(3)'], {}), '(pm_scale, 3)\n', (12238, 12251), True, 'import numpy as np\n'), ((12293, 12318), 'numpy.round', 'np.round', (['pm_scale_err', '(3)'], {}), '(pm_scale_err, 3)\n', (12301, 12318), True, 'import numpy as np\n'), ((4077, 4162), 'numpy.exp', 'np.exp', (['(-(((center_x - x) / width_x) ** 2 + ((center_y - y) / width_y) ** 2) / 2)'], {}), '(-(((center_x - x) / width_x) ** 2 + ((center_y - y) / 
width_y) ** 2) / 2\n )\n', (4083, 4162), True, 'import numpy as np\n'), ((10762, 10820), 'warnings.warn', 'warnings.warn', (['"""Not using `opts` because `args` are given"""'], {}), "('Not using `opts` because `args` are given')\n", (10775, 10820), False, 'import warnings\n'), ((12026, 12078), 'warnings.warn', 'warnings.warn', (["(name + ' raised the previous warning')"], {}), "(name + ' raised the previous warning')\n", (12039, 12078), False, 'import warnings\n'), ((12123, 12164), 'warnings.warn', 'warnings.warn', (["(name + ' has NaN pm scale')"], {}), "(name + ' has NaN pm scale')\n", (12136, 12164), False, 'import warnings\n'), ((7366, 7388), 'numpy.indices', 'np.indices', (['data.shape'], {}), '(data.shape)\n', (7376, 7388), True, 'import numpy as np\n'), ((7933, 7944), 'numpy.abs', 'np.abs', (['cov'], {}), '(cov)\n', (7939, 7944), True, 'import numpy as np\n'), ((7984, 7996), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (7994, 7996), True, 'import matplotlib.pyplot as plt\n'), ((8028, 8040), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (8038, 8040), True, 'import matplotlib.pyplot as plt\n'), ((8381, 8397), 'numpy.diff', 'np.diff', (['edges_x'], {}), '(edges_x)\n', (8388, 8397), True, 'import numpy as np\n'), ((8404, 8420), 'numpy.diff', 'np.diff', (['edges_x'], {}), '(edges_x)\n', (8411, 8420), True, 'import numpy as np\n'), ((8526, 8542), 'numpy.diff', 'np.diff', (['edges_y'], {}), '(edges_y)\n', (8533, 8542), True, 'import numpy as np\n'), ((8549, 8565), 'numpy.diff', 'np.diff', (['edges_y'], {}), '(edges_y)\n', (8556, 8565), True, 'import numpy as np\n'), ((8668, 8684), 'numpy.diff', 'np.diff', (['edges_x'], {}), '(edges_x)\n', (8675, 8684), True, 'import numpy as np\n'), ((8722, 8738), 'numpy.diff', 'np.diff', (['edges_y'], {}), '(edges_y)\n', (8729, 8738), True, 'import numpy as np\n'), ((1184, 1206), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1196, 1206), False, 'import pathlib\n'), ((1251, 1273), 
'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1263, 1273), False, 'import pathlib\n'), ((5112, 5134), 'numpy.indices', 'np.indices', (['data.shape'], {}), '(data.shape)\n', (5122, 5134), True, 'import numpy as np\n'), ((4621, 4640), 'numpy.arange', 'np.arange', (['col.size'], {}), '(col.size)\n', (4630, 4640), True, 'import numpy as np\n'), ((4747, 4766), 'numpy.arange', 'np.arange', (['row.size'], {}), '(row.size)\n', (4756, 4766), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.