code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from astropy.io import ascii, votable
from astropy.time import Time
import os, requests, sys, warnings
from io import StringIO
# global variables
# conversion factor from radians to degrees
r2d = 180/np.pi
# conversion factor from degrees to radians
d2r = np.pi/180
def getmultiwave(dataset, url='http://hla.stsci.edu/cgi-bin/getdata.cgi'):
    """Fetch the HLA multiwave source catalog for the given dataset.

    Typical dataset name is hst_10188_10_acs_wfc (with no filter).

    :param dataset: Image name.
    :param url: URL of the HLA server to extract the image from.
    :type dataset: str
    :type url: str
    :returns: (tab) astropy.Table: image table with (x,y) & (ra, dec) coordinates, magnitudes etc.
    """
    catalog_name = dsname2total(dataset) + '_sexphot_trm.cat'
    response = requests.get(url, params={'format': 'csv', 'filename': catalog_name})
    tab = ascii.read(response.text)
    # Normalize column names to lowercase and drop the unneeded *_totmag columns.
    for name in tab.colnames:
        lowered = name.lower()
        if lowered.endswith('_totmag'):
            del tab[name]
        elif lowered != name:
            tab.rename_column(name, lowered)
    # Attach observation metadata for downstream consumers.
    tab.meta['epoch'] = getepoch(dataset)
    tab.meta['crval'] = getrefpos(dataset)
    tab.meta['dataset'] = dataset
    return tab
def dsname2total(dataset):
    """Convert a dataset name to its total image name.

    Typical dataset name is hst_10188_10_acs_wfc (with no filter) but also works
    with hst_10188_10_acs_wfc_total.
    This translates Steve's WFPC2 dataset names (e.g. HST_08553_01_WFPC2_WFPC2)
    to HLA-style names (hst_08553_01_wfpc2_total_wf).

    :param dataset: Image name to look for
    :type dataset: str
    :returns: (totalname): string with total image name.
    """
    dataset = dataset.lower()
    # Strip a trailing '_total' so the suffix logic below starts from a clean name.
    if dataset.endswith('_total'):
        dataset = dataset[:-len('_total')]
    # WFPC2 datasets are renamed right after the '_wfpc2' token; everything
    # else simply gets '_total' appended.
    marker = dataset.find('_wfpc2')
    if marker < 0:
        return dataset + '_total'
    return dataset[:marker + len('_wfpc2')] + '_total_wf'
def gaiaquery(ramin, decmin, ramax, decmax, version='dr2',
              url='http://hla.stsci.edu/cgi-bin/gaiaquery'):
    """
    Return Gaia catalog for the RA/Dec box.

    :param ramin: Minimum RA, units in degrees.
    :param decmin: Minimum Dec, units in degrees.
    :param ramax: Maximum RA, units in degrees.
    :param decmax: Maximum Dec, units in degrees.
    :param version: Version of the Gaia catalog. Either 'dr1' or 'dr2'.
    :param url: Gaia server.
    :type ramin: float
    :type decmin: float
    :type ramax: float
    :type decmax: float
    :type version: str
    :type url: str
    :returns: (tab) astropy.Table: Gaia catalog table with (ra,dec) coordinates, magnitudes etc.
    :raises ValueError: if version is not 'dr1' or 'dr2'.
    """
    # Validate before doing any network work.  (Removed the unused `bbox`
    # and `vlist` locals from the original; the tuple below is the single
    # source of truth for supported versions.)
    if version not in ('dr1', 'dr2'):
        raise ValueError("version '{}' must be dr1 or dr2".format(version))
    sbbox = ','.join(str(x) for x in (ramin, decmin, ramax, decmax))
    r = requests.get(url, params={'bbox': sbbox, 'version': version,
                                  'extra': 'ra_dec_corr,phot_bp_mean_mag,phot_rp_mean_mag'})
    # The first two response rows are header material; data starts at row 2.
    tab = ascii.read(r.text, data_start=2)
    # change column names to lowercase
    for col in tab.colnames:
        lcol = col.lower()
        if lcol != col:
            tab.rename_column(col, lcol)
    return tab
# Module-level cache of HLA SIAP results keyed by total-image name, so
# repeated info lookups for the same dataset only hit the network once.
hlacache = {}
def gethlainfo(dataset, url="http://hla.stsci.edu/cgi-bin/hlaSIAP.cgi", params=None):
    """Get info on the observation from the HLA SIAP server.
    Typical dataset name is hst_10188_10_acs_wfc (with no filter).
    This translates Steve's WFPC2 dataset names (e.g. HST_08553_01_WFPC2_WFPC2)
    to HLA-style names (hst_08553_01_wfpc2_total_wf)
    :param dataset: Image name.
    :param url: URL of the HLA SIAP server to extract the image from.
    :param params: Dictionary of extra parameters to include
    :type dataset: str
    :type url: str
    :type params: dict
    :returns: astropy.Table: image table with (x,y) & (ra, dec) coordinates, magnitudes etc.
    """
    totalname = dsname2total(dataset)
    # Serve repeated requests for the same dataset from the module cache.
    try:
        return hlacache[totalname]
    except KeyError:
        pass
    if not params:
        params = {}
    # Fixed query parameters; because keyword arguments are applied last in
    # dict(), caller-supplied params cannot override these keys.
    params = dict(params, config='ops', pos='0,0', size='180', imagetype='combined',
                  filter='detection', format='image/fits', visit=totalname)
    # NOTE(review): values are joined with no URL-encoding; this assumes the
    # HLA parameter values never contain characters needing escapes -- confirm.
    pstring = "&".join(["{}={}".format(x[0],x[1]) for x in params.items()])
    rurl = "{}?{}".format(url,pstring)
    # annoying way to get rid of progress message: temporarily swallow stdout
    try:
        save_stdout = sys.stdout
        sys.stdout = StringIO()
        # suppress a bunch of irrelevant warnings while parsing the VOTable
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # NOTE(review): `pedantic` is deprecated in newer astropy
            # releases (replaced by `verify`) -- confirm installed version.
            vtab = votable.parse_single_table(rurl, pedantic=False)
    finally:
        sys.stdout = save_stdout
    tab = vtab.to_table(use_names_over_ids=True)
    # fix another annoyance by changing 'object' columns to strings
    for c in tab.colnames:
        if str(tab[c].dtype)=='object' and vtab.get_field_by_id_or_name(c).datatype == 'char':
            tab[c] = tab[c].astype(str)
    # Cache the parsed table for subsequent calls.
    hlacache[totalname] = tab
    return tab
def getepoch(dataset):
    """Return the epoch of an HLA dataset as decimal years.

    This uses the HLA SIAP server to get the info.

    :param dataset: Image name.
    :type dataset: str
    :returns: float: date in decimal years.
    """
    info = gethlainfo(dataset)
    start = Time(info['StartTime'][0])
    return start.decimalyear
def getrefpos(dataset):
    """Return the reference position (crval1, crval2) of the HLA combined image.

    This uses the HLA SIAP server to get the info.

    :param dataset: Image name.
    :type dataset: str
    :returns: tuple of floats.
    """
    info = gethlainfo(dataset)
    crval = info['crval'][0]
    return tuple(crval)
def radec2xyz(ra, dec):
    """Convert RA and Dec arrays to unit-vector (x, y, z) components.

    ra, dec both may be scalars or arrays.
    If both are arrays, they must have the same lengths.

    :param ra: RA, in degrees.
    :param dec: Dec, in degrees.
    :type ra: float or array
    :type dec: float or array
    :returns: numpy.ndarray cxyz of shape [n, 3]: Cartesian direction
        cosines on the unit sphere (dimensionless).
    :raises ValueError: if ra and dec are arrays of different lengths.
    """
    # Determine lengths; scalars count as length 1 and broadcast.
    try:
        nra = len(ra)
        ra = np.asarray(ra)
    except TypeError:
        nra = 1
    try:
        ndec = len(dec)
        dec = np.asarray(dec)
    except TypeError:
        ndec = 1
    n = nra
    if n == 1:
        n = ndec
    elif ndec != nra and ndec != 1:
        raise ValueError("Mismatched array lengths for ra [{}], dec [{}]".format(nra, ndec))
    # BUG FIX: `np.float` was removed in NumPy 1.24; builtin float is the
    # documented equivalent.
    cxyz = np.zeros((n, 3), dtype=float)
    # np.deg2rad applies the same 180/pi factor as the old module-level d2r
    # constant, and keeps this function self-contained.
    rarad = np.deg2rad(ra)
    decrad = np.deg2rad(dec)
    cdec = np.cos(decrad)
    cxyz[:, 0] = cdec*np.cos(rarad)
    cxyz[:, 1] = cdec*np.sin(rarad)
    cxyz[:, 2] = np.sin(decrad)
    return cxyz
def xyz2radec(cxyz):
    """Convert xyz values to RA and Dec arrays.

    :param cxyz: Input (x, y, z) Cartesian components, shape [3] or [n, 3].
        Rows are normalized internally, so any nonzero magnitude is accepted.
    :type cxyz: numpy.ndarray
    :returns: (ra, dec) arrays in degrees.
    :raises ValueError: if cxyz is not of shape [3] or [n, 3].
    """
    cxyz = np.asarray(cxyz)
    if cxyz.ndim == 1 and cxyz.shape[0] == 3:
        cxyz = cxyz.reshape((1, 3))
    elif not (cxyz.ndim == 2 and cxyz.shape[1] == 3):
        raise ValueError("cxyz must be [3] or [n,3]")
    # Project each row onto the unit sphere before extracting angles.
    cxyz = cxyz / np.sqrt((cxyz**2).sum(axis=-1))[:, np.newaxis]
    # np.degrees applies the same 180/pi factor as the old module-level r2d
    # constant, and keeps this function self-contained.
    dec = np.degrees(np.arcsin(cxyz[:, 2]))
    ra = np.degrees(np.arctan2(cxyz[:, 1], cxyz[:, 0]))
    return (ra, dec)
def cat2xyz(cat, ra='ra', dec='dec'):
    """Return an [len(cat), 3] array of xyz values for a catalog.

    :param cat: Input catalog with RA, DEC coordinates in degrees.
    :param ra: Name of the RA column in the input catalog.
    :param dec: Name of the Dec column in the input catalog.
    :type cat: astropy.Table
    :type ra: str
    :type dec: str
    :returns: xyz array (see radec2xyz).
    """
    ra_column = cat[ra]
    dec_column = cat[dec]
    return radec2xyz(ra_column, dec_column)
def getdeltas(ra0, dec0, ra1, dec1):
    """Compute shifts in arcsec between two positions.

    Input ra, dec units in degrees. At least one of ra0,dec0 or ra1,dec1
    should be arrays (the RA wrap step assigns into the difference array).

    :param ra0: RA coordinates of catalog.
    :param dec0: Dec coordinates of catalog.
    :param ra1: RA coordinates of reference.
    :param dec1: Dec coordinates of reference.
    :type ra0: float or array
    :type dec0: float or array
    :type ra1: float or array
    :type dec1: float or array
    :returns: shifts dra, ddec in arcsec.
    """
    dra = ra1 - ra0
    # Wrap RA differences into (-180, 180] so e.g. 359 -> 1 deg counts as +2.
    dra[dra > 180] -= 360
    dra[dra < -180] += 360
    # Scale by cos(dec) so the RA offset is a true angular distance; 3600
    # converts degrees to arcsec.  np.deg2rad applies the same 180/pi factor
    # as the old module-level d2r constant, keeping this self-contained.
    dra = dra*np.cos(np.deg2rad(dec0))*3600
    ddec = (dec1 - dec0)*3600
    return dra, ddec
def xyz2delta(dxyz, ra0, dec0):
    """Convert an array of dxyz[*,3] offsets to dra, ddec shifts.

    The offsets are added to the unit vector of the reference position
    (ra0, dec0) and the result converted back to spherical shifts.

    :param dxyz: Input data array, (x, y, z) coordinate offsets.
    :param ra0: RA coordinates of reference catalog.
    :param dec0: Dec coordinates of reference catalog.
    :type dxyz: np.ndarray
    :type ra0: float or array
    :type dec0: float or array
    :returns: shifts dra, ddec in arcsec.
    :raises ValueError: if dxyz is not of shape [n, 3].
    """
    if dxyz.ndim != 2 or dxyz.shape[1] != 3:
        raise ValueError("dxyz must be [*,3]")
    base = radec2xyz(ra0, dec0)
    shifted_ra, shifted_dec = xyz2radec(dxyz + base)
    return getdeltas(ra0, dec0, shifted_ra, shifted_dec)
| [
"astropy.io.votable.parse_single_table",
"numpy.asarray",
"numpy.arcsin",
"requests.get",
"warnings.catch_warnings",
"astropy.time.Time",
"numpy.zeros",
"warnings.simplefilter",
"numpy.arctan2",
"numpy.cos",
"numpy.sin",
"io.StringIO",
"astropy.io.ascii.read"
] | [((866, 930), 'requests.get', 'requests.get', (['url'], {'params': "{'format': 'csv', 'filename': catname}"}), "(url, params={'format': 'csv', 'filename': catname})\n", (878, 930), False, 'import os, requests, sys, warnings\n'), ((941, 959), 'astropy.io.ascii.read', 'ascii.read', (['r.text'], {}), '(r.text)\n', (951, 959), False, 'from astropy.io import ascii, votable\n'), ((3300, 3423), 'requests.get', 'requests.get', (['url'], {'params': "{'bbox': sbbox, 'version': version, 'extra':\n 'ra_dec_corr,phot_bp_mean_mag,phot_rp_mean_mag'}"}), "(url, params={'bbox': sbbox, 'version': version, 'extra':\n 'ra_dec_corr,phot_bp_mean_mag,phot_rp_mean_mag'})\n", (3312, 3423), False, 'import os, requests, sys, warnings\n'), ((3463, 3495), 'astropy.io.ascii.read', 'ascii.read', (['r.text'], {'data_start': '(2)'}), '(r.text, data_start=2)\n', (3473, 3495), False, 'from astropy.io import ascii, votable\n'), ((5911, 5936), 'astropy.time.Time', 'Time', (["tab['StartTime'][0]"], {}), "(tab['StartTime'][0])\n", (5915, 5936), False, 'from astropy.time import Time\n'), ((7083, 7115), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {'dtype': 'np.float'}), '((n, 3), dtype=np.float)\n', (7091, 7115), True, 'import numpy as np\n'), ((7165, 7179), 'numpy.cos', 'np.cos', (['decrad'], {}), '(decrad)\n', (7171, 7179), True, 'import numpy as np\n'), ((7266, 7280), 'numpy.sin', 'np.sin', (['decrad'], {}), '(decrad)\n', (7272, 7280), True, 'import numpy as np\n'), ((7530, 7546), 'numpy.asarray', 'np.asarray', (['cxyz'], {}), '(cxyz)\n', (7540, 7546), True, 'import numpy as np\n'), ((5016, 5026), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5024, 5026), False, 'from io import StringIO\n'), ((6745, 6759), 'numpy.asarray', 'np.asarray', (['ra'], {}), '(ra)\n', (6755, 6759), True, 'import numpy as np\n'), ((6845, 6860), 'numpy.asarray', 'np.asarray', (['dec'], {}), '(dec)\n', (6855, 6860), True, 'import numpy as np\n'), ((7201, 7214), 'numpy.cos', 'np.cos', (['rarad'], {}), '(rarad)\n', (7207, 7214), 
True, 'import numpy as np\n'), ((7236, 7249), 'numpy.sin', 'np.sin', (['rarad'], {}), '(rarad)\n', (7242, 7249), True, 'import numpy as np\n'), ((7847, 7868), 'numpy.arcsin', 'np.arcsin', (['cxyz[:, 2]'], {}), '(cxyz[:, 2])\n', (7856, 7868), True, 'import numpy as np\n'), ((7881, 7915), 'numpy.arctan2', 'np.arctan2', (['cxyz[:, 1]', 'cxyz[:, 0]'], {}), '(cxyz[:, 1], cxyz[:, 0])\n', (7891, 7915), True, 'import numpy as np\n'), ((5116, 5141), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5139, 5141), False, 'import os, requests, sys, warnings\n'), ((5155, 5186), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5176, 5186), False, 'import os, requests, sys, warnings\n'), ((5206, 5254), 'astropy.io.votable.parse_single_table', 'votable.parse_single_table', (['rurl'], {'pedantic': '(False)'}), '(rurl, pedantic=False)\n', (5232, 5254), False, 'from astropy.io import ascii, votable\n'), ((9057, 9075), 'numpy.cos', 'np.cos', (['(d2r * dec0)'], {}), '(d2r * dec0)\n', (9063, 9075), True, 'import numpy as np\n')] |
import abc
import numpy as np
import six
import os
import tensorflow as tf
class NeuralNetwork(abc.ABC):
    """Abstract base class for the Neural Network used in the policy-value net.

    Details can be found in https://www.nature.com/articles/nature24270
    'Mastering the game of Go without human knowledge'

    Deriving from abc.ABC gives the same ABCMeta metaclass the old
    @six.add_metaclass(abc.ABCMeta) decorator supplied, without the
    Python-2 compatibility shim.
    """

    @abc.abstractmethod
    def policyValueFunc(self, board):
        """Return (move, probability) pairs and a value estimate for a board."""

    @abc.abstractmethod
    def trainStep(self, state_batch, mcts_probs_batch, winner_batch, lr):
        """Run one optimization step and return training diagnostics."""

    @abc.abstractmethod
    def save(self, path):
        """Persist model parameters to path."""

    @abc.abstractmethod
    def restore(self, path):
        """Load model parameters from path."""

    # abc.abstractproperty is deprecated since Python 3.3; stacking
    # @property over @abc.abstractmethod is the documented equivalent.
    @property
    @abc.abstractmethod
    def width(self):
        """Board width in cells."""

    @property
    @abc.abstractmethod
    def height(self):
        """Board height in cells."""
class SimpleCNN(NeuralNetwork):
    """Convolutional policy-value network built with the TF1 graph API.

    Three shared conv+batchnorm layers feed a policy head (softmax over
    board positions) and a value head (tanh scalar in [-1, 1]).
    """

    def __init__(self, height, width, model_file=None, norm_weight=1e-4):
        """Build the compute graph, start a session, optionally restore weights.

        :param height: board height.
        :param width: board width.
        :param model_file: optional checkpoint path to restore from.
        :param norm_weight: L2 regularization coefficient.
        """
        self.board_width = width
        self.board_height = height
        # Define the neural network
        with tf.variable_scope("SimpleCNN"):
            # input states placeholder, 4 channels are:
            # board_state[0]: current board state with only current player's stones
            # board_state[1]: current board state with only opponent's stones
            # board_state[2]: only one stone, indicating the last (opponent) move
            # board_state[3]: the player to play, 0 for white, 1 for black
            self.raw_input_states = tf.placeholder(
                tf.float32, shape=[None, 4, height, width])
            # label containing the result of the game
            self.value_labels = tf.placeholder(tf.float32, shape=[None, 1])
            # label containing the MCTS probability vector for each step
            self.mcts_probs_labels = tf.placeholder(
                tf.float32, shape=[None, height*width])
            self.learning_rate = tf.placeholder(tf.float32)
            self.is_training = tf.placeholder(tf.bool)
            # convert to TensorFlow's preferred [N,H,W,C] layout
            self.input_states = tf.transpose(self.raw_input_states, [0, 2, 3, 1])
            # Shared layers: conv -> batchnorm, three times.
            with tf.variable_scope("shared_layers"):
                self.conv1 = tf.layers.conv2d(inputs=self.input_states,
                                              filters=32, kernel_size=3,
                                              padding="same", activation=tf.nn.relu,
                                              name="conv1")
                self.batchnorm1 = tf.layers.batch_normalization(self.conv1, training=self.is_training)
                self.conv2 = tf.layers.conv2d(inputs=self.batchnorm1, filters=64,
                                              kernel_size=3, padding="same",
                                              activation=tf.nn.relu, name="conv2")
                self.batchnorm2 = tf.layers.batch_normalization(self.conv2, training=self.is_training)
                # BUG FIX: conv3 previously read self.conv2 directly, which
                # skipped batchnorm2 (leaving it dead); feed it the
                # normalized activations as the layer pattern intends.
                self.conv3 = tf.layers.conv2d(inputs=self.batchnorm2, filters=128,
                                              kernel_size=3, padding="same",
                                              activation=tf.nn.relu, name="conv3")
                self.batchnorm3 = tf.layers.batch_normalization(self.conv3, training=self.is_training)
            # Policy head: 1x1 conv, flatten, softmax over board positions.
            with tf.variable_scope("action_layers"):
                self.action_conv = tf.layers.conv2d(inputs=self.batchnorm3, filters=8,
                                                   kernel_size=1, padding="same",
                                                   activation=tf.nn.relu, name="action_conv")
                self.action_conv_flat = tf.reshape(
                    self.action_conv, [-1, 8 * height * width])
                self.action_out = tf.layers.dense(inputs=self.action_conv_flat,
                                                  units=height*width,
                                                  activation=tf.nn.softmax,
                                                  name="action_out")
                self.action_out_log = tf.log(self.action_out)
            # Value head: 1x1 conv, flatten, dense, tanh scalar.
            with tf.variable_scope("value_layers"):
                self.value_conv = tf.layers.conv2d(inputs=self.batchnorm3, filters=2,
                                                  kernel_size=1, padding="same",
                                                  activation=tf.nn.relu, name="value_conv")
                self.value_conv_flat = tf.reshape(
                    self.value_conv, [-1, 2 * height * width]
                )
                self.value_fc = tf.layers.dense(inputs=self.value_conv_flat, units=64,
                                                activation=tf.nn.relu, name="value_fc")
                self.value_out = tf.layers.dense(inputs=self.value_fc, units=1,
                                                activation=tf.nn.tanh, name="value_out")
            # Losses: MSE for the value head, cross-entropy against the MCTS
            # probabilities for the policy head, plus L2 weight decay.
            self.value_loss = tf.losses.mean_squared_error(
                self.value_labels, self.value_out)
            self.policy_loss = tf.negative(tf.reduce_mean(tf.reduce_sum(tf.multiply(
                self.mcts_probs_labels, self.action_out_log), 1)))
            trainable_vars = tf.trainable_variables()
            self.l2_norm_weight = norm_weight
            # Exclude biases and batch-norm moving statistics from weight decay.
            l2_norm = norm_weight * tf.add_n(
                [tf.nn.l2_loss(v) for v in trainable_vars if ('bias' not in v.name.lower() and
                                                             'moving' not in v.name.lower())])
            self.loss = self.value_loss + self.policy_loss + l2_norm
            # Policy entropy, tracked as a training diagnostic only.
            self.entropy = tf.negative(tf.reduce_mean(
                tf.reduce_sum(self.action_out * tf.log(self.action_out), -1)
            ))
            # train op part
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate)
            # Ensure batch-norm statistics update alongside each train step.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_op = self.optimizer.minimize(self.loss)
            self.global_step = tf.get_variable("global_step", initializer=0, trainable=False)
            # BUG FIX: the old `self.global_step + 1` built a tensor that
            # never wrote back to the variable, so the step counter stayed
            # at 0 forever; assign_add actually increments it when run.
            self.step_add_op = tf.assign_add(self.global_step, 1)
        # session
        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())
        # Saver
        self.saver = tf.train.Saver()
        if model_file is not None:
            self.restore(model_file)

    def restore(self, model_path):
        """Restore weights from the latest checkpoint in model_path's directory."""
        dir_path = os.path.dirname(model_path)
        self.saver.restore(self.session, tf.train.latest_checkpoint(dir_path))

    def save(self, model_path):
        """Save a checkpoint tagged with the current global step."""
        global_step = self.getGlobalStep()
        dir_path = os.path.dirname(model_path)
        if not tf.gfile.Exists(dir_path):
            tf.gfile.MakeDirs(dir_path)
        self.saver.save(self.session, model_path, global_step=global_step)

    def getPolicyValue(self, state_batch):
        """Run inference and return (move probabilities, value) for a batch."""
        act_prob, value = self.session.run(
            [self.action_out, self.value_out],
            feed_dict={self.raw_input_states: state_batch, self.is_training: False}
        )
        return act_prob, value

    def policyValueFunc(self, board):
        """The Policy-value function.

        This function takes a board state and returns an iterator of
        (move, probability) pairs over the legal moves plus an evaluation value.
        """
        valid_positions = board.availables
        current_state = np.ascontiguousarray(board.currentState().reshape(
            -1, 4, self.board_height, self.board_width))
        policy_vec, value = self.getPolicyValue(current_state)
        # index 0 because getPolicyValue operates on a size-1 batch here
        policy_vec = zip(valid_positions, policy_vec[0][valid_positions])
        return policy_vec, value

    def trainStep(self, state_batch, mcts_probs_batch, winner_batch, lr):
        """Perform a single training step.

        :param state_batch: numpy array of board states (training data).
        :param mcts_probs_batch: numpy array of action probability vectors
            (training labels).
        :param winner_batch: numpy array of game results (training labels).
        :param lr: learning rate for this step.
        :returns: (loss, entropy) for the step.
        """
        winner_batch = np.reshape(winner_batch, (-1, 1))
        loss, _, _, entropy = self.session.run(
            [self.loss, self.train_op, self.step_add_op, self.entropy],
            feed_dict={self.raw_input_states: state_batch,
                       self.mcts_probs_labels: mcts_probs_batch,
                       self.value_labels: winner_batch,
                       self.learning_rate: lr,
                       self.is_training: True})
        return loss, entropy

    def getGlobalStep(self):
        """Return the current value of the global step counter."""
        return self.session.run(self.global_step)

    @property
    def width(self):
        return self.board_width

    @property
    def height(self):
        return self.board_height
| [
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.multiply",
"tensorflow.control_dependencies",
"tensorflow.gfile.MakeDirs",
"tensorflow.log",
"numpy.reshape",
"tensorflow.gfile.Exists",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.layers.conv2d",
"tensorflow.layers... | [((78, 108), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (95, 108), False, 'import six\n'), ((6631, 6658), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (6646, 6658), False, 'import os\n'), ((6833, 6860), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (6848, 6860), False, 'import os\n'), ((8355, 8388), 'numpy.reshape', 'np.reshape', (['winner_batch', '(-1, 1)'], {}), '(winner_batch, (-1, 1))\n', (8365, 8388), True, 'import numpy as np\n'), ((1022, 1052), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""SimpleCNN"""'], {}), "('SimpleCNN')\n", (1039, 1052), True, 'import tensorflow as tf\n'), ((1528, 1586), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 4, height, width]'}), '(tf.float32, shape=[None, 4, height, width])\n', (1542, 1586), True, 'import tensorflow as tf\n'), ((1685, 1728), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (1699, 1728), True, 'import tensorflow as tf\n'), ((1851, 1907), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, height * width]'}), '(tf.float32, shape=[None, height * width])\n', (1865, 1907), True, 'import tensorflow as tf\n'), ((1957, 1983), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1971, 1983), True, 'import tensorflow as tf\n'), ((2015, 2038), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (2029, 2038), True, 'import tensorflow as tf\n'), ((2130, 2179), 'tensorflow.transpose', 'tf.transpose', (['self.raw_input_states', '[0, 2, 3, 1]'], {}), '(self.raw_input_states, [0, 2, 3, 1])\n', (2142, 2179), True, 'import tensorflow as tf\n'), ((5037, 5100), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['self.value_labels', 'self.value_out'], {}), '(self.value_labels, 
self.value_out)\n', (5065, 5100), True, 'import tensorflow as tf\n'), ((5300, 5324), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5322, 5324), True, 'import tensorflow as tf\n'), ((5884, 5940), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (5906, 5940), True, 'import tensorflow as tf\n'), ((5996, 6038), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (6013, 6038), True, 'import tensorflow as tf\n'), ((6191, 6253), 'tensorflow.get_variable', 'tf.get_variable', (['"""global_step"""'], {'initializer': '(0)', 'trainable': '(False)'}), "('global_step', initializer=0, trainable=False)\n", (6206, 6253), True, 'import tensorflow as tf\n'), ((6356, 6368), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6366, 6368), True, 'import tensorflow as tf\n'), ((6479, 6495), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6493, 6495), True, 'import tensorflow as tf\n'), ((6700, 6736), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['dir_path'], {}), '(dir_path)\n', (6726, 6736), True, 'import tensorflow as tf\n'), ((6876, 6901), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['dir_path'], {}), '(dir_path)\n', (6891, 6901), True, 'import tensorflow as tf\n'), ((6915, 6942), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['dir_path'], {}), '(dir_path)\n', (6932, 6942), True, 'import tensorflow as tf\n'), ((2226, 2260), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""shared_layers"""'], {}), "('shared_layers')\n", (2243, 2260), True, 'import tensorflow as tf\n'), ((2291, 2417), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'self.input_states', 'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""conv1"""'}), "(inputs=self.input_states, filters=32, 
kernel_size=3,\n padding='same', activation=tf.nn.relu, name='conv1')\n", (2307, 2417), True, 'import tensorflow as tf\n'), ((2586, 2654), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.conv1'], {'training': 'self.is_training'}), '(self.conv1, training=self.is_training)\n', (2615, 2654), True, 'import tensorflow as tf\n'), ((2684, 2809), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'self.batchnorm1', 'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""conv2"""'}), "(inputs=self.batchnorm1, filters=64, kernel_size=3, padding\n ='same', activation=tf.nn.relu, name='conv2')\n", (2700, 2809), True, 'import tensorflow as tf\n'), ((2931, 2999), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.conv2'], {'training': 'self.is_training'}), '(self.conv2, training=self.is_training)\n', (2960, 2999), True, 'import tensorflow as tf\n'), ((3029, 3150), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'self.conv2', 'filters': '(128)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""conv3"""'}), "(inputs=self.conv2, filters=128, kernel_size=3, padding=\n 'same', activation=tf.nn.relu, name='conv3')\n", (3045, 3150), True, 'import tensorflow as tf\n'), ((3272, 3340), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.conv3'], {'training': 'self.is_training'}), '(self.conv3, training=self.is_training)\n', (3301, 3340), True, 'import tensorflow as tf\n'), ((3390, 3424), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_layers"""'], {}), "('action_layers')\n", (3407, 3424), True, 'import tensorflow as tf\n'), ((3461, 3591), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'self.batchnorm3', 'filters': '(8)', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""action_conv"""'}), 
"(inputs=self.batchnorm3, filters=8, kernel_size=1, padding=\n 'same', activation=tf.nn.relu, name='action_conv')\n", (3477, 3591), True, 'import tensorflow as tf\n'), ((3731, 3785), 'tensorflow.reshape', 'tf.reshape', (['self.action_conv', '[-1, 8 * height * width]'], {}), '(self.action_conv, [-1, 8 * height * width])\n', (3741, 3785), True, 'import tensorflow as tf\n'), ((3841, 3957), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.action_conv_flat', 'units': '(height * width)', 'activation': 'tf.nn.softmax', 'name': '"""action_out"""'}), "(inputs=self.action_conv_flat, units=height * width,\n activation=tf.nn.softmax, name='action_out')\n", (3856, 3957), True, 'import tensorflow as tf\n'), ((4140, 4163), 'tensorflow.log', 'tf.log', (['self.action_out'], {}), '(self.action_out)\n', (4146, 4163), True, 'import tensorflow as tf\n'), ((4213, 4246), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""value_layers"""'], {}), "('value_layers')\n", (4230, 4246), True, 'import tensorflow as tf\n'), ((4282, 4411), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'self.batchnorm3', 'filters': '(2)', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""value_conv"""'}), "(inputs=self.batchnorm3, filters=2, kernel_size=1, padding=\n 'same', activation=tf.nn.relu, name='value_conv')\n", (4298, 4411), True, 'import tensorflow as tf\n'), ((4548, 4601), 'tensorflow.reshape', 'tf.reshape', (['self.value_conv', '[-1, 2 * height * width]'], {}), '(self.value_conv, [-1, 2 * height * width])\n', (4558, 4601), True, 'import tensorflow as tf\n'), ((4672, 4771), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.value_conv_flat', 'units': '(64)', 'activation': 'tf.nn.relu', 'name': '"""value_fc"""'}), "(inputs=self.value_conv_flat, units=64, activation=tf.nn.\n relu, name='value_fc')\n", (4687, 4771), True, 'import tensorflow as tf\n'), ((4848, 4940), 'tensorflow.layers.dense', 'tf.layers.dense', 
([], {'inputs': 'self.value_fc', 'units': '(1)', 'activation': 'tf.nn.tanh', 'name': '"""value_out"""'}), "(inputs=self.value_fc, units=1, activation=tf.nn.tanh, name=\n 'value_out')\n", (4863, 4940), True, 'import tensorflow as tf\n'), ((6056, 6091), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (6079, 6091), True, 'import tensorflow as tf\n'), ((6398, 6431), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6429, 6431), True, 'import tensorflow as tf\n'), ((5190, 5246), 'tensorflow.multiply', 'tf.multiply', (['self.mcts_probs_labels', 'self.action_out_log'], {}), '(self.mcts_probs_labels, self.action_out_log)\n', (5201, 5246), True, 'import tensorflow as tf\n'), ((5434, 5450), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (5447, 5450), True, 'import tensorflow as tf\n'), ((5782, 5805), 'tensorflow.log', 'tf.log', (['self.action_out'], {}), '(self.action_out)\n', (5788, 5805), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 17:59:11 2020
@author: yaoleixu
"""
# ============================================================================
# Import libraries
import numpy as np
from tensorflow.keras import activations, initializers, constraints
from tensorflow.keras import regularizers
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer
import tensorflow as tf
# Seed TF's global RNG so layer weight initialization is reproducible.
tf.random.set_seed(0)
# ============================================================================
# Special Multi-Graph Tensor Network Model
class SpecialMultiGraphTensorNetwork (tf.keras.layers.Layer):
    """Keras layer applying one graph filter per input mode plus a dense
    feature map on the last mode.

    Each input mode i is filtered with (A_i + I), where A_i is the i-th
    matrix from graphs_list; the final (feature) mode is mapped to
    out_features channels by a trainable kernel.
    """
    # Initialize terms
    def __init__(self, out_features, graphs_list, bias_bool=True, **kwargs):
        '''
        ------
        Inputs
        ------
        out_features: (int) number of feature maps
        graphs_list: (list) list of graph adjacency matrices
        bias_bool: (bool) if use bias vector or not
        '''
        # Save input variables (the list is copied so the in-place edits in
        # build() do not mutate the caller's list)
        self.out_features = out_features
        self.graphs_list = graphs_list.copy()
        self.bias_bool = bias_bool
        # Useful variables
        self.n_graphs = len(graphs_list)
        self.graphs_shapes = [g.shape[0] for g in graphs_list]
        super(SpecialMultiGraphTensorNetwork, self).__init__(**kwargs)
    # Define weights
    def build(self, input_shape):
        # Graph adjacency matrices: I+D^{0.5}AD^{0.5}
        # NOTE(review): the code only adds the identity (A + I); presumably
        # the adjacency matrices are already degree-normalized by the
        # caller as the comment above implies -- confirm.
        for i in range(self.n_graphs):
            self.graphs_list[i] = tf.constant(tf.cast(self.graphs_list[i]+np.eye(self.graphs_shapes[i]), tf.float32))
        # Feature map kernel: maps the last input mode to out_features
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1], self.out_features),
                                      initializer='uniform',
                                      trainable=True)
        # Bias tensor covering every graph mode plus the feature mode
        if self.bias_bool:
            self.bias = self.add_weight(name='bias',
                                        shape=self.graphs_shapes + [self.out_features],
                                        initializer='uniform',
                                        trainable=True)
        # Be sure to call this at the end
        super(SpecialMultiGraphTensorNetwork, self).build(input_shape)
    # Forward pass
    def call(self, x):
        # Generate feature map along the last mode
        output = tf.tensordot(x, self.kernel, axes=[[-1],[0]])
        # Graph filters: each tensordot contracts one mode and moves the
        # filtered mode to the front, so modes accumulate in reverse order
        for i in range(self.n_graphs):
            output = tf.tensordot(self.graphs_list[i], output, axes=[[-1],[i+1]])
        # Transpose back to (batch, mode_1..mode_n, features) order
        transposing_list = [i for i in range(self.n_graphs, -1, -1)] + [self.n_graphs+1]
        output = tf.transpose(output, transposing_list)
        # Bias tensor
        if self.bias_bool: output = output + self.bias # add bias
        return output
    # Compute output shape
    def compute_output_shape(self, input_shape):
        # NOTE(review): this drops the graph modes from the reported shape
        # even though call() keeps them -- confirm whether callers rely on it.
        return (input_shape[0], self.out_features)
# ============================================================================
# Tensor-Train Fully-Connected Layer from Tensorizing Neural Networks
class TensorTrainLayer (tf.keras.layers.Layer):
    """Tensor-Train fully-connected layer.

    Implements the TT-format dense layer of 'Tensorizing Neural Networks'
    (Novikov et al., 2015): the dense weight matrix is stored as a chain of
    small TT-cores and reconstructed on the fly in call().
    """
    # define initial variables needed for implementation
    def __init__(self, tt_ips, tt_ops, tt_ranks, bias_bool=True, **kwargs):
        """
        :param tt_ips: per-mode input sizes; their product is the flattened
            input feature count.
        :param tt_ops: per-mode output sizes; their product is the output
            feature count.
        :param tt_ranks: TT-ranks, one more entry than tt_ips.
        :param bias_bool: whether to add a bias tensor of shape tt_ops.
        """
        # Tensor Train Variables.
        self.tt_ips = np.array(tt_ips)
        self.tt_ops = np.array(tt_ops)
        self.tt_ranks = np.array(tt_ranks)
        self.num_dim = self.tt_ips.shape[0]
        # Total number of core parameters (informational).
        self.param_n = np.sum(self.tt_ips*self.tt_ops*self.tt_ranks[1:]*self.tt_ranks[:-1])
        self.bias_bool = bias_bool
        super(TensorTrainLayer, self).__init__(**kwargs)
    # define weights for each core
    def build(self, input_shape):
        # Initialize the TT-cores; Keras passes the optimizer directly over
        # these core parameters.  The first and last cores drop the
        # boundary rank dimension.
        self.cores = []
        for d in range(self.num_dim):
            if d == 0: my_shape = (self.tt_ips[d], self.tt_ops[d], self.tt_ranks[d+1])
            elif d == self.num_dim-1: my_shape = (self.tt_ranks[d], self.tt_ips[d], self.tt_ops[d])
            else: my_shape = (self.tt_ranks[d], self.tt_ips[d], self.tt_ops[d], self.tt_ranks[d+1])
            self.cores.append(self.add_weight(name='tt_core_{}'.format(d),
                                              shape=my_shape,
                                              initializer='uniform',
                                              trainable=True))
        # Bias vector
        if self.bias_bool:
            self.bias = self.add_weight(name='bias',
                                        shape=self.tt_ops,
                                        initializer='uniform',
                                        trainable=True)
        # Be sure to call this at the end
        super(TensorTrainLayer, self).build(input_shape)
    # Implementing the layer logic
    def call(self, x, mask=None):
        # Chain the cores along their shared rank dimensions; the result w
        # has modes (ip_0, op_0, ip_1, op_1, ..., ip_{n-1}, op_{n-1}).
        w = self.cores[0]
        for d in range(1, self.num_dim):
            w = tf.tensordot(w, self.cores[d], [[-1],[0]])
        # BUG FIX: the contraction axes were hard-coded for num_dim == 3
        # (range(1, 3+1) / range(0, 2*3, 2)); derive them from num_dim so
        # any number of TT modes works.  Input modes 1..num_dim contract
        # with w's input modes 0, 2, 4, ...
        x_axes = list(range(1, self.num_dim + 1))
        w_axes = list(range(0, 2 * self.num_dim, 2))
        output = tf.tensordot(x, w, [x_axes, w_axes])
        if self.bias_bool: output = output + self.bias
        return output
    # Compute input/output shapes
    def compute_output_shape(self, input_shape):
        return (input_shape[0], np.prod(self.tt_ops))
# ============================================================================
# Graph Convolution Neural Network (slightly modified) from
# github.com/vermaMachineLearning/keras-deep-graph-learning
class GraphCNN(Layer):
    """Graph convolution layer, slightly modified from
    github.com/vermaMachineLearning/keras-deep-graph-learning.

    Multiplies the input node features by a stack of ``num_filters``
    pre-computed graph filter matrices (``graph_conv_filters``) and then by a
    dense kernel, with optional bias and activation.
    """
    def __init__(self,
                 output_dim,
                 num_filters,
                 graph_conv_filters,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """Store layer hyper-parameters.

        Args:
            output_dim: number of output features.
            num_filters: number of stacked graph filters; must equal
                ``graph_conv_filters.shape[-2] // graph_conv_filters.shape[-1]``
                (validated below).
            graph_conv_filters: pre-computed filter tensor.
            activation, use_bias, *_initializer, *_regularizer, *_constraint:
                standard Keras layer options resolved via the usual getters.
        """
        super(GraphCNN, self).__init__(**kwargs)
        self.output_dim = output_dim
        self.num_filters = num_filters
        # Sanity check: the stacked filter tensor must contain exactly
        # `num_filters` square blocks along its last two static dimensions.
        if num_filters != int(graph_conv_filters.get_shape().as_list()[-2]/graph_conv_filters.get_shape().as_list()[-1]):
            raise ValueError('num_filters does not match with graph_conv_filters dimensions.')
        self.graph_conv_filters = graph_conv_filters
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        # NOTE(review): presumably this keeps the human-readable initializer
        # name (a string) attached to the resolved initializer object for
        # later introspection/serialization — confirm before relying on it.
        self.kernel_initializer.__name__ = kernel_initializer
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
    def build(self, input_shape):
        # Kernel maps the concatenated filter outputs
        # (num_filters * input_dim features) to output_dim features.
        self.input_dim = input_shape[-1]
        kernel_shape = (self.num_filters * self.input_dim, self.output_dim)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.output_dim,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True
    def call(self, input):
        # # 3D tensor input: (batch_size x n_nodes x n_features)
        # output = graph_conv_op(input, self.num_filters, self.graph_conv_filters, self.kernel)
        # Contract the filters' axis 1 with the input's axis 1 (per the
        # original author's comment above, input is batch x nodes x features),
        # then apply the dense kernel and move the batch axis back to front.
        output = tf.tensordot(self.graph_conv_filters, input, [1, 1])
        output = tf.tensordot(output, self.kernel, [2, 0])
        output = tf.transpose(output, [1, 0, 2])
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
    def compute_output_shape(self, input_shape):
        # (batch, output_dim)
        output_shape = (input_shape[0], self.output_dim)
        return output_shape | [
"tensorflow.keras.constraints.get",
"numpy.prod",
"tensorflow.tensordot",
"numpy.eye",
"tensorflow.keras.activations.get",
"tensorflow.random.set_seed",
"tensorflow.transpose",
"tensorflow.keras.backend.bias_add",
"numpy.sum",
"numpy.array",
"tensorflow.keras.initializers.get",
"tensorflow.ker... | [((441, 462), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (459, 462), True, 'import tensorflow as tf\n'), ((2531, 2577), 'tensorflow.tensordot', 'tf.tensordot', (['x', 'self.kernel'], {'axes': '[[-1], [0]]'}), '(x, self.kernel, axes=[[-1], [0]])\n', (2543, 2577), True, 'import tensorflow as tf\n'), ((2863, 2901), 'tensorflow.transpose', 'tf.transpose', (['output', 'transposing_list'], {}), '(output, transposing_list)\n', (2875, 2901), True, 'import tensorflow as tf\n'), ((3572, 3588), 'numpy.array', 'np.array', (['tt_ips'], {}), '(tt_ips)\n', (3580, 3588), True, 'import numpy as np\n'), ((3611, 3627), 'numpy.array', 'np.array', (['tt_ops'], {}), '(tt_ops)\n', (3619, 3627), True, 'import numpy as np\n'), ((3652, 3670), 'numpy.array', 'np.array', (['tt_ranks'], {}), '(tt_ranks)\n', (3660, 3670), True, 'import numpy as np\n'), ((3743, 3817), 'numpy.sum', 'np.sum', (['(self.tt_ips * self.tt_ops * self.tt_ranks[1:] * self.tt_ranks[:-1])'], {}), '(self.tt_ips * self.tt_ops * self.tt_ranks[1:] * self.tt_ranks[:-1])\n', (3749, 3817), True, 'import numpy as np\n'), ((6799, 6826), 'tensorflow.keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (6814, 6826), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((6894, 6930), 'tensorflow.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), '(kernel_initializer)\n', (6910, 6930), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((7025, 7059), 'tensorflow.keras.initializers.get', 'initializers.get', (['bias_initializer'], {}), '(bias_initializer)\n', (7041, 7059), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((7094, 7130), 'tensorflow.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (7110, 7130), False, 'from tensorflow.keras import regularizers\n'), ((7163, 7197), 
'tensorflow.keras.regularizers.get', 'regularizers.get', (['bias_regularizer'], {}), '(bias_regularizer)\n', (7179, 7197), False, 'from tensorflow.keras import regularizers\n'), ((7234, 7272), 'tensorflow.keras.regularizers.get', 'regularizers.get', (['activity_regularizer'], {}), '(activity_regularizer)\n', (7250, 7272), False, 'from tensorflow.keras import regularizers\n'), ((7306, 7340), 'tensorflow.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (7321, 7340), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((7372, 7404), 'tensorflow.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), '(bias_constraint)\n', (7387, 7404), False, 'from tensorflow.keras import activations, initializers, constraints\n'), ((8538, 8590), 'tensorflow.tensordot', 'tf.tensordot', (['self.graph_conv_filters', 'input', '[1, 1]'], {}), '(self.graph_conv_filters, input, [1, 1])\n', (8550, 8590), True, 'import tensorflow as tf\n'), ((8608, 8649), 'tensorflow.tensordot', 'tf.tensordot', (['output', 'self.kernel', '[2, 0]'], {}), '(output, self.kernel, [2, 0])\n', (8620, 8649), True, 'import tensorflow as tf\n'), ((8667, 8698), 'tensorflow.transpose', 'tf.transpose', (['output', '[1, 0, 2]'], {}), '(output, [1, 0, 2])\n', (8679, 8698), True, 'import tensorflow as tf\n'), ((2662, 2725), 'tensorflow.tensordot', 'tf.tensordot', (['self.graphs_list[i]', 'output'], {'axes': '[[-1], [i + 1]]'}), '(self.graphs_list[i], output, axes=[[-1], [i + 1]])\n', (2674, 2725), True, 'import tensorflow as tf\n'), ((5269, 5312), 'tensorflow.tensordot', 'tf.tensordot', (['w', 'self.cores[d]', '[[-1], [0]]'], {}), '(w, self.cores[d], [[-1], [0]])\n', (5281, 5312), True, 'import tensorflow as tf\n'), ((5613, 5633), 'numpy.prod', 'np.prod', (['self.tt_ops'], {}), '(self.tt_ops)\n', (5620, 5633), True, 'import numpy as np\n'), ((8746, 8775), 'tensorflow.keras.backend.bias_add', 'K.bias_add', (['output', 'self.bias'], 
{}), '(output, self.bias)\n', (8756, 8775), True, 'import tensorflow.keras.backend as K\n'), ((3694, 3710), 'numpy.array', 'np.array', (['tt_ips'], {}), '(tt_ips)\n', (3702, 3710), True, 'import numpy as np\n'), ((1611, 1640), 'numpy.eye', 'np.eye', (['self.graphs_shapes[i]'], {}), '(self.graphs_shapes[i])\n', (1617, 1640), True, 'import numpy as np\n')] |
# %%
# Models and tokenizers
import joblib
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import keras.backend as K
from tensorflow.keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
import fasttext
import nltk
from xgboost import XGBClassifier
from imblearn.pipeline import make_pipeline, Pipeline
from imblearn.under_sampling import RandomUnderSampler
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import precision_score, recall_score, f1_score
# nmrezman
from ..models import get_bilstm_findings_classifier, get_bilstm_lung_adrenal_classifier, recommended_proc_model
from ...utils import generate_eval_report
# Misc
import os
import numpy as np
from tqdm import tqdm
import pandas as pd
import pickle
# Typing
from typing import Tuple, Union
import numpy.typing as npt
from keras.models import Sequential
# %%
def tokenize(
        x: pd.Series,
        max_num_words: int,
        max_sequence_length: int,
        tokenizer_fname: str=None,
        is_create: bool=True,
        ) -> Tuple[npt.NDArray, dict, int]:
    """Turn raw report text into padded integer sequences.

    Either fits a new Keras tokenizer on ``x`` (``is_create=True``, saving it
    to ``tokenizer_fname``) or loads a previously saved one.

    Returns:
        (padded sequences, word index dict, vocabulary size incl. padding).
    """
    if is_create:
        # Fresh tokenizer: lowercase the text and strip special characters,
        # then learn the vocabulary from the corpus.
        tokenizer = Tokenizer(num_words=max_num_words, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
        tokenizer.fit_on_texts(x)
    else:
        tokenizer = joblib.load(tokenizer_fname)
    vocabulary = tokenizer.word_index
    n_tokens = len(vocabulary) + 1
    # Pad on the left ("pre") because radiology findings are almost always
    # located in the last section of the report.
    sequences = tokenizer.texts_to_sequences(x)
    padded = pad_sequences(sequences, maxlen=max_sequence_length, padding="pre")
    if is_create:
        # Persist the tokenizer; classification and other models reuse it.
        os.makedirs(os.path.dirname(tokenizer_fname), exist_ok=True)
        joblib.dump(tokenizer, tokenizer_fname)
    return padded, vocabulary, n_tokens
def train(
        x_tokenized: npt.NDArray,
        y: Union[list, npt.NDArray],
        model: Sequential,
        model_checkpoint_name: str="./output/best_model.h5",
        epochs: int=50,
        class_weight: dict=None,
        result_fname: str="./output/validation.log",
        ) -> None:
    """Fit a Keras model with early stopping, then evaluate the best
    checkpoint on the held-out split and write a report to ``result_fname``.
    """
    # Create output directories up front so a long fit never fails at save time.
    for fname in (model_checkpoint_name, result_fname):
        os.makedirs(os.path.dirname(fname), exist_ok=True)
    # Hold out 30% for validation.
    x_train, x_val, y_train, y_val = train_test_split(x_tokenized, y, test_size=0.30, random_state=133278)
    # Clear the Keras backend so training starts from scratch.
    K.clear_session()
    early_stop = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=15)
    checkpoint = ModelCheckpoint(model_checkpoint_name, monitor="val_loss", mode="min", verbose=1, save_best_only=True)
    model.fit(
        x_train,
        to_categorical(y_train),
        class_weight=class_weight,
        epochs=epochs,
        batch_size=100,
        callbacks=[early_stop, checkpoint],
        verbose=1,
        validation_data=(x_val, to_categorical(y_val)),
    )
    # Reload the best checkpoint and report confusion-matrix style metrics.
    best_model = load_model(model_checkpoint_name)
    predictions = np.argmax(best_model.predict(np.array(x_val)), axis=1)
    generate_eval_report(y_val, predictions, result_fname)
    return
# %% [markdown]
# # Train Findings vs No Findings Model
# %%
def train_findings_model(
        data_path: str,
        glove_embedding_path: str,
        model_checkpoint_name: str="findings_best_model.h5",
        result_fname: str="findings_best_result.log",
        tokenizer_fname: str="tokenizer.gz"
        ) -> None:
    """
    Trains the Findings vs No Findings Phase 01 BiLSTM model.

    Args:
        data_path (`str`):
            Path to the dataframe file with the preprocessed impressions and labels in ``new_note`` and
            ``selected_finding`` columns, respectively
        glove_embedding_path (`str`):
            Path to the pre-downloaded GloVe Stanford pretrained word vectors ``glove.6B.300d`` as found at
            https://nlp.stanford.edu/projects/glove/
        model_checkpoint_name (`str`):
            Path / filename to save model checkpoints
        result_fname (`str`):
            Path / filename to save model evaluation metrics
        tokenizer_fname (`str`):
            Path / filename to save tokenizer
    """
    # Import data
    # NOTE: this data has already been preprocessed, extracting the findings, removing Dr signature, etc.
    # See `from ..utils import preprocess_input`
    modeling_df = joblib.load(data_path)
    # Get preprocessed notes and binary labels (1 = has findings)
    X = modeling_df["new_note"]
    labels = [0 if i == "No Findings" else 1 for i in modeling_df["selected_finding"]]
    # Define model constants
    max_sequence_length = 300       # Max length of report. Avg NM is ~250
    max_num_words = 15000           # Max number of words for init vocab; actual vocab size used by the model differs
    glove_embedding_dim = 300       # GloVe embedding dimension size
    # Tokenize the data (creates and saves a new tokenizer)
    X_tokenized, word_index, vocab_size = tokenize(
        x=X,
        max_num_words=max_num_words,
        max_sequence_length=max_sequence_length,
        tokenizer_fname=tokenizer_fname,
        is_create=True,
    )
    # Parse the GloVe word vectors into {word: vector}.
    # NOTE: Stanford pretrained word vectors glove.6B.300d were downloaded from https://nlp.stanford.edu/projects/glove/
    glove_embeddings_index = {}
    with open(glove_embedding_path, encoding="utf8") as f:
        for line in f:
            values = line.split()
            if not values:
                continue
            word = values[0]
            try:
                coefs = np.asarray(values[1:], dtype="float32")
            except ValueError:
                # BUGFIX: the old code did `except: pass` and then stored
                # `coefs` anyway, assigning the *previous* word's vector to
                # this word (or raising NameError on the first line). Skip
                # malformed lines instead.
                continue
            glove_embeddings_index[word] = coefs
    # Build the embedding matrix; words not found in the GloVe index keep
    # their random initialization.
    glove_embedding_matrix = np.random.random((len(word_index) + 1, glove_embedding_dim))
    for word, i in word_index.items():
        glove_embedding_vector = glove_embeddings_index.get(word)
        if glove_embedding_vector is not None:
            if len(glove_embedding_matrix[i]) != len(glove_embedding_vector):
                print("could not broadcast input array from shape", str(len(glove_embedding_matrix[i])),
                      "into shape", str(len(glove_embedding_vector)), " Please make sure your"
                      " EMBEDDING_DIM is equal to embedding_vector file ,GloVe,")
                exit(1)
            glove_embedding_matrix[i] = glove_embedding_vector
    # Define the model
    model = get_bilstm_findings_classifier(
        max_sequence_length=max_sequence_length,
        max_num_words=vocab_size,
        glove_embedding_dim=glove_embedding_dim,
        glove_embedding_matrix=glove_embedding_matrix,
    )
    # Train and evaluate
    train(
        x_tokenized=X_tokenized,
        y=labels,
        model=model,
        model_checkpoint_name=model_checkpoint_name,
        epochs=30,
        result_fname=result_fname,
    )
    return
# %% [markdown]
# # Train Lung vs Adrenal Findings Model
# %%
def train_lung_adrenal_model(
        data_path: str,
        bioword_path: str,
        model_checkpoint_name: str="lung_adrenal_best_model.h5",
        result_fname: str="lung_adrenal_best_result.log",
        tokenizer_fname: str="tokenizer.gz"
        ) -> None:
    """
    Trains the Lung vs Adrenal Findings Phase 01 BiLSTM model.

    Args:
        data_path (`str`):
            Path to the dataframe file with the preprocessed impressions and labels in ``new_note`` and
            ``selected_finding`` columns, respectively
        bioword_path (`str`):
            Path to the BioWordVec pretrained word vectors ``BioWordVec_PubMed_MIMICIII_d200.bin`` as from
            https://ftp.ncbi.nlm.nih.gov/pub/lu/Suppl/BioSentVec/BioWordVec_PubMed_MIMICIII_d200.bin
        model_checkpoint_name (`str`):
            Path / filename to save model checkpoints
        result_fname (`str`):
            Path / filename to save model evaluation metrics
        tokenizer_fname (`str`):
            Path / filename to save tokenizer
    """
    # Load the already-preprocessed dataframe (findings extracted, Dr
    # signature removed, etc. — see `from ..utils import preprocess_input`).
    modeling_df = joblib.load(data_path)
    # Keep only reports with findings; label Lung = 0, everything else = 1.
    with_findings = modeling_df[modeling_df["selected_finding"] != "No Findings"]
    X = with_findings["new_note"]
    labels = [0 if i == "Lung Findings" else 1 for i in with_findings["selected_finding"]]
    # Model constants
    max_sequence_length = 300       # Max report length. Avg NM is ~250
    max_num_words = 20000           # Init vocab cap; the model's actual vocab size differs
    bioword_embedding_dim = 200     # BioWordVec embedding dimension
    # Reuse the tokenizer saved by the findings model (is_create=False).
    X_tokenized, word_index, vocab_size = tokenize(
        x=X,
        max_num_words=max_num_words,
        max_sequence_length=max_sequence_length,
        tokenizer_fname=tokenizer_fname,
        is_create=False,
    )
    # Load the (bio) word-embedding model.
    # NOTE: downloaded from https://ftp.ncbi.nlm.nih.gov/pub/lu/Suppl/BioSentVec/BioWordVec_PubMed_MIMICIII_d200.bin
    word_vectors = fasttext.load_model(bioword_path)
    # Build the embedding matrix, one BioWordVec row per known token.
    num_words = min(len(word_index) + 1, max_num_words)
    bioword_embedding_matrix = np.zeros((num_words, bioword_embedding_dim))
    for word, i in tqdm(word_index.items()):
        if i < max_num_words:
            bioword_embedding_matrix[i] = word_vectors.get_word_vector(word)
    # Define the classifier
    model = get_bilstm_lung_adrenal_classifier(
        max_num_words=vocab_size,
        bioword_embedding_dim=bioword_embedding_dim,
        max_sequence_length=max_sequence_length,
        bioword_embedding_matrix=bioword_embedding_matrix,
    )
    # Train and evaluate; adrenal cases are rare, hence the heavy class weight.
    train(
        x_tokenized=X_tokenized,
        y=labels,
        model=model,
        model_checkpoint_name=model_checkpoint_name,
        epochs=50,
        class_weight={0: 1, 1: 100},
        result_fname=result_fname,
    )
    return
# %% [markdown]
# # Train Lung Recommended Procedure (Chest CT or Ambiguous) Model
# %%
def train_lung_recommended_proc_model(
        data_path: str,
        model_checkpoint_name: str="lung_recommend_best_model.h5",
        result_fname: str="lung_recommend_best_result.log",
        tokenizer_fname: str="tokenizer.gz",
        ) -> None:
    """
    Trains the Lung Recommended Procedure Phase 01 BiLSTM model. Recommends "Chest CT" or "Ambiguous" procedure for
    "Lung Findings".

    Args:
        data_path (`str`):
            Path to the dataframe file with the preprocessed impressions and labels in ``new_note`` and
            ``selected_finding`` columns, respectively
        model_checkpoint_name (`str`):
            Path / filename to save model checkpoints
        result_fname (`str`):
            Path / filename to save model evaluation metrics
        tokenizer_fname (`str`):
            Path / filename to save tokenizer
    """
    # Load the already-preprocessed dataframe (findings extracted, Dr
    # signature removed, etc. — see `from ..utils import preprocess_input`).
    modeling_df = joblib.load(data_path)
    # Restrict to lung-finding reports; label CT Chest = 1, Ambiguous = 0.
    lung_df = modeling_df[modeling_df["selected_finding"] == "Lung Findings"]
    X = lung_df["new_note"]
    labels = [1 if i == "CT Chest" else 0 for i in lung_df["selected_proc"]]
    # Model constants
    max_sequence_length = 300   # Max report length. Avg NM is ~250
    max_num_words = 20000       # Init vocab cap; the model's actual vocab size differs
    embedding_dim = 300         # Embedding dimension size
    # Reuse the tokenizer saved by the findings model (is_create=False).
    X_tokenized, word_index, vocab_size = tokenize(
        x=X,
        max_num_words=max_num_words,
        max_sequence_length=max_sequence_length,
        tokenizer_fname=tokenizer_fname,
        is_create=False,
    )
    token_matrix = np.array(X_tokenized)
    # Define the classifier
    model = recommended_proc_model(
        max_num_words=max_num_words,
        embedding_dim=embedding_dim,
        input_length=token_matrix.shape[1],
    )
    # Train and evaluate
    train(
        x_tokenized=token_matrix,
        y=np.array(labels),
        model=model,
        model_checkpoint_name=model_checkpoint_name,
        epochs=50,
        result_fname=result_fname,
    )
    return
# %% [markdown]
# # Train Comment Extraction Model
# %%
def train_comment_model(
        data_path: str,
        model_checkpoint_name: str="comment_best_model.sav",
        result_fname: str="comment_best_result.log",
        ) -> None:
    """
    Trains the Comment Extraction Phase 01 XGBoost model.

    Args:
        data_path (`str`):
            Path to the dataframe file with the preprocessed impressions and labels in ``new_note`` and
            ``selected_finding`` columns, respectively
        model_checkpoint_name (`str`):
            Path / filename to save model checkpoints
        result_fname (`str`):
            Path / filename to save model evaluation metrics
    """
    # Import data
    # NOTE: this data has already been preprocessed, extracting the findings, removing Dr signature, etc.
    # See `from ..utils import preprocess_input`
    modeling_df = joblib.load(data_path)
    # Get the portion of the dataset that includes only findings
    only_findings_df = modeling_df[modeling_df["selected_finding"] != "No Findings"]
    # Split into train and test data.
    # (Renamed from `train` so the local no longer shadows the module-level
    # train() function used by the other pipelines.)
    train_df, hold_out = train_test_split(only_findings_df, test_size=0.2)
    # Model training is done on sentences vs the whole report
    nltk.download("punkt")

    def get_sentence_classification_data(reports):
        """Tokenize each report into sentences; label a sentence 1 if it is
        contained in the last sentence of the report's selected label."""
        main_row_sents = []
        sent_classifier = []
        rpt_num = []
        for _, row in tqdm(reports.iterrows()):
            row_sents = nltk.tokenize.sent_tokenize(row["note"])
            last_sentence_label = nltk.tokenize.sent_tokenize(row["selected_label"])[-1]
            for sentence in row_sents:
                sent_classifier.append(1 if sentence in last_sentence_label else 0)
                rpt_num.append(row["rpt_num"])
            main_row_sents.append(row_sents)
        return main_row_sents, sent_classifier, rpt_num

    main_row_sents, classifier, rpt_num = get_sentence_classification_data(train_df)
    # Create a sentence-level dataframe
    sent_class_df = pd.DataFrame()
    sent_class_df["sentence"] = [s for sents in main_row_sents for s in sents]
    sent_class_df["finding_sent"] = classifier
    sent_class_df["rpt_num"] = rpt_num
    # BUGFIX: the original list was ["the", "is", "are", "a" "there", ...] —
    # the missing comma concatenated "a" "there" into the single (useless)
    # stop word "athere", so neither "a" nor "there" was actually removed.
    my_stop_words = ["the", "is", "are", "a", "there", "for", "in"]
    tvec = TfidfVectorizer(stop_words=my_stop_words, max_features=1000, ngram_range=(1, 3))
    # Define XGBoost classifier
    xgb = XGBClassifier(eval_metric="error", use_label_encoder=False)
    sent_class_df["X"] = sent_class_df["sentence"]
    # Build the undersampling + TF-IDF + XGBoost pipeline and train
    print("XgBoost results")
    print("+"*100)
    RUS_pipeline = make_pipeline(tvec, RandomUnderSampler(random_state=777), xgb)
    lr_cv(5, sent_class_df.X, sent_class_df.finding_sent, RUS_pipeline, "macro", model_checkpoint_name, result_fname)
    return
def lr_cv(
        splits: int,
        X: pd.Series,
        Y: pd.Series,
        pipeline: Pipeline,
        average_method: str,
        model_checkpoint_name: str,
        result_fname: str,
        ) -> None:
    """
    Trains the comment extraction model via stratified k-fold CV, saves the
    pipeline, and prints/writes per-fold and summary metrics.

    Args:
        splits (`int`):
            Number of folds
        X (`pandas.Series`):
            Series of train and test X data
        Y (`pandas.Series`):
            Series of train and test Y data
        pipeline (`imblearn.pipeline.Pipeline`):
            Pipeline of transforms and resamples with a final estimator
        average_method (`str`):
            Type of averaging performed on the data
        model_checkpoint_name (`str`):
            Path / filename to save model checkpoints
        result_fname (`str`):
            Path / filename to save model evaluation metrics
    """
    # Train and evaluate each fold, accumulating per-class and averaged metrics
    kfold = StratifiedKFold(n_splits=splits, shuffle=True, random_state=777)
    accuracy = []
    precision = []
    recall = []
    finding_recall = []
    no_finding_recall = []
    finding_precision = []
    no_finding_precision = []
    f1 = []
    f1_all = []
    # Fold index variables renamed from train/test so they do not shadow the
    # module-level train() function.
    for train_idx, test_idx in kfold.split(X, Y):
        lr_fit = pipeline.fit(X[train_idx], Y[train_idx])
        prediction = lr_fit.predict(X[test_idx])
        scores = lr_fit.score(X[test_idx], Y[test_idx])
        print("              no_finding  finding_present  ")
        accuracy.append(scores*100)
        p_score = precision_score(Y[test_idx], prediction, average=None)
        print("precision:", p_score)
        precision.append(precision_score(Y[test_idx], prediction, average=average_method)*100)
        finding_precision.append(p_score[1])
        no_finding_precision.append(p_score[0])
        r_score = recall_score(Y[test_idx], prediction, average=None)
        print("recall:    ", r_score)
        recall.append(recall_score(Y[test_idx], prediction, average=average_method)*100)
        finding_recall.append(r_score[1])
        no_finding_recall.append(r_score[0])
        f1.append(f1_score(Y[test_idx], prediction, average=average_method)*100)
        f_score = f1_score(Y[test_idx], prediction, average=None)
        f1_all.append(f_score)
        print("f1 score:  ", f_score)
        print("-"*50)
    # Save the model.
    # NOTE: the pipeline saved here is the one fit on the *last* fold.
    # BUGFIX: use a context manager so the checkpoint file handle is closed
    # (the old code passed an open() result straight to pickle.dump and
    # leaked it).
    os.makedirs(os.path.dirname(model_checkpoint_name), exist_ok=True)
    with open(model_checkpoint_name, "wb") as model_fh:
        pickle.dump(pipeline, model_fh)
    # Print a summary of the results
    print("accuracy: %.2f%% (+/- %.2f%%)" % (np.mean(accuracy), np.std(accuracy)))
    print("precision: %.2f%% (+/- %.2f%%)" % (np.mean(precision), np.std(precision)))
    print("recall: %.2f%% (+/- %.2f%%)" % (np.mean(recall), np.std(recall)))
    print("f1 score: %.2f%% (+/- %.2f%%)" % (np.mean(f1), np.std(f1)))
    print("Finding Recall: %.2f%%" % (np.mean(finding_recall)*100))
    print("No Finding Recall: %.2f%%" % (np.mean(no_finding_recall)*100))
    print("Finding Precision: %.2f%%" % (np.mean(finding_precision)*100))
    print("No Finding Precision: %.2f%%" % (np.mean(no_finding_precision)*100))
    # Write results out to a file
    with open(result_fname, "w") as fh:
        fh.write("Classification Report:")
        fh.write("\n\taccuracy: %.2f%% (+/- %.2f%%)" % (np.mean(accuracy), np.std(accuracy)))
        fh.write("\n\tprecision: %.2f%% (+/- %.2f%%)" % (np.mean(precision), np.std(precision)))
        fh.write("\n\trecall: %.2f%% (+/- %.2f%%)" % (np.mean(recall), np.std(recall)))
        fh.write("\n\tf1 score: %.2f%% (+/- %.2f%%)" % (np.mean(f1), np.std(f1)))
        fh.write("\n\tFinding Recall: %.2f%%" % (np.mean(finding_recall)*100))
        fh.write("\n\tNo Finding Recall: %.2f%%" % (np.mean(no_finding_recall)*100))
        fh.write("\n\tFinding Precision: %.2f%%" % (np.mean(finding_precision)*100))
        fh.write("\n\tNo Finding Precision: %.2f%%" % (np.mean(no_finding_precision)*100))
        fh.write("\n\nAll Results:")
        for precision_no_finding, precision_finding, recall_no_finding, recall_finding, f1sc in zip(no_finding_precision, finding_precision, no_finding_recall, finding_recall, f1_all):
            fh.write("\n\t              no_finding  finding_present  ")
            fh.write(f"\n\tprecision:   [{precision_no_finding:0.8f}  {precision_finding:0.8f}]")
            fh.write(f"\n\trecall:      [{recall_no_finding:0.8f}  {recall_finding:0.8f}]")
            fh.write(f"\n\tf1 score:     {f1sc}")
            fh.write("\n\t"+"-"*50)
    return
# %%
| [
"nltk.download",
"sklearn.metrics.precision_score",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sklearn.metrics.recall_score",
"nltk.tokenize.sent_tokenize",
"keras.preprocessing.sequence.pad_sequences",
"fasttext.load_model",
"imblearn.under_sampling.RandomUnderSampler",
"numpy.mea... | [((1889, 1958), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_tokenized'], {'maxlen': 'max_sequence_length', 'padding': '"""pre"""'}), "(x_tokenized, maxlen=max_sequence_length, padding='pre')\n", (1902, 1958), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2837, 2905), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_tokenized', 'y'], {'test_size': '(0.3)', 'random_state': '(133278)'}), '(x_tokenized, y, test_size=0.3, random_state=133278)\n', (2853, 2905), False, 'from sklearn.model_selection import train_test_split\n'), ((2980, 2997), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2995, 2997), True, 'import keras.backend as K\n'), ((3042, 3111), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(15)'}), "(monitor='val_loss', mode='min', verbose=1, patience=15)\n", (3055, 3111), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3122, 3228), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_checkpoint_name'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(model_checkpoint_name, monitor='val_loss', mode='min',\n verbose=1, save_best_only=True)\n", (3137, 3228), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3538, 3571), 'keras.models.load_model', 'load_model', (['model_checkpoint_name'], {}), '(model_checkpoint_name)\n', (3548, 3571), False, 'from keras.models import load_model\n'), ((3688, 3713), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (3697, 3713), True, 'import numpy as np\n'), ((5036, 5058), 'joblib.load', 'joblib.load', (['data_path'], {}), '(data_path)\n', (5047, 5058), False, 'import joblib\n'), ((8911, 8933), 'joblib.load', 'joblib.load', (['data_path'], {}), '(data_path)\n', (8922, 8933), False, 'import 
joblib\n'), ((10003, 10036), 'fasttext.load_model', 'fasttext.load_model', (['bioword_path'], {}), '(bioword_path)\n', (10022, 10036), False, 'import fasttext\n'), ((10165, 10209), 'numpy.zeros', 'np.zeros', (['(num_words, bioword_embedding_dim)'], {}), '((num_words, bioword_embedding_dim))\n', (10173, 10209), True, 'import numpy as np\n'), ((12080, 12102), 'joblib.load', 'joblib.load', (['data_path'], {}), '(data_path)\n', (12091, 12102), False, 'import joblib\n'), ((14323, 14345), 'joblib.load', 'joblib.load', (['data_path'], {}), '(data_path)\n', (14334, 14345), False, 'import joblib\n'), ((14555, 14604), 'sklearn.model_selection.train_test_split', 'train_test_split', (['only_findings_df'], {'test_size': '(0.2)'}), '(only_findings_df, test_size=0.2)\n', (14571, 14604), False, 'from sklearn.model_selection import train_test_split\n'), ((14697, 14719), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (14710, 14719), False, 'import nltk\n'), ((15643, 15657), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15655, 15657), True, 'import pandas as pd\n'), ((15828, 15867), 'numpy.array', 'np.array', (["sent_class_df['finding_sent']"], {}), "(sent_class_df['finding_sent'])\n", (15836, 15867), True, 'import numpy as np\n'), ((15946, 16031), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': 'my_stop_words', 'max_features': '(1000)', 'ngram_range': '(1, 3)'}), '(stop_words=my_stop_words, max_features=1000, ngram_range=(1, 3)\n )\n', (15961, 16031), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((16037, 16098), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': 'my_stop_words', 'ngram_range': '(1, 4)'}), '(stop_words=my_stop_words, ngram_range=(1, 4))\n', (16052, 16098), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((16141, 16200), 'xgboost.XGBClassifier', 'XGBClassifier', ([], 
{'eval_metric': '"""error"""', 'use_label_encoder': '(False)'}), "(eval_metric='error', use_label_encoder=False)\n", (16154, 16200), False, 'from xgboost import XGBClassifier\n'), ((17470, 17534), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'splits', 'shuffle': '(True)', 'random_state': '(777)'}), '(n_splits=splits, shuffle=True, random_state=777)\n', (17485, 17534), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1441, 1536), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'max_num_words', 'filters': '"""!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~"""', 'lower': '(True)'}), '(num_words=max_num_words, filters=\n \'!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\', lower=True)\n', (1450, 1536), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1595, 1623), 'joblib.load', 'joblib.load', (['tokenizer_fname'], {}), '(tokenizer_fname)\n', (1606, 1623), False, 'import joblib\n'), ((2147, 2186), 'joblib.dump', 'joblib.dump', (['tokenizer', 'tokenizer_fname'], {}), '(tokenizer, tokenizer_fname)\n', (2158, 2186), False, 'import joblib\n'), ((2639, 2677), 'os.path.dirname', 'os.path.dirname', (['model_checkpoint_name'], {}), '(model_checkpoint_name)\n', (2654, 2677), False, 'import os\n'), ((2710, 2739), 'os.path.dirname', 'os.path.dirname', (['result_fname'], {}), '(result_fname)\n', (2725, 2739), False, 'import os\n'), ((3266, 3289), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['train_y'], {}), '(train_y)\n', (3280, 3289), False, 'from tensorflow.keras.utils import to_categorical\n'), ((3657, 3673), 'numpy.array', 'np.array', (['test_x'], {}), '(test_x)\n', (3665, 3673), True, 'import numpy as np\n'), ((16382, 16418), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': '(777)'}), '(random_state=777)\n', (16400, 16418), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((18038, 18088), 'sklearn.metrics.precision_score', 'precision_score', 
(['Y[test]', 'prediction'], {'average': 'None'}), '(Y[test], prediction, average=None)\n', (18053, 18088), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((18337, 18384), 'sklearn.metrics.recall_score', 'recall_score', (['Y[test]', 'prediction'], {'average': 'None'}), '(Y[test], prediction, average=None)\n', (18349, 18384), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((18690, 18733), 'sklearn.metrics.f1_score', 'f1_score', (['Y[test]', 'prediction'], {'average': 'None'}), '(Y[test], prediction, average=None)\n', (18698, 18733), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((18862, 18900), 'os.path.dirname', 'os.path.dirname', (['model_checkpoint_name'], {}), '(model_checkpoint_name)\n', (18877, 18900), False, 'import os\n'), ((2090, 2122), 'os.path.dirname', 'os.path.dirname', (['tokenizer_fname'], {}), '(tokenizer_fname)\n', (2105, 2122), False, 'import os\n'), ((6156, 6195), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (6166, 6195), True, 'import numpy as np\n'), ((13239, 13260), 'numpy.array', 'np.array', (['X_tokenized'], {}), '(X_tokenized)\n', (13247, 13260), True, 'import numpy as np\n'), ((13272, 13288), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (13280, 13288), True, 'import numpy as np\n'), ((14903, 14943), 'nltk.tokenize.sent_tokenize', 'nltk.tokenize.sent_tokenize', (["row['note']"], {}), "(row['note'])\n", (14930, 14943), False, 'import nltk\n'), ((3460, 3482), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['test_y'], {}), '(test_y)\n', (3474, 3482), False, 'from tensorflow.keras.utils import to_categorical\n'), ((14978, 15028), 'nltk.tokenize.sent_tokenize', 'nltk.tokenize.sent_tokenize', (["row['selected_label']"], {}), "(row['selected_label'])\n", (15005, 15028), False, 'import nltk\n'), ((18151, 18211), 'sklearn.metrics.precision_score', 
'precision_score', (['Y[test]', 'prediction'], {'average': 'average_method'}), '(Y[test], prediction, average=average_method)\n', (18166, 18211), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((18444, 18501), 'sklearn.metrics.recall_score', 'recall_score', (['Y[test]', 'prediction'], {'average': 'average_method'}), '(Y[test], prediction, average=average_method)\n', (18456, 18501), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((18613, 18666), 'sklearn.metrics.f1_score', 'f1_score', (['Y[test]', 'prediction'], {'average': 'average_method'}), '(Y[test], prediction, average=average_method)\n', (18621, 18666), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((19124, 19141), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (19131, 19141), True, 'import numpy as np\n'), ((19143, 19159), 'numpy.std', 'np.std', (['accuracy'], {}), '(accuracy)\n', (19149, 19159), True, 'import numpy as np\n'), ((19208, 19226), 'numpy.mean', 'np.mean', (['precision'], {}), '(precision)\n', (19215, 19226), True, 'import numpy as np\n'), ((19228, 19245), 'numpy.std', 'np.std', (['precision'], {}), '(precision)\n', (19234, 19245), True, 'import numpy as np\n'), ((19291, 19306), 'numpy.mean', 'np.mean', (['recall'], {}), '(recall)\n', (19298, 19306), True, 'import numpy as np\n'), ((19308, 19322), 'numpy.std', 'np.std', (['recall'], {}), '(recall)\n', (19314, 19322), True, 'import numpy as np\n'), ((19370, 19381), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (19377, 19381), True, 'import numpy as np\n'), ((19383, 19393), 'numpy.std', 'np.std', (['f1'], {}), '(f1)\n', (19389, 19393), True, 'import numpy as np\n'), ((19435, 19458), 'numpy.mean', 'np.mean', (['finding_recall'], {}), '(finding_recall)\n', (19442, 19458), True, 'import numpy as np\n'), ((19506, 19532), 'numpy.mean', 'np.mean', (['no_finding_recall'], {}), '(no_finding_recall)\n', (19513, 19532), True, 'import numpy 
as np\n'), ((19580, 19606), 'numpy.mean', 'np.mean', (['finding_precision'], {}), '(finding_precision)\n', (19587, 19606), True, 'import numpy as np\n'), ((19657, 19686), 'numpy.mean', 'np.mean', (['no_finding_precision'], {}), '(no_finding_precision)\n', (19664, 19686), True, 'import numpy as np\n'), ((13144, 13165), 'numpy.array', 'np.array', (['X_tokenized'], {}), '(X_tokenized)\n', (13152, 13165), True, 'import numpy as np\n'), ((19867, 19884), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (19874, 19884), True, 'import numpy as np\n'), ((19886, 19902), 'numpy.std', 'np.std', (['accuracy'], {}), '(accuracy)\n', (19892, 19902), True, 'import numpy as np\n'), ((19962, 19980), 'numpy.mean', 'np.mean', (['precision'], {}), '(precision)\n', (19969, 19980), True, 'import numpy as np\n'), ((19982, 19999), 'numpy.std', 'np.std', (['precision'], {}), '(precision)\n', (19988, 19999), True, 'import numpy as np\n'), ((20056, 20071), 'numpy.mean', 'np.mean', (['recall'], {}), '(recall)\n', (20063, 20071), True, 'import numpy as np\n'), ((20073, 20087), 'numpy.std', 'np.std', (['recall'], {}), '(recall)\n', (20079, 20087), True, 'import numpy as np\n'), ((20146, 20157), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (20153, 20157), True, 'import numpy as np\n'), ((20159, 20169), 'numpy.std', 'np.std', (['f1'], {}), '(f1)\n', (20165, 20169), True, 'import numpy as np\n'), ((20222, 20245), 'numpy.mean', 'np.mean', (['finding_recall'], {}), '(finding_recall)\n', (20229, 20245), True, 'import numpy as np\n'), ((20304, 20330), 'numpy.mean', 'np.mean', (['no_finding_recall'], {}), '(no_finding_recall)\n', (20311, 20330), True, 'import numpy as np\n'), ((20389, 20415), 'numpy.mean', 'np.mean', (['finding_precision'], {}), '(finding_precision)\n', (20396, 20415), True, 'import numpy as np\n'), ((20477, 20506), 'numpy.mean', 'np.mean', (['no_finding_precision'], {}), '(no_finding_precision)\n', (20484, 20506), True, 'import numpy as np\n')] |
# Copyright 2017 Novartis Institutes for BioMedical Research Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import numpy as np
from scipy import misc
class PMLImage:
    """ Project Mona Lisa (PML) Image class processes a single
    training image from the collected data. Each process in
    PMLImage processes one image from the source path and saves
    the processed image in the destination directory path.

    NOTE(review): the scipy.misc image helpers (imread/imsave/
    imresize/toimage) were removed in SciPy >= 1.2/1.3; migrating
    to imageio/Pillow would add a dependency, so the calls are
    kept unchanged here.
    """
    def __init__(self, source_path, destination_path):
        """
        Args:
            source_path (str): the source path where the image to be
                processed is stored.
            destination_path (str): the destination path where the
                image will be stored after any given PMLImage process
                takes effect.
        """
        self.source_path = source_path
        self.destination_path = destination_path
        # side length (pixels) of the square produced by resize_img()
        self.resize_dim = 128
    def process_img_for_predict(self):
        """ Pipelines the grey_img(), crop_img() and resize_img()
        methods on the single image in the source path. Each step
        reads the previous step's output in place, so the source
        path and destination path must be equal.

        Returns:
            (bool) True if successful, otherwise an error is
                thrown.
        """
        assert self.source_path == self.destination_path
        # each step writes its result to destination == source,
        # so the next step picks it up automatically
        self.grey_img()
        self.crop_img()
        self.resize_img()
        return True
    def img_obj_to_array(self):
        """
        Returns: the saved image in the source directory as a numpy
        array.
        """
        return misc.imread(self.source_path)
    def grey_img(self):
        """ Converts the RGBA png file in the source path into a
        single-channel image and saves the result at the
        destination path.

        Returns:
            (bool) True if successful, otherwise an error is
                thrown.
        """
        # read the image as a numpy array
        img = misc.imread(self.source_path)
        # Channel index 3 of an RGBA png is the alpha plane.  The
        # original code computed np.dot(img[:, :, 3], identity),
        # an O(n^3) no-op whose only effect was promoting the
        # channel to float64 -- extract the channel directly instead.
        greyed_img = img[:, :, 3].astype(np.float64)
        # save the 2-D array as a grey-scale png image.
        misc.toimage(greyed_img, cmin=0.0, cmax=255.0)\
            .save(self.destination_path)
        return True
    def crop_img(self):
        """ Crops the source 2D png image into a centered square
        whose side is the shortest side of the original image.

        Returns:
            (bool) True if successful, otherwise an error is
                thrown.
        """
        img = misc.imread(self.source_path)
        lx, ly = img.shape
        # the shortest side:
        coord_min = min(lx, ly)
        # BUG FIX: use floor division -- plain '/' yields floats in
        # Python 3 and float slice indices raise TypeError.
        crop_x = (lx - coord_min) // 2
        crop_y = (ly - coord_min) // 2
        cropped = img[crop_x: crop_x + coord_min,
                      crop_y: crop_y + coord_min]
        # save the array as an image
        misc.imsave(self.destination_path, cropped)
        return True
    def resize_img(self):
        """ Resizes the 2D source image into a square shape of
        <self.resize_dim> by <self.resize_dim>. This will magnify
        or shrink the image as needed to fit the new dimensions.

        Returns:
            (bool) True if successful, otherwise an error is
                thrown.
        """
        img = misc.imread(self.source_path)
        size = (self.resize_dim, self.resize_dim)
        resized_img = misc.imresize(img, size)
        misc.imsave(self.destination_path, resized_img)
        return True
    def synthesize_img(self, n_per_img):
        """ Generates n_per_img randomly perturbed copies of the
        source image in the source image's directory, using Keras'
        ImageDataGenerator.

        Args:
            n_per_img (int): number of synthetic images to generate.
                (The original hard-coded ~21 copies and ignored this
                argument; it also referenced undefined names
                `ImageDataGenerator`, `x` and `source_dir`.)

        Returns:
            (bool) True if successful, otherwise an error is
                thrown.
        """
        import os
        # imported lazily so the rest of the class works without Keras
        from keras.preprocessing.image import ImageDataGenerator
        datagen = ImageDataGenerator(
            rotation_range=40,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=False,
            fill_mode='nearest')
        img = misc.imread(self.source_path)
        # Keras' flow() expects a rank-4 batch: (n, height, width, channels)
        x = img if img.ndim == 3 else img[..., np.newaxis]
        x = x[np.newaxis, ...]
        source_dir = os.path.dirname(self.source_path)
        filename = self.source_path.split('/')[-1]
        save_prefix = filename.split('.')[0]
        # the .flow() command below generates batches of randomly transformed images
        # and saves the results to the directory; it loops indefinitely,
        # so break explicitly after n_per_img images.
        i = 0
        for _ in datagen.flow(
                x,
                batch_size=1,
                save_to_dir=source_dir,
                save_prefix='%s.%d' % (save_prefix, i),
                save_format='png'):
            i += 1
            if i >= n_per_img:
                break
        return True
| [
"numpy.identity",
"scipy.misc.imsave",
"scipy.misc.toimage",
"numpy.dot",
"scipy.misc.imread",
"scipy.misc.imresize"
] | [((2300, 2329), 'scipy.misc.imread', 'misc.imread', (['self.source_path'], {}), '(self.source_path)\n', (2311, 2329), False, 'from scipy import misc\n'), ((2697, 2726), 'scipy.misc.imread', 'misc.imread', (['self.source_path'], {}), '(self.source_path)\n', (2708, 2726), False, 'from scipy import misc\n'), ((2776, 2791), 'numpy.identity', 'np.identity', (['ly'], {}), '(ly)\n', (2787, 2791), True, 'import numpy as np\n'), ((2952, 2982), 'numpy.dot', 'np.dot', (['img[:, :, 3]', 'identity'], {}), '(img[:, :, 3], identity)\n', (2958, 2982), True, 'import numpy as np\n'), ((3498, 3527), 'scipy.misc.imread', 'misc.imread', (['self.source_path'], {}), '(self.source_path)\n', (3509, 3527), False, 'from scipy import misc\n'), ((4160, 4204), 'scipy.misc.imsave', 'misc.imsave', (['self.destination_path', 'crop_img'], {}), '(self.destination_path, crop_img)\n', (4171, 4204), False, 'from scipy import misc\n'), ((4591, 4620), 'scipy.misc.imread', 'misc.imread', (['self.source_path'], {}), '(self.source_path)\n', (4602, 4620), False, 'from scipy import misc\n'), ((4693, 4717), 'scipy.misc.imresize', 'misc.imresize', (['img', 'size'], {}), '(img, size)\n', (4706, 4717), False, 'from scipy import misc\n'), ((4726, 4773), 'scipy.misc.imsave', 'misc.imsave', (['self.destination_path', 'resized_img'], {}), '(self.destination_path, resized_img)\n', (4737, 4773), False, 'from scipy import misc\n'), ((5140, 5169), 'scipy.misc.imread', 'misc.imread', (['self.source_path'], {}), '(self.source_path)\n', (5151, 5169), False, 'from scipy import misc\n'), ((3050, 3096), 'scipy.misc.toimage', 'misc.toimage', (['greyed_img'], {'cmin': '(0.0)', 'cmax': '(255.0)'}), '(greyed_img, cmin=0.0, cmax=255.0)\n', (3062, 3096), False, 'from scipy import misc\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 16:55:14 2017
@author: ajaver
"""
import os
import tables
import numpy as np
import warnings
from .getFoodContourNN import get_food_contour_nn
from .getFoodContourMorph import get_food_contour_morph
from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name
def calculate_food_cnt(mask_file, use_nn_food_cnt, model_path, _is_debug=False, solidity_th=0.98):
    """Compute the food contour of a masked video.

    Uses the neural-network contour finder when `use_nn_food_cnt` is
    True (falling back to None if the model file is missing, or to an
    empty array if the contour's solidity is below `solidity_th`);
    otherwise uses the morphology-based algorithm.
    """
    if not use_nn_food_cnt:
        # morphology-based fallback
        return get_food_contour_morph(mask_file, _is_debug=_is_debug)

    if not os.path.exists(model_path):
        warnings.warn('The model to obtain the food contour was not found. Nothing to do here...\n If you dont have a valid model. You could try to set `food_method=MORPH` to use a different algorithm.')
        return

    food_cnt, food_prob, cnt_solidity = get_food_contour_nn(mask_file, model_path, _is_debug=_is_debug)
    # a low-solidity contour is treated as "no food found"
    if cnt_solidity < solidity_th:
        return np.zeros(0)
    return food_cnt
def getFoodContour(mask_file,
                   skeletons_file,
                   use_nn_food_cnt,
                   model_path,
                   solidity_th=0.98,
                   _is_debug = False
                   ):
    """Calculate the food contour and store its coordinates under
    '/food_cnt_coord' in both the skeletons file and the mask file.

    Any pre-existing '/food_cnt_coord' node is removed first; the new
    node is only written when a valid (N, 2) contour was found.
    """
    base_name = get_base_name(mask_file)
    progress_timer = TimeCounter('')
    print_flush("{} Calculating food contour {}".format(base_name, progress_timer.get_time_str()))

    food_cnt = calculate_food_cnt(mask_file,
                                  use_nn_food_cnt = use_nn_food_cnt,
                                  model_path = model_path,
                                  solidity_th= solidity_th,
                                  _is_debug = _is_debug)

    # a contour is valid only if it is a non-trivial (N, 2) array
    cnt_is_valid = (food_cnt is not None
                    and food_cnt.size >= 2
                    and food_cnt.ndim == 2
                    and food_cnt.shape[1] == 2)

    # store contour coordinates into the skeletons file and the mask file
    for fname in (skeletons_file, mask_file):
        with tables.File(fname, 'r+') as fid:
            if '/food_cnt_coord' in fid:
                fid.remove_node('/food_cnt_coord')
            if cnt_is_valid:
                tab = fid.create_array('/',
                                       'food_cnt_coord',
                                       obj=food_cnt)
                tab._v_attrs['use_nn_food_cnt'] = int(use_nn_food_cnt)
| [
"os.path.exists",
"tierpsy.helper.misc.get_base_name",
"numpy.zeros",
"tierpsy.helper.misc.TimeCounter",
"warnings.warn",
"tables.File"
] | [((1268, 1292), 'tierpsy.helper.misc.get_base_name', 'get_base_name', (['mask_file'], {}), '(mask_file)\n', (1281, 1292), False, 'from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name\n'), ((1319, 1334), 'tierpsy.helper.misc.TimeCounter', 'TimeCounter', (['""""""'], {}), "('')\n", (1330, 1334), False, 'from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name\n'), ((488, 514), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (502, 514), False, 'import os\n'), ((526, 734), 'warnings.warn', 'warnings.warn', (['"""The model to obtain the food contour was not found. Nothing to do here...\n If you dont have a valid model. You could try to set `food_method=MORPH` to use a different algorithm."""'], {}), '(\n """The model to obtain the food contour was not found. Nothing to do here...\n If you dont have a valid model. You could try to set `food_method=MORPH` to use a different algorithm."""\n )\n', (539, 734), False, 'import warnings\n'), ((909, 920), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (917, 920), True, 'import numpy as np\n'), ((1888, 1912), 'tables.File', 'tables.File', (['fname', '"""r+"""'], {}), "(fname, 'r+')\n", (1899, 1912), False, 'import tables\n')] |
""" Analysis code for IGMSurvey objects
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import glob
import json
import pdb
from astropy.table import Table
from linetools import utils as ltu
def calc_slgrid_atan(surveys, Agrid, Bgrid, Cgrid, C2grid):
    """ Calculate the sightline grid for an arctan l(z) fit.
    Broken off as its own function for bootstrap speed-up.

    Analytically integrates l(z) = A + B*atan(z-C) over each
    sightline's [Z_START, Z_END] interval, using
    Int atan(x-a) dx = (a-x)*atan(a-x) - 0.5*ln(a^2 - 2ax + x^2 + 1).

    Parameters
    ----------
    surveys : list of DLASurvey objects
        Each must expose a `sightlines` table with 'Z_START'/'Z_END'
    Agrid, Bgrid, Cgrid : ndarray
        Parameter grids (all the same shape)
    C2grid : ndarray
        Cgrid**2, precomputed by the caller

    Returns
    -------
    slgrid : ndarray
        Sightline term in the likelihood function
    """
    slgrid = np.zeros_like(Agrid)
    # Integrate over the sightlines
    for isurvey in surveys:
        slines = isurvey.sightlines
        # keep only sightlines with a positive search path
        gds = slines['Z_START'] < slines['Z_END']
        zstart = slines['Z_START'][gds]
        zend = slines['Z_END'][gds]
        # constant term integrates to A * total path length
        slgrid += Agrid * np.sum(zend - zstart)
        # antiderivative of the arctan term, evaluated at the upper limits
        # (BUG FIX: the original left a pdb.set_trace() debugger trap here
        # that halted execution whenever the antiderivative dipped below
        # -0.1 -- a legitimate value; the leftover debug hook was removed.)
        for iz in zend:
            CCgrid = (Cgrid - iz) * np.arctan(Cgrid - iz) - 0.5 * np.log(
                C2grid - 2 * Cgrid * iz + iz**2 + 1)
            slgrid += Bgrid * CCgrid
        # ... and subtracted at the lower limits
        for iz in zstart:
            CCgrid = (Cgrid - iz) * np.arctan(Cgrid - iz) - 0.5 * np.log(
                C2grid - 2 * Cgrid * iz + iz**2 + 1)
            slgrid -= Bgrid * CCgrid
    # Return
    return slgrid
def fit_atan_dla_lz(surveys, nstep=20, bootstrap=True,
                    nboot=10, nproc=2,
                    fit_out=None, boot_out=None,
                    verbose=True):
    """ Fit a A + B * atan(z-C) l(z) model to AbsSys data
    Writes bootstrap analysis to hard-drive
    Code used in Prochaska & Neeleman 2017 for DLAs

    Parameters
    ----------
    surveys : list of IGMSurvey objects
      If None, a default list is loaded
    nstep : int, optional
      Steps in each dimension of the grid
    bootstrap : bool, optional
      Perform bootstrap analysis
    nboot : int, optional
      Number of bootstrap iterations
    nproc : int, optional
      Number of processors to use
    fit_out : str, optional
      Output filename for best fit (JSON)
      NOTE(review): currently unused -- the best fit is returned
      but never written to disk despite the docstring.
    boot_out : str, optional
      Output filename for bootstrap analysis
    verbose : bool, optional

    Returns
    -------
    dfits : dict
      Best fit parameters
    boot_tbl : Table
      Returned if bootstrap=True
      else return None
    """
    # Name and date
    # Init -- default output paths
    if boot_out is None:
        boot_out = './lz_boot.fits.gz'
    if fit_out is None:
        fit_out = './lz_fit.json'
    # Synthesize: pool the absorber redshifts from all surveys
    all_z = np.concatenate([isurvey.zabs for isurvey in surveys])
    ndla = len(all_z)
    # Model :  l(z) = A + B * atan(C-z)
    Aparm = np.linspace(0.05, 0.5, num=nstep).astype(np.float32)
    Bparm = np.linspace(0.05, 0.5, num=nstep).astype(np.float32)
    Cparm = np.linspace(1., 6., num=nstep).astype(np.float32)
    # Generate grids (float32 to conserve memory)
    Agrid, Bgrid, Cgrid = np.meshgrid(Aparm, Bparm, Cparm, copy=False)
    C2grid = Cgrid**2
    # Sightline grid (survey path term of the likelihood)
    if verbose:
        print("Sightline calculation...")
    slgrid = calc_slgrid_atan(surveys, Agrid, Bgrid, Cgrid, C2grid)
    if bootstrap:
        if verbose:
            print("Bootstrapping!")
        sv_fits = []
        # number of absorbers drawn in each bootstrap realization
        rN = np.random.poisson(ndla, size=nboot)
        # Boot me -- resample the absorber redshifts with replacement
        z_list = []
        for kk,irN in enumerate(rN):
            # Draw nPoisson
            rval = (np.random.uniform(size=irN)*ndla).astype(int)
            # Draw from all_z
            draw_z = all_z[rval]
            z_list.append(draw_z)
        # Run
        if nproc == 1:
            for draw_z in z_list:
                if verbose:
                    # NOTE(review): kk is stale here (left over from the
                    # draw loop above), so this always prints the same
                    # iteration number.
                    print("Working on iteration: {:d} of {:d}".format(kk, nboot))
                dfits, _, _ = Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, draw_z, write=False)
                # Save
                sv_fits.append(dfits.copy())
        else:
            import multiprocessing
            # NOTE(review): this is a *process* pool (despite the comment)
            # and is never close()d/join()ed.
            pool = multiprocessing.Pool(nproc) # initialize thread pool N threads
            inp_list = []
            for ii in range(nboot):
                inp_list.append(
                    dict(A=Agrid, B=Bgrid, C=Cgrid, sl=slgrid, z=z_list[ii]))
            if verbose:
                print("Mapping...")
            sv_fits = pool.map(map_Ln_atan, inp_list)
        # Write bootstrap realizations to disk as a FITS table
        boot_tbl = Table()
        for key in ['A', 'B', 'C']:
            boot_tbl[key] = [ifits['lz']['atan'][key] for ifits in sv_fits]
        boot_tbl.write(boot_out, overwrite=True)
        if verbose:
            print("Wrote {:s}".format(boot_out))
    else:
        boot_tbl = None
    # Best fit on the full (un-resampled) sample
    dfits, _, _ = Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, all_z, write=True)
    # Finish
    return dfits, boot_tbl
def Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, all_z, write=True, verbose=True):
    """ Likelihood function for the l(z) = A + B*atan(z-C) model.

    Parameters
    ----------
    Agrid, Bgrid, Cgrid : ndarray
        Parameter grids, all of the same shape
    slgrid : ndarray
        Sightline term of the likelihood (see calc_slgrid_atan)
    all_z : ndarray
        Absorber redshifts
    write : bool, optional
        Unused; retained for interface compatibility
    verbose : bool, optional

    Returns
    -------
    dfits : dict
        Contains best fit model
    dlagrid : ndarray
        for debugging
    lngrid : ndarray
    """
    # l(z=0) prior estimated from 21cm surveys
    lz_z0 = dict(value=np.mean([0.026, 0.045]), sig=0.01)
    # Init
    dlagrid = np.zeros_like(Agrid)
    # Generate likelihood term for the absorbers.  Negative model values
    # produce NaN from np.log; silence only 'invalid' warnings *locally*
    # (BUG FIX: the original called np.seterr, which permanently clobbered
    # the caller's global numpy error settings).
    with np.errstate(invalid='ignore'):
        for z in all_z:
            dlagrid += np.log(Agrid + Bgrid * np.arctan(z - Cgrid))
    # grid points where the model went negative get a tiny likelihood
    bad = np.isnan(dlagrid)
    dlagrid[bad] = -1e9
    # Likelihood
    lngrid = dlagrid - slgrid
    # Gaussian prior on the z=0 incidence
    model_z0 = Agrid + Bgrid * np.arctan(0. - Cgrid)
    lnP = -1 * (model_z0 - lz_z0['value'])**2 / 2 / (lz_z0['sig']**2)
    lngrid += lnP
    # Best (first maximum in C order)
    indices = np.where(lngrid == np.max(lngrid))
    best = Agrid[indices][0], Bgrid[indices][0], Cgrid[indices][0]
    if verbose:
        print('Best fit: A={}, B={}, C={}'.format(best[0], best[1], best[2]))
    # Load
    dfits = {}
    # Write
    dfits['lz'] = {}
    dfits['lz']['atan'] = dict(A=Agrid[indices][0], B=Bgrid[indices][0], C=Cgrid[indices][0],
                               form='A + B*atan(z-C)')
    # Return
    return dfits, dlagrid, lngrid
def map_Ln_atan(map_dict):
    """ Multiprocessing-friendly wrapper around Ln_lz_atan for the
    bootstrap analysis.

    Parameters
    ----------
    map_dict : dict
        Keys 'A', 'B', 'C' (parameter grids), 'sl' (sightline grid)
        and 'z' (absorber redshifts), as packed by fit_atan_dla_lz.

    Returns
    -------
    dict
        Best-fit parameters from Ln_lz_atan
    """
    fit_result, _, _ = Ln_lz_atan(
        map_dict['A'], map_dict['B'], map_dict['C'],
        map_dict['sl'], map_dict['z'],
        write=False, verbose=False)
    return fit_result
def fit_fN_dblpow(NHI, a3_mnx, a4_mnx, Nd_mnx, nstep=100,
                  Nmin=10**(20.3), Nmax=1e99, verbose=True):
    """ Fit a double power-law to an input NHI distribution
    Only does the shape
    Done in float32 to preserve memory
    Code from Prochaska & Neeleman (2017) [and also PHW05]

    Parameters
    ----------
    NHI : ndarray
      log10 NHI values
    a3_mnx : tuple
      min/max of lower NHI power-law
    a4_mnx : tuple
      min/max of upper NHI power-law
    Nd_mnx : tuple
      min/max of break column in log10
    nstep : int, optional
    Nmin : float, optional
      Minimum NHI in the analysis [usually DLA criterion]
    Nmax : float, optional
      Maximum NHI in the analysis
    verbose : bool, optional

    Returns
    -------
    dfits : dict
      Contains the fit
    best : tuple
      Best fit values in grid for Nd, a3, a4
    Ndgrid
    a3grid
    a4grid
    lik
    """
    # Generate 1D parameter arrays (float32 to conserve memory)
    a3stp = np.linspace(a3_mnx[0], a3_mnx[1], nstep).astype(np.float32)
    a4stp = np.linspace(a4_mnx[0], a4_mnx[1], nstep).astype(np.float32)
    Ndstp = np.linspace(Nd_mnx[0], Nd_mnx[1], nstep).astype(np.float32)
    # Generate grids (float32)
    a3grid, a4grid, Ndgrid = np.meshgrid(a3stp, a4stp, Ndstp, copy=False)
    # Linear break column
    Ns10 = 10.**Ndgrid
    # Normalization (denominator) of the double power-law, integrated
    # analytically from Nmin to Nmax
    denom = Ns10 * ((1. - (Nmin / Ns10)**(a3grid + 1.)) / (1. + a3grid) + (
        (Nmax / Ns10)**(a4grid + 1) - 1.) / (a4grid + 1.))
    num = np.zeros_like(Ns10)
    # Numerator: accumulate log f(N) over the absorbers, using the
    # lower-slope a3 below the break and the upper-slope a4 above it
    for iNHI10 in 10.**NHI:
        # Upper end
        high = iNHI10 > Ns10
        if np.sum(high) > 0:
            num[high] += a4grid[high] * np.log(iNHI10 / Ns10[high])
        # Low end
        if np.sum(~high) > 0:
            num[~high] += a3grid[~high] * np.log(iNHI10 / Ns10[~high])
    # Log-likelihood (beware of signs!)
    lik = num - NHI.size * np.log(denom)
    mxL = np.max(lik)
    indices = np.where(lik == mxL)
    best = Ndgrid[indices][0], a3grid[indices][0], a4grid[indices][0]
    if verbose:
        print('Best fit: Nd={}, a3={}, a4={}'.format(best[0], best[1], best[2]))
    # Load
    dfits = {}
    # Write
    dfits['fN'] = {}
    dfits['fN']['dpow'] = dict(Nd=Ndgrid[indices][0], a3=a3grid[indices][0], a4=a4grid[indices][0],
                               form='(N/Nd)**aa with aa=a3 if N<Nd else aa=a4')
    # NOTE: a dead, never-executed KS-test stub (guarded by a hard-coded
    # ks_test=False) that referenced undefined names was removed here.
    return dfits, best, Ndgrid, a3grid, a4grid, lik
| [
"numpy.mean",
"numpy.random.poisson",
"astropy.table.Table",
"numpy.where",
"numpy.log",
"numpy.min",
"numpy.max",
"numpy.sum",
"numpy.linspace",
"numpy.random.uniform",
"numpy.seterr",
"numpy.isnan",
"numpy.concatenate",
"multiprocessing.Pool",
"pdb.set_trace",
"numpy.meshgrid",
"nu... | [((677, 697), 'numpy.zeros_like', 'np.zeros_like', (['Agrid'], {}), '(Agrid)\n', (690, 697), True, 'import numpy as np\n'), ((2772, 2825), 'numpy.concatenate', 'np.concatenate', (['[isurvey.zabs for isurvey in surveys]'], {}), '([isurvey.zabs for isurvey in surveys])\n', (2786, 2825), True, 'import numpy as np\n'), ((3139, 3183), 'numpy.meshgrid', 'np.meshgrid', (['Aparm', 'Bparm', 'Cparm'], {'copy': '(False)'}), '(Aparm, Bparm, Cparm, copy=False)\n', (3150, 3183), True, 'import numpy as np\n'), ((5439, 5459), 'numpy.zeros_like', 'np.zeros_like', (['Agrid'], {}), '(Agrid)\n', (5452, 5459), True, 'import numpy as np\n'), ((5499, 5526), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (5508, 5526), True, 'import numpy as np\n'), ((5619, 5636), 'numpy.isnan', 'np.isnan', (['dlagrid'], {}), '(dlagrid)\n', (5627, 5636), True, 'import numpy as np\n'), ((7888, 7932), 'numpy.meshgrid', 'np.meshgrid', (['a3stp', 'a4stp', 'Ndstp'], {'copy': '(False)'}), '(a3stp, a4stp, Ndstp, copy=False)\n', (7899, 7932), True, 'import numpy as np\n'), ((8145, 8164), 'numpy.zeros_like', 'np.zeros_like', (['Ns10'], {}), '(Ns10)\n', (8158, 8164), True, 'import numpy as np\n'), ((8581, 8592), 'numpy.max', 'np.max', (['lik'], {}), '(lik)\n', (8587, 8592), True, 'import numpy as np\n'), ((8607, 8627), 'numpy.where', 'np.where', (['(lik == mxL)'], {}), '(lik == mxL)\n', (8615, 8627), True, 'import numpy as np\n'), ((3463, 3498), 'numpy.random.poisson', 'np.random.poisson', (['ndla'], {'size': 'nboot'}), '(ndla, size=nboot)\n', (3480, 3498), True, 'import numpy as np\n'), ((4558, 4565), 'astropy.table.Table', 'Table', ([], {}), '()\n', (4563, 4565), False, 'from astropy.table import Table\n'), ((1020, 1041), 'numpy.sum', 'np.sum', (['(zend - zstart)'], {}), '(zend - zstart)\n', (1026, 1041), True, 'import numpy as np\n'), ((2901, 2934), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.5)'], {'num': 'nstep'}), '(0.05, 0.5, num=nstep)\n', (2912, 2934), 
True, 'import numpy as np\n'), ((2966, 2999), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.5)'], {'num': 'nstep'}), '(0.05, 0.5, num=nstep)\n', (2977, 2999), True, 'import numpy as np\n'), ((3031, 3063), 'numpy.linspace', 'np.linspace', (['(1.0)', '(6.0)'], {'num': 'nstep'}), '(1.0, 6.0, num=nstep)\n', (3042, 3063), True, 'import numpy as np\n'), ((4173, 4200), 'multiprocessing.Pool', 'multiprocessing.Pool', (['nproc'], {}), '(nproc)\n', (4193, 4200), False, 'import multiprocessing\n'), ((5379, 5402), 'numpy.mean', 'np.mean', (['[0.026, 0.045]'], {}), '([0.026, 0.045])\n', (5386, 5402), True, 'import numpy as np\n'), ((5751, 5773), 'numpy.arctan', 'np.arctan', (['(0.0 - Cgrid)'], {}), '(0.0 - Cgrid)\n', (5760, 5773), True, 'import numpy as np\n'), ((5901, 5915), 'numpy.max', 'np.max', (['lngrid'], {}), '(lngrid)\n', (5907, 5915), True, 'import numpy as np\n'), ((7623, 7663), 'numpy.linspace', 'np.linspace', (['a3_mnx[0]', 'a3_mnx[1]', 'nstep'], {}), '(a3_mnx[0], a3_mnx[1], nstep)\n', (7634, 7663), True, 'import numpy as np\n'), ((7695, 7735), 'numpy.linspace', 'np.linspace', (['a4_mnx[0]', 'a4_mnx[1]', 'nstep'], {}), '(a4_mnx[0], a4_mnx[1], nstep)\n', (7706, 7735), True, 'import numpy as np\n'), ((7767, 7807), 'numpy.linspace', 'np.linspace', (['Nd_mnx[0]', 'Nd_mnx[1]', 'nstep'], {}), '(Nd_mnx[0], Nd_mnx[1], nstep)\n', (7778, 7807), True, 'import numpy as np\n'), ((8288, 8300), 'numpy.sum', 'np.sum', (['high'], {}), '(high)\n', (8294, 8300), True, 'import numpy as np\n'), ((8403, 8416), 'numpy.sum', 'np.sum', (['(~high)'], {}), '(~high)\n', (8409, 8416), True, 'import numpy as np\n'), ((8556, 8569), 'numpy.log', 'np.log', (['denom'], {}), '(denom)\n', (8562, 8569), True, 'import numpy as np\n'), ((1292, 1306), 'numpy.min', 'np.min', (['CCgrid'], {}), '(CCgrid)\n', (1298, 1306), True, 'import numpy as np\n'), ((1331, 1346), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1344, 1346), False, 'import pdb\n'), ((8346, 8373), 'numpy.log', 'np.log', (['(iNHI10 / 
Ns10[high])'], {}), '(iNHI10 / Ns10[high])\n', (8352, 8373), True, 'import numpy as np\n'), ((8464, 8492), 'numpy.log', 'np.log', (['(iNHI10 / Ns10[~high])'], {}), '(iNHI10 / Ns10[~high])\n', (8470, 8492), True, 'import numpy as np\n'), ((1155, 1176), 'numpy.arctan', 'np.arctan', (['(Cgrid - iz)'], {}), '(Cgrid - iz)\n', (1164, 1176), True, 'import numpy as np\n'), ((1183, 1228), 'numpy.log', 'np.log', (['(C2grid - 2 * Cgrid * iz + iz ** 2 + 1)'], {}), '(C2grid - 2 * Cgrid * iz + iz ** 2 + 1)\n', (1189, 1228), True, 'import numpy as np\n'), ((1407, 1428), 'numpy.arctan', 'np.arctan', (['(Cgrid - iz)'], {}), '(Cgrid - iz)\n', (1416, 1428), True, 'import numpy as np\n'), ((1435, 1480), 'numpy.log', 'np.log', (['(C2grid - 2 * Cgrid * iz + iz ** 2 + 1)'], {}), '(C2grid - 2 * Cgrid * iz + iz ** 2 + 1)\n', (1441, 1480), True, 'import numpy as np\n'), ((5589, 5609), 'numpy.arctan', 'np.arctan', (['(z - Cgrid)'], {}), '(z - Cgrid)\n', (5598, 5609), True, 'import numpy as np\n'), ((9426, 9458), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'NHI.size'}), '(size=NHI.size)\n', (9443, 9458), True, 'import numpy as np\n'), ((3622, 3649), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'irN'}), '(size=irN)\n', (3639, 3649), True, 'import numpy as np\n')] |
from utils import *
import numpy as np
import h5py
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
def resize_images(image_list, im_size):
    """Load each image file and resize it to (im_size, im_size).

    Args:
        image_list: iterable of image file paths.
        im_size (int): target side length in pixels.

    Returns:
        list of numpy arrays, one per input image.
    """
    return_list = []
    for im in image_list:
        img = Image.open(im)
        # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; it had been
        # an alias of the LANCZOS filter since Pillow 2.7, so this is the
        # identical resampling under its canonical name.
        img = img.resize((im_size, im_size), Image.LANCZOS)
        return_list.append(np.array(img))
    return return_list
def create_image_label_list(img_path, group, im_size, skip, all_labels):
    """Load and resize the frames of one video group.

    Args:
        img_path: root directory holding one sub-directory per group.
        group (str): group id; also the row index into all_labels.
        im_size (int): target side length passed to resize_images.
        skip (int): keep every `skip`-th frame of the first 24.
        all_labels (pandas.DataFrame): must contain a 'label' column
            indexed by integer group id.

    Returns:
        (images, label): list of numpy arrays and the group's label,
        or ([], []) when the group has fewer than 24 frames.
    """
    label = all_labels['label'].loc[int(group)]
    group_dir = img_path + os.sep + group
    image_list = os.listdir(group_dir)
    if len(image_list) < 24:
        return [], []
    # BUG FIX: os.listdir() returns entries in arbitrary order, so the
    # frames must be sorted *before* slicing; the original sliced first
    # ('sorted(image_list[:24:skip])') and therefore sampled an arbitrary
    # subset of frames instead of the first 24.
    image_list = sorted(image_list)[:24:skip]
    images = resize_images([group_dir + os.sep + i for i in image_list], im_size)
    return images, label
def make_hdf5(img_path, im_size, skip, all_labels, desired_labels, fname='data_hdf5.h5'):
    """Write the resized frames and labels of the requested classes
    to an HDF5 file.

    One HDF5 group is created per video group, holding 'images',
    'label' and 'label_id' datasets. Groups with fewer than 24 frames
    are skipped with a message.

    Args:
        img_path: root directory of the frame sub-directories.
        im_size (int): target frame side length.
        skip (int): frame subsampling step.
        all_labels (pandas.DataFrame): 'label' column indexed by group id.
        desired_labels (list): labels to keep; position defines label_id.
        fname (str): output HDF5 path.
    """
    indices = list(all_labels[all_labels['label'].isin(desired_labels)].index)
    # BUG FIX: use a context manager so the file handle is closed (and
    # flushed) even if an exception is raised mid-way; the original
    # leaked the open handle on error.
    with h5py.File(fname, 'w') as hf:
        for group in tqdm(indices):
            group = str(group)
            images, label = create_image_label_list(img_path, group, im_size, skip, all_labels)
            if not images:
                print('{} excluded, because of the short length'.format(group))
                continue
            label_id = desired_labels.index(label)
            hfgroup = hf.create_group(group)
            hfgroup.create_dataset('images', data=images)
            hfgroup.create_dataset('label', data=label)
            hfgroup.create_dataset('label_id', data=label_id)
if __name__ == "__main__":
    # Pull settings from config.ini (see utils.get_configs)
    param = get_configs()
    data_path = param['data_path']
    img_path = param['img_path']

    # Per-split label tables plus the master label list
    train_labels = pd.read_csv(param['csv_train'], names=['label'], sep=';')
    val_labels = pd.read_csv(param['csv_val'], names=['label'], sep=';')
    all_labels = pd.read_csv(param['csv_labels'], sep=';')

    labels = param['labels']
    fn_postfix = str(len(labels))
    print('labels are {}, length of {}'.format(labels, fn_postfix))

    # One HDF5 file per split, suffixed with the number of classes
    train_fn = os.sep.join((data_path, 'train_hdf5' + fn_postfix + '.h5'))
    val_fn = os.sep.join((data_path, 'val_hdf5' + fn_postfix + '.h5'))

    # Arguments shared by both make_hdf5 calls
    maker_params = dict(img_path=img_path,
                        im_size=param['im_size'],
                        skip=param['skip'],
                        desired_labels=labels)
    make_hdf5(all_labels=train_labels, fname=train_fn, **maker_params)
    make_hdf5(all_labels=val_labels, fname=val_fn, **maker_params)
make_hdf5(all_labels=val_labels, fname=val_fn, **maker_params) | [
"os.listdir",
"PIL.Image.open",
"pandas.read_csv",
"tqdm.tqdm",
"h5py.File",
"numpy.array"
] | [((535, 572), 'os.listdir', 'os.listdir', (['(img_path + os.sep + group)'], {}), '(img_path + os.sep + group)\n', (545, 572), False, 'import os\n'), ((973, 994), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (982, 994), False, 'import h5py\n'), ((1012, 1025), 'tqdm.tqdm', 'tqdm', (['indices'], {}), '(indices)\n', (1016, 1025), False, 'from tqdm import tqdm\n'), ((1731, 1788), 'pandas.read_csv', 'pd.read_csv', (["param['csv_train']"], {'names': "['label']", 'sep': '""";"""'}), "(param['csv_train'], names=['label'], sep=';')\n", (1742, 1788), True, 'import pandas as pd\n'), ((1808, 1863), 'pandas.read_csv', 'pd.read_csv', (["param['csv_val']"], {'names': "['label']", 'sep': '""";"""'}), "(param['csv_val'], names=['label'], sep=';')\n", (1819, 1863), True, 'import pandas as pd\n'), ((1883, 1924), 'pandas.read_csv', 'pd.read_csv', (["param['csv_labels']"], {'sep': '""";"""'}), "(param['csv_labels'], sep=';')\n", (1894, 1924), True, 'import pandas as pd\n'), ((229, 243), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (239, 243), False, 'from PIL import Image\n'), ((323, 336), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (331, 336), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cloud HR experiments
Created on Fri Dec 9 11:22:51 2016
@author: maxwell
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import copy
import atmosphere as a
from parm import ChemParm, LWParm, SWParm
from solver import SolverFactory,HR
from misc.humidity import manaberh
import misc.solargeometry as solar
# %% set up
# Run configuration: timing, pressure grid, solar geometry, humidity, and
# solver knobs shared by every case below.
st = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8; perf_counter() is the replacement
timestr = "Elapsed Time: {:4f}s"
plev =np.logspace(-2,np.log10(1013), 601)  # 601 log-spaced pressure levels, 0.01 to 1013 hPa
lat = 10        # latitude passed to the solar-geometry routines (presumably degrees)
decl = 21.11    # solar declination (presumably degrees)
rhlabel= ''     # tag inserted into output figure filenames
holdrh=True     # keep the relative-humidity profile fixed during the RCE solve
rh = np.ones(len(plev))*0.5  # uniform 50% relative humidity on plev
holdtsfc=True   # hold surface temperature fixed at ts
cpdair=1004.0   # specific heat passed to the solvers (J/kg/K -- confirm with solver docs)
timestep=0.25   # RCE solver time step
radmodel='fu'   # radiation scheme selector for SolverFactory
gridstagger=False
maxsteps=300    # iteration cap for the RCE solver
tol = .011      # convergence tolerance for the RCE solver
# Per-case cloud-fraction heating-rate files; None means no prescribed cloud
# forcing. Cases 1-3 are tropical-mean (20N-20S), 4-6 are warm-pool (wp).
cldprofiles=[ None,
         'atmosphere/profiles/hr/yang/cf_allclouds/annual_mean_20Nto20S_cf.txt',
         'atmosphere/profiles/hr/yang/cf_withoutcirrus/annual_mean_20Nto20S_cf_withoutcirrus.txt',
         None,
         'atmosphere/profiles/hr/yang/cf_allclouds/annual_mean_wp_cf.txt',
         'atmosphere/profiles/hr/yang/cf_withoutcirrus/annual_mean_wp_cf_withoutcirrus.txt'
        ]
# Per-case ozone profile files (first three 20N-20S mean, last three Fiji).
o3profiles=['atmosphere/profiles/ozone/yang/annual_ozone_20Nto20S.dat',
        'atmosphere/profiles/ozone/yang/annual_ozone_20Nto20S.dat',
        'atmosphere/profiles/ozone/yang/annual_ozone_20Nto20S.dat',
        'atmosphere/profiles/ozone/yang/annual_ozone_fiji.dat',
        'atmosphere/profiles/ozone/yang/annual_ozone_fiji.dat',
        'atmosphere/profiles/ozone/yang/annual_ozone_fiji.dat'
        ]
# Case labels, and case-name -> file lookup tables built from the lists above.
anames=['Trop', 'Trop CF', 'Trop CF (no Ci)', 'WP', 'WP CF', 'WP CF (no Ci)']
cldconds = dict(zip(anames, cldprofiles))
o3conds = dict(zip(anames, o3profiles))
# Surface temperature, panel count, and per-case result containers (keyed by
# case name; filled in the solve loop below).
ts = 300.0
nsim = len(anames)
atms=dict.fromkeys(anames)            # equilibrated atmospheres (full RCE solve)
atms_noco2 = dict.fromkeys(anames)    # diagnostic copies solved with CO2 ~ 0
atms_noo3 = dict.fromkeys(anames)     # diagnostic copies with O3 zeroed
atms_wvonly = dict.fromkeys(anames)   # diagnostic copies with O3 zeroed AND CO2 ~ 0
hr=dict.fromkeys(anames)              # heating rates from the full solve
auxhr=dict.fromkeys(anames)           # prescribed cloud heating-rate profiles
hr_noco2=dict.fromkeys(anames)
hr_noo3 = dict.fromkeys(anames)
hr_wvonly= dict.fromkeys(anames)
flx = dict.fromkeys(anames)           # radiative fluxes from each solve
flx_noco2 = dict.fromkeys(anames)
flx_noo3 = dict.fromkeys(anames)
flx_wvonly = dict.fromkeys(anames)
swparm = dict.fromkeys(anames)        # per-case shortwave parameters
# Chemistry/longwave parameter objects; co2ppmv=1e-4 approximates "no CO2".
cparm = ChemParm()
cparm_noco2=ChemParm(co2ppmv=1.0e-4)
lwparm = LWParm()
# Iterative RCE solver for the main run, plus a one-shot radiation-only
# solver used for the no-CO2 / no-O3 / WV-only diagnostic heating rates.
slv = SolverFactory.create(kind='rce', timestep=timestep, holdtsfc=holdtsfc,
                     radmodel=radmodel,cpdair=cpdair,tol=tol,
                     maxsteps=maxsteps)
radslv = SolverFactory.create(kind='rad',radmodel=radmodel,
                     cpdair=cpdair)
st2 = st  # rolling timing baseline for the per-case elapsed-time printout
# Main loop: for every case, build the initial atmosphere, attach its ozone
# and cloud forcing, run the full RCE solve, then three radiation-only
# diagnostic solves (no CO2 / no O3 / water-vapour only) on copies.
for name,cond in cldconds.items():
    print('SOLVING {}'.format(name))
    prof = 'jtrp'  # McClatchey initial profile selector -- presumably tropical; confirm in atmosphere module
    atms[name] = a.Atmosphere.mcclatchy(prof, p=plev, rhlev=rh, holdrh=holdrh,
                                       gridstagger=gridstagger,tsfc=ts)
    atms[name].ozone_fromfile(o3conds[name])
    # Diagnostic copies share the same base state; O3 is zeroed where needed.
    atms_noco2[name] = copy.deepcopy(atms[name])
    atms_noo3[name] = copy.deepcopy(atms[name])
    atms_noo3[name].o3 = np.zeros(len(atms_noo3[name]))
    atms_wvonly[name] = copy.deepcopy(atms[name])
    atms_wvonly[name].o3 = np.zeros(len(atms_wvonly[name]))
    # Shortwave geometry: mean cosine of zenith angle and daylight fraction.
    mu=solar.mubar(lat,decl)
    fday=solar.fday(lat,decl)
    swparm[name] = SWParm(coszen=mu,fday=fday)
    # Prescribed cloud heating: read from file when available, else zeros.
    if cond is not None:
        auxhr[name]=HR.fromfile(atms[name], cond)
    else:
        zeros = np.zeros(len(atms[name]))
        auxhr[name]=HR(zeros, zeros)
    slv.auxhr=auxhr[name]
    # Full RCE solve, then the three radiation-only diagnostics.
    atms[name],flx[name],hr[name] = slv.solve(
            atms[name],cparm,lwparm,swparm[name])
    atms_noco2[name], flx_noco2[name], hr_noco2[name] = radslv.solve(
            atms_noco2[name],cparm_noco2, lwparm,swparm[name])
    atms_noo3[name], flx_noo3[name], hr_noo3[name] = radslv.solve(
            atms_noo3[name],cparm,lwparm,swparm[name])
    atms_wvonly[name], flx_wvonly[name], hr_wvonly[name] = radslv.solve(
            atms_wvonly[name], cparm_noco2, lwparm,swparm[name])
    ed = time.clock()  # NOTE(review): removed in Python 3.8 -- see setup section
    print(timestr.format(ed-st2))
    st2 = ed
print('Total time: {}'.format(ed-st))
# %% plot temps
# Figure 1: equilibrium temperature profile per case, one semilog-y panel
# each, with convective-top ('ks') and cold-point ('ko') markers.
plt.figure(1)
plt.clf()
yls = (8,1013)      # pressure-axis limits (hPa)
#ytcks = [10,20,40,60,80,100,200,400,600,800,1000]
xls = (150,300)     # temperature-axis limits (K)
xtcks = np.linspace(150,300,3)
plt.suptitle('Temperature (K)')
# fstag tags the output filename with the grid-stagger setting.
if(gridstagger):
    fstag='0'
else:
    fstag='1'
for i, name in enumerate(anames):
    ax = plt.subplot(1,nsim,i+1)
    plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
    try:
        plt.semilogy(atms[name].t, atms[name].p)
        plt.plot(atms[name].tconv, atms[name].pconv, 'ks')
        plt.plot(atms[name].tcold, atms[name].pcold, 'ko')
    except ValueError:
        pass  # skip panels whose profiles cannot be plotted
    finally:
        plt.ylim(yls)
#        plt.yticks(ytcks)
        plt.xlim(xls)
    plt.xticks(xtcks)
    plt.xlabel(name.upper())
    ax.invert_yaxis()  # pressure decreases upward
    if (i==0):
        plt.ylabel('Pressure (hPa)')
#        ax.yaxis.set_ticklabels(['{:.0f}'.format(tick) for tick in ytcks])
    else:
        ax.yaxis.set_ticklabels([])  # label the y axis on the leftmost panel only
else:
    plt.show()  # for/else: always runs after the loop (no break in the body)
figname = 'img/rce_{}g{}_cld_{}_eq.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
# NOTE(review): modern matplotlib expects savefig(fname, ...); the
# filename= keyword relied on old print_figure forwarding -- verify.
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% plot hr (net)
# Figure 2: IR (crimson), SW (mediumblue), and net (orange) heating-rate
# profiles (K/d) per case on semilog-y panels, with horizontal lines at the
# cold-point and convective-top pressures.
plt.figure(2)
plt.clf()
yls = (10,1013)  # pressure-axis limits (hPa)
xls = (-3,2)     # heating-rate limits (K/d)
xtcks = np.linspace(xls[0],xls[1],2)
#ytcks = np.linspace(,1000,10)
plt.suptitle('HR (K/d)')
for i, name in enumerate(anames):
    ax = plt.subplot(1,nsim,i+1)
    plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
    # (removed dead code: local copies of atms[name].iconv/.icold were
    # assigned here but never read)
    try:
        plt.semilogy(hr[name].hrir, atms[name].p,color='crimson')
        plt.semilogy(hr[name].hrsw, atms[name].p,'mediumblue')
        plt.semilogy(hr[name].hr, atms[name].p,'orange')
        plt.plot(np.zeros(len(atms[name])), atms[name].p, 'k--')  # zero line
        # horizontal markers at the cold point and convective top
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pcold, 'k')
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pconv, 'k')
    except ValueError:
        pass  # skip panels whose profiles cannot be plotted
    finally:
        plt.ylim(yls)
#        plt.yticks(ytcks)
        plt.xlim(xls)
    plt.xticks(xtcks)
    plt.xlabel(name.upper())
    ax.invert_yaxis()  # pressure decreases upward
    if (i==0):
        plt.ylabel('Pressure (hPa)')
#        ax.yaxis.set_ticklabels(['{:.0f}'.format(tick) for tick in ytcks])
    else:
        ax.yaxis.set_ticklabels([])  # label only the leftmost panel
else:
    plt.show()  # for/else: always runs after the loop (no break in the body)
figname = 'img/rce_{}g{}_cld_{}_hr.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% co2-only hr
# Figure 3: CO2 contribution to the IR heating rate, estimated as the full
# run minus the no-CO2 radiation-only run, per case.
plt.figure(3)
plt.clf()
yls = (10,1013)  # pressure-axis limits (hPa)
xls = (-3,3)     # heating-rate limits (K/d)
xtcks = np.linspace(xls[0],xls[1],3)
#ytcks = np.linspace(100,1000,10)
plt.suptitle('CO2 IR HR (K/d)')
for i, name in enumerate(anames):
    ax = plt.subplot(1,nsim,i+1)
    plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
    # (removed dead code: local copies of atms[name].iconv/.icold were
    # assigned here but never read)
    try:
        plt.plot(hr[name].hrir - hr_noco2[name].hrir, atms[name].p)
        plt.plot(np.zeros(len(atms[name])), atms[name].p, 'k--')  # zero line
        # horizontal markers at the cold point and convective top
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pcold, 'k')
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pconv, 'k')
    except ValueError:
        pass  # skip panels whose profiles cannot be plotted
    finally:
        plt.ylim(yls)
        plt.xlim(xls)
    plt.xticks(xtcks)
    plt.xlabel(name.upper())
    ax.invert_yaxis()  # pressure decreases upward
    ax.set_yscale('log')
#    plt.yticks(ytcks)
    if (i==0):
        plt.ylabel('Pressure (hPa)')
#        ax.yaxis.set_ticklabels(['{:.0f}'.format(tick) for tick in ytcks])
    else:
        ax.yaxis.set_ticklabels([])  # label only the leftmost panel
else:
    plt.show()  # for/else: always runs after the loop (no break in the body)
figname = 'img/rce_{}g{}_cld_{}_hrco2.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% o3-only hr
# Figure 4: O3 contribution to the net heating rate, estimated as the full
# run minus the no-O3 radiation-only run, per case.
plt.figure(4)
plt.clf()
yls = (10,1013)  # pressure-axis limits (hPa)
xls = (-1,2)     # heating-rate limits (K/d)
xtcks = np.linspace(xls[0],xls[1],4)
#ytcks = np.linspace(100,1000,10)
plt.suptitle(' O3 HR (K/d)')
for i, name in enumerate(anames):
    ax = plt.subplot(1,nsim,i+1)
    plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
    # (removed dead code: local copies of atms[name].iconv/.icold were
    # assigned here but never read)
    try:
#        plt.plot(hr[name].hrir - hr_noo3[name].hrir, atms[name].p,color='crimson')
#        plt.plot(hr[name].hrsw - hr_noo3[name].hrsw, atms[name].p,color='mediumblue')
        plt.plot(hr[name].hr - hr_noo3[name].hr, atms[name].p,'orange')
        plt.plot(np.zeros(len(atms[name])), atms[name].p, 'k--')  # zero line
        # horizontal markers at the cold point and convective top
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pcold, 'k')
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pconv, 'k')
    except ValueError:
        pass  # skip panels whose profiles cannot be plotted
    finally:
        plt.ylim(yls)
        plt.xlim(xls)
    plt.xticks(xtcks)
    plt.xlabel(name.upper())
    ax.invert_yaxis()  # pressure decreases upward
    ax.set_yscale('log')
#    plt.yticks(ytcks)
    if (i==0):
        plt.ylabel('Pressure (hPa)')
#        ax.yaxis.set_ticklabels(['{:.0f}'.format(tick) for tick in ytcks])
    else:
        ax.yaxis.set_ticklabels([])  # label only the leftmost panel
else:
    plt.show()  # for/else: always runs after the loop (no break in the body)
figname = 'img/rce_{}g{}_cld_{}_hro3.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% wv-only hr
# Figure 5: net heating rate from the water-vapour-only diagnostic run
# (O3 zeroed, CO2 ~ 0 -- see the solve loop), per case.
plt.figure(5)
plt.clf()
yls = (10, 1013)  # pressure-axis limits (hPa)
xls = (-2, 2)     # heating-rate limits (K/d)
xtcks = np.linspace(-2,2,3)
plt.suptitle('H2O HR')
for i, name in enumerate(anames):
    ax = plt.subplot(1,nsim,i+1)
    plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
    # (removed dead code: local copies of atms[name].iconv/.icold were
    # assigned here but never read)
    try:
#        plt.plot(hr[name].hrir - hr_noo3[name].hrir, atms[name].p,color='crimson')
#        plt.plot(hr[name].hrsw - hr_noo3[name].hrsw, atms[name].p,color='mediumblue')
        plt.plot(hr_wvonly[name].hr, atms[name].p,'g')
        plt.plot(np.zeros(len(atms[name])), atms[name].p, 'k--')  # zero line
        # horizontal markers at the cold point and convective top
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pcold, 'k')
        plt.plot(xtcks, np.ones(len(xtcks))*atms[name].pconv, 'k')
    except ValueError:
        pass  # skip panels whose profiles cannot be plotted
    finally:
        plt.ylim(yls)
        plt.xlim(xls)
    plt.xticks(xtcks)
    plt.xlabel(name.upper())
    ax.invert_yaxis()  # pressure decreases upward
    ax.set_yscale('log')
#    plt.yticks(ytcks)
    if (i==0):
        plt.ylabel('Pressure (hPa)')
#        ax.yaxis.set_ticklabels(['{:.0f}'.format(tick) for tick in ytcks])
    else:
        ax.yaxis.set_ticklabels([])  # label only the leftmost panel
else:
    plt.show()  # for/else: always runs after the loop (no break in the body)
figname = 'img/rce_{}g{}_cld_{}_hrwv.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% compare o3 profiles
# Figure 6: left panel overlays the tropical-mean and warm-pool ozone
# profiles (scaled by 1e6); right panel shows their difference (WP - Trop).
plt.figure(6)
plt.clf()
xls1 = (0,18)       # x limits for the profile panel
xls2 = (-1.6, 0.2)  # x limits for the difference panel
yls = (1, 1013)     # pressure-axis limits (hPa)
plt.subplot(1,2,1)
plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
plt.plot(atms['Trop'].o3*1e6,atms['Trop'].p, label='All Tropics')
plt.plot(atms['WP'].o3*1e6, atms['WP'].p, label='WP')
plt.ylim(yls)
plt.xlim(xls1)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()  # pressure decreases upward
plt.title('O3 Profile')
plt.ylabel('Pressure (hPa)')
plt.xlabel('Conc. ($10^{-6}$ g/g)')
plt.legend(loc='best')
plt.subplot(1,2,2)
plt.hold('on')
plt.plot(1e6*(atms['WP'].o3-atms['Trop'].o3), atms['WP'].p)
plt.plot(np.zeros(len(atms['WP'])), atms['WP'].p, 'k--')  # zero line
plt.ylim(yls)
plt.xlim(xls2)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()
plt.title('$\Delta$ O3')
plt.xlabel('Conc. ($10^{-6}$ g/g)')
figname = 'img/rce_{}g{}_cld_{}_o3prof.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% show delta T for clouds
# Figure 7: temperature change caused by the cloud forcing -- each cloud case
# minus its clear-sky reference (left: tropical cases vs 'Trop'; right:
# warm-pool cases vs 'WP'), with convective-top and cold-point markers.
plt.figure(7)
plt.clf()
xls1 = (-10,10)
xls2 = (-10, 10)  # NOTE(review): defined but unused -- both panels use xls1
                  # (same values, so the plot is unaffected)
yls = (40, 400)
plt.subplot(1,2,1)
plt.hold('on')  # NOTE(review): plt.hold was removed in matplotlib 3.0
names = anames[1:3]  # tropical cloud cases
ref = anames[0]      # tropical clear-sky reference
for name in names:
    plt.plot(atms[name].t-atms[ref].t,atms[name].p,label=name)
    plt.plot(atms[name].tconv-atms[ref].t[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(atms[name].tcold-atms[ref].t[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
plt.ylim(yls)
plt.xlim(xls1)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()  # pressure decreases upward
plt.title('')
plt.ylabel('Pressure (hPa)')
plt.xlabel('Temperature (K)')
plt.legend(loc='best')
plt.subplot(1,2,2)
plt.hold('on')
names = anames[4:6]  # warm-pool cloud cases
ref = anames[3]      # warm-pool clear-sky reference
for name in names:
    plt.plot(atms[name].t-atms[ref].t,atms[name].p,label=name)
    plt.plot(atms[name].tconv-atms[ref].t[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(atms[name].tcold-atms[ref].t[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
plt.ylim(yls)
plt.xlim(xls1)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()
plt.title('')
plt.ylabel('Pressure (hPa)')
plt.xlabel('Temperature (K)')
plt.legend(loc='best')
figname = 'img/rce_{}g{}_cld_{}_tdiff.png'.format(
        radmodel,fstag,rhlabel)
print("Writing figure: {}".format(figname))
plt.savefig(
        bbox_inches='tight', dpi=300, filename=figname)
# %% plot of cloud heating rates
# Figure 8: the prescribed cloud heating-rate profiles (auxhr) in three
# panels -- net, IR, and SW. Tropical cases (anames[1:3]) are drawn solid,
# warm-pool cases (anames[4:6]) dashed; 'ks'/'ko' mark the values at the
# convective-top and cold-point indices. NOTE(review): plt.hold was
# removed in matplotlib 3.0.
plt.figure(8)
plt.clf()
xls = (-0.5,1.5)  # heating-rate limits (K/d)
yls = (80, 1000)  # pressure-axis limits (hPa)
plt.subplot(131)
plt.hold('on')
names = anames[1:3]
for name in names:
    plt.plot(auxhr[name].hr,atms[name].p,label=name)
    plt.plot(auxhr[name].hr[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(auxhr[name].hr[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
names = anames[4:6]
for name in names:
    plt.plot(auxhr[name].hr,atms[name].p,'--',label=name)
    plt.plot(auxhr[name].hr[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(auxhr[name].hr[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
plt.ylim(yls)
plt.xlim(xls)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()  # pressure decreases upward
plt.title('')
plt.ylabel('Pressure (hPa)')
plt.xlabel('Cloud HR (K/d)')
#plt.legend(loc='best')
# Middle panel: IR component of the cloud heating rate.
plt.subplot(132)
plt.hold('on')
names = anames[1:3]
for name in names:
    plt.plot(auxhr[name].hrir,atms[name].p,label=name)
    plt.plot(auxhr[name].hrir[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(auxhr[name].hrir[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
names = anames[4:6]
for name in names:
    plt.plot(auxhr[name].hrir,atms[name].p,'--',label=name)
    plt.plot(auxhr[name].hrir[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(auxhr[name].hrir[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
plt.ylim(yls)
plt.xlim(xls)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()
plt.title('')
plt.ylabel('Pressure (hPa)')
plt.xlabel('Cloud IR HR (K/d)')
#plt.legend(loc='best')
# Right panel: SW component of the cloud heating rate.
plt.subplot(133)
plt.hold('on')
names = anames[1:3]
for name in names:
    plt.plot(auxhr[name].hrsw,atms[name].p,label=name)
    plt.plot(auxhr[name].hrsw[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(auxhr[name].hrsw[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
names = anames[4:6]
for name in names:
    plt.plot(auxhr[name].hrsw,atms[name].p,'--',label=name)
    plt.plot(auxhr[name].hrsw[atms[name].iconv], atms[name].pconv, 'ks')
    plt.plot(auxhr[name].hrsw[atms[name].icold], atms[name].pcold, 'ko')
    plt.plot(np.zeros(len(atms[name])),atms[name].p, 'k--')
plt.ylim(yls)
plt.xlim(xls)
ax = plt.gca()
ax.set_yscale('log')
ax.invert_yaxis()
plt.title('')
plt.ylabel('Pressure (hPa)')
plt.xlabel('Cloud SW HR (K/d)')
#plt.legend(loc='best')
| [
"solver.SolverFactory.create",
"numpy.log10",
"time.clock",
"matplotlib.pyplot.ylabel",
"misc.solargeometry.mubar",
"atmosphere.Atmosphere.mcclatchy",
"copy.deepcopy",
"parm.SWParm",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"solver... | [((400, 412), 'time.clock', 'time.clock', ([], {}), '()\n', (410, 412), False, 'import time\n'), ((2161, 2171), 'parm.ChemParm', 'ChemParm', ([], {}), '()\n', (2169, 2171), False, 'from parm import ChemParm, LWParm, SWParm\n'), ((2184, 2208), 'parm.ChemParm', 'ChemParm', ([], {'co2ppmv': '(0.0001)'}), '(co2ppmv=0.0001)\n', (2192, 2208), False, 'from parm import ChemParm, LWParm, SWParm\n'), ((2218, 2226), 'parm.LWParm', 'LWParm', ([], {}), '()\n', (2224, 2226), False, 'from parm import ChemParm, LWParm, SWParm\n'), ((2233, 2369), 'solver.SolverFactory.create', 'SolverFactory.create', ([], {'kind': '"""rce"""', 'timestep': 'timestep', 'holdtsfc': 'holdtsfc', 'radmodel': 'radmodel', 'cpdair': 'cpdair', 'tol': 'tol', 'maxsteps': 'maxsteps'}), "(kind='rce', timestep=timestep, holdtsfc=holdtsfc,\n radmodel=radmodel, cpdair=cpdair, tol=tol, maxsteps=maxsteps)\n", (2253, 2369), False, 'from solver import SolverFactory, HR\n'), ((2427, 2493), 'solver.SolverFactory.create', 'SolverFactory.create', ([], {'kind': '"""rad"""', 'radmodel': 'radmodel', 'cpdair': 'cpdair'}), "(kind='rad', radmodel=radmodel, cpdair=cpdair)\n", (2447, 2493), False, 'from solver import SolverFactory, HR\n'), ((4018, 4031), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4028, 4031), True, 'import matplotlib.pyplot as plt\n'), ((4032, 4041), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4039, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4132, 4156), 'numpy.linspace', 'np.linspace', (['(150)', '(300)', '(3)'], {}), '(150, 300, 3)\n', (4143, 4156), True, 'import numpy as np\n'), ((4155, 4186), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Temperature (K)"""'], {}), "('Temperature (K)')\n", (4167, 4186), True, 'import matplotlib.pyplot as plt\n'), ((5056, 5115), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (5067, 5115), 
True, 'import matplotlib.pyplot as plt\n'), ((5141, 5154), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5151, 5154), True, 'import matplotlib.pyplot as plt\n'), ((5155, 5164), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5162, 5164), True, 'import matplotlib.pyplot as plt\n'), ((5202, 5232), 'numpy.linspace', 'np.linspace', (['xls[0]', 'xls[1]', '(2)'], {}), '(xls[0], xls[1], 2)\n', (5213, 5232), True, 'import numpy as np\n'), ((5262, 5286), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""HR (K/d)"""'], {}), "('HR (K/d)')\n", (5274, 5286), True, 'import matplotlib.pyplot as plt\n'), ((6382, 6441), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (6393, 6441), True, 'import matplotlib.pyplot as plt\n'), ((6465, 6478), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (6475, 6478), True, 'import matplotlib.pyplot as plt\n'), ((6479, 6488), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6486, 6488), True, 'import matplotlib.pyplot as plt\n'), ((6526, 6556), 'numpy.linspace', 'np.linspace', (['xls[0]', 'xls[1]', '(3)'], {}), '(xls[0], xls[1], 3)\n', (6537, 6556), True, 'import numpy as np\n'), ((6589, 6620), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""CO2 IR HR (K/d)"""'], {}), "('CO2 IR HR (K/d)')\n", (6601, 6620), True, 'import matplotlib.pyplot as plt\n'), ((7631, 7690), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (7642, 7690), True, 'import matplotlib.pyplot as plt\n'), ((7716, 7729), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (7726, 7729), True, 'import matplotlib.pyplot as plt\n'), ((7730, 7739), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7737, 7739), True, 'import matplotlib.pyplot as plt\n'), ((7777, 7807), 
'numpy.linspace', 'np.linspace', (['xls[0]', 'xls[1]', '(4)'], {}), '(xls[0], xls[1], 4)\n', (7788, 7807), True, 'import numpy as np\n'), ((7840, 7868), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['""" O3 HR (K/d)"""'], {}), "(' O3 HR (K/d)')\n", (7852, 7868), True, 'import matplotlib.pyplot as plt\n'), ((9051, 9110), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (9062, 9110), True, 'import matplotlib.pyplot as plt\n'), ((9133, 9146), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (9143, 9146), True, 'import matplotlib.pyplot as plt\n'), ((9147, 9156), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9154, 9156), True, 'import matplotlib.pyplot as plt\n'), ((9196, 9217), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(3)'], {}), '(-2, 2, 3)\n', (9207, 9217), True, 'import numpy as np\n'), ((9216, 9238), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""H2O HR"""'], {}), "('H2O HR')\n", (9228, 9238), True, 'import matplotlib.pyplot as plt\n'), ((10404, 10463), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (10415, 10463), True, 'import matplotlib.pyplot as plt\n'), ((10495, 10508), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (10505, 10508), True, 'import matplotlib.pyplot as plt\n'), ((10509, 10518), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10516, 10518), True, 'import matplotlib.pyplot as plt\n'), ((10569, 10589), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (10580, 10589), True, 'import matplotlib.pyplot as plt\n'), ((10588, 10602), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (10596, 10602), True, 'import matplotlib.pyplot as plt\n'), ((10603, 10677), 
'matplotlib.pyplot.plot', 'plt.plot', (["(atms['Trop'].o3 * 1000000.0)", "atms['Trop'].p"], {'label': '"""All Tropics"""'}), "(atms['Trop'].o3 * 1000000.0, atms['Trop'].p, label='All Tropics')\n", (10611, 10677), True, 'import matplotlib.pyplot as plt\n'), ((10669, 10730), 'matplotlib.pyplot.plot', 'plt.plot', (["(atms['WP'].o3 * 1000000.0)", "atms['WP'].p"], {'label': '"""WP"""'}), "(atms['WP'].o3 * 1000000.0, atms['WP'].p, label='WP')\n", (10677, 10730), True, 'import matplotlib.pyplot as plt\n'), ((10723, 10736), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (10731, 10736), True, 'import matplotlib.pyplot as plt\n'), ((10737, 10751), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls1'], {}), '(xls1)\n', (10745, 10751), True, 'import matplotlib.pyplot as plt\n'), ((10757, 10766), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10764, 10766), True, 'import matplotlib.pyplot as plt\n'), ((10806, 10829), 'matplotlib.pyplot.title', 'plt.title', (['"""O3 Profile"""'], {}), "('O3 Profile')\n", (10815, 10829), True, 'import matplotlib.pyplot as plt\n'), ((10830, 10858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (10840, 10858), True, 'import matplotlib.pyplot as plt\n'), ((10859, 10894), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Conc. ($10^{-6}$ g/g)"""'], {}), "('Conc. 
($10^{-6}$ g/g)')\n", (10869, 10894), True, 'import matplotlib.pyplot as plt\n'), ((10895, 10917), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (10905, 10917), True, 'import matplotlib.pyplot as plt\n'), ((10919, 10939), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (10930, 10939), True, 'import matplotlib.pyplot as plt\n'), ((10938, 10952), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (10946, 10952), True, 'import matplotlib.pyplot as plt\n'), ((10953, 11022), 'matplotlib.pyplot.plot', 'plt.plot', (["(1000000.0 * (atms['WP'].o3 - atms['Trop'].o3))", "atms['WP'].p"], {}), "(1000000.0 * (atms['WP'].o3 - atms['Trop'].o3), atms['WP'].p)\n", (10961, 11022), True, 'import matplotlib.pyplot as plt\n'), ((11070, 11083), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (11078, 11083), True, 'import matplotlib.pyplot as plt\n'), ((11084, 11098), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls2'], {}), '(xls2)\n', (11092, 11098), True, 'import matplotlib.pyplot as plt\n'), ((11104, 11113), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11111, 11113), True, 'import matplotlib.pyplot as plt\n'), ((11153, 11178), 'matplotlib.pyplot.title', 'plt.title', (['"""$\\\\Delta$ O3"""'], {}), "('$\\\\Delta$ O3')\n", (11162, 11178), True, 'import matplotlib.pyplot as plt\n'), ((11178, 11213), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Conc. ($10^{-6}$ g/g)"""'], {}), "('Conc. 
($10^{-6}$ g/g)')\n", (11188, 11213), True, 'import matplotlib.pyplot as plt\n'), ((11354, 11413), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (11365, 11413), True, 'import matplotlib.pyplot as plt\n'), ((11452, 11465), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {}), '(7)\n', (11462, 11465), True, 'import matplotlib.pyplot as plt\n'), ((11466, 11475), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11473, 11475), True, 'import matplotlib.pyplot as plt\n'), ((11526, 11546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (11537, 11546), True, 'import matplotlib.pyplot as plt\n'), ((11545, 11559), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (11553, 11559), True, 'import matplotlib.pyplot as plt\n'), ((11908, 11921), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (11916, 11921), True, 'import matplotlib.pyplot as plt\n'), ((11922, 11936), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls1'], {}), '(xls1)\n', (11930, 11936), True, 'import matplotlib.pyplot as plt\n'), ((11942, 11951), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11949, 11951), True, 'import matplotlib.pyplot as plt\n'), ((11991, 12004), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (12000, 12004), True, 'import matplotlib.pyplot as plt\n'), ((12005, 12033), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (12015, 12033), True, 'import matplotlib.pyplot as plt\n'), ((12034, 12063), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature (K)"""'], {}), "('Temperature (K)')\n", (12044, 12063), True, 'import matplotlib.pyplot as plt\n'), ((12064, 12086), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (12074, 12086), True, 'import matplotlib.pyplot as plt\n'), 
((12088, 12108), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (12099, 12108), True, 'import matplotlib.pyplot as plt\n'), ((12107, 12121), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (12115, 12121), True, 'import matplotlib.pyplot as plt\n'), ((12470, 12483), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (12478, 12483), True, 'import matplotlib.pyplot as plt\n'), ((12484, 12498), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls1'], {}), '(xls1)\n', (12492, 12498), True, 'import matplotlib.pyplot as plt\n'), ((12504, 12513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12511, 12513), True, 'import matplotlib.pyplot as plt\n'), ((12553, 12566), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (12562, 12566), True, 'import matplotlib.pyplot as plt\n'), ((12567, 12595), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (12577, 12595), True, 'import matplotlib.pyplot as plt\n'), ((12596, 12625), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature (K)"""'], {}), "('Temperature (K)')\n", (12606, 12625), True, 'import matplotlib.pyplot as plt\n'), ((12626, 12648), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (12636, 12648), True, 'import matplotlib.pyplot as plt\n'), ((12789, 12848), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'filename': 'figname'}), "(bbox_inches='tight', dpi=300, filename=figname)\n", (12800, 12848), True, 'import matplotlib.pyplot as plt\n'), ((12890, 12903), 'matplotlib.pyplot.figure', 'plt.figure', (['(8)'], {}), '(8)\n', (12900, 12903), True, 'import matplotlib.pyplot as plt\n'), ((12904, 12913), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12911, 12913), True, 'import matplotlib.pyplot as plt\n'), ((12949, 12965), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), 
'(131)\n', (12960, 12965), True, 'import matplotlib.pyplot as plt\n'), ((12966, 12980), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (12974, 12980), True, 'import matplotlib.pyplot as plt\n'), ((13574, 13587), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (13582, 13587), True, 'import matplotlib.pyplot as plt\n'), ((13588, 13601), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (13596, 13601), True, 'import matplotlib.pyplot as plt\n'), ((13607, 13616), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13614, 13616), True, 'import matplotlib.pyplot as plt\n'), ((13656, 13669), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (13665, 13669), True, 'import matplotlib.pyplot as plt\n'), ((13670, 13698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (13680, 13698), True, 'import matplotlib.pyplot as plt\n'), ((13699, 13727), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cloud HR (K/d)"""'], {}), "('Cloud HR (K/d)')\n", (13709, 13727), True, 'import matplotlib.pyplot as plt\n'), ((13753, 13769), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (13764, 13769), True, 'import matplotlib.pyplot as plt\n'), ((13770, 13784), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (13778, 13784), True, 'import matplotlib.pyplot as plt\n'), ((14390, 14403), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (14398, 14403), True, 'import matplotlib.pyplot as plt\n'), ((14404, 14417), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (14412, 14417), True, 'import matplotlib.pyplot as plt\n'), ((14423, 14432), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14430, 14432), True, 'import matplotlib.pyplot as plt\n'), ((14472, 14485), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (14481, 14485), True, 'import matplotlib.pyplot as plt\n'), ((14486, 14514), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (14496, 14514), True, 'import matplotlib.pyplot as plt\n'), ((14515, 14546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cloud IR HR (K/d)"""'], {}), "('Cloud IR HR (K/d)')\n", (14525, 14546), True, 'import matplotlib.pyplot as plt\n'), ((14572, 14588), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (14583, 14588), True, 'import matplotlib.pyplot as plt\n'), ((14589, 14603), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (14597, 14603), True, 'import matplotlib.pyplot as plt\n'), ((15209, 15222), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (15217, 15222), True, 'import matplotlib.pyplot as plt\n'), ((15223, 15236), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (15231, 15236), True, 'import matplotlib.pyplot as plt\n'), ((15242, 15251), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15249, 15251), True, 'import matplotlib.pyplot as plt\n'), ((15291, 15304), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (15300, 15304), True, 'import matplotlib.pyplot as plt\n'), ((15305, 15333), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (15315, 15333), True, 'import matplotlib.pyplot as plt\n'), ((15334, 15365), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cloud SW HR (K/d)"""'], {}), "('Cloud SW HR (K/d)')\n", (15344, 15365), True, 'import matplotlib.pyplot as plt\n'), ((468, 482), 'numpy.log10', 'np.log10', (['(1013)'], {}), '(1013)\n', (476, 482), True, 'import numpy as np\n'), ((2642, 2742), 'atmosphere.Atmosphere.mcclatchy', 'a.Atmosphere.mcclatchy', (['prof'], {'p': 'plev', 'rhlev': 'rh', 'holdrh': 'holdrh', 'gridstagger': 'gridstagger', 'tsfc': 'ts'}), '(prof, p=plev, rhlev=rh, holdrh=holdrh, gridstagger=\n gridstagger, tsfc=ts)\n', (2664, 2742), True, 'import atmosphere as a\n'), ((2845, 2870), 
'copy.deepcopy', 'copy.deepcopy', (['atms[name]'], {}), '(atms[name])\n', (2858, 2870), False, 'import copy\n'), ((2893, 2918), 'copy.deepcopy', 'copy.deepcopy', (['atms[name]'], {}), '(atms[name])\n', (2906, 2918), False, 'import copy\n'), ((2999, 3024), 'copy.deepcopy', 'copy.deepcopy', (['atms[name]'], {}), '(atms[name])\n', (3012, 3024), False, 'import copy\n'), ((3093, 3115), 'misc.solargeometry.mubar', 'solar.mubar', (['lat', 'decl'], {}), '(lat, decl)\n', (3104, 3115), True, 'import misc.solargeometry as solar\n'), ((3124, 3145), 'misc.solargeometry.fday', 'solar.fday', (['lat', 'decl'], {}), '(lat, decl)\n', (3134, 3145), True, 'import misc.solargeometry as solar\n'), ((3164, 3192), 'parm.SWParm', 'SWParm', ([], {'coszen': 'mu', 'fday': 'fday'}), '(coszen=mu, fday=fday)\n', (3170, 3192), False, 'from parm import ChemParm, LWParm, SWParm\n'), ((3899, 3911), 'time.clock', 'time.clock', ([], {}), '()\n', (3909, 3911), False, 'import time\n'), ((4283, 4310), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nsim', '(i + 1)'], {}), '(1, nsim, i + 1)\n', (4294, 4310), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4325), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (4319, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4911, 4921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4919, 4921), True, 'import matplotlib.pyplot as plt\n'), ((5331, 5358), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nsim', '(i + 1)'], {}), '(1, nsim, i + 1)\n', (5342, 5358), True, 'import matplotlib.pyplot as plt\n'), ((5359, 5373), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (5367, 5373), True, 'import matplotlib.pyplot as plt\n'), ((6237, 6247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6245, 6247), True, 'import matplotlib.pyplot as plt\n'), ((6665, 6692), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nsim', '(i + 1)'], {}), '(1, nsim, i + 1)\n', (6676, 6692), True, 'import 
matplotlib.pyplot as plt\n'), ((6693, 6707), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (6701, 6707), True, 'import matplotlib.pyplot as plt\n'), ((7483, 7493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7491, 7493), True, 'import matplotlib.pyplot as plt\n'), ((7913, 7940), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nsim', '(i + 1)'], {}), '(1, nsim, i + 1)\n', (7924, 7940), True, 'import matplotlib.pyplot as plt\n'), ((7941, 7955), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (7949, 7955), True, 'import matplotlib.pyplot as plt\n'), ((8904, 8914), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8912, 8914), True, 'import matplotlib.pyplot as plt\n'), ((9283, 9310), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nsim', '(i + 1)'], {}), '(1, nsim, i + 1)\n', (9294, 9310), True, 'import matplotlib.pyplot as plt\n'), ((9311, 9325), 'matplotlib.pyplot.hold', 'plt.hold', (['"""on"""'], {}), "('on')\n", (9319, 9325), True, 'import matplotlib.pyplot as plt\n'), ((10257, 10267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10265, 10267), True, 'import matplotlib.pyplot as plt\n'), ((11619, 11681), 'matplotlib.pyplot.plot', 'plt.plot', (['(atms[name].t - atms[ref].t)', 'atms[name].p'], {'label': 'name'}), '(atms[name].t - atms[ref].t, atms[name].p, label=name)\n', (11627, 11681), True, 'import matplotlib.pyplot as plt\n'), ((11682, 11768), 'matplotlib.pyplot.plot', 'plt.plot', (['(atms[name].tconv - atms[ref].t[atms[name].iconv])', 'atms[name].pconv', '"""ks"""'], {}), "(atms[name].tconv - atms[ref].t[atms[name].iconv], atms[name].pconv,\n 'ks')\n", (11690, 11768), True, 'import matplotlib.pyplot as plt\n'), ((11767, 11853), 'matplotlib.pyplot.plot', 'plt.plot', (['(atms[name].tcold - atms[ref].t[atms[name].icold])', 'atms[name].pcold', '"""ko"""'], {}), "(atms[name].tcold - atms[ref].t[atms[name].icold], atms[name].pcold,\n 'ko')\n", (11775, 11853), True, 'import 
matplotlib.pyplot as plt\n'), ((12181, 12243), 'matplotlib.pyplot.plot', 'plt.plot', (['(atms[name].t - atms[ref].t)', 'atms[name].p'], {'label': 'name'}), '(atms[name].t - atms[ref].t, atms[name].p, label=name)\n', (12189, 12243), True, 'import matplotlib.pyplot as plt\n'), ((12244, 12330), 'matplotlib.pyplot.plot', 'plt.plot', (['(atms[name].tconv - atms[ref].t[atms[name].iconv])', 'atms[name].pconv', '"""ks"""'], {}), "(atms[name].tconv - atms[ref].t[atms[name].iconv], atms[name].pconv,\n 'ks')\n", (12252, 12330), True, 'import matplotlib.pyplot as plt\n'), ((12329, 12415), 'matplotlib.pyplot.plot', 'plt.plot', (['(atms[name].tcold - atms[ref].t[atms[name].icold])', 'atms[name].pcold', '"""ko"""'], {}), "(atms[name].tcold - atms[ref].t[atms[name].icold], atms[name].pcold,\n 'ko')\n", (12337, 12415), True, 'import matplotlib.pyplot as plt\n'), ((13024, 13074), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hr', 'atms[name].p'], {'label': 'name'}), '(auxhr[name].hr, atms[name].p, label=name)\n', (13032, 13074), True, 'import matplotlib.pyplot as plt\n'), ((13077, 13143), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hr[atms[name].iconv]', 'atms[name].pconv', '"""ks"""'], {}), "(auxhr[name].hr[atms[name].iconv], atms[name].pconv, 'ks')\n", (13085, 13143), True, 'import matplotlib.pyplot as plt\n'), ((13148, 13214), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hr[atms[name].icold]', 'atms[name].pcold', '"""ko"""'], {}), "(auxhr[name].hr[atms[name].icold], atms[name].pcold, 'ko')\n", (13156, 13214), True, 'import matplotlib.pyplot as plt\n'), ((13318, 13374), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hr', 'atms[name].p', '"""--"""'], {'label': 'name'}), "(auxhr[name].hr, atms[name].p, '--', label=name)\n", (13326, 13374), True, 'import matplotlib.pyplot as plt\n'), ((13376, 13442), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hr[atms[name].iconv]', 'atms[name].pconv', '"""ks"""'], {}), "(auxhr[name].hr[atms[name].iconv], 
atms[name].pconv, 'ks')\n", (13384, 13442), True, 'import matplotlib.pyplot as plt\n'), ((13447, 13513), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hr[atms[name].icold]', 'atms[name].pcold', '"""ko"""'], {}), "(auxhr[name].hr[atms[name].icold], atms[name].pcold, 'ko')\n", (13455, 13513), True, 'import matplotlib.pyplot as plt\n'), ((13828, 13880), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrir', 'atms[name].p'], {'label': 'name'}), '(auxhr[name].hrir, atms[name].p, label=name)\n', (13836, 13880), True, 'import matplotlib.pyplot as plt\n'), ((13883, 13951), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrir[atms[name].iconv]', 'atms[name].pconv', '"""ks"""'], {}), "(auxhr[name].hrir[atms[name].iconv], atms[name].pconv, 'ks')\n", (13891, 13951), True, 'import matplotlib.pyplot as plt\n'), ((13956, 14024), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrir[atms[name].icold]', 'atms[name].pcold', '"""ko"""'], {}), "(auxhr[name].hrir[atms[name].icold], atms[name].pcold, 'ko')\n", (13964, 14024), True, 'import matplotlib.pyplot as plt\n'), ((14128, 14186), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrir', 'atms[name].p', '"""--"""'], {'label': 'name'}), "(auxhr[name].hrir, atms[name].p, '--', label=name)\n", (14136, 14186), True, 'import matplotlib.pyplot as plt\n'), ((14188, 14256), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrir[atms[name].iconv]', 'atms[name].pconv', '"""ks"""'], {}), "(auxhr[name].hrir[atms[name].iconv], atms[name].pconv, 'ks')\n", (14196, 14256), True, 'import matplotlib.pyplot as plt\n'), ((14261, 14329), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrir[atms[name].icold]', 'atms[name].pcold', '"""ko"""'], {}), "(auxhr[name].hrir[atms[name].icold], atms[name].pcold, 'ko')\n", (14269, 14329), True, 'import matplotlib.pyplot as plt\n'), ((14647, 14699), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrsw', 'atms[name].p'], {'label': 'name'}), '(auxhr[name].hrsw, atms[name].p, 
label=name)\n', (14655, 14699), True, 'import matplotlib.pyplot as plt\n'), ((14702, 14770), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrsw[atms[name].iconv]', 'atms[name].pconv', '"""ks"""'], {}), "(auxhr[name].hrsw[atms[name].iconv], atms[name].pconv, 'ks')\n", (14710, 14770), True, 'import matplotlib.pyplot as plt\n'), ((14775, 14843), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrsw[atms[name].icold]', 'atms[name].pcold', '"""ko"""'], {}), "(auxhr[name].hrsw[atms[name].icold], atms[name].pcold, 'ko')\n", (14783, 14843), True, 'import matplotlib.pyplot as plt\n'), ((14947, 15005), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrsw', 'atms[name].p', '"""--"""'], {'label': 'name'}), "(auxhr[name].hrsw, atms[name].p, '--', label=name)\n", (14955, 15005), True, 'import matplotlib.pyplot as plt\n'), ((15007, 15075), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrsw[atms[name].iconv]', 'atms[name].pconv', '"""ks"""'], {}), "(auxhr[name].hrsw[atms[name].iconv], atms[name].pconv, 'ks')\n", (15015, 15075), True, 'import matplotlib.pyplot as plt\n'), ((15080, 15148), 'matplotlib.pyplot.plot', 'plt.plot', (['auxhr[name].hrsw[atms[name].icold]', 'atms[name].pcold', '"""ko"""'], {}), "(auxhr[name].hrsw[atms[name].icold], atms[name].pcold, 'ko')\n", (15088, 15148), True, 'import matplotlib.pyplot as plt\n'), ((3238, 3267), 'solver.HR.fromfile', 'HR.fromfile', (['atms[name]', 'cond'], {}), '(atms[name], cond)\n', (3249, 3267), False, 'from solver import SolverFactory, HR\n'), ((3340, 3356), 'solver.HR', 'HR', (['zeros', 'zeros'], {}), '(zeros, zeros)\n', (3342, 3356), False, 'from solver import SolverFactory, HR\n'), ((4343, 4383), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['atms[name].t', 'atms[name].p'], {}), '(atms[name].t, atms[name].p)\n', (4355, 4383), True, 'import matplotlib.pyplot as plt\n'), ((4392, 4442), 'matplotlib.pyplot.plot', 'plt.plot', (['atms[name].tconv', 'atms[name].pconv', '"""ks"""'], {}), "(atms[name].tconv, 
atms[name].pconv, 'ks')\n", (4400, 4442), True, 'import matplotlib.pyplot as plt\n'), ((4451, 4501), 'matplotlib.pyplot.plot', 'plt.plot', (['atms[name].tcold', 'atms[name].pcold', '"""ko"""'], {}), "(atms[name].tcold, atms[name].pcold, 'ko')\n", (4459, 4501), True, 'import matplotlib.pyplot as plt\n'), ((4559, 4572), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (4567, 4572), True, 'import matplotlib.pyplot as plt\n'), ((4608, 4621), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (4616, 4621), True, 'import matplotlib.pyplot as plt\n'), ((4630, 4647), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtcks'], {}), '(xtcks)\n', (4640, 4647), True, 'import matplotlib.pyplot as plt\n'), ((5449, 5507), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['hr[name].hrir', 'atms[name].p'], {'color': '"""crimson"""'}), "(hr[name].hrir, atms[name].p, color='crimson')\n", (5461, 5507), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5570), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['hr[name].hrsw', 'atms[name].p', '"""mediumblue"""'], {}), "(hr[name].hrsw, atms[name].p, 'mediumblue')\n", (5527, 5570), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5627), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['hr[name].hr', 'atms[name].p', '"""orange"""'], {}), "(hr[name].hr, atms[name].p, 'orange')\n", (5590, 5627), True, 'import matplotlib.pyplot as plt\n'), ((5884, 5897), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (5892, 5897), True, 'import matplotlib.pyplot as plt\n'), ((5933, 5946), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (5941, 5946), True, 'import matplotlib.pyplot as plt\n'), ((5955, 5972), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtcks'], {}), '(xtcks)\n', (5965, 5972), True, 'import matplotlib.pyplot as plt\n'), ((6784, 6843), 'matplotlib.pyplot.plot', 'plt.plot', (['(hr[name].hrir - hr_noco2[name].hrir)', 'atms[name].p'], {}), '(hr[name].hrir - hr_noco2[name].hrir, atms[name].p)\n', 
(6792, 6843), True, 'import matplotlib.pyplot as plt\n'), ((7101, 7114), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (7109, 7114), True, 'import matplotlib.pyplot as plt\n'), ((7123, 7136), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (7131, 7136), True, 'import matplotlib.pyplot as plt\n'), ((7145, 7162), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtcks'], {}), '(xtcks)\n', (7155, 7162), True, 'import matplotlib.pyplot as plt\n'), ((8202, 8266), 'matplotlib.pyplot.plot', 'plt.plot', (['(hr[name].hr - hr_noo3[name].hr)', 'atms[name].p', '"""orange"""'], {}), "(hr[name].hr - hr_noo3[name].hr, atms[name].p, 'orange')\n", (8210, 8266), True, 'import matplotlib.pyplot as plt\n'), ((8522, 8535), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (8530, 8535), True, 'import matplotlib.pyplot as plt\n'), ((8544, 8557), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (8552, 8557), True, 'import matplotlib.pyplot as plt\n'), ((8566, 8583), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtcks'], {}), '(xtcks)\n', (8576, 8583), True, 'import matplotlib.pyplot as plt\n'), ((9572, 9619), 'matplotlib.pyplot.plot', 'plt.plot', (['hr_wvonly[name].hr', 'atms[name].p', '"""g"""'], {}), "(hr_wvonly[name].hr, atms[name].p, 'g')\n", (9580, 9619), True, 'import matplotlib.pyplot as plt\n'), ((9875, 9888), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yls'], {}), '(yls)\n', (9883, 9888), True, 'import matplotlib.pyplot as plt\n'), ((9897, 9910), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xls'], {}), '(xls)\n', (9905, 9910), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9936), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtcks'], {}), '(xtcks)\n', (9929, 9936), True, 'import matplotlib.pyplot as plt\n'), ((4738, 4766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (4748, 4766), True, 'import matplotlib.pyplot as plt\n'), ((6064, 6092), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (6074, 6092), True, 'import matplotlib.pyplot as plt\n'), ((7310, 7338), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (7320, 7338), True, 'import matplotlib.pyplot as plt\n'), ((8731, 8759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (8741, 8759), True, 'import matplotlib.pyplot as plt\n'), ((10084, 10112), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure (hPa)"""'], {}), "('Pressure (hPa)')\n", (10094, 10112), True, 'import matplotlib.pyplot as plt\n')] |
import backbone.support.configurations_variables as confv
import backbone.support.data_loading as dl
import backbone.support.data_analysis as da
import backbone.support.data_cleaning as dc
import backbone.support.configuration_classes as confc
import backbone.support.saving_loading as sl
import backbone.support.plots_and_charts as pc
import backbone.support.build_features as bf
import numpy as np
import backbone.support.models as mdl
from sklearn.utils.class_weight import compute_class_weight
from tensorflow.keras.callbacks import TensorBoard
import time
import backbone.support.directory_file_checking as dfc
import os
from tensorflow.python.keras.callbacks import CSVLogger
import tensorflow as tf
# Banner announcing the experiment configuration for this run:
# ShEMO database, male-isolated subset.
print("\t===========================================================================================\n"
      "\t\tMain program started for MAIN-DATABASE:{database}, GENDER-ISOLATION:{gender}\n"
      "\t\t\t\u2234 Dataset Name: {name}\n"
      "\t==========================================================================================="
      .format(database=confv.database_shemo, gender=confv.gender_male, name=confv.dataset_shemo_male))
# Disabled one-off preprocessing stage (kept as a triple-quoted string so it is
# never executed): loads the raw ShEMO data, inspects one waveform, adjusts the
# dataframe, isolates the male subset and pickles it. Subsequent runs reload
# the pickle below instead of repeating this work.
'''
# DATA LOADING SECTION
print("\n--------------------Started loading original data from the main database: {name}--------------------".format(name=confv.database_shemo))
data_info_shemo_df = dl.load_original_data(database=confv.database_shemo)
print("No. of sample audio files in {database} database: {length}\n".format(database=confv.database_shemo, length=len(data_info_shemo_df)))
print("Dataframe head of {database} database:".format(database=confv.database_shemo))
print(data_info_shemo_df.head())
print("\nDataframe tail of {database} database:".format(database=confv.database_shemo))
print(data_info_shemo_df.tail())
print("--------------------Finished loading original data from the main database: {name}--------------------".format(name=confv.database_shemo))
# RANDOM BASE AUDIO WAVE ANALYSIS SECTION
print("\n\n--------------------Started random base audio wave analysis for the main database: {name}--------------------".format(name=confv.database_shemo))
da.base_audio_wave_analysis(data_info_shemo_df.audio_fname[500], database=confv.database_shemo, status=confv.original)
print("--------------------Finished random base audio wave analysis for the main database: {name}--------------------".format(name=confv.database_shemo))
# DATAFRAME ADJUSTMENTS SECTION
print("\n\n--------------------Started dataframe adjustment for the main database: {name}--------------------".format(name=confv.database_shemo))
data_info_shemo_df_m, data_info_shemo_df_f = dc.data_adjustments(data_info_shemo_df)
print("--------------------Finished dataframe adjustment for the main database: {name}--------------------".format(name=confv.database_shemo))
# DATAFRAME SAVING
print("\n\n--------------------Started dataframe saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
shemo_m_df_obj = confc.DataFrame(database=confv.database_shemo, gender=confv.gender_male, df=data_info_shemo_df_m)
sl.save_dataframe(shemo_m_df_obj)
print("--------------------Finished dataframe saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
'''
# LOAD REQUIRED PICKLE
# Reload the previously adjusted, male-isolated ShEMO dataframe from disk and
# echo its metadata so the run log shows exactly what was loaded.
print("\n\n--------------------Started dataframe loading for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# Build an empty DataFrame descriptor, then let the loader fill it from disk.
shemo_m_df_obj = confc.DataFrame(database=confv.database_shemo, gender=confv.gender_male)
shemo_m_df_obj = sl.load_dataframe(shemo_m_df_obj)
# The pandas dataframe itself lives on the .df attribute of the wrapper.
data_info_shemo_df_m = shemo_m_df_obj.df
print(shemo_m_df_obj.database)
print(shemo_m_df_obj.gender)
print(len(data_info_shemo_df_m))
print(data_info_shemo_df_m.head())
print(data_info_shemo_df_m.tail())
print(shemo_m_df_obj.dataset)
print(shemo_m_df_obj.save_path)
print("--------------------Finished dataframe loading for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# Disabled one-off stage (inert triple-quoted string): distribution plots,
# visual analysis and the down-sampling / noise-floor cleaning pass over the
# original audio. Already executed once; the cleaned files are on disk.
'''
# ORIGINAL DATA DISTRIBUTION ANALYSIS SECTION
print("\n\n--------------------Started original data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
pc.emotion_distribution_bar_plot(df=data_info_shemo_df_m, title="{database} - {gender} Isolation - No. of Files".format(database=confv.database_shemo, gender=confv.gender_male))
pc.emotion_distribution_pie_plot(df=data_info_shemo_df_m, database=confv.database_shemo, status=confv.original, gender=confv.gender_male, title="{database} - {gender} Isolation - Class/Data/Time Distribution".format(database=confv.database_shemo, gender=confv.gender_male))
print("--------------------Finished original data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# ORIGINAL DATA VISUAL ANALYSIS (signal, fft, fbank, mfcc) SECTION
print("\n\n--------------------Started original data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
da.visual_analysis(df=data_info_shemo_df_m, database=confv.database_shemo, status=confv.original, gender=confv.gender_male, envelope=False, resample=False)
da.visual_analysis(df=data_info_shemo_df_m, database=confv.database_shemo, status=confv.original, gender=confv.gender_male, envelope=True, resample=True)
print("--------------------Finished original data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# DATA CLEANING - DOWN SAMPLING AND NOISE FLOOR DETECTION
print("\n\n--------------------Started data cleaning for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
dc.data_cleaning(df=data_info_shemo_df_m, database=confv.database_shemo)
print("--------------------Finished data cleaning for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
'''
# DATA MINIMUM AUDIO LENGTH COMPLIANCE CHECK
# Drop (or adjust for) clips that became shorter than the minimum usable
# length after cleaning; the returned dataframe replaces the loaded one.
print("\n\n--------------------Started data minimum audio compliance check for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
data_info_shemo_df_m = dc.check_and_adjust_df_for_minimum_audio_length_after_cleaning(df=data_info_shemo_df_m, database=confv.database_shemo, gender=confv.gender_male)
print("--------------------Finished data minimum audio compliance check for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# Disabled one-off stage (inert triple-quoted string): distribution and visual
# analysis repeated on the CLEANED audio for comparison with the originals.
'''
# CLEANED DATA DISTRIBUTION ANALYSIS SECTION
print("\n\n--------------------Started cleaned data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
pc.emotion_distribution_bar_plot(df=data_info_shemo_df_m, title="{database} - {gender} Isolation - No. of Files".format(database=confv.database_shemo, gender=confv.gender_male))
pc.emotion_distribution_pie_plot(df=data_info_shemo_df_m, database=confv.database_shemo, status=confv.clean, gender=confv.gender_male, title="{database} - {gender} Isolation - Class/Data/Time Distribution".format(database=confv.database_shemo, gender=confv.gender_male))
print("--------------------Finished cleaned data distribution analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# CLEANED DATA VISUAL ANALYSIS (signal, fft, fbank, mfcc) SECTION
print("\n\n--------------------Started cleaned data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
da.visual_analysis(df=data_info_shemo_df_m, database=confv.database_shemo, status=confv.clean, gender=confv.gender_male, envelope=False, resample=False)
# This is same as,
# da.visual_analysis(df=data_info_shemo_df_m, database=confv.database_shemo, status=confv.original, gender=confv.gender_male, envelope=True, resample=True)
# Since these cleaned data are already equipped with envelope and resampling, setting them to False or True does not matter.
# (envelope and resample does not matter when its clean)
print("--------------------Finished cleaned data visual analysis for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
'''
# Building Features
# Derive the class list from the dataframe, assemble the model configuration
# (file names / save paths for features, logs and model artefacts), then build
# the randomized feature matrix X and one-hot label matrix y for training.
print("\n\n--------------------Started building features for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# Unique emotion labels present in the male subset define the class set.
classes = list(np.unique(data_info_shemo_df_m.stress_emotion))
mconf_shemo_m = confc.ModelConfig(database=confv.database_shemo, gender=confv.gender_male, mode=confv.ml_mode_convolutional, classes=classes)
# Echo every ModelConfig attribute so the run log records the exact setup.
print(mconf_shemo_m.database)
print(mconf_shemo_m.gender)
print(mconf_shemo_m.mode)
print(mconf_shemo_m.nfilt)
print(mconf_shemo_m.nfeat)
print(mconf_shemo_m.nfft)
print(mconf_shemo_m.step)
print(mconf_shemo_m.classes)
print(mconf_shemo_m.features_save_name)
print(mconf_shemo_m.model_config_save_name)
print(mconf_shemo_m.training_log_name)
print(mconf_shemo_m.model_save_name)
print(mconf_shemo_m.model_h5_save_name)
print(mconf_shemo_m.model_tflite_save_name)
print(mconf_shemo_m.feature_path)
print(mconf_shemo_m.model_config_path)
print(mconf_shemo_m.training_log_path)
print(mconf_shemo_m.model_path)
print(mconf_shemo_m.model_h5_path)
print(mconf_shemo_m.model_tflite_path)
rfpconf_shemo_m = confc.RandFeatParams(df=data_info_shemo_df_m, database=confv.database_shemo, gender=confv.gender_male)
X, y = bf.build_random_features(modelconfig=mconf_shemo_m, randfeatparams=rfpconf_shemo_m)
print("--------------------Finished building features for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# MODEL & TRAINING
# Train the convolutional model on the feature matrix, balancing classes and
# logging to both TensorBoard and a CSV file.
print("\n\n--------------------Started model training for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# Conv2D expects (rows, cols, channels); features are single-channel.
input_shape = (X.shape[1], X.shape[2], 1)
model = mdl.get_shemo_male_model(input_shape)
# Collapse one-hot labels back to class indices to compute class weights.
y_flat = np.argmax(y, axis=1)
# FIX: compute_class_weight must be called with keyword arguments; the old
# positional form compute_class_weight('balanced', classes, y) was deprecated
# in scikit-learn 0.24 and removed in 1.0 (raises TypeError).
class_weight = compute_class_weight(class_weight='balanced', classes=np.unique(y_flat), y=y_flat)
# FIX: map weights over every class instead of the hard-coded range(2), so
# this keeps working if the number of classes ever changes.
class_weight = {i: class_weight[i] for i in range(len(class_weight))}
NAME = "{database}-{gender}-{modeltype}-{spec}-{time}".format(database=confv.database_shemo, gender=confv.gender_male, modeltype=confv.ml_mode_convolutional, spec="1st", time=int(time.time()))
mdl_logs_pth = os.path.join(confv.base_store, confv.log_dir)
# FIX: build the per-run log directory with os.path.join instead of a
# hard-coded Windows '\\' separator, so the path is portable across OSes.
tensorboard = TensorBoard(log_dir=os.path.join(mdl_logs_pth, NAME))
dfc.check_dir_inside_saved_features_and_modelconfigs_and_models(parent=confv.saved_training_metrics_logs, database=confv.database_shemo, gender=confv.gender_male)
csv_logger = CSVLogger(mconf_shemo_m.training_log_path)
model.fit(X, y, epochs=35, batch_size=128, shuffle=True, class_weight=class_weight, validation_split=0.2, callbacks=[tensorboard, csv_logger])
# Can improve... more epochs
print("--------------------Finished model training for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# MODEL SAVING
# Persist the trained model in three formats: TF SavedModel, HDF5, and TFLite.
print("\n\n--------------------Started model saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
# Make sure the target directory structure exists before writing artefacts.
dfc.check_dir_inside_saved_features_and_modelconfigs_and_models(parent=confv.saved_models, database=confv.database_shemo, gender=confv.gender_male)
model.save(mconf_shemo_m.model_path)
model.save(mconf_shemo_m.model_h5_path)
# Convert the model & save in tflite
converter = tf.lite.TFLiteConverter.from_saved_model(mconf_shemo_m.model_path)
tflite_model = converter.convert()
with open(mconf_shemo_m.model_tflite_path, 'wb') as outfile:
    outfile.write(tflite_model)
print("--------------------Finished model saving for adjusted and {gender} isolated dataset: {name}--------------------".format(gender=confv.gender_male, name=confv.dataset_shemo_male))
| [
"backbone.support.data_cleaning.check_and_adjust_df_for_minimum_audio_length_after_cleaning",
"backbone.support.models.get_shemo_male_model",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"numpy.unique",
"backbone.support.configuration_classes.RandFeatParams",
"os.path.join",
"numpy.argmax",
"ti... | [((3598, 3670), 'backbone.support.configuration_classes.DataFrame', 'confc.DataFrame', ([], {'database': 'confv.database_shemo', 'gender': 'confv.gender_male'}), '(database=confv.database_shemo, gender=confv.gender_male)\n', (3613, 3670), True, 'import backbone.support.configuration_classes as confc\n'), ((3688, 3721), 'backbone.support.saving_loading.load_dataframe', 'sl.load_dataframe', (['shemo_m_df_obj'], {}), '(shemo_m_df_obj)\n', (3705, 3721), True, 'import backbone.support.saving_loading as sl\n'), ((6686, 6840), 'backbone.support.data_cleaning.check_and_adjust_df_for_minimum_audio_length_after_cleaning', 'dc.check_and_adjust_df_for_minimum_audio_length_after_cleaning', ([], {'df': 'data_info_shemo_df_m', 'database': 'confv.database_shemo', 'gender': 'confv.gender_male'}), '(df=\n data_info_shemo_df_m, database=confv.database_shemo, gender=confv.\n gender_male)\n', (6748, 6840), True, 'import backbone.support.data_cleaning as dc\n'), ((9241, 9370), 'backbone.support.configuration_classes.ModelConfig', 'confc.ModelConfig', ([], {'database': 'confv.database_shemo', 'gender': 'confv.gender_male', 'mode': 'confv.ml_mode_convolutional', 'classes': 'classes'}), '(database=confv.database_shemo, gender=confv.gender_male,\n mode=confv.ml_mode_convolutional, classes=classes)\n', (9258, 9370), True, 'import backbone.support.configuration_classes as confc\n'), ((10066, 10172), 'backbone.support.configuration_classes.RandFeatParams', 'confc.RandFeatParams', ([], {'df': 'data_info_shemo_df_m', 'database': 'confv.database_shemo', 'gender': 'confv.gender_male'}), '(df=data_info_shemo_df_m, database=confv.database_shemo,\n gender=confv.gender_male)\n', (10086, 10172), True, 'import backbone.support.configuration_classes as confc\n'), ((10176, 10264), 'backbone.support.build_features.build_random_features', 'bf.build_random_features', ([], {'modelconfig': 'mconf_shemo_m', 'randfeatparams': 'rfpconf_shemo_m'}), '(modelconfig=mconf_shemo_m, randfeatparams=\n 
rfpconf_shemo_m)\n', (10200, 10264), True, 'import backbone.support.build_features as bf\n'), ((10713, 10750), 'backbone.support.models.get_shemo_male_model', 'mdl.get_shemo_male_model', (['input_shape'], {}), '(input_shape)\n', (10737, 10750), True, 'import backbone.support.models as mdl\n'), ((10761, 10781), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (10770, 10781), True, 'import numpy as np\n'), ((11121, 11166), 'os.path.join', 'os.path.join', (['confv.base_store', 'confv.log_dir'], {}), '(confv.base_store, confv.log_dir)\n', (11133, 11166), False, 'import os\n'), ((11238, 11409), 'backbone.support.directory_file_checking.check_dir_inside_saved_features_and_modelconfigs_and_models', 'dfc.check_dir_inside_saved_features_and_modelconfigs_and_models', ([], {'parent': 'confv.saved_training_metrics_logs', 'database': 'confv.database_shemo', 'gender': 'confv.gender_male'}), '(parent=\n confv.saved_training_metrics_logs, database=confv.database_shemo,\n gender=confv.gender_male)\n', (11301, 11409), True, 'import backbone.support.directory_file_checking as dfc\n'), ((11414, 11456), 'tensorflow.python.keras.callbacks.CSVLogger', 'CSVLogger', (['mconf_shemo_m.training_log_path'], {}), '(mconf_shemo_m.training_log_path)\n', (11423, 11456), False, 'from tensorflow.python.keras.callbacks import CSVLogger\n'), ((12023, 12180), 'backbone.support.directory_file_checking.check_dir_inside_saved_features_and_modelconfigs_and_models', 'dfc.check_dir_inside_saved_features_and_modelconfigs_and_models', ([], {'parent': 'confv.saved_models', 'database': 'confv.database_shemo', 'gender': 'confv.gender_male'}), '(parent=\n confv.saved_models, database=confv.database_shemo, gender=confv.gender_male\n )\n', (12086, 12180), True, 'import backbone.support.directory_file_checking as dfc\n'), ((12298, 12364), 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['mconf_shemo_m.model_path'], {}), 
'(mconf_shemo_m.model_path)\n', (12338, 12364), True, 'import tensorflow as tf\n'), ((9177, 9223), 'numpy.unique', 'np.unique', (['data_info_shemo_df_m.stress_emotion'], {}), '(data_info_shemo_df_m.stress_emotion)\n', (9186, 9223), True, 'import numpy as np\n'), ((10830, 10847), 'numpy.unique', 'np.unique', (['y_flat'], {}), '(y_flat)\n', (10839, 10847), True, 'import numpy as np\n'), ((11092, 11103), 'time.time', 'time.time', ([], {}), '()\n', (11101, 11103), False, 'import time\n')] |
import numpy as np
import pytest
from sklearn_extra.robust import (
RobustWeightedClassifier,
RobustWeightedRegressor,
RobustWeightedKMeans,
)
from sklearn.datasets import make_blobs
from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
from sklearn.metrics import r2_score
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_almost_equal,
)
# scikit-learn 1.0 renamed "squared_loss" to "squared_error"; pick the
# spelling that matches the installed version.
import sklearn

SQ_LOSS = "squared_loss" if sklearn.__version__.startswith("0") else "squared_error"
k_values = [None, 10]  # values of k for test robust
c_values = [None, 1e-3]  # values of c for test robust
# Classification test with outliers
# Two Gaussian blobs with a fixed seed, then the first three samples are
# replaced by far-away points (around [10, 5]) with label 0 so they act as
# outliers the robust estimators must resist.
rng = np.random.RandomState(42)
X_cc, y_cc = make_blobs(
    n_samples=100,
    centers=np.array([[-1, -1], [1, 1]]),
    random_state=rng,
)
for f in range(3):
    X_cc[f] = [10, 5] + rng.normal(size=2) * 0.1
    y_cc[f] = 0
# Parameter grids shared by the parametrized tests below.
classif_losses = ["log", "hinge"]
weightings = ["huber", "mom"]
multi_class = ["ovr", "ovo"]
def test_robust_estimator_max_iter():
    """Check that a UserWarning is emitted when the iteration budget runs out."""
    estimator = RobustWeightedClassifier(max_iter=1)
    expected_msg = "Maximum number of iteration reached before"
    with pytest.warns(UserWarning, match=expected_msg):
        estimator.fit(X_cc, y_cc)
def test_robust_estimator_unsupported_loss():
    """Check that fitting with an unknown loss name raises a ValueError."""
    estimator = RobustWeightedClassifier(loss="invalid")
    expected_msg = "The loss invalid is not supported. "
    with pytest.raises(ValueError, match=expected_msg):
        estimator.fit(X_cc, y_cc)
def test_robust_estimator_unsupported_weighting():
    """Check that fitting with an unknown weighting scheme raises a ValueError."""
    estimator = RobustWeightedClassifier(weighting="invalid")
    expected_msg = "No such weighting scheme"
    with pytest.raises(ValueError, match=expected_msg):
        estimator.fit(X_cc, y_cc)
def test_robust_estimator_unsupported_multiclass():
    """Test that a ValueError is raised for an unsupported multi_class scheme."""
    model = RobustWeightedClassifier(multi_class="invalid")
    msg = "No such multiclass method implemented."
    with pytest.raises(ValueError, match=msg):
        model.fit(X_cc, y_cc)
def test_robust_estimator_input_validation_and_fit_check():
    """Each invalid hyper-parameter must raise a ValueError with its own message."""
    # Invalid parameters
    msg = "max_iter must be > 0, got 0."
    with pytest.raises(ValueError, match=msg):
        RobustWeightedKMeans(max_iter=0).fit(X_cc)
    msg = "c must be > 0, got 0."
    with pytest.raises(ValueError, match=msg):
        RobustWeightedKMeans(c=0).fit(X_cc)
    msg = "burn_in must be >= 0, got -1."
    with pytest.raises(ValueError, match=msg):
        RobustWeightedClassifier(burn_in=-1).fit(X_cc, y_cc)
    # eta0 is only validated when a burn-in phase is requested.
    msg = "eta0 must be > 0, got 0."
    with pytest.raises(ValueError, match=msg):
        RobustWeightedClassifier(burn_in=1, eta0=0).fit(X_cc, y_cc)
    msg = "k must be integer >= 0, and smaller than floor"
    with pytest.raises(ValueError, match=msg):
        RobustWeightedKMeans(k=-1).fit(X_cc)
@pytest.mark.parametrize("loss", classif_losses)
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("k", k_values)
@pytest.mark.parametrize("c", c_values)
@pytest.mark.parametrize("multi_class", multi_class)
def test_corrupted_classif(loss, weighting, k, c, multi_class):
    """The robust classifier should still score well despite planted outliers."""
    params = dict(
        loss=loss,
        max_iter=100,
        weighting=weighting,
        k=k,
        c=c,
        multi_class=multi_class,
        random_state=rng,
    )
    clf = RobustWeightedClassifier(**params)
    clf.fit(X_cc, y_cc)
    assert clf.score(X_cc, y_cc) > 0.8
# Classification test without outliers: three well-separated blobs, no
# corruption, so y has three classes (used to exercise ovr/ovo and the
# binary-rejection path).
rng = np.random.RandomState(42)
X_c, y_c = make_blobs(
    n_samples=100,
    centers=np.array([[-1, -1], [1, 1], [3, -1]]),
    random_state=rng,
)
# check binary throw an error
def test_robust_estimator_binary_y_error():
    """multi_class="binary" must raise a ValueError on a non-binary target.

    Renamed from ``test_robust_estimator_unsupported_loss``: that name is
    already used by an earlier test in this module, so this definition
    shadowed it and pytest silently skipped the first test (flake8 F811).
    """
    model = RobustWeightedClassifier(multi_class="binary")
    # X_c / y_c have three classes, so the binary strategy must refuse them.
    msg = "y must be binary."
    with pytest.raises(ValueError, match=msg):
        model.fit(X_c, y_c)
# Check that the fit is close to SGD when in extremal parameter cases
@pytest.mark.parametrize("loss", classif_losses)
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("multi_class", multi_class)
def test_not_robust_classif(loss, weighting, multi_class):
    """With k=0 and a huge c the robust classifier should mimic plain SGD."""
    robust = RobustWeightedClassifier(
        loss=loss,
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        burn_in=0,
        multi_class=multi_class,
        random_state=rng,
    )
    baseline = SGDClassifier(loss=loss, random_state=rng)
    robust.fit(X_c, y_c)
    baseline.fit(X_c, y_c)
    robust_pred = robust.predict(X_c)
    base_pred = baseline.predict(X_c)
    # The two decision rules should agree in sign on most samples.
    agreement = np.mean((robust_pred > 0) == (base_pred > 0))
    assert agreement > 0.8
    # score() must be consistent with predict().
    assert robust.score(X_c, y_c) == np.mean(robust_pred == y_c)
# Make binary uncorrupted dataset (two blobs, two classes) for the
# binary-strategy comparisons against SGDClassifier.
X_cb, y_cb = make_blobs(
    n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng
)
@pytest.mark.parametrize("weighting", weightings)
def test_classif_binary(weighting):
    """In the non-robust limit, the binary estimator matches SGD up to scale."""
    robust = RobustWeightedClassifier(
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        burn_in=0,
        multi_class="binary",
        random_state=rng,
    )
    baseline = SGDClassifier(loss="log", random_state=rng)
    robust.fit(X_cb, y_cb)
    baseline.fit(X_cb, y_cb)

    def _normalized(coef, intercept):
        # Scale (coef, intercept) jointly so the two models are comparable.
        scale = np.linalg.norm(np.hstack([coef.ravel(), intercept]))
        return coef / scale, intercept / scale

    coef_r, icept_r = _normalized(robust.coef_, robust.intercept_)
    coef_b, icept_b = _normalized(baseline.coef_, baseline.intercept_)
    assert np.linalg.norm(coef_r - coef_b) < 0.5
    assert np.linalg.norm(icept_r - icept_b) < 0.5
    # One learned sample weight per training point.
    assert len(robust.weights_) == len(X_cb)
# Check that weights_ parameter can be used as outlier score.
@pytest.mark.parametrize("weighting", weightings)
def test_classif_corrupted_weights(weighting):
    """The learned sample weights should down-weight the planted outliers."""
    clf = RobustWeightedClassifier(
        max_iter=100,
        weighting=weighting,
        k=5,
        c=1,
        burn_in=0,
        multi_class="binary",
        random_state=rng,
    )
    clf.fit(X_cc, y_cc)
    # The first three samples of X_cc are the planted outliers.
    outlier_weight = np.mean(clf.weights_[:3])
    inlier_weight = np.mean(clf.weights_[3:])
    assert outlier_weight < inlier_weight
# Case "log" loss, test predict_proba
@pytest.mark.parametrize("weighting", weightings)
def test_predict_proba(weighting):
    """In the non-robust limit, class-1 probabilities should agree with SGD."""
    robust = RobustWeightedClassifier(
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        burn_in=0,
        random_state=rng,
    )
    baseline = SGDClassifier(loss="log", random_state=rng)
    robust.fit(X_c, y_c)
    baseline.fit(X_c, y_c)
    proba_robust = robust.base_estimator_.predict_proba(X_c)[:, 1]
    proba_base = baseline.predict_proba(X_c)[:, 1]
    # Both models should place the same samples above/below probability 1/2.
    same_side = (proba_robust > 1 / 2) == (proba_base > 1 / 2)
    assert np.mean(same_side) > 0.8
# check that classifier with another loss than log raises an error
def test_robust_no_proba():
    """predict_proba must raise for a loss without probability estimates."""
    expected = "Probability estimates are not available for loss='hinge'"
    clf = RobustWeightedClassifier(loss="hinge").fit(X_c, y_c)
    with pytest.raises(AttributeError, match=expected):
        clf.predict_proba(X_c)
# Regression test with outliers: the true model is y = x plus small Gaussian
# noise; sample 0 is then corrupted in both x (x=10) and y (y=-1).
X_rc = rng.uniform(-1, 1, size=[200])
y_rc = X_rc + 0.1 * rng.normal(size=200)
X_rc[0] = 10
X_rc = X_rc.reshape(-1, 1)
y_rc[0] = -1
regression_losses = [SQ_LOSS, "huber"]
@pytest.mark.parametrize("loss", regression_losses)
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("k", k_values)
@pytest.mark.parametrize("c", c_values)
def test_corrupted_regression(loss, weighting, k, c):
    """The robust regressor should recover the true line despite one outlier."""
    reg = RobustWeightedRegressor(
        loss=loss,
        max_iter=50,
        weighting=weighting,
        k=k,
        c=c,
        random_state=rng,
        n_iter_no_change=20,
    ).fit(X_rc, y_rc)
    # The clean model is y = x, so slope ~ 1 and intercept ~ 0.
    assert np.abs(reg.coef_[0] - 1) < 0.1
    assert np.abs(reg.intercept_[0]) < 0.1
# Check that weights_ parameter can be used as outlier score.
@pytest.mark.parametrize("weighting", weightings)
def test_regression_corrupted_weights(weighting):
    """The corrupted sample (index 0) should receive the smallest weight."""
    reg = RobustWeightedRegressor(
        max_iter=100,
        weighting=weighting,
        k=5,
        c=1,
        burn_in=0,
        random_state=rng,
    ).fit(X_rc, y_rc)
    assert reg.weights_[0] < np.mean(reg.weights_[1:])
# Clean regression data: y = x plus small Gaussian noise, no outliers.
X_r = rng.uniform(-1, 1, size=[1000])
y_r = X_r + 0.1 * rng.normal(size=1000)
X_r = X_r.reshape(-1, 1)
# Check that the fit is close to SGD when in extremal parameter cases
@pytest.mark.parametrize("loss", regression_losses)
@pytest.mark.parametrize("weighting", weightings)
def test_not_robust_regression(loss, weighting):
    """With k=0 and a huge c the robust regressor should track plain SGD."""
    robust = RobustWeightedRegressor(
        loss=loss,
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        burn_in=0,
        random_state=rng,
    )
    baseline = SGDRegressor(loss=loss, random_state=rng)
    robust.fit(X_r, y_r)
    baseline.fit(X_r, y_r)
    pred_robust = robust.predict(X_r)
    pred_base = baseline.predict(X_r)
    # Per-sample prediction gaps between the two models should be small.
    gaps = [np.linalg.norm(a - b) for a, b in zip(pred_robust, pred_base)]
    assert np.mean(gaps) < 1
    # score() must be the R^2 of the predictions.
    assert_almost_equal(robust.score(X_r, y_r), r2_score(y_r, pred_robust))
# Compare with HuberRegressor on dataset corrupted in y only: the true model
# is y = x plus noise, with a single corrupted target y[0] = -1.
X_rcy = rng.uniform(-1, 1, size=[200])
y_rcy = X_rcy + 0.1 * rng.normal(size=200)
X_rcy = X_rcy.reshape(-1, 1)
y_rcy[0] = -1
def test_vs_huber():
    """Huber-weighted robust regression should agree with HuberRegressor."""
    robust = RobustWeightedRegressor(
        max_iter=100,
        weighting="huber",
        k=5,
        c=1,
        burn_in=0,
        sgd_args={"learning_rate": "adaptive"},  # test sgd_args
        random_state=rng,
    )
    reference = HuberRegressor()
    robust.fit(X_rcy, y_rcy)
    reference.fit(X_rcy, y_rcy)
    # Both estimators should find essentially the same slope.
    assert np.abs(robust.coef_[0] - reference.coef_[0]) < 1e-2
# Clustering test with outliers: two blobs, with the first three samples moved
# far away to act as planted outliers, then shuffled.
rng = np.random.RandomState(42)
X_clusterc, y_clusterc = make_blobs(
    n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng
)
for f in range(3):
    X_clusterc[f] = [20, 5] + rng.normal(size=2) * 0.1
    y_clusterc[f] = 0
X_cluster, y_cluster = shuffle(X_clusterc, y_clusterc, random_state=rng)
# NOTE(review): this rebinds ``weightings`` to the same value it already has
# (defined earlier in the module) — presumably a leftover, harmless.
weightings = ["huber", "mom"]
@pytest.mark.parametrize("weighting", weightings)
@pytest.mark.parametrize("k", k_values)
@pytest.mark.parametrize("c", c_values)
def test_corrupted_cluster(weighting, k, c):
    """Robust KMeans should stay accurate on clustering data with outliers."""
    # NOTE(review): the parametrized ``k`` and ``c`` arguments are NOT passed
    # to the estimator below — k=5 and c=None are hard-coded, so every (k, c)
    # combination runs the identical configuration. Confirm whether the
    # hard-coding is intentional before changing it.
    km = RobustWeightedKMeans(
        n_clusters=2,
        max_iter=50,
        weighting=weighting,
        k=5,
        c=None,
        random_state=rng,
    )
    km.fit(X_clusterc)
    # Mean squared label error; a loose bound since cluster ids may permute.
    error = np.mean((km.predict(X_clusterc) - y_clusterc) ** 2)
    assert error < 100
# Clustering test without outliers. Note this rebinds X_cluster / y_cluster,
# replacing the shuffled corrupted data defined above.
rng = np.random.RandomState(42)
X_cluster, y_cluster = make_blobs(
    n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng
)
# Check that the fit is close to KMeans when in extremal parameter cases
@pytest.mark.parametrize("weighting", weightings)
def test_not_robust_cluster(weighting):
    """With k=0 and a huge c the robust KMeans should mimic plain KMeans."""
    robust = RobustWeightedKMeans(
        n_clusters=2,
        max_iter=100,
        weighting=weighting,
        k=0,
        c=1e7,
        random_state=rng,
    )
    baseline = KMeans(2, random_state=rng)
    robust.fit(X_cluster)
    baseline.fit(X_cluster)
    # Compare the assigned center of each sample under the two models.
    centers_robust = [
        robust.cluster_centers_[i] for i in robust.predict(X_cluster)
    ]
    centers_base = [
        baseline.cluster_centers_[i] for i in baseline.predict(X_cluster)
    ]
    gaps = [np.linalg.norm(a - b) for a, b in zip(centers_robust, centers_base)]
    assert np.mean(gaps) < 1
def test_transform():
    """transform() gives distances: zero to a center's own cluster, positive otherwise."""
    n_clusters = 2
    km = RobustWeightedKMeans(n_clusters=n_clusters, random_state=rng)
    km.fit(X_cluster)
    distances = km.transform(km.cluster_centers_)
    for i in range(n_clusters):
        assert distances[i, i] == 0
        for j in range(n_clusters):
            if j != i:
                assert distances[i, j] > 0
def test_fit_transform():
    """fit_transform must equal fit followed by transform."""
    est_a = RobustWeightedKMeans(n_clusters=2, random_state=42)
    X1 = est_a.fit(X_cluster).transform(X_cluster)
    est_b = RobustWeightedKMeans(n_clusters=2, random_state=42)
    X2 = est_b.fit_transform(X_cluster)
    assert_array_almost_equal(X1, X2)
| [
"sklearn.cluster.KMeans",
"sklearn_extra.robust.RobustWeightedClassifier",
"sklearn.linear_model.SGDClassifier",
"numpy.mean",
"numpy.abs",
"sklearn_extra.robust.RobustWeightedRegressor",
"sklearn.linear_model.SGDRegressor",
"sklearn.utils.shuffle",
"pytest.warns",
"sklearn.utils._testing.assert_a... | [((822, 847), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (843, 847), True, 'import numpy as np\n'), ((3150, 3197), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loss"""', 'classif_losses'], {}), "('loss', classif_losses)\n", (3173, 3197), False, 'import pytest\n'), ((3199, 3247), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (3222, 3247), False, 'import pytest\n'), ((3249, 3287), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""k"""', 'k_values'], {}), "('k', k_values)\n", (3272, 3287), False, 'import pytest\n'), ((3289, 3327), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""c"""', 'c_values'], {}), "('c', c_values)\n", (3312, 3327), False, 'import pytest\n'), ((3329, 3380), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multi_class"""', 'multi_class'], {}), "('multi_class', multi_class)\n", (3352, 3380), False, 'import pytest\n'), ((3770, 3795), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (3791, 3795), True, 'import numpy as np\n'), ((4227, 4274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loss"""', 'classif_losses'], {}), "('loss', classif_losses)\n", (4250, 4274), False, 'import pytest\n'), ((4276, 4324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (4299, 4324), False, 'import pytest\n'), ((4326, 4377), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multi_class"""', 'multi_class'], {}), "('multi_class', multi_class)\n", (4349, 4377), False, 'import pytest\n'), ((5084, 5132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (5107, 5132), False, 'import pytest\n'), ((6063, 6111), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (6086, 6111), False, 'import pytest\n'), ((6483, 6531), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (6506, 6531), False, 'import pytest\n'), ((7535, 7585), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loss"""', 'regression_losses'], {}), "('loss', regression_losses)\n", (7558, 7585), False, 'import pytest\n'), ((7587, 7635), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (7610, 7635), False, 'import pytest\n'), ((7637, 7675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""k"""', 'k_values'], {}), "('k', k_values)\n", (7660, 7675), False, 'import pytest\n'), ((7677, 7715), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""c"""', 'c_values'], {}), "('c', c_values)\n", (7700, 7715), False, 'import pytest\n'), ((8135, 8183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (8158, 8183), False, 'import pytest\n'), ((8653, 8703), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loss"""', 'regression_losses'], {}), "('loss', regression_losses)\n", (8676, 8703), False, 'import pytest\n'), ((8705, 8753), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (8728, 8753), False, 'import pytest\n'), ((9987, 10012), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (10008, 10012), True, 'import numpy as np\n'), ((10245, 10294), 'sklearn.utils.shuffle', 'shuffle', (['X_clusterc', 'y_clusterc'], {'random_state': 'rng'}), '(X_clusterc, y_clusterc, random_state=rng)\n', (10252, 10294), False, 'from sklearn.utils import shuffle\n'), ((10329, 10377), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 
'weightings'], {}), "('weighting', weightings)\n", (10352, 10377), False, 'import pytest\n'), ((10379, 10417), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""k"""', 'k_values'], {}), "('k', k_values)\n", (10402, 10417), False, 'import pytest\n'), ((10419, 10457), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""c"""', 'c_values'], {}), "('c', c_values)\n", (10442, 10457), False, 'import pytest\n'), ((10820, 10845), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (10841, 10845), True, 'import numpy as np\n'), ((11032, 11080), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weighting"""', 'weightings'], {}), "('weighting', weightings)\n", (11055, 11080), False, 'import pytest\n'), ((1260, 1296), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (1284, 1296), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((1558, 1598), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'loss': '"""invalid"""'}), "(loss='invalid')\n", (1582, 1598), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((1863, 1908), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'weighting': '"""invalid"""'}), "(weighting='invalid')\n", (1887, 1908), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((2163, 2210), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'multi_class': '"""invalid"""'}), "(multi_class='invalid')\n", (2187, 2210), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((3455, 3582), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'loss': 'loss', 
'max_iter': '(100)', 'weighting': 'weighting', 'k': 'k', 'c': 'c', 'multi_class': 'multi_class', 'random_state': 'rng'}), '(loss=loss, max_iter=100, weighting=weighting, k=k,\n c=c, multi_class=multi_class, random_state=rng)\n', (3479, 3582), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((4002, 4048), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'multi_class': '"""binary"""'}), "(multi_class='binary')\n", (4026, 4048), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((4447, 4594), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'loss': 'loss', 'max_iter': '(100)', 'weighting': 'weighting', 'k': '(0)', 'c': '(10000000.0)', 'burn_in': '(0)', 'multi_class': 'multi_class', 'random_state': 'rng'}), '(loss=loss, max_iter=100, weighting=weighting, k=0,\n c=10000000.0, burn_in=0, multi_class=multi_class, random_state=rng)\n', (4471, 4594), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((4673, 4715), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': 'loss', 'random_state': 'rng'}), '(loss=loss, random_state=rng)\n', (4686, 4715), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor\n'), ((5179, 5313), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'max_iter': '(100)', 'weighting': 'weighting', 'k': '(0)', 'c': '(10000000.0)', 'burn_in': '(0)', 'multi_class': '"""binary"""', 'random_state': 'rng'}), "(max_iter=100, weighting=weighting, k=0, c=\n 10000000.0, burn_in=0, multi_class='binary', random_state=rng)\n", (5203, 5313), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((5383, 5426), 'sklearn.linear_model.SGDClassifier', 
'SGDClassifier', ([], {'loss': '"""log"""', 'random_state': 'rng'}), "(loss='log', random_state=rng)\n", (5396, 5426), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor\n'), ((6169, 6293), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'max_iter': '(100)', 'weighting': 'weighting', 'k': '(5)', 'c': '(1)', 'burn_in': '(0)', 'multi_class': '"""binary"""', 'random_state': 'rng'}), "(max_iter=100, weighting=weighting, k=5, c=1,\n burn_in=0, multi_class='binary', random_state=rng)\n", (6193, 6293), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((6577, 6689), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'max_iter': '(100)', 'weighting': 'weighting', 'k': '(0)', 'c': '(10000000.0)', 'burn_in': '(0)', 'random_state': 'rng'}), '(max_iter=100, weighting=weighting, k=0, c=\n 10000000.0, burn_in=0, random_state=rng)\n', (6601, 6689), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((6751, 6794), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'random_state': 'rng'}), "(loss='log', random_state=rng)\n", (6764, 6794), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor\n'), ((7780, 7902), 'sklearn_extra.robust.RobustWeightedRegressor', 'RobustWeightedRegressor', ([], {'loss': 'loss', 'max_iter': '(50)', 'weighting': 'weighting', 'k': 'k', 'c': 'c', 'random_state': 'rng', 'n_iter_no_change': '(20)'}), '(loss=loss, max_iter=50, weighting=weighting, k=k, c\n =c, random_state=rng, n_iter_no_change=20)\n', (7803, 7902), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((8244, 8345), 'sklearn_extra.robust.RobustWeightedRegressor', 'RobustWeightedRegressor', ([], {'max_iter': '(100)', 'weighting': 
'weighting', 'k': '(5)', 'c': '(1)', 'burn_in': '(0)', 'random_state': 'rng'}), '(max_iter=100, weighting=weighting, k=5, c=1,\n burn_in=0, random_state=rng)\n', (8267, 8345), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((8813, 8934), 'sklearn_extra.robust.RobustWeightedRegressor', 'RobustWeightedRegressor', ([], {'loss': 'loss', 'max_iter': '(100)', 'weighting': 'weighting', 'k': '(0)', 'c': '(10000000.0)', 'burn_in': '(0)', 'random_state': 'rng'}), '(loss=loss, max_iter=100, weighting=weighting, k=0,\n c=10000000.0, burn_in=0, random_state=rng)\n', (8836, 8934), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((9005, 9046), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'loss': 'loss', 'random_state': 'rng'}), '(loss=loss, random_state=rng)\n', (9017, 9046), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor\n'), ((9591, 9731), 'sklearn_extra.robust.RobustWeightedRegressor', 'RobustWeightedRegressor', ([], {'max_iter': '(100)', 'weighting': '"""huber"""', 'k': '(5)', 'c': '(1)', 'burn_in': '(0)', 'sgd_args': "{'learning_rate': 'adaptive'}", 'random_state': 'rng'}), "(max_iter=100, weighting='huber', k=5, c=1, burn_in=\n 0, sgd_args={'learning_rate': 'adaptive'}, random_state=rng)\n", (9614, 9731), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((9818, 9834), 'sklearn.linear_model.HuberRegressor', 'HuberRegressor', ([], {}), '()\n', (9832, 9834), False, 'from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor\n'), ((10512, 10616), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'n_clusters': '(2)', 'max_iter': '(50)', 'weighting': 'weighting', 'k': '(5)', 'c': 'None', 'random_state': 'rng'}), '(n_clusters=2, max_iter=50, weighting=weighting, k=5, c\n =None, 
random_state=rng)\n', (10532, 10616), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((11131, 11241), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'n_clusters': '(2)', 'max_iter': '(100)', 'weighting': 'weighting', 'k': '(0)', 'c': '(10000000.0)', 'random_state': 'rng'}), '(n_clusters=2, max_iter=100, weighting=weighting, k=0,\n c=10000000.0, random_state=rng)\n', (11151, 11241), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((11304, 11331), 'sklearn.cluster.KMeans', 'KMeans', (['(2)'], {'random_state': 'rng'}), '(2, random_state=rng)\n', (11310, 11331), False, 'from sklearn.cluster import KMeans\n'), ((11739, 11800), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'n_clusters': 'n_clusters', 'random_state': 'rng'}), '(n_clusters=n_clusters, random_state=rng)\n', (11759, 11800), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((12298, 12331), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['X1', 'X2'], {}), '(X1, X2)\n', (12323, 12331), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal\n'), ((904, 932), 'numpy.array', 'np.array', (['[[-1, -1], [1, 1]]'], {}), '([[-1, -1], [1, 1]])\n', (912, 932), True, 'import numpy as np\n'), ((1361, 1397), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': 'msg'}), '(UserWarning, match=msg)\n', (1373, 1397), False, 'import pytest\n'), ((1656, 1692), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (1669, 1692), False, 'import pytest\n'), ((1955, 1991), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (1968, 1991), False, 'import pytest\n'), ((2271, 2307), 'pytest.raises', 
'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2284, 2307), False, 'import pytest\n'), ((2476, 2512), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2489, 2512), False, 'import pytest\n'), ((2609, 2645), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2622, 2645), False, 'import pytest\n'), ((2743, 2779), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2756, 2779), False, 'import pytest\n'), ((2889, 2925), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2902, 2925), False, 'import pytest\n'), ((3064, 3100), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (3077, 3100), False, 'import pytest\n'), ((3850, 3887), 'numpy.array', 'np.array', (['[[-1, -1], [1, 1], [3, -1]]'], {}), '([[-1, -1], [1, 1], [3, -1]])\n', (3858, 3887), True, 'import numpy as np\n'), ((4088, 4124), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (4101, 4124), False, 'import pytest\n'), ((4846, 4881), 'numpy.mean', 'np.mean', (['((pred1 > 0) == (pred2 > 0))'], {}), '((pred1 > 0) == (pred2 > 0))\n', (4853, 4881), True, 'import numpy as np\n'), ((4922, 4943), 'numpy.mean', 'np.mean', (['(pred1 == y_c)'], {}), '(pred1 == y_c)\n', (4929, 4943), True, 'import numpy as np\n'), ((5032, 5060), 'numpy.array', 'np.array', (['[[-1, -1], [1, 1]]'], {}), '([[-1, -1], [1, 1]])\n', (5040, 5060), True, 'import numpy as np\n'), ((5862, 5891), 'numpy.linalg.norm', 'np.linalg.norm', (['(coef1 - coef2)'], {}), '(coef1 - coef2)\n', (5876, 5891), True, 'import numpy as np\n'), ((5909, 5948), 'numpy.linalg.norm', 'np.linalg.norm', (['(intercept1 - intercept2)'], {}), '(intercept1 - intercept2)\n', (5923, 5948), True, 'import numpy as np\n'), ((6388, 6413), 'numpy.mean', 
'np.mean', (['clf.weights_[:3]'], {}), '(clf.weights_[:3])\n', (6395, 6413), True, 'import numpy as np\n'), ((6416, 6441), 'numpy.mean', 'np.mean', (['clf.weights_[3:]'], {}), '(clf.weights_[3:])\n', (6423, 6441), True, 'import numpy as np\n'), ((6965, 7008), 'numpy.mean', 'np.mean', (['((pred1 > 1 / 2) == (pred2 > 1 / 2))'], {}), '((pred1 > 1 / 2) == (pred2 > 1 / 2))\n', (6972, 7008), True, 'import numpy as np\n'), ((7253, 7293), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': 'msg'}), '(AttributeError, match=msg)\n', (7266, 7293), False, 'import pytest\n'), ((7996, 8020), 'numpy.abs', 'np.abs', (['(reg.coef_[0] - 1)'], {}), '(reg.coef_[0] - 1)\n', (8002, 8020), True, 'import numpy as np\n'), ((8038, 8063), 'numpy.abs', 'np.abs', (['reg.intercept_[0]'], {}), '(reg.intercept_[0])\n', (8044, 8063), True, 'import numpy as np\n'), ((8450, 8475), 'numpy.mean', 'np.mean', (['reg.weights_[1:]'], {}), '(reg.weights_[1:])\n', (8457, 8475), True, 'import numpy as np\n'), ((9192, 9227), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred1[i] - pred2[i])'], {}), '(pred1[i] - pred2[i])\n', (9206, 9227), True, 'import numpy as np\n'), ((9272, 9291), 'numpy.mean', 'np.mean', (['difference'], {}), '(difference)\n', (9279, 9291), True, 'import numpy as np\n'), ((9900, 9937), 'numpy.abs', 'np.abs', (['(reg1.coef_[0] - reg2.coef_[0])'], {}), '(reg1.coef_[0] - reg2.coef_[0])\n', (9906, 9937), True, 'import numpy as np\n'), ((10077, 10105), 'numpy.array', 'np.array', (['[[-1, -1], [1, 1]]'], {}), '([[-1, -1], [1, 1]])\n', (10085, 10105), True, 'import numpy as np\n'), ((10908, 10936), 'numpy.array', 'np.array', (['[[-1, -1], [1, 1]]'], {}), '([[-1, -1], [1, 1]])\n', (10916, 10936), True, 'import numpy as np\n'), ((11583, 11618), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred1[i] - pred2[i])'], {}), '(pred1[i] - pred2[i])\n', (11597, 11618), True, 'import numpy as np\n'), ((11663, 11682), 'numpy.mean', 'np.mean', (['difference'], {}), '(difference)\n', (11670, 11682), 
True, 'import numpy as np\n'), ((7122, 7160), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (7146, 7160), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((12203, 12254), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'n_clusters': '(2)', 'random_state': '(42)'}), '(n_clusters=2, random_state=42)\n', (12223, 12254), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((2522, 2554), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'max_iter': '(0)'}), '(max_iter=0)\n', (2542, 2554), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((2655, 2680), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'c': '(0)'}), '(c=0)\n', (2675, 2680), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((2789, 2825), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'burn_in': '(-1)'}), '(burn_in=-1)\n', (2813, 2825), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((2935, 2978), 'sklearn_extra.robust.RobustWeightedClassifier', 'RobustWeightedClassifier', ([], {'burn_in': '(1)', 'eta0': '(0)'}), '(burn_in=1, eta0=0)\n', (2959, 2978), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((3110, 3136), 'sklearn_extra.robust.RobustWeightedKMeans', 'RobustWeightedKMeans', ([], {'k': '(-1)'}), '(k=-1)\n', (3130, 3136), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n'), ((12082, 12133), 'sklearn_extra.robust.RobustWeightedKMeans', 
'RobustWeightedKMeans', ([], {'n_clusters': '(2)', 'random_state': '(42)'}), '(n_clusters=2, random_state=42)\n', (12102, 12133), False, 'from sklearn_extra.robust import RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans\n')] |
#%%
import os
import sys
import tarfile
import urllib
import urllib.request
import warnings

import joblib
import matplotlib as mpl
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
from numpy.lib.function_base import select
from pandas.plotting import scatter_matrix
from sklearn import impute
# Global notebook-style configuration: silence warnings and enlarge plot labels.
warnings.filterwarnings('ignore')
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = ".."
CHAPTER_ID = "end_to_end_project"
# NOTE(review): figures are written under the hidden ".images" directory here,
# while download_background() saves into "images" — confirm which is intended.
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, ".images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("../data", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
#%%
from scipy import stats
from scipy.stats import randint
from scipy.stats import geom, expon
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
# %%
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH as <fig_id>.<fig_extension>."""
    target = os.path.join(IMAGES_PATH, f"{fig_id}.{fig_extension}")
    print(f"Saving fig {fig_id}")
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    """Download housing.tgz from *housing_url* and unpack it into *housing_path*.

    Fixes over the original: the tar archive is opened with a context manager
    so the file handle is released even if extraction raises, and the target
    directory is created with ``exist_ok=True`` instead of a racy isdir check.
    """
    os.makedirs(housing_path, exist_ok=True)
    tgz_path = os.path.join(housing_path, "housing.tgz")
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Context manager guarantees the archive is closed on any exit path.
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)
def load_housing_data(housing_path=HOUSING_PATH):
    """Read housing.csv from *housing_path* and return it as a DataFrame."""
    return pd.read_csv(os.path.join(housing_path, "housing.csv"))
def download_background():
    """Fetch the California map image used as a plot background.

    Saves ``california.png`` under <PROJECT_ROOT_DIR>/images/end_to_end_project
    and returns True.
    """
    filename = "california.png"
    dest_dir = os.path.join(PROJECT_ROOT_DIR, "images", "end_to_end_project")
    os.makedirs(dest_dir, exist_ok=True)
    print("Downloading", filename)
    remote = ("https://raw.githubusercontent.com/ageron/handson-ml2/master/"
              + "images/end_to_end_project/" + filename)
    urllib.request.urlretrieve(remote, os.path.join(dest_dir, filename))
    return True
def plot_housing_price_distribution(housing):
    """Scatter-plot housing prices over a map of California.

    Marker size tracks district population and color tracks the median
    house value; the figure is saved via save_fig and then shown.
    """
    california_img = mpimg.imread(os.path.join(IMAGES_PATH, "california.png"))
    params = {
        's': housing['population'] / 100,
        'label': 'population',
        'figsize': (10,7),
        'c': "median_house_value",
        'cmap': plt.get_cmap('jet'),
        'colorbar': False,
        'sharex': False
    }
    ax = housing.plot(kind='scatter', x='longitude', y='latitude', alpha=.4, **params)
    # Draw the map image under the points, stretched to the lon/lat bounds.
    plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=.5, cmap=plt.get_cmap("jet"))
    plt.ylabel("Latitude", fontsize=14)
    plt.xlabel("Longitude", fontsize=14)
    prices = housing["median_house_value"]
    # Custom colorbar with ticks relabeled in thousands of dollars.
    tick_values = np.linspace(prices.min(), prices.max(), 11)
    cbar = plt.colorbar(ticks=tick_values/prices.max())
    cbar.ax.set_yticklabels(["$%dk"%(round(v/1000)) for v in tick_values], fontsize=14)
    cbar.set_label('Median House Value', fontsize=16)
    plt.legend(fontsize=16)
    save_fig("california_housing_prices_plot")
    plt.show()
    return
# %%
if __name__ == '__main__':
    # fetch_housing_data()
    load_housing_data()
    # EDA
    housing = load_housing_data()
    housing.info()
    housing.ocean_proximity.value_counts()
    housing.describe()
    housing.hist(bins=50, figsize=(20, 15))
    save_fig("attribute_histogram_plots")
    # Purely random hold-out split (a stratified split is built below).
    train_set, test_set = train_test_split(housing, test_size=.2, random_state=42)
#%%
# Data split
# Bucket median income into 5 categories so the split can stratify on it.
housing['income_cat'] = pd.cut(housing['median_income'],
                               bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
                               labels=[1, 2, 3, 4, 5])
housing['income_cat'].value_counts()
split = StratifiedShuffleSplit(n_splits=1, test_size=.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
        strat_train_set = housing.loc[train_index]
        strat_test_set = housing.loc[test_index]
strat_train_set['income_cat'].value_counts() / len(strat_train_set)
strat_test_set['income_cat'].value_counts() / len(strat_test_set)
# Drop the helper column now that the stratified split is done.
for set_ in (strat_train_set, strat_test_set):
    set_.drop("income_cat", axis=1, inplace=True)
# %%
# Discover and visualize the data to gain insights
housing = train_set.copy()
plot_housing_price_distribution(housing)
# %%
# correlation
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
save_fig("scatter_matrix_plot")
#%%
# Derived ratio features, then re-check their correlation with the target.
housing.loc[:, 'rooms_per_household'] = housing.total_rooms / housing.households
housing.loc[:, 'bedrooms_per_room'] = housing.total_bedrooms / housing.total_rooms
housing.loc[:, 'population_per_household'] = housing.population / housing.households
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
housing.plot(kind="scatter", x="rooms_per_household", y="median_house_value",
             alpha=0.2)
plt.axis([0, 5, 0, 520000])
plt.show()
housing.describe()
# %%
"""Prepare the data for Machine Learning algorithms"""
housing = strat_train_set.drop("median_house_value", axis=1) # drop labels for training set
housing_labels = strat_train_set["median_house_value"].copy()
housing_nums = housing.drop("ocean_proximity", axis=1)
# Fill missing numeric values with the per-column median.
imputer = SimpleImputer(strategy='median')
imputer.fit(housing_nums)
imputer.statistics_
X = imputer.transform(housing_nums)
housing_tr = pd.DataFrame(X, columns=housing_nums.columns, index=housing_nums.index)
housing_tr.head()
#%%
# preprocess the categorical input feature, `ocean_proximity`
housing_cat = housing[["ocean_proximity"]]
cat_encoder = OneHotEncoder(sparse=False)
cat_encoder.categories
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot
# %%
# create a custom transformer to add extra attributes:
# Column indices used by CombinedAttributesAdder below.
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Append engineered ratio columns to a raw housing feature matrix.

    Adds rooms_per_household and population_per_household, and — when
    ``add_bedrooms_per_room`` is True — bedrooms_per_room as well.  The
    column indices rooms_ix/bedrooms_ix/population_ix/households_ix are
    module-level constants.
    """
    def __init__(self, add_bedrooms_per_room=True):
        super().__init__()
        self.add_bedrooms_per_room = add_bedrooms_per_room
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X):
        extra = [
            X[:, rooms_ix] / X[:, households_ix],
            X[:, population_ix] / X[:, households_ix],
        ]
        if self.add_bedrooms_per_room:
            extra.append(X[:, bedrooms_ix] / X[:, rooms_ix])
        # np.c_ with a tuple index behaves like passing the arrays directly.
        return np.c_[tuple([X] + extra)]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
housing_extra_attribs = pd.DataFrame(
    housing_extra_attribs,
    columns=list(housing.columns) +["rooms_per_household", "population_per_household"],
    index = housing.index
)
housing_extra_attribs.head()
# %%
# build a pipeline for preprocessing the numerical attributes
num_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='median')),
    ('attribs_adder', CombinedAttributesAdder()),
    ('std_scaler', StandardScaler())
])
housing_num_tr = num_pipeline.fit_transform(housing_nums)
housing_num_tr
# Full preprocessing: numeric pipeline plus one-hot encoding of categoricals.
num_attribs = list(housing_nums)
cat_attribs = ['ocean_proximity']
full_pipeline = ColumnTransformer([
    ('num', num_pipeline, num_attribs),
    ('cat', OneHotEncoder(), cat_attribs)
])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared.shape
# %%
""" Select and train a model """
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
lin_reg.predict(some_data_prepared)
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_predictions, housing_labels)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
lin_mae = mean_absolute_error(housing_labels, housing_predictions)
lin_mae
# %%
tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
# sklearn's scoring returns negative MSE; negate before taking the sqrt.
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores)
def display_scores(scores):
    """Print cross-validation scores together with their mean and std."""
    for label, value in (("Scores:", scores),
                         ("Mean:", scores.mean()),
                         ("Standard deviation:", scores.std())):
        print(label, value)
display_scores(tree_rmse_scores)
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,
                             scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
#%%
forest_reg = RandomForestRegressor(n_estimators=100, random_state=42, n_jobs=-1)
forest_reg.fit(housing_prepared, housing_labels)
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)
# %%
scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
pd.Series(np.sqrt(-scores)).describe()
# %%
svm_reg = SVR(kernel='linear')
svm_reg.fit(housing_prepared, housing_labels)
housing_predictions = svm_reg.predict(housing_prepared)
svm_mse = mean_squared_error(housing_labels, housing_predictions)
svm_rmse = np.sqrt(svm_mse)
svm_rmse
# %%
# GridSearch
param_grid = [
    {'n_estimators':[3, 10, 30], 'max_features':[2,4,6,8]},
    {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
    ]
forest_reg = RandomForestRegressor(random_state=42)
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', return_train_score=True, n_jobs=-1)
grid_search.fit(housing_prepared, housing_labels)
grid_search.best_params_
grid_search.best_estimator_
# each hyperparameter combination tested during the grid search
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
# %%
# RandomizedSearchCV
param_distribs = {
    'n_estimators': randint(low=1, high=200),
    'max_features': randint(low=1, high=8),
}
forest_reg = RandomForestRegressor(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distribs, n_iter=100, cv=5, scoring='neg_mean_squared_error', random_state=42, n_jobs=-1)
rnd_search.fit(housing_prepared, housing_labels)
# %%
cvres = rnd_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
# %%
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
# %%
# Map the importances back to human-readable feature names.
extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
#cat_encoder = cat_pipeline.named_steps["cat_encoder"] # old solution
cat_encoder = full_pipeline.named_transformers_["cat"]
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)
# %%
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.model_selection.GridSearchCV",
"tarfile.open",
"numpy.sqrt",
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.rc",
"scipy.stats.randint",
"sklearn.ensemble.RandomForestRegressor",
"urllib.r... | [((344, 377), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (367, 377), False, 'import warnings\n'), ((379, 407), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {'labelsize': '(14)'}), "('axes', labelsize=14)\n", (385, 407), True, 'import matplotlib as mpl\n'), ((408, 437), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {'labelsize': '(12)'}), "('xtick', labelsize=12)\n", (414, 437), True, 'import matplotlib as mpl\n'), ((438, 467), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {'labelsize': '(12)'}), "('ytick', labelsize=12)\n", (444, 467), True, 'import matplotlib as mpl\n'), ((569, 622), 'os.path.join', 'os.path.join', (['PROJECT_ROOT_DIR', '""".images"""', 'CHAPTER_ID'], {}), "(PROJECT_ROOT_DIR, '.images', CHAPTER_ID)\n", (581, 622), False, 'import os\n'), ((623, 662), 'os.makedirs', 'os.makedirs', (['IMAGES_PATH'], {'exist_ok': '(True)'}), '(IMAGES_PATH, exist_ok=True)\n', (634, 662), False, 'import os\n'), ((758, 792), 'os.path.join', 'os.path.join', (['"""../data"""', '"""housing"""'], {}), "('../data', 'housing')\n", (770, 792), False, 'import os\n'), ((4401, 4501), 'pandas.cut', 'pd.cut', (["housing['median_income']"], {'bins': '[0.0, 1.5, 3.0, 4.5, 6.0, np.inf]', 'labels': '[1, 2, 3, 4, 5]'}), "(housing['median_income'], bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],\n labels=[1, 2, 3, 4, 5])\n", (4407, 4501), True, 'import pandas as pd\n'), ((4605, 4671), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.2)', 'random_state': '(42)'}), '(n_splits=1, test_size=0.2, random_state=42)\n', (4627, 4671), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((5403, 5455), 'pandas.plotting.scatter_matrix', 'scatter_matrix', (['housing[attributes]'], {'figsize': '(12, 8)'}), '(housing[attributes], figsize=(12, 8))\n', (5417, 5455), False, 'from pandas.plotting 
import scatter_matrix\n'), ((5951, 5978), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 5, 0, 520000]'], {}), '([0, 5, 0, 520000])\n', (5959, 5978), True, 'import matplotlib.pyplot as plt\n'), ((5979, 5989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5987, 5989), True, 'import matplotlib.pyplot as plt\n'), ((6293, 6325), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (6306, 6325), False, 'from sklearn.impute import SimpleImputer\n'), ((6422, 6493), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'housing_nums.columns', 'index': 'housing_nums.index'}), '(X, columns=housing_nums.columns, index=housing_nums.index)\n', (6434, 6493), True, 'import pandas as pd\n'), ((6639, 6666), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (6652, 6666), False, 'from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler\n'), ((8592, 8610), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8608, 8610), False, 'from sklearn.linear_model import LinearRegression\n'), ((8885, 8940), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['housing_predictions', 'housing_labels'], {}), '(housing_predictions, housing_labels)\n', (8903, 8940), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((8952, 8968), 'numpy.sqrt', 'np.sqrt', (['lin_mse'], {}), '(lin_mse)\n', (8959, 8968), True, 'import numpy as np\n'), ((8990, 9046), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['housing_labels', 'housing_predictions'], {}), '(housing_labels, housing_predictions)\n', (9009, 9046), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((9072, 9110), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (9093, 9110), False, 'from sklearn.tree import 
DecisionTreeRegressor\n'), ((9227, 9282), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['housing_labels', 'housing_predictions'], {}), '(housing_labels, housing_predictions)\n', (9245, 9282), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((9295, 9312), 'numpy.sqrt', 'np.sqrt', (['tree_mse'], {}), '(tree_mse)\n', (9302, 9312), True, 'import numpy as np\n'), ((9333, 9438), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['tree_reg', 'housing_prepared', 'housing_labels'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(10)'}), "(tree_reg, housing_prepared, housing_labels, scoring=\n 'neg_mean_squared_error', cv=10)\n", (9348, 9438), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((9453, 9469), 'numpy.sqrt', 'np.sqrt', (['(-scores)'], {}), '(-scores)\n', (9460, 9469), True, 'import numpy as np\n'), ((9658, 9762), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lin_reg', 'housing_prepared', 'housing_labels'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(10)'}), "(lin_reg, housing_prepared, housing_labels, scoring=\n 'neg_mean_squared_error', cv=10)\n", (9673, 9762), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((9805, 9825), 'numpy.sqrt', 'np.sqrt', (['(-lin_scores)'], {}), '(-lin_scores)\n', (9812, 9825), True, 'import numpy as np\n'), ((9877, 9944), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'random_state': '(42)', 'n_jobs': '(-1)'}), '(n_estimators=100, random_state=42, n_jobs=-1)\n', (9898, 9944), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((10067, 10122), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['housing_labels', 'housing_predictions'], {}), '(housing_labels, housing_predictions)\n', (10085, 
10122), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((10137, 10156), 'numpy.sqrt', 'np.sqrt', (['forest_mse'], {}), '(forest_mse)\n', (10144, 10156), True, 'import numpy as np\n'), ((10186, 10293), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['forest_reg', 'housing_prepared', 'housing_labels'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(10)'}), "(forest_reg, housing_prepared, housing_labels, scoring=\n 'neg_mean_squared_error', cv=10)\n", (10201, 10293), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((10310, 10333), 'numpy.sqrt', 'np.sqrt', (['(-forest_scores)'], {}), '(-forest_scores)\n', (10317, 10333), True, 'import numpy as np\n'), ((10385, 10489), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lin_reg', 'housing_prepared', 'housing_labels'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(10)'}), "(lin_reg, housing_prepared, housing_labels, scoring=\n 'neg_mean_squared_error', cv=10)\n", (10400, 10489), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((10540, 10560), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (10543, 10560), False, 'from sklearn.svm import SVR\n'), ((10674, 10729), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['housing_labels', 'housing_predictions'], {}), '(housing_labels, housing_predictions)\n', (10692, 10729), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((10741, 10757), 'numpy.sqrt', 'np.sqrt', (['svm_mse'], {}), '(svm_mse)\n', (10748, 10757), True, 'import numpy as np\n'), ((10958, 10996), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (10979, 10996), False, 'from sklearn.ensemble import 
RandomForestRegressor\n'), ((11011, 11127), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['forest_reg', 'param_grid'], {'cv': '(5)', 'scoring': '"""neg_mean_squared_error"""', 'return_train_score': '(True)', 'n_jobs': '(-1)'}), "(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error',\n return_train_score=True, n_jobs=-1)\n", (11023, 11127), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((11591, 11629), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (11612, 11629), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((11643, 11774), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['forest_reg', 'param_distribs'], {'n_iter': '(100)', 'cv': '(5)', 'scoring': '"""neg_mean_squared_error"""', 'random_state': '(42)', 'n_jobs': '(-1)'}), "(forest_reg, param_distribs, n_iter=100, cv=5, scoring=\n 'neg_mean_squared_error', random_state=42, n_jobs=-1)\n", (11661, 11774), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((1685, 1740), 'os.path.join', 'os.path.join', (['IMAGES_PATH', "(fig_id + '.' + fig_extension)"], {}), "(IMAGES_PATH, fig_id + '.' 
+ fig_extension)\n", (1697, 1740), False, 'import os\n'), ((1829, 1884), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': 'fig_extension', 'dpi': 'resolution'}), '(path, format=fig_extension, dpi=resolution)\n', (1840, 1884), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2102), 'os.path.join', 'os.path.join', (['housing_path', '"""housing.tgz"""'], {}), "(housing_path, 'housing.tgz')\n", (2073, 2102), False, 'import os\n'), ((2107, 2156), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['housing_url', 'tgz_path'], {}), '(housing_url, tgz_path)\n', (2133, 2156), False, 'import urllib\n'), ((2175, 2197), 'tarfile.open', 'tarfile.open', (['tgz_path'], {}), '(tgz_path)\n', (2187, 2197), False, 'import tarfile\n'), ((2335, 2376), 'os.path.join', 'os.path.join', (['housing_path', '"""housing.csv"""'], {}), "(housing_path, 'housing.csv')\n", (2347, 2376), False, 'import os\n'), ((2388, 2409), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2399, 2409), True, 'import pandas as pd\n'), ((2494, 2556), 'os.path.join', 'os.path.join', (['PROJECT_ROOT_DIR', '"""images"""', '"""end_to_end_project"""'], {}), "(PROJECT_ROOT_DIR, 'images', 'end_to_end_project')\n", (2506, 2556), False, 'import os\n'), ((2561, 2600), 'os.makedirs', 'os.makedirs', (['images_path'], {'exist_ok': '(True)'}), '(images_path, exist_ok=True)\n', (2572, 2600), False, 'import os\n'), ((3484, 3519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {'fontsize': '(14)'}), "('Latitude', fontsize=14)\n", (3494, 3519), True, 'import matplotlib.pyplot as plt\n'), ((3524, 3560), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {'fontsize': '(14)'}), "('Longitude', fontsize=14)\n", (3534, 3560), True, 'import matplotlib.pyplot as plt\n'), ((3871, 3894), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (3881, 3894), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3956), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3954, 3956), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4359), 'sklearn.model_selection.train_test_split', 'train_test_split', (['housing'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(housing, test_size=0.2, random_state=42)\n', (4318, 4359), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((11505, 11529), 'scipy.stats.randint', 'randint', ([], {'low': '(1)', 'high': '(200)'}), '(low=1, high=200)\n', (11512, 11529), False, 'from scipy.stats import randint\n'), ((11551, 11573), 'scipy.stats.randint', 'randint', ([], {'low': '(1)', 'high': '(8)'}), '(low=1, high=8)\n', (11558, 11573), False, 'from scipy.stats import randint\n'), ((1801, 1819), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1817, 1819), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2005), 'os.path.isdir', 'os.path.isdir', (['housing_path'], {}), '(housing_path)\n', (1991, 2005), False, 'import os\n'), ((2015, 2040), 'os.makedirs', 'os.makedirs', (['housing_path'], {}), '(housing_path)\n', (2026, 2040), False, 'import os\n'), ((2853, 2888), 'os.path.join', 'os.path.join', (['images_path', 'filename'], {}), '(images_path, filename)\n', (2865, 2888), False, 'import os\n'), ((2993, 3036), 'os.path.join', 'os.path.join', (['IMAGES_PATH', '"""california.png"""'], {}), "(IMAGES_PATH, 'california.png')\n", (3005, 3036), False, 'import os\n'), ((3205, 3224), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (3217, 3224), True, 'import matplotlib.pyplot as plt\n'), ((11409, 11429), 'numpy.sqrt', 'np.sqrt', (['(-mean_score)'], {}), '(-mean_score)\n', (11416, 11429), True, 'import numpy as np\n'), ((11941, 11961), 'numpy.sqrt', 'np.sqrt', (['(-mean_score)'], {}), '(-mean_score)\n', (11948, 11961), True, 'import numpy as np\n'), ((3458, 3477), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', 
(['"""jet"""'], {}), "('jet')\n", (3470, 3477), True, 'import matplotlib.pyplot as plt\n'), ((8070, 8102), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (8083, 8102), False, 'from sklearn.impute import SimpleImputer\n'), ((8174, 8190), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8188, 8190), False, 'from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler\n'), ((8427, 8442), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (8440, 8442), False, 'from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler\n'), ((10495, 10511), 'numpy.sqrt', 'np.sqrt', (['(-scores)'], {}), '(-scores)\n', (10502, 10511), True, 'import numpy as np\n')] |
import getpass
import os
from PIL import Image
from tqdm import tqdm
import astropy.units as u
import numpy as np
from astropy.coordinates import SkyCoord
import splusdata
from splus_ifusci import SCube, make_RGB_with_overlay
def make_large_galaxies(conn):
    """Produce logo-stamped RGB (+Halpha overlay) JPEGs for large galaxies.

    For every target the S-PLUS stamps are downloaded, assembled into a
    data cube, the Halpha map is computed and overlaid on an I/R/G color
    composite, and the S-PLUS logo is pasted in the bottom-right corner.

    conn: authenticated splusdata connection used to fetch the stamps.
    """
    galaxies = ["NGC1365", "FCC37", "ARP244", "quasar_3C273", 'NGC4030',
                "NGC3464",
                "NGC3314A",
                "NGC3511",
                "NGC0428", "NGC7089", "HydraCluster"]
    coordinates = np.array([[53.40166, -36.140277],
                            [51.33458, -36.385],
                            [180.47208, -18.87694],
                            [187.2779, 2.0525],
                            [180.0986, -1.1002],
                            [163.6666945533915, -21.06532985884086],
                            [159.30363493989583, -27.683956942836808],
                            [165.8485378153153, -23.083694639214354],
                            [18.23199, 0.98148],
                            [323.3625, -0.823333],
                            [159.17416, -27.525444]]) * u.degree
    # Default cutout size in pixels, with overrides for the biggest targets.
    sizes = [600] * len(galaxies)
    for idx, npix in ((0, 1800), (1, 900), (2, 2400), (-2, 2400), (-1, 2400)):
        sizes[idx] = npix
    targets = list(zip(galaxies, coordinates, sizes))
    for name, radec, size in tqdm(targets, desc="Processing objects"):
        coords = SkyCoord(*radec)
        scube = SCube(name, coords, size, conn=conn,
                      coord_unit=(u.hourangle, u.degree))
        scube.download_stamps(redo=False)
        scube.make_cube()
        halpha, halpha_err = scube.calc_halpha()
        # Color composite from the I/R/G bands with the Halpha map on top.
        flam = scube.get_flam().value
        rgb = [flam[scube.bands.index(band)] for band in ("I", "R", "G")]
        outimg = f"{name}_{size}x{size}_RGB.jpg"
        make_RGB_with_overlay(*rgb, outimg, overlay=halpha.value)
        # Paste the S-PLUS logo, scaled to a third of the image width,
        # into the bottom-right corner.
        img = Image.open(outimg)
        logo = Image.open("splus_logo.gif").convert("RGBA")
        logo_w, logo_h = logo.size
        new_w = int(img.size[0] / 3.)
        new_h = int(new_w * logo_h / logo_w)
        resized_logo = logo.resize((new_w, new_h))
        img.paste(resized_logo, (img.size[0] - new_w, img.size[1] - new_h), resized_logo)
        img.save(outimg.replace(".jpg", "_logo.jpg"))
if __name__ == "__main__":
    #Connect with S-PLUS
    # Defaults to the local login name; edit if it differs from the
    # S-PLUS account name.
    username = getpass.getuser() # Change to your S-PLUS username
    password = getpass.getpass(f"Password for {username}:")
    conn = splusdata.connect(username, password)
    # conn = None
    make_large_galaxies(conn)
| [
"PIL.Image.open",
"splusdata.connect",
"astropy.coordinates.SkyCoord",
"getpass.getpass",
"splus_ifusci.make_RGB_with_overlay",
"numpy.array",
"getpass.getuser",
"splus_ifusci.SCube"
] | [((2304, 2321), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (2319, 2321), False, 'import getpass\n'), ((2370, 2414), 'getpass.getpass', 'getpass.getpass', (['f"""Password for {username}:"""'], {}), "(f'Password for {username}:')\n", (2385, 2414), False, 'import getpass\n'), ((2426, 2463), 'splusdata.connect', 'splusdata.connect', (['username', 'password'], {}), '(username, password)\n', (2443, 2463), False, 'import splusdata\n'), ((486, 819), 'numpy.array', 'np.array', (['[[53.40166, -36.140277], [51.33458, -36.385], [180.47208, -18.87694], [\n 187.2779, 2.0525], [180.0986, -1.1002], [163.6666945533915, -\n 21.06532985884086], [159.30363493989583, -27.683956942836808], [\n 165.8485378153153, -23.083694639214354], [18.23199, 0.98148], [323.3625,\n -0.823333], [159.17416, -27.525444]]'], {}), '([[53.40166, -36.140277], [51.33458, -36.385], [180.47208, -\n 18.87694], [187.2779, 2.0525], [180.0986, -1.1002], [163.6666945533915,\n -21.06532985884086], [159.30363493989583, -27.683956942836808], [\n 165.8485378153153, -23.083694639214354], [18.23199, 0.98148], [323.3625,\n -0.823333], [159.17416, -27.525444]])\n', (494, 819), True, 'import numpy as np\n'), ((1309, 1334), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['*coordinates[i]'], {}), '(*coordinates[i])\n', (1317, 1334), False, 'from astropy.coordinates import SkyCoord\n'), ((1375, 1449), 'splus_ifusci.SCube', 'SCube', (['galaxy', 'coords', 'size'], {'conn': 'conn', 'coord_unit': '(u.hourangle, u.degree)'}), '(galaxy, coords, size, conn=conn, coord_unit=(u.hourangle, u.degree))\n', (1380, 1449), False, 'from splus_ifusci import SCube, make_RGB_with_overlay\n'), ((1811, 1868), 'splus_ifusci.make_RGB_with_overlay', 'make_RGB_with_overlay', (['*rgb', 'outimg'], {'overlay': 'halpha.value'}), '(*rgb, outimg, overlay=halpha.value)\n', (1832, 1868), False, 'from splus_ifusci import SCube, make_RGB_with_overlay\n'), ((1883, 1901), 'PIL.Image.open', 'Image.open', (['outimg'], {}), '(outimg)\n', (1893, 1901), 
False, 'from PIL import Image\n'), ((1943, 1971), 'PIL.Image.open', 'Image.open', (['"""splus_logo.gif"""'], {}), "('splus_logo.gif')\n", (1953, 1971), False, 'from PIL import Image\n')] |
"""Video Super-resolution dataset."""
import os
import random
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import common.modes
import datasets._isr
def update_argparser(parser):
  """Register video-SR flags on top of the base ISR argument set.

  Adds per-mode clip length and temporal padding flags, and sets the
  default batch sizes for training and evaluation.
  """
  datasets._isr.update_argparser(parser)
  # (flag, help text, default) triples; every flag is an integer option.
  frame_flags = (
      ('--train_temporal_size', 'Number of frames for training', 5),
      ('--eval_temporal_size', 'Number of frames for evaluation', 5),
      ('--train_temporal_padding_size', 'Number of frames for training', 3),
      ('--eval_temporal_padding_size', 'Number of frames for evaluation', 3),
  )
  for flag, help_text, default in frame_flags:
    parser.add_argument(flag, help=help_text, default=default, type=int)
  parser.set_defaults(
      train_batch_size=16,
      eval_batch_size=1,
  )
class _SingleVideoSuperResolutionDataset(data.Dataset):
  """Paired LR/HR frame-sequence dataset for a single video.

  Each item is a clip of ``temporal_size`` consecutive HR frames plus the
  matching LR frames, with ``temporal_padding_size`` extra LR context
  frames on each side (edge frames are repeated at clip boundaries).
  """
  def __init__(self, mode, params, video_name, lr_files, hr_files):
    super(_SingleVideoSuperResolutionDataset, self).__init__()
    self.mode = mode
    self.params = params
    self.video_name = video_name
    # lr_files / hr_files are sequences of (key, path) pairs: index 0 is an
    # identifier, index 1 an image path (see _load_item and __getitem__).
    self.lr_files = lr_files
    self.hr_files = hr_files
    # Per-mode clip length; PREDICT reuses the eval settings.
    self.temporal_size = {
        common.modes.TRAIN: params.train_temporal_size,
        common.modes.EVAL: params.eval_temporal_size,
        common.modes.PREDICT: params.eval_temporal_size,
    }[mode]
    self.temporal_padding_size = {
        common.modes.TRAIN: params.train_temporal_padding_size,
        common.modes.EVAL: params.eval_temporal_padding_size,
        common.modes.PREDICT: params.eval_temporal_padding_size,
    }[mode]
  def __getitem__(self, index):
    """Return (lr_clip, hr_clip) tensors, or (lr_clip, hr_keys) in PREDICT.

    Clips are stacked with torch.stack(..., dim=1), i.e. the frame axis
    is dim 1 of the resulting tensor.
    """
    t = index * self.temporal_size
    # Padded LR frame indices; the min/max clamp repeats edge frames.
    lr_files = [
        self.lr_files[min(len(self.lr_files) - 1, max(0, i))]
        for i in range(t - self.temporal_padding_size, t + self.temporal_size +
                       self.temporal_padding_size)
    ]
    hr_files = [self.hr_files[i] for i in range(t, t + self.temporal_size)]
    if self.mode == common.modes.PREDICT:
      # Prediction: return the LR frames and the HR file keys only.
      lr_images = [
          transforms.functional.to_tensor(np.asarray(Image.open(lr_file[1])))
          for lr_file in lr_files
      ]
      lr_images = torch.stack(lr_images, dim=1)
      hr_files = [hr_file[0] for hr_file in hr_files]
      return lr_images, hr_files
    lr_images, hr_images = self._load_item(lr_files, hr_files)
    lr_images, hr_images = self._sample_patch(lr_images, hr_images)
    lr_images, hr_images = self._augment(lr_images, hr_images)
    # Flips in _augment produce negatively-strided views; force contiguous
    # arrays before tensor conversion.
    lr_images = [np.ascontiguousarray(lr_image) for lr_image in lr_images]
    hr_images = [np.ascontiguousarray(hr_image) for hr_image in hr_images]
    lr_images = [
        transforms.functional.to_tensor(lr_image) for lr_image in lr_images
    ]
    hr_images = [
        transforms.functional.to_tensor(hr_image) for hr_image in hr_images
    ]
    lr_images = torch.stack(lr_images, dim=1)
    hr_images = torch.stack(hr_images, dim=1)
    return lr_images, hr_images
  def _load_item(self, lr_files, hr_files):
    """Load the LR and HR frames from disk as numpy arrays."""
    lr_images = [np.asarray(Image.open(lr_file[1])) for lr_file in lr_files]
    hr_images = [np.asarray(Image.open(hr_file[1])) for hr_file in hr_files]
    return lr_images, hr_images
  def _sample_patch(self, lr_images, hr_images):
    """Crop one random LR patch (and its scale-matched HR patch) in TRAIN."""
    if self.mode == common.modes.TRAIN:
      # sample patch while training
      x = random.randrange(
          self.params.ignored_boundary_size, lr_images[0].shape[0] -
          self.params.lr_patch_size + 1 - self.params.ignored_boundary_size)
      y = random.randrange(
          self.params.ignored_boundary_size, lr_images[0].shape[1] -
          self.params.lr_patch_size + 1 - self.params.ignored_boundary_size)
      # x, y are sampled once, so every frame of the clip gets the same crop.
      lr_images = [
          lr_image[x:x + self.params.lr_patch_size, y:y +
                   self.params.lr_patch_size] for lr_image in lr_images
      ]
      hr_images = [
          hr_image[x * self.params.scale:(x + self.params.lr_patch_size) *
                   self.params.scale, y *
                   self.params.scale:(y + self.params.lr_patch_size) *
                   self.params.scale] for hr_image in hr_images
      ]
    return lr_images, hr_images
  def _augment(self, lr_images, hr_images):
    """Randomly flip, transpose, and time-reverse the clip in TRAIN mode."""
    if self.mode == common.modes.TRAIN:
      # augmentation while training
      if random.random() < 0.5:
        lr_images = [lr_image[::-1] for lr_image in lr_images]
        hr_images = [hr_image[::-1] for hr_image in hr_images]
      if random.random() < 0.5:
        lr_images = [lr_image[:, ::-1] for lr_image in lr_images]
        hr_images = [hr_image[:, ::-1] for hr_image in hr_images]
      if random.random() < 0.5:
        lr_images = [np.swapaxes(lr_image, 0, 1) for lr_image in lr_images]
        hr_images = [np.swapaxes(hr_image, 0, 1) for hr_image in hr_images]
      if random.random() < 0.5:
        # reversed() returns lazy iterators; __getitem__ consumes them
        # exactly once via list comprehensions, so this is safe there.
        lr_images = reversed(lr_images)
        hr_images = reversed(hr_images)
    return lr_images, hr_images
  def __len__(self):
    # Only an integral number of clips per video is supported.
    if len(self.hr_files) % self.temporal_size:
      raise NotImplementedError
    return len(self.hr_files) // self.temporal_size
class VideoSuperResolutionDataset(data.ConcatDataset):
def __init__(self, mode, params, lr_files, hr_files):
video_datasets = []
for (v, l), (_, h) in zip(lr_files, hr_files):
video_datasets.append(
_SingleVideoSuperResolutionDataset(mode, params, v, l, h))
if mode == common.modes.TRAIN:
video_datasets = video_datasets * params.num_patches
super(VideoSuperResolutionDataset, self).__init__(video_datasets)
class _SingleVideoSuperResolutionHDF5Dataset(_SingleVideoSuperResolutionDataset
):
def __init__(
self,
mode,
params,
video_name,
lr_files,
hr_files,
lr_cache_file,
hr_cache_file,
lib_hdf5='h5py',
init_hdf5=False,
):
super(_SingleVideoSuperResolutionHDF5Dataset, self).__init__(
mode,
params,
video_name,
lr_files,
hr_files,
)
self.lr_cache_file = common.io.Hdf5(lr_cache_file, lib_hdf5)
self.hr_cache_file = common.io.Hdf5(hr_cache_file, lib_hdf5)
if init_hdf5:
cache_dir = os.path.dirname(lr_cache_file)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
for lr_file in self.lr_files:
self.lr_cache_file.add(lr_file[0], np.asarray(Image.open(lr_file[1])))
if self.mode != common.modes.PREDICT:
for hr_file in self.hr_files:
self.hr_cache_file.add(hr_file[0], np.asarray(Image.open(hr_file[1])))
def _load_item(self, lr_files, hr_files):
lr_images = [self.lr_cache_file.get(lr_file[0]) for lr_file in lr_files]
hr_images = [self.hr_cache_file.get(hr_file[0]) for hr_file in hr_files]
return lr_images, hr_images
class VideoSuperResolutionHDF5Dataset(data.ConcatDataset):
def __init__(
self,
mode,
params,
lr_files,
hr_files,
lr_cache_file,
hr_cache_file,
lib_hdf5='h5py',
):
video_datasets = []
init_hdf5 = not os.path.exists(lr_cache_file)
for (v, l), (_, h) in zip(lr_files, hr_files):
video_datasets.append(
_SingleVideoSuperResolutionHDF5Dataset(
mode,
params,
v,
l,
h,
lr_cache_file,
hr_cache_file,
lib_hdf5=lib_hdf5,
init_hdf5=init_hdf5))
if mode == common.modes.TRAIN:
video_datasets = video_datasets * params.num_patches
super(VideoSuperResolutionHDF5Dataset, self).__init__(video_datasets)
| [
"torchvision.transforms.functional.to_tensor",
"os.path.exists",
"PIL.Image.open",
"os.makedirs",
"random.randrange",
"torch.stack",
"numpy.ascontiguousarray",
"numpy.swapaxes",
"os.path.dirname",
"random.random"
] | [((2940, 2969), 'torch.stack', 'torch.stack', (['lr_images'], {'dim': '(1)'}), '(lr_images, dim=1)\n', (2951, 2969), False, 'import torch\n'), ((2986, 3015), 'torch.stack', 'torch.stack', (['hr_images'], {'dim': '(1)'}), '(hr_images, dim=1)\n', (2997, 3015), False, 'import torch\n'), ((2261, 2290), 'torch.stack', 'torch.stack', (['lr_images'], {'dim': '(1)'}), '(lr_images, dim=1)\n', (2272, 2290), False, 'import torch\n'), ((2591, 2621), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['lr_image'], {}), '(lr_image)\n', (2611, 2621), True, 'import numpy as np\n'), ((2666, 2696), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['hr_image'], {}), '(hr_image)\n', (2686, 2696), True, 'import numpy as np\n'), ((2750, 2791), 'torchvision.transforms.functional.to_tensor', 'transforms.functional.to_tensor', (['lr_image'], {}), '(lr_image)\n', (2781, 2791), True, 'import torchvision.transforms as transforms\n'), ((2850, 2891), 'torchvision.transforms.functional.to_tensor', 'transforms.functional.to_tensor', (['hr_image'], {}), '(hr_image)\n', (2881, 2891), True, 'import torchvision.transforms as transforms\n'), ((3416, 3562), 'random.randrange', 'random.randrange', (['self.params.ignored_boundary_size', '(lr_images[0].shape[0] - self.params.lr_patch_size + 1 - self.params.\n ignored_boundary_size)'], {}), '(self.params.ignored_boundary_size, lr_images[0].shape[0] -\n self.params.lr_patch_size + 1 - self.params.ignored_boundary_size)\n', (3432, 3562), False, 'import random\n'), ((3590, 3736), 'random.randrange', 'random.randrange', (['self.params.ignored_boundary_size', '(lr_images[0].shape[1] - self.params.lr_patch_size + 1 - self.params.\n ignored_boundary_size)'], {}), '(self.params.ignored_boundary_size, lr_images[0].shape[1] -\n self.params.lr_patch_size + 1 - self.params.ignored_boundary_size)\n', (3606, 3736), False, 'import random\n'), ((6252, 6282), 'os.path.dirname', 'os.path.dirname', (['lr_cache_file'], {}), '(lr_cache_file)\n', (6267, 6282), False, 
'import os\n'), ((7126, 7155), 'os.path.exists', 'os.path.exists', (['lr_cache_file'], {}), '(lr_cache_file)\n', (7140, 7155), False, 'import os\n'), ((3122, 3144), 'PIL.Image.open', 'Image.open', (['lr_file[1]'], {}), '(lr_file[1])\n', (3132, 3144), False, 'from PIL import Image\n'), ((3199, 3221), 'PIL.Image.open', 'Image.open', (['hr_file[1]'], {}), '(hr_file[1])\n', (3209, 3221), False, 'from PIL import Image\n'), ((4354, 4369), 'random.random', 'random.random', ([], {}), '()\n', (4367, 4369), False, 'import random\n'), ((4512, 4527), 'random.random', 'random.random', ([], {}), '()\n', (4525, 4527), False, 'import random\n'), ((4676, 4691), 'random.random', 'random.random', ([], {}), '()\n', (4689, 4691), False, 'import random\n'), ((4860, 4875), 'random.random', 'random.random', ([], {}), '()\n', (4873, 4875), False, 'import random\n'), ((6296, 6321), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (6310, 6321), False, 'import os\n'), ((6331, 6353), 'os.makedirs', 'os.makedirs', (['cache_dir'], {}), '(cache_dir)\n', (6342, 6353), False, 'import os\n'), ((4720, 4747), 'numpy.swapaxes', 'np.swapaxes', (['lr_image', '(0)', '(1)'], {}), '(lr_image, 0, 1)\n', (4731, 4747), True, 'import numpy as np\n'), ((4796, 4823), 'numpy.swapaxes', 'np.swapaxes', (['hr_image', '(0)', '(1)'], {}), '(hr_image, 0, 1)\n', (4807, 4823), True, 'import numpy as np\n'), ((2176, 2198), 'PIL.Image.open', 'Image.open', (['lr_file[1]'], {}), '(lr_file[1])\n', (2186, 2198), False, 'from PIL import Image\n'), ((6445, 6467), 'PIL.Image.open', 'Image.open', (['lr_file[1]'], {}), '(lr_file[1])\n', (6455, 6467), False, 'from PIL import Image\n'), ((6608, 6630), 'PIL.Image.open', 'Image.open', (['hr_file[1]'], {}), '(hr_file[1])\n', (6618, 6630), False, 'from PIL import Image\n')] |
import matplotlib.pyplot as plt
import numpy as np
filename = "Lab_1/sum.txt"
data = np.loadtxt(filename, delimiter=',', dtype='float')
t_sum, nthreads, time1, count1, n_sum, time2, count2 = data.T
threads = [[], [], [], [], [], [], [], [], [], []]
counts = [[], [], [], [], [], [], [], [], [], []]
for i in range(0, len(nthreads)):
th = int(nthreads[i] - 1)
threads[th].append(time1[i])
counts[th].append(count1[i])
plt.figure()
for i in range(0, 10):
plt.plot(counts[i], threads[i])
plt.legend(["1 thread", "2 threads", "3 threads", "4 threads", "5 threads", "6 threads", "7 threads", "8 threads", "9 threads", "10 threads"])
plt.show()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((88, 138), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""', 'dtype': '"""float"""'}), "(filename, delimiter=',', dtype='float')\n", (98, 138), True, 'import numpy as np\n'), ((428, 440), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (438, 440), True, 'import matplotlib.pyplot as plt\n'), ((500, 646), 'matplotlib.pyplot.legend', 'plt.legend', (["['1 thread', '2 threads', '3 threads', '4 threads', '5 threads',\n '6 threads', '7 threads', '8 threads', '9 threads', '10 threads']"], {}), "(['1 thread', '2 threads', '3 threads', '4 threads', '5 threads',\n '6 threads', '7 threads', '8 threads', '9 threads', '10 threads'])\n", (510, 646), True, 'import matplotlib.pyplot as plt\n'), ((643, 653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (651, 653), True, 'import matplotlib.pyplot as plt\n'), ((466, 497), 'matplotlib.pyplot.plot', 'plt.plot', (['counts[i]', 'threads[i]'], {}), '(counts[i], threads[i])\n', (474, 497), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import math
def random_Cauchy(mu, S):
y = np.random.uniform(0, 1, S)
return mu*np.tan(np.pi * (y-0.5))
def Cauchy(x, mu, bias=0):
return 1 / np.pi * mu / (mu**2 + (x-bias)**2)
def Laplacian(x, b, bias=0):
return 1 / (2 * b) * np.exp(-np.abs(x-bias)/b)
def Exp(x, round=0):
if round > 0:
val = 0
for i in range(round):
val += np.power(x, i) / math.factorial(i)
return val
else:
return np.exp(x) | [
"numpy.abs",
"numpy.tan",
"numpy.power",
"math.factorial",
"numpy.exp",
"numpy.random.uniform"
] | [((66, 92), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'S'], {}), '(0, 1, S)\n', (83, 92), True, 'import numpy as np\n'), ((107, 132), 'numpy.tan', 'np.tan', (['(np.pi * (y - 0.5))'], {}), '(np.pi * (y - 0.5))\n', (113, 132), True, 'import numpy as np\n'), ((479, 488), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (485, 488), True, 'import numpy as np\n'), ((400, 414), 'numpy.power', 'np.power', (['x', 'i'], {}), '(x, i)\n', (408, 414), True, 'import numpy as np\n'), ((417, 434), 'math.factorial', 'math.factorial', (['i'], {}), '(i)\n', (431, 434), False, 'import math\n'), ((276, 292), 'numpy.abs', 'np.abs', (['(x - bias)'], {}), '(x - bias)\n', (282, 292), True, 'import numpy as np\n')] |
import sys
import unittest
import copy
import numpy as np
from scipy.linalg import block_diag
import pyinduct as pi
import pyinduct.hyperbolic.feedforward as hff
import pyinduct.parabolic as parabolic
import pyinduct.simulation as sim
from pyinduct.tests import show_plots
import pyqtgraph as pg
class SimpleInput(sim.SimulationInput):
"""
the simplest input we can imagine
"""
def __init__(self):
super().__init__("SimpleInput")
def _calc_output(self, **kwargs):
return 0
class MonotonousInput(sim.SimulationInput):
"""
an input that ramps up
"""
def __init__(self):
super().__init__("MonotonousInput")
def _calc_output(self, **kwargs):
t = kwargs["time"]
extra_data = np.sin(t)
if np.isclose(t % 2, 0):
extra_data = np.nan
return dict(output=kwargs["time"], extra_data=extra_data)
class CorrectInput(sim.SimulationInput):
"""
a diligent input
"""
def __init__(self, output, limits=(0, 1), der_order=0):
super().__init__(self)
self.out = np.ones(der_order + 1) * output
self.t_min, self.t_max = limits
def _calc_output(self, **kwargs):
if "time" not in kwargs:
raise ValueError("mandatory key not found!")
if "weights" not in kwargs:
raise ValueError("mandatory key not found!")
if "weight_lbl" not in kwargs:
raise ValueError("mandatory key not found!")
return dict(output=self.out)
class AlternatingInput(sim.SimulationInput):
"""
a simple alternating input, composed of smooth transitions
"""
def _calc_output(self, **kwargs):
t = kwargs["time"] % 2
if t < 1:
res = self.tr_up(t)
else:
res = self.tr_down(t)
return dict(output=res - .5)
def __init__(self):
super().__init__(self)
self.tr_up = pi.SmoothTransition(states=(0, 1),
interval=(0, 1),
method="poly")
self.tr_down = pi.SmoothTransition(states=(1, 0),
interval=(1, 2),
method="poly")
class SimulationInputTest(unittest.TestCase):
def setUp(self):
pass
def test_abstract_funcs(self):
# raise type error since abstract method is not implemented
self.assertRaises(TypeError, sim.SimulationInput)
# method implemented, should work
u = SimpleInput()
def test_call_arguments(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = CorrectInput(output=1, limits=(0, 1))
ic = np.zeros((2, 1))
ss = sim.StateSpace({1: a}, {0: {1: b}}, input_handle=u)
# if caller provides correct kwargs no exception should be raised
res = sim.simulate_state_space(ss, ic, pi.Domain((0, 1), num=10))
def test_storage(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = MonotonousInput()
ic = np.zeros((2, 1))
ss = sim.StateSpace(a, b, input_handle=u)
# run simulation to fill the internal storage
domain = pi.Domain((0, 10), num=11)
bigger_domain = pi.Domain((-1, 11), num=13)
res = sim.simulate_state_space(ss, ic, domain)
# don't return any entries that aren't there
self.assertRaises(KeyError, u.get_results, domain, "Unknown Entry")
# default key is "output"
ed = u.get_results(domain)
ed_explicit = u.get_results(domain, result_key="output")
self.assertTrue(np.array_equal(ed, ed_explicit))
# return an np.ndarray as default
self.assertIsInstance(ed, np.ndarray)
# return EvalData if corresponding flag is set
self.assertIsInstance(u.get_results(domain, as_eval_data=True),
pi.EvalData)
# if data has to be extrapolated, just repeat the last values
res = u.get_results(bigger_domain)
self.assertEqual(res[0], res[1])
self.assertEqual(res[-2], res[-1])
# nan values in the data storage should be ignored
res = u.get_results(bigger_domain, result_key="extra_data")
# storage contains values
self.assertTrue(u._time_storage)
self.assertTrue(u._value_storage)
# clear it
u.clear_cache()
# storage should be empty
self.assertFalse(u._time_storage)
self.assertFalse(u._value_storage)
# double clearing should work
u.clear_cache()
class CanonicalFormTest(unittest.TestCase):
def setUp(self):
self.cf = sim.CanonicalForm()
self.u = SimpleInput()
def test_add_to(self):
a = np.eye(5)
self.cf.add_to(dict(name="E", order=0, exponent=1), a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], a))
self.cf.add_to(dict(name="E", order=0, exponent=1), 5 * a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], 6 * a))
b = np.eye(10)
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), b)
self.cf.add_to(dict(name="E", order=2, exponent=1), b)
self.assertTrue(np.array_equal(self.cf.matrices["E"][2][1], b))
f = np.atleast_2d(np.array(range(5))).T
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), f)
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], f))
# try to add something with derivative or exponent to f: value should
# end up in f
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], 2 * f))
c = np.atleast_2d(np.array(range(5))).T
# that one should be easy
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1], c))
# here G01 as to be expanded
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c))))
# here G01 as to be expanded again
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=3)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c, np.zeros_like(c), c))))
# input derivatives can occur
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1], c))
# expansion should still work
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1],
np.hstack((c, c))))
class ParseTest(unittest.TestCase):
def setUp(self):
# scalars
self.scalars = pi.Scalars(np.vstack(list(range(3))))
# callbacks
self.u = pi.ConstantTrajectory(7)
u1 = CorrectInput(output=1)
u2 = CorrectInput(output=2)
self.u_vec = pi.SimulationInputVector([u1, u2])
self.u_dt = CorrectInput(output=1, der_order=1)
u1_dt = CorrectInput(output=1, der_order=1)
u2_dt = CorrectInput(output=2, der_order=1)
self.u_vec_dt = pi.SimulationInputVector([u1_dt, u2_dt])
# inputs
self.input = pi.Input(self.u)
self.vec_input_1 = pi.Input(self.u_vec, index=0)
self.vec_input_2 = pi.Input(self.u_vec, index=1)
self.input_dt = pi.Input(self.u_dt, order=1)
self.vec_input_dt_1 = pi.Input(self.u_vec_dt, index=0, order=1)
self.vec_input_dt_2 = pi.Input(self.u_vec_dt, index=1, order=1)
# scale function
def heavyside(z):
if z < 0.5:
return 0
elif z == 0.5:
return .5
else:
return 1
base = pi.Base(pi.Function(heavyside))
pi.register_base("heavyside_base", base)
# distributed base
nodes = pi.Domain((0, 1), num=3)
self.distributed_base = pi.LagrangeFirstOrder.cure_interval(nodes)
pi.register_base("distributed_base", self.distributed_base)
fractions = [pi.ComposedFunctionVector(f, s) for f, s in
zip(self.distributed_base, nodes)]
self.composed_base = pi.Base(fractions)
pi.register_base("composed_base", self.composed_base)
# lumped base
self.lumped_base = pi.Base([pi.ConstantFunction(1)])
pi.register_base("lumped_base", self.lumped_base)
# Test Functions
self.test_funcs = pi.TestFunction("distributed_base")
self.test_funcs_at0 = self.test_funcs(0)
self.test_funcs_at1 = self.test_funcs(1)
self.test_funcs_dz = self.test_funcs.derive(1)
self.test_funcs_dz_at1 = self.test_funcs_dz(1)
self.comp_test_funcs = pi.TestFunction("composed_base")
self.comp_test_funcs_at0 = self.comp_test_funcs(0)
self.comp_test_funcs_at1 = self.comp_test_funcs(1)
self.comp_test_funcs_dz = self.comp_test_funcs.derive(1)
self.comp_test_funcs_dz_at1 = self.comp_test_funcs_dz(1)
# Scalar Functions
self.scalar_func = pi.ScalarFunction("heavyside_base")
# Distributed / Field Variables
self.field_var = pi.FieldVariable("distributed_base")
self.field_var_at1 = self.field_var(1)
self.field_var_dz = self.field_var.derive(spat_order=1)
self.field_var_dz_at1 = self.field_var_dz(1)
self.field_var_ddt = self.field_var.derive(temp_order=2)
self.field_var_ddt_at0 = self.field_var_ddt(0)
self.field_var_ddt_at1 = self.field_var_ddt(1)
self.comp_field_var = pi.FieldVariable("composed_base")
self.comp_field_var_at1 = self.comp_field_var(1)
self.comp_field_var_dz = self.comp_field_var.derive(spat_order=1)
self.odd_weight_field_var = pi.FieldVariable(
"distributed_base", weight_label="special_weights")
# Field variable 2
self.lumped_var = pi.FieldVariable("lumped_base")
# ---------------------------------------------------------------------
# Construction of Equation Terms
# ---------------------------------------------------------------------
# inputs
self.input_term1 = pi.ScalarTerm(pi.Product(self.test_funcs_at1,
self.input))
self.input_term1_swapped = pi.ScalarTerm(pi.Product(self.input,
self.test_funcs_at1)
)
self.input_term2 = pi.ScalarTerm(pi.Product(self.test_funcs_dz_at1,
self.input))
self.input_term3 = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input),
limits=(0, 1))
self.input_term3_swapped = pi.IntegralTerm(pi.Product(self.input,
self.test_funcs),
limits=(0, 1))
self.input_term3_scaled = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, 1))
self.input_term3_scaled_first_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, .5))
self.input_term3_scaled_second_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(.5, 1))
self.input_term_dt = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input_dt),
limits=(0, 1))
self.input_term_vectorial1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_1))
self.input_term_vectorial2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_2))
self.input_term_vectorial_dt1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_dt_1))
self.input_term_vectorial_dt2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_dt_2))
# pure test function terms
self.func_term = pi.ScalarTerm(self.test_funcs_at1)
self.func_term_int = pi.IntegralTerm(pi.Product(self.test_funcs,
self.test_funcs),
limits=(0, 1))
self.comp_func_term = pi.ScalarTerm(self.comp_test_funcs_at1)
self.comp_func_term_int = pi.IntegralTerm(
pi.Product(self.comp_test_funcs, self.comp_test_funcs),
limits=(0, 1))
# pure field variable terms
self.field_term_at1 = pi.ScalarTerm(self.field_var_at1)
self.field_term_dz_at1 = pi.ScalarTerm(self.field_var_dz_at1)
self.field_term_ddt_at1 = pi.ScalarTerm(self.field_var_ddt_at1)
self.field_int = pi.IntegralTerm(self.field_var, limits=(0, 1))
self.field_int_half = pi.IntegralTerm(self.field_var, limits=(0, .5))
self.field_dz_int = pi.IntegralTerm(self.field_var_dz, (0, 1))
self.field_ddt_int = pi.IntegralTerm(self.field_var_ddt, (0, 1))
self.comp_field_term_at1 = pi.ScalarTerm(self.comp_field_var_at1)
self.comp_field_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
self.comp_field_dz_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
# products
self.prod_term_fs_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.scalars))
self.prod_int_fs = pi.IntegralTerm(pi.Product(self.field_var,
self.scalars),
(0, 1))
self.prod_int_f_f = pi.IntegralTerm(pi.Product(self.field_var,
self.test_funcs),
(0, 1))
self.prod_int_f_f_swapped = pi.IntegralTerm(pi.Product(self.test_funcs,
self.field_var),
(0, 1))
self.prod_int_f_at1_f = pi.IntegralTerm(
pi.Product(self.field_var_at1, self.test_funcs), (0, 1))
self.prod_int_f_f_at1 = pi.IntegralTerm(
pi.Product(self.field_var, self.test_funcs_at1), (0, 1))
self.prod_term_f_at1_f_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_at1))
self.prod_int_fddt_f = pi.IntegralTerm(
pi.Product(self.field_var_ddt, self.test_funcs), (0, 1))
self.prod_term_fddt_at0_f_at0 = pi.ScalarTerm(
pi.Product(self.field_var_ddt_at0, self.test_funcs_at0))
self.prod_term_f_at1_dphi_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_dz_at1))
self.temp_int = pi.IntegralTerm(pi.Product(self.field_var_ddt,
self.test_funcs),
limits=(0, 1))
self.spat_int = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs_dz),
limits=(0, 1))
self.spat_int_asymmetric = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs),
limits=(0, 1))
self.prod_term_tf_at0_lv_at0 = pi.ScalarTerm(
pi.Product(self.test_funcs(0), self.lumped_var(0)))
self.prod_term_tf_at0_lv_at0_swapped = pi.ScalarTerm(
pi.Product(self.lumped_var(0), self.test_funcs(0)))
self.prod_int_sf_fv = pi.IntegralTerm(pi.Product(self.scalar_func,
self.field_var),
limits=(0, 1))
self.prod_int_sf_fv_swapped = pi.IntegralTerm(
pi.Product(self.field_var, self.scalar_func),
limits=(0, 1))
self.alternating_weights_term = pi.IntegralTerm(
self.odd_weight_field_var,
limits=(0, 1))
def test_Input_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term2, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[0], [-2], [2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_swapped, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.0], [.25], [.25]]))
terms_fh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_first_half, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms_fh["G"][0][1],
np.array([[.0], [.0], [.0]]))
terms_sh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_second_half, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms_sh["G"][0][1],
np.array([[.0], [.25], [.25]]))
# vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial1, self.input_term_vectorial2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
# time derivatives of inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
self.input_term_dt,
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[.25], [.5], [.25]]))
# time derivative of vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial_dt1, self.input_term_vectorial_dt2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
def test_TestFunction_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0], [0], [1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[1 / 6],
[1 / 3],
[1 / 6]]))
if 0:
# composed
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.comp_func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0, 0],
[0, .5],
[1, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.comp_func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[1 / 6 + 0],
[1 / 3 + .25],
[1 / 6 + 1]]))
def test_FieldVariable_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_ddt_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[0, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_dz_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, -2, 2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[.25, .5, .25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_int_half, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[.25, .25, 0]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_dz_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[-1, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_ddt_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[.25, .5, .25]]))
# composed
# terms = sim.parse_weak_formulation(
# sim.WeakFormulation(self.comp_field_term_at1, name="test"),
# finalize=False).get_dynamic_terms()["composed_base"]
# self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
# np.testing.assert_array_almost_equal(terms["E"][0][1],
# np.array([[1, 0], [0, .5], [0, 1]]))
# terms = sim.parse_weak_formulation(
# sim.WeakFormulation(self.comp_field_int, name="test"),
# finalize=False).get_dynamic_terms()["composed_base"]
# self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
# np.testing.assert_array_almost_equal(terms["E"][0][1],
# np.array([[[.25, 0],
# [.5, .5],
# [.25, 1]]]))
def test_Product_term(self):
# TODO create test functionality that will automatically check if Case
# is also valid for swapped arguments
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_fs_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_fs, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 0],
[0.25, .5, .25],
[.5, 1, .5]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_f_f, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[1 / 6, 1 / 12, 0],
[1 / 12, 1 / 3, 1 / 12],
[0, 1 / 12, 1 / 6]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_f_f_swapped, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[1 / 6, 1 / 12, 0],
[1 / 12, 1 / 3, 1 / 12],
[0, 1 / 12, 1 / 6]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_f_at1_f, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 0.25],
[0, 0, 0.5],
[0, 0, .25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_f_f_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 0],
[0, 0, 0],
[0.25, 0.5, .25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_f_at1_f_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 1]]))
# more complex terms
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_fddt_f, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[1 / 6, 1 / 12, 0],
[1 / 12, 1 / 3, 1 / 12],
[0, 1 / 12, 1 / 6]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_fddt_at0_f_at0, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[1, 0, 0],
[0, 0, 0],
[0, 0, 0]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.spat_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[2, -2, 0],
[-2, 4, -2],
[0, -2, 2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.spat_int_asymmetric, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[-.5, .5, 0],
[-.5, 0, .5],
[0, -.5, .5]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_f_at1_dphi_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 0],
[0, 0, -2],
[0, 0, 2]]))
desired = np.array([[0, 0.25, 0.25]])
terms1 = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_sf_fv, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms1["E"][0][1], desired)
terms2 = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_int_sf_fv_swapped, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms2["E"][0][1], desired)
desired = np.array([[1], [0], [0]])
terms1 = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_tf_at0_lv_at0, name="test"),
finalize=False).get_dynamic_terms()["lumped_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms1["E"][0][1], desired)
terms2 = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_tf_at0_lv_at0_swapped, name="test"),
finalize=False).get_dynamic_terms()["lumped_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms2["E"][0][1], desired)
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term1, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[0], [0], [1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term1_swapped, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[0], [0], [1]]))
def test_alternating_weights(self):
self.assertRaises(ValueError, sim.parse_weak_formulation,
sim.WeakFormulation([self.alternating_weights_term,
self.field_int],
name=""))
    def _test_composed_function_vector(self, N):
        """Check parsing of weak formulations over composed function vectors.

        Builds a base of ``N`` :py:class:`pi.ComposedFunctionVector` fractions
        (each holding 2 functions and 2 scalars), parses an integral and a
        scalar term over it and compares the resulting system matrices with
        hand-computed references.

        :param N: number of fractions in the test base (2..6 supported,
            see the TODO in the caller regarding N == 1).
        """
        nf = 2
        # constant-zero and Lagrange hat functions used as vector components
        funcs0 = [pi.ConstantFunction(0, domain=(0, 1))] * nf
        funcs1 = list(pi.LagrangeFirstOrder.cure_interval(pi.Domain((0, 1), nf)))
        funcs01 = funcs0 + funcs1 + funcs0
        funcs10 = funcs1 + funcs0 + funcs0
        scalars01 = [0] * 2 * nf + [0, 1]
        scalars10 = [0] * 2 * nf + [1, 0]
        def register_cfv_test_base(n_fracs, n_funcs, n_scalars, label):
            # assemble the composed base fraction-by-fraction and register it
            assert n_fracs <= min(len(funcs01), len(scalars01))
            assert n_funcs <= 2
            assert n_scalars <= 2
            sel_funcs = [funcs10, funcs01][:n_funcs]
            sel_scalars = [scalars10, scalars01][:n_scalars]
            base = list()
            for i in range(n_fracs):
                base.append(pi.ComposedFunctionVector(
                    [f[i] for f in sel_funcs],
                    [s[i] for s in sel_scalars]))
            pi.register_base(label, pi.Base(base))
        register_cfv_test_base(N, 2, 2, "baseN22")
        fv = pi.FieldVariable("baseN22")
        tf = pi.TestFunction("baseN22")
        wf = pi.WeakFormulation([
            pi.IntegralTerm(pi.Product(fv, tf), limits=(0, 1)),
            pi.ScalarTerm(pi.Product(fv.derive(temp_order=1)(0), tf(1))),
        ], name="wfN22")
        cf = pi.parse_weak_formulation(wf)
        # reference for the integral term: Gram matrix of the hat functions,
        # repeated per function slot, plus ones for the scalar slots
        scal_prod1 = pi.calculate_scalar_product_matrix(pi.Base(funcs1),
                                                        pi.Base(funcs1))
        scal_prod_mat = block_diag(scal_prod1, scal_prod1, 1, 1)
        # print(scal_prod_mat[:N, :N])
        # print(cf.dynamic_forms["baseN22"].matrices["E"][0][1])
        np.testing.assert_array_almost_equal(
            scal_prod_mat[:N, :N],
            cf.dynamic_forms["baseN22"].matrices["E"][0][1]
        )
        # reference for the scalar term (point evaluations at 0 and 1)
        prod_mat = np.diag([1, 0, 1, 0, 0], -1) + np.diag([0] * 4 + [1] * 2)
        # print(prod_mat[:N, :N])
        # print(cf.dynamic_forms["baseN22"].matrices["E"][1][1])
        np.testing.assert_array_almost_equal(
            prod_mat[:N, :N],
            cf.dynamic_forms["baseN22"].matrices["E"][1][1]
        )
        pi.deregister_base("baseN22")
def test_composed_function_vector(self):
# todo: fix bug for i=1, at the moment there is no
# way to distinguish (in _compute_product_of_scalars) between a
# composed function vector with N entries + approximation order 1
# and a pi.Function and approximation order N
# for i in [6, 5, 4, 3, 2, 1]:
for i in [6, 5, 4, 3, 2]:
print("i = ", i)
self._test_composed_function_vector(i)
def tearDown(self):
pi.deregister_base("heavyside_base")
pi.deregister_base("distributed_base")
pi.deregister_base("composed_base")
pi.deregister_base("lumped_base")
class StateSpaceTests(unittest.TestCase):
    """Derivation and simulation of the state-space form of a small FEM
    model (3 Lagrange first-order nodes, ``swm`` weak formulation)."""
    def setUp(self):
        # setup temp and spat domain
        self.time_domain = pi.Domain((0, 1), num=10)
        node_cnt = 3
        spat_domain = pi.Domain((0, 1), num=node_cnt)
        lag_base = pi.LagrangeFirstOrder.cure_interval(spat_domain)
        pi.register_base("swm_base", lag_base)
        # input
        self.u = CorrectInput(output=5, limits=(0, 10))
        # self.u = CorrectInput(limits=self.time_domain.bounds)
        field_var = pi.FieldVariable("swm_base")
        field_var_ddt = field_var.derive(temp_order=2)
        field_var_dz = field_var.derive(spat_order=1)
        psi = pi.TestFunction("swm_base")
        psi_dz = psi.derive(1)
        # enter string with mass equations
        int1 = pi.IntegralTerm(pi.Product(field_var_ddt, psi),
                               spat_domain.bounds)
        s1 = pi.ScalarTerm(pi.Product(field_var_ddt(0), psi(0)))
        int2 = pi.IntegralTerm(pi.Product(field_var_dz, psi_dz),
                               spat_domain.bounds)
        s2 = pi.ScalarTerm(pi.Product(pi.Input(self.u), psi(1)), -1)
        string_eq = sim.WeakFormulation([int1, s1, int2, s2], name="swm")
        self.ce = sim.parse_weak_formulation(string_eq)
        # 3 nodes x 2 temporal orders -> 6 state variables
        self.ic = np.zeros((6, ))
    def test_convert_to_state_space(self):
        # A and B entries below are hand-computed references for this
        # particular 3-node discretization
        ss = sim.create_state_space(self.ce)
        self.assertEqual(ss.A[1].shape, (6, 6))
        np.testing.assert_array_almost_equal(
            ss.A[1],
            np.array([[0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 1, 0],
                      [0, 0, 0, 0, 0, 1],
                      [-2.25, 3, -.75, 0, 0, 0],
                      [7.5, -18, 10.5, 0, 0, 0],
                      [-3.75, 21, -17.25, 0, 0, 0]]))
        self.assertEqual(ss.B[0][1].shape, (6, 1))
        np.testing.assert_array_almost_equal(
            ss.B[0][1],
            np.array([[0], [0], [0], [0.125], [-1.75], [6.875]]))
        self.assertEqual(self.ce.input_function, self.u)
    def test_simulate_state_space(self):
        """
        using the diligent input this test makes sure, that the solver doesn't
        evaluate the provided input outside the given time domain
        """
        ss = sim.create_state_space(self.ce)
        t, q = sim.simulate_state_space(ss, self.ic, self.time_domain)
        # print(self.u._time_storage)
        # print(self.time_domain.points)
        # print(t.points)
        # check that the demanded time range has been simulated
        np.testing.assert_array_almost_equal(t.points, self.time_domain.points)
    def tearDown(self):
        pi.deregister_base("swm_base")
class StringMassTest(unittest.TestCase):
    """FEM and modal simulation of the string-with-mass system, driven by a
    flatness-based trajectory (``hff.FlatString``)."""
    # cached result of test_fem, served by create_test_data()
    example_data = None
    def create_test_data(self):
        """Run the FEM case once (headless) and return a copy of its
        evaluation data; reused by other test modules as a fixture."""
        if self.example_data is None:
            self.setUp()
            self.test_fem(show=False)
            self.tearDown()
        return copy.copy(self.example_data)
    def setUp(self):
        z_start = 0
        z_end = 1
        z_bounds = (z_start, z_end)
        z_step = 0.1
        self.dz = pi.Domain(bounds=z_bounds, num=9)
        t_start = 0
        t_end = 10
        t_step = 0.01
        self.dt = pi.Domain(bounds=(t_start, t_end), step=t_step)
        # NOTE(review): this binds the *class* pi.Parameters and mutates its
        # class attributes below, so the values are shared globally — confirm
        # an instance was not intended here.
        self.params = pi.Parameters
        self.params.node_distance = 0.1
        self.params.m = 1.0
        self.params.order = 8
        self.params.sigma = 1
        self.params.tau = 1
        self.y_end = 10
        self.u = hff.FlatString(0, self.y_end, z_start, z_end, 0, 5, self.params)
        def x(z, t):
            """
            initial conditions for testing
            """
            return 0
        def x_dt(z, t):
            """
            initial conditions for testing
            """
            return 0
        # initial conditions
        self.ic = np.array([
            pi.Function(lambda z: x(z, 0), domain=z_bounds),  # x(z, 0)
            pi.Function(lambda z: x_dt(z, 0), domain=z_bounds),  # dx_dt(z, 0)
        ])
    def test_fem(self, show=True):
        """
        use best documented fem case to test all steps in simulation process
        """
        # enter string with mass equations
        nodes = pi.Domain(self.dz.bounds, num=11)
        fem_base = pi.LagrangeSecondOrder.cure_interval(nodes)
        pi.register_base("fem_base", fem_base)
        field_var = pi.FieldVariable("fem_base")
        field_var_ddt = field_var.derive(temp_order=2)
        field_var_dz = field_var.derive(spat_order=1)
        psi = pi.TestFunction("fem_base")
        psi_dz = psi.derive(1)
        # enter string with mass equations
        int1 = pi.IntegralTerm(pi.Product(field_var_ddt, psi),
                               self.dz.bounds,
                               scale=self.params.sigma)
        s1 = pi.ScalarTerm(pi.Product(field_var_ddt(0), psi(0)),
                           scale=self.params.m)
        int2 = pi.IntegralTerm(pi.Product(field_var_dz, psi_dz),
                               self.dz.bounds,
                               scale=self.params.sigma)
        s2 = pi.ScalarTerm(pi.Product(pi.Input(self.u), psi(1)),
                           scale=-self.params.sigma)
        # derive sate-space system
        string_pde = sim.WeakFormulation([int1, s1, int2, s2], name="fem_test")
        self.cf = sim.parse_weak_formulation(string_pde)
        ss = sim.create_state_space(self.cf)
        # generate initial conditions for weights
        q0 = np.array([pi.project_on_base(self.ic[idx], fem_base)
                       for idx in range(2)]).flatten()
        # simulate
        t, q = sim.simulate_state_space(ss, q0, self.dt)
        # calculate result data: one EvalData per temporal derivative order
        eval_data = []
        for der_idx in range(2):
            eval_data.append(sim.evaluate_approximation(
                "fem_base",
                q[:, der_idx*fem_base.fractions.size:(der_idx + 1)*fem_base.fractions.size],
                t, self.dz))
            # name pattern: "fem_test", "fem_test_dt", "fem_test_ddt", ...
            eval_data[-1].name = "{0}{1}".format(self.cf.name, "_" + "".join(
                ["d" for x in range(der_idx)]) + "t" if der_idx > 0 else "")
        pi.deregister_base("fem_base")
        # display results
        if show_plots and show:
            win = pi.PgAnimatedPlot(eval_data[:2],
                                    title="fem approx and derivative")
            win2 = pi.PgSurfacePlot(eval_data[0])
            pi.show(show_mpl=False)
        # test for correct transition
        self.assertAlmostEqual(eval_data[0].output_data[-1, 0],
                               self.y_end,
                               places=3)
        # save some test data for later use
        self.example_data = eval_data
    def test_modal(self):
        """Simulate the same system via its eigenfunction (modal) base and
        check the same end-point transition."""
        order = 8
        def char_eq(w):
            # characteristic equation whose roots are the eigenfrequencies
            return w * (np.sin(w) + self.params.m * w * np.cos(w))
        def phi_k_factory(freq, derivative_order=0):
            # eigenfunction (and its first two spatial derivatives) for freq
            def eig_func(z):
                return (np.cos(freq * z)
                        - self.params.m * freq * np.sin(freq * z))
            def eig_func_dz(z):
                return (-freq * (np.sin(freq * z)
                                 + self.params.m * freq * np.cos(freq * z)))
            def eig_func_ddz(z):
                return (freq ** 2 * (-np.cos(freq * z)
                                     + self.params.m * freq * np.sin(freq * z)))
            if derivative_order == 0:
                return eig_func
            elif derivative_order == 1:
                return eig_func_dz
            elif derivative_order == 2:
                return eig_func_ddz
            else:
                raise ValueError
        # create eigenfunctions
        eig_frequencies = pi.find_roots(char_eq,
                                        grid=np.arange(0, 1e3, 2),
                                        n_roots=order,
                                        rtol=1e-2)
        print("eigenfrequencies:")
        print(eig_frequencies)
        # create eigen function vectors
        class SWMFunctionVector(pi.ComposedFunctionVector):
            """
            String With Mass Function Vector, necessary due to manipulated
            scalar product
            """
            def __init__(self, function, function_at_0):
                super().__init__(function, function_at_0)
            @property
            def func(self):
                return self.members["funcs"][0]
            @property
            def scalar(self):
                return self.members["scalars"][0]
        eig_vectors = np.array([SWMFunctionVector(pi.Function(
            phi_k_factory(eig_frequencies[n]),
            derivative_handles=[
                phi_k_factory(eig_frequencies[n], der_order)
                for der_order in range(1, 3)],
            domain=self.dz.bounds,
            nonzero=self.dz.bounds),
            phi_k_factory(eig_frequencies[n])(0))
            for n in range(order)])
        composed_modal_base = pi.Base(eig_vectors)
        # normalize base
        norm_comp_mod_base = pi.normalize_base(composed_modal_base)
        norm_mod_base = pi.Base(np.array(
            [vec.func
             for vec in norm_comp_mod_base.fractions]))
        pi.register_base("norm_modal_base", norm_mod_base, overwrite=True)
        # debug print eigenfunctions
        if 0:
            func_vals = []
            for vec in eig_vectors:
                func_vals.append(np.vectorize(vec.func)(self.dz))
            norm_func_vals = []
            for func in norm_mod_base.fractions:
                norm_func_vals.append(np.vectorize(func)(self.dz))
            clrs = ["r", "g", "b", "c", "m", "y", "k", "w"]
            for n in range(1, order + 1, len(clrs)):
                pw_phin_k = pg.plot(title="phin_k for k in [{0}, {1}]".format(n, min(n + len(clrs), order)))
                for k in range(len(clrs)):
                    if k + n > order:
                        break
                    pw_phin_k.plot(x=np.array(self.dz), y=norm_func_vals[n + k - 1], pen=clrs[k])
                pi.show(show_mpl=False)
        # create terms of weak formulation
        terms = [pi.IntegralTerm(pi.Product(pi.FieldVariable("norm_modal_base", order=(2, 0)),
                                            pi.TestFunction("norm_modal_base")),
                                 self.dz.bounds, scale=-1),
                 pi.ScalarTerm(pi.Product(
                     pi.FieldVariable("norm_modal_base", order=(2, 0), location=0),
                     pi.TestFunction("norm_modal_base", location=0)),
                     scale=-1),
                 pi.ScalarTerm(pi.Product(pi.Input(self.u),
                                          pi.TestFunction("norm_modal_base", location=1))),
                 pi.ScalarTerm(
                     pi.Product(pi.FieldVariable("norm_modal_base", location=1),
                                pi.TestFunction("norm_modal_base", order=1, location=1)),
                     scale=-1),
                 pi.ScalarTerm(pi.Product(pi.FieldVariable("norm_modal_base", location=0),
                                          pi.TestFunction("norm_modal_base", order=1,
                                                          location=0))),
                 pi.IntegralTerm(pi.Product(pi.FieldVariable("norm_modal_base"),
                                            pi.TestFunction("norm_modal_base", order=2)),
                                 self.dz.bounds)]
        modal_pde = sim.WeakFormulation(terms, name="swm_lib-modal")
        # simulate
        eval_data = sim.simulate_system(modal_pde, self.ic, self.dt, self.dz, derivative_orders=(1, 0))
        # display results
        if show_plots:
            win = pi.PgAnimatedPlot(eval_data[0:2], title="modal approx and derivative")
            win2 = pi.PgSurfacePlot(eval_data[0])
            pi.show(show_mpl=False)
        pi.deregister_base("norm_modal_base")
        # test for correct transition
        self.assertTrue(np.isclose(eval_data[0].output_data[-1, 0], self.y_end, atol=1e-3))
    def tearDown(self):
        pass
class MultipleODETest(unittest.TestCase):
    """Parsing of two coupled ODE systems into one state-space model."""
    # NOTE(review): the name below lacks the "test_" prefix, so unittest
    # discovery never runs it — presumably disabled on purpose (pending
    # PR 12?); confirm before renaming.
    def desired_test_pr12(self):
        """
        Let us consider the system of ordinary differential equations

            x1^(3)(t) = x2(t) + u(t)
            x2^(1)(t) = x1^(2)(t) + u(t).

        Desired state space model for x = (x1, x1^(1), x1^(2), x2)^T

                    [ 0  1  0  0 ]       [0]
            x^(1) = [ 0  0  1  0 ] x  +  [0] u.
                    [ 0  0  0  1 ]       [1]
                    [ 0  0  1  0 ]       [1]
        """
        a_desired = np.array([[0, 1, 0, 0],
                              [0, 0, 1, 0],
                              [0, 0, 0, 1],
                              [0, 0, 1, 0]])
        b_desired = np.array([[0], [0], [1], [1]])
        # lumped variables: model each ODE state with a constant base
        # evaluated at a dummy point
        dummy_domain = pi.Domain(bounds=(-1, 1), num=2)
        dummy_point = 0
        pi.register_base("base_1", pi.Base(
            pi.ConstantFunction(1, domain=dummy_domain.bounds)))
        pi.register_base("base_2", pi.Base(
            pi.ConstantFunction(1, domain=dummy_domain.bounds)))
        x1 = pi.FieldVariable("base_1")(dummy_point)
        x2 = pi.FieldVariable("base_2")(dummy_point)
        u = pi.Input(pi.ConstantTrajectory(0))
        weak_form_1 = pi.WeakFormulation([
            pi.ScalarTerm(x1.derive(temp_order=3), scale=-1),
            pi.ScalarTerm(x2),
            pi.ScalarTerm(u)
        ], name="sys_1")
        weak_form_2 = pi.WeakFormulation([
            pi.ScalarTerm(x2.derive(temp_order=1), scale=-1),
            pi.ScalarTerm(x1.derive(temp_order=2)),
            pi.ScalarTerm(u)
        ], name="sys_2", dominant_lbl="base_2")
        weak_forms = [weak_form_1, weak_form_2]
        canonical_equations = [pi.parse_weak_formulation(form)
                               for form in weak_forms]
        state_space_form = pi.create_state_space(canonical_equations)
        np.testing.assert_array_almost_equal(state_space_form.A[1], a_desired)
        np.testing.assert_array_almost_equal(state_space_form.B[0][1], b_desired)
class MultiplePDETest(unittest.TestCase):
    """
    This TestCase covers the implementation of the parsing and simulation of
    coupled pde systems.
    """
    def setUp(self):
        # a chain of three transport (advection) sections on [0,l1], [l1,l2],
        # [l2,l3] feeding a string-with-mass section on [l3,l4]
        l1 = 1
        l2 = 4
        l3 = 6
        l4 = 10
        self.dz1 = pi.Domain(bounds=(0, l1), num=100)
        self.dz2 = pi.Domain(bounds=(l1, l2), num=100)
        self.dz3 = pi.Domain(bounds=(l2, l3), num=100)
        self.dz4 = pi.Domain(bounds=(l3, l4), num=100)
        t_start = 0
        t_end = 10
        t_step = 0.01
        self.dt = pi.Domain(bounds=(t_start, t_end), step=t_step)
        # transport velocities of the three sections
        v1 = 1
        v2 = 2
        v3 = 3
        mass = 1
        def x(z, t):
            """
            initial conditions for testing
            """
            return 0
        # initial conditions
        self.ic1 = np.array([pi.Function(lambda z: x(z, 0),
                                         domain=self.dz1.bounds)])
        self.ic2 = np.array([pi.Function(lambda z: x(z, 0),
                                         domain=self.dz2.bounds)])
        self.ic3 = np.array([pi.Function(lambda z: x(z, 0),
                                         domain=self.dz3.bounds)])
        # sys_4 is second order in time -> two initial profiles
        self.ic4 = np.array([
            pi.Function(lambda z: x(z, 0), domain=self.dz4.bounds),
            pi.Function(lambda z: x(z, 0), domain=self.dz4.bounds)])
        # weak formulations
        nodes1 = pi.Domain(self.dz1.bounds, num=3)
        nodes2 = pi.Domain(self.dz2.bounds, num=3)
        nodes3 = pi.Domain(self.dz3.bounds, num=3)
        nodes4 = pi.Domain(self.dz4.bounds, num=15)
        base1 = pi.LagrangeFirstOrder.cure_interval(nodes1)
        base2 = pi.LagrangeFirstOrder.cure_interval(nodes2)
        base3 = pi.LagrangeFirstOrder.cure_interval(nodes3)
        base4 = pi.LagrangeFirstOrder.cure_interval(nodes4)
        pi.register_base("base_1", base1)
        pi.register_base("base_2", base2)
        pi.register_base("base_3", base3)
        pi.register_base("base_4", base4)
        traj = AlternatingInput()
        u = pi.Input(traj)
        # system 1: transport equation actuated by u at z=0
        x1 = pi.FieldVariable("base_1")
        psi_1 = pi.TestFunction("base_1")
        self.weak_form_1 = pi.WeakFormulation([
            pi.IntegralTerm(pi.Product(x1.derive(temp_order=1), psi_1), limits=self.dz1.bounds),
            pi.IntegralTerm(pi.Product(x1, psi_1.derive(1)), limits=self.dz1.bounds, scale=-v1),
            pi.ScalarTerm(pi.Product(u, psi_1(0)), scale=-v1),
            pi.ScalarTerm(pi.Product(x1(l1), psi_1(l1)), scale=v1),
        ], name="sys_1")
        # system 2: transport equation, fed by system 1's output x1(l1)
        x2 = pi.FieldVariable("base_2")
        psi_2 = pi.TestFunction("base_2")
        self.weak_form_2 = pi.WeakFormulation([
            pi.IntegralTerm(pi.Product(x2.derive(temp_order=1), psi_2), limits=self.dz2.bounds),
            pi.IntegralTerm(pi.Product(x2, psi_2.derive(1)), limits=self.dz2.bounds, scale=-v2),
            pi.ScalarTerm(pi.Product(x1(l1), psi_2(l1)), scale=-v2),
            pi.ScalarTerm(pi.Product(x2(l2), psi_2(l2)), scale=v2),
        ], name="sys_2")
        # system 3: transport equation, fed by system 2's output x2(l2)
        x3 = pi.FieldVariable("base_3")
        psi_3 = pi.TestFunction("base_3")
        self.weak_form_3 = pi.WeakFormulation([
            pi.IntegralTerm(pi.Product(x3.derive(temp_order=1), psi_3), limits=self.dz3.bounds),
            pi.IntegralTerm(pi.Product(x3, psi_3.derive(1)), limits=self.dz3.bounds, scale=-v3),
            pi.ScalarTerm(pi.Product(x2(l2), psi_3(l2)), scale=-v3),
            pi.ScalarTerm(pi.Product(x3(l3), psi_3(l3)), scale=v3),
        ], name="sys_3")
        # system 4: string with mass at z=l4, fed by system 3's output x3(l3)
        x4 = pi.FieldVariable("base_4")
        psi_4 = pi.TestFunction("base_4")
        self.weak_form_4 = pi.WeakFormulation([
            pi.IntegralTerm(pi.Product(x4.derive(temp_order=2), psi_4), limits=self.dz4.bounds, scale=-1),
            pi.IntegralTerm(pi.Product(x4.derive(spat_order=1), psi_4.derive(1)), limits=self.dz4.bounds, scale=-1),
            pi.ScalarTerm(pi.Product(x4.derive(temp_order=2)(l4), psi_4(l4)), scale=-mass),
            pi.ScalarTerm(pi.Product(x3(l3), psi_4(l3)), scale=-1),
        ], name="sys_4")
    def test_single_system(self):
        results = pi.simulate_system(self.weak_form_1, self.ic1, self.dt, self.dz1)
        if show_plots:
            win = pi.PgAnimatedPlot(results)
            pi.show(show_mpl=False)
    def test_coupled_system(self):
        """
        test the coupled system
        """
        weak_forms = [self.weak_form_1, self.weak_form_2]
        ics = {self.weak_form_1.name: self.ic1, self.weak_form_2.name: self.ic2}
        spat_domains = {self.weak_form_1.name: self.dz1, self.weak_form_2.name: self.dz2}
        derivatives = {self.weak_form_1.name: (0, 0), self.weak_form_2.name: (0, 0)}
        res = pi.simulate_systems(weak_forms, ics, self.dt, spat_domains, derivatives)
        if show_plots:
            win = pi.PgAnimatedPlot(res)
            pi.show(show_mpl=False)
    def test_triple_system(self):
        """
        three coupled systems
        """
        weak_forms = [self.weak_form_1, self.weak_form_2, self.weak_form_3]
        ics = {self.weak_form_1.name: self.ic1,
               self.weak_form_2.name: self.ic2,
               self.weak_form_3.name: self.ic3}
        spat_domains = {self.weak_form_1.name: self.dz1,
                        self.weak_form_2.name: self.dz2,
                        self.weak_form_3.name: self.dz3}
        derivatives = {self.weak_form_1.name: (0, 0),
                       self.weak_form_2.name: (0, 0),
                       self.weak_form_3.name: (0, 0)}
        res = pi.simulate_systems(weak_forms, ics, self.dt, spat_domains, derivatives)
        if show_plots:
            win = pi.PgAnimatedPlot(res)
            pi.show(show_mpl=False)
    def test_triple_system_with_swm(self):
        """
        three coupled systems where the output at l4 is the input for
        a string with mass
        """
        weak_forms = [self.weak_form_1, self.weak_form_2, self.weak_form_3,
                      self.weak_form_4]
        ics = {self.weak_form_1.name: self.ic1,
               self.weak_form_2.name: self.ic2,
               self.weak_form_3.name: self.ic3,
               self.weak_form_4.name: self.ic4}
        spat_domains = {self.weak_form_1.name: self.dz1,
                        self.weak_form_2.name: self.dz2,
                        self.weak_form_3.name: self.dz3,
                        self.weak_form_4.name: self.dz4}
        derivatives = {self.weak_form_1.name: (0, 0),
                       self.weak_form_2.name: (0, 0),
                       self.weak_form_3.name: (0, 0),
                       self.weak_form_4.name: (1, 1)}
        res = pi.simulate_systems(weak_forms, ics, self.dt, spat_domains, derivatives)
        if show_plots:
            win = pi.PgAnimatedPlot(res)
            pi.show(show_mpl=False)
    def tearDown(self):
        pi.deregister_base("base_1")
        pi.deregister_base("base_2")
        pi.deregister_base("base_3")
        pi.deregister_base("base_4")
class RadFemTrajectoryTest(unittest.TestCase):
    """
    Test FEM simulation with pi.LagrangeFirstOrder and pi.LagrangeSecondOrder
    test functions and generic trajectory generator RadTrajectory for the
    reaction-advection-diffusion equation.

    Test case names follow ``test_<bound_cond><actuation>`` with
    ``d`` = dirichlet and ``r`` = robin.
    """
    def setUp(self):
        # equation parameters [a2, a1, a0, alpha, beta]
        self.param = [2., -1.5, -3., 2., .5]
        self.a2, self.a1, self.a0, self.alpha, self.beta = self.param
        self.l = 1.
        spatial_disc = 11
        self.dz = pi.Domain(bounds=(0, self.l), num=spatial_disc)
        self.T = 1.
        temporal_disc = 50
        self.dt = pi.Domain(bounds=(0, self.T), num=temporal_disc)
        # create test functions
        self.nodes_1 = pi.Domain(self.dz.bounds, num=spatial_disc)
        self.base_1 = pi.LagrangeFirstOrder.cure_interval(self.nodes_1)
        pi.register_base("base_1", self.base_1)
        self.nodes_2 = pi.Domain(self.dz.bounds, num=spatial_disc)
        # NOTE(review): base_2 is cured over nodes_1 although nodes_2 is
        # created right above; both domains are identical so behavior is the
        # same — confirm which one was intended.
        self.base_2 = pi.LagrangeSecondOrder.cure_interval(self.nodes_1)
        pi.register_base("base_2", self.base_2)
    @unittest.skip  # needs border homogenization to work
    def test_dd(self):
        """Dirichlet boundary condition, dirichlet actuation."""
        # TODO adopt this test case
        # trajectory
        bound_cond_type = 'dirichlet'
        actuation_type = 'dirichlet'
        u = parabolic.RadFeedForward(self.l,
                                     self.T,
                                     self.param,
                                     bound_cond_type, actuation_type)
        # derive state-space system
        rad_pde = parabolic.get_parabolic_dirichlet_weak_form("base_2",
                                                              "base_2",
                                                              u,
                                                              self.param,
                                                              self.dz.bounds)
        ce = sim.parse_weak_formulation(rad_pde)
        ss = sim.create_state_space(ce)
        # simulate system
        t, q = sim.simulate_state_space(ss,
                                        np.zeros(self.base_2.shape),
                                        self.dt)
        # NOTE(review): the simulation ran on "base_2" but the evaluation
        # below uses "base_1" — verify once this (skipped) case is revived.
        eval_d = sim.evaluate_approximation("base_1",
                                            q,
                                            t,
                                            self.dz,
                                            spat_order=1)
        # display results
        if show_plots:
            win1 = pi.PgAnimatedPlot([eval_d], title="Test")
            win2 = pi.PgSurfacePlot(eval_d)
            pi.show(show_mpl=False)
        # TODO add Test here
        return t, q
    @unittest.skip  # needs border homogenization to work
    def test_rd(self):
        """Robin boundary condition, dirichlet actuation.

        This method was previously also named ``test_dd``, which silently
        shadowed the dirichlet/dirichlet case above; renamed to match the
        naming convention.
        """
        # TODO adopt this test case
        # trajectory
        bound_cond_type = 'robin'
        actuation_type = 'dirichlet'
        u = parabolic.RadFeedForward(self.l, self.T, self.param, bound_cond_type, actuation_type)
        # integral terms
        int1 = pi.IntegralTerm(pi.Product(pi.TemporalDerivedFieldVariable("base_2", order=1),
                                          pi.TestFunction("base_2", order=0)), self.dz.bounds)
        int2 = pi.IntegralTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_2", order=0),
                                          pi.TestFunction("base_2", order=2)), self.dz.bounds, -self.a2)
        int3 = pi.IntegralTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_2", order=1),
                                          pi.TestFunction("base_2", order=0)), self.dz.bounds, -self.a1)
        int4 = pi.IntegralTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_2", order=0),
                                          pi.TestFunction("base_2", order=0)), self.dz.bounds, -self.a0)
        # scalar terms from int 2
        s1 = pi.ScalarTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_2", order=1, location=self.l),
                                      pi.TestFunction("base_2", order=0, location=self.l)), -self.a2)
        s2 = pi.ScalarTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_2", order=0, location=0),
                                      pi.TestFunction("base_2", order=0, location=0)), self.a2 * self.alpha)
        s3 = pi.ScalarTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_2", order=0, location=0),
                                      pi.TestFunction("base_2", order=1, location=0)), -self.a2)
        s4 = pi.ScalarTerm(pi.Product(pi.Input(u),
                                      pi.TestFunction("base_2", order=1, location=self.l)), self.a2)
        # derive state-space system
        rad_pde = sim.WeakFormulation([int1, int2, int3, int4, s1, s2, s3, s4], name="rad_pde")
        ce = sim.parse_weak_formulation(rad_pde)
        ss = sim.create_state_space(ce)
        # simulate system
        t, q = sim.simulate_state_space(ss, np.zeros(self.base_2.shape), self.dt)
        # TODO add test here
        return t, q
    @unittest.skip  # needs border homogenization to work
    def test_dr(self):
        """Dirichlet boundary condition, robin actuation."""
        # trajectory
        bound_cond_type = 'dirichlet'
        actuation_type = 'robin'
        u = parabolic.RadFeedForward(self.l,
                                     self.T,
                                     self.param,
                                     bound_cond_type,
                                     actuation_type)
        # integral terms
        int1 = pi.IntegralTerm(pi.Product(pi.TemporalDerivedFieldVariable("base_1", order=1),
                                          pi.TestFunction("base_1", order=0)), self.dz.bounds)
        int2 = pi.IntegralTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_1", order=1),
                                          pi.TestFunction("base_1", order=1)), self.dz.bounds, self.a2)
        int3 = pi.IntegralTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_1", order=0),
                                          pi.TestFunction("base_1", order=1)), self.dz.bounds, self.a1)
        int4 = pi.IntegralTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_1", order=0),
                                          pi.TestFunction("base_1", order=0)), self.dz.bounds, -self.a0)
        # scalar terms from int 2
        s1 = pi.ScalarTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_1", order=0, location=self.l),
                                      pi.TestFunction("base_1", order=0, location=self.l)), -self.a1)
        s2 = pi.ScalarTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_1", order=0, location=self.l),
                                      pi.TestFunction("base_1", order=0, location=self.l)), self.a2 * self.beta)
        s3 = pi.ScalarTerm(pi.Product(pi.SpatialDerivedFieldVariable("base_1", order=1, location=0),
                                      pi.TestFunction("base_1", order=0, location=0)), self.a2)
        s4 = pi.ScalarTerm(pi.Product(pi.Input(u),
                                      pi.TestFunction("base_1", order=0, location=self.l)), -self.a2)
        rad_pde = sim.WeakFormulation([int1, int2, int3, int4, s1, s2, s3, s4], "rad_pde")
        # derive state-space system
        ce = sim.parse_weak_formulation(rad_pde)
        ss = sim.create_state_space(ce)
        # simulate system
        t, q = sim.simulate_state_space(ss, np.zeros(self.base_1.fractions.shape), self.dt)
        # check if (x'(0,t_end) - 1.) < 0.1
        self.assertLess(np.abs(self.base_1.fractions[0].derive(1)(sys.float_info.min) * (q[-1, 0] - q[-1, 1])) - 1, 0.1)
    def test_rr(self):
        """Robin boundary condition, robin actuation."""
        # trajectory
        bound_cond_type = 'robin'
        actuation_type = 'robin'
        u = parabolic.RadFeedForward(self.l, self.T, self.param, bound_cond_type, actuation_type)
        # derive state-space system
        rad_pde, extra_labels = parabolic.get_parabolic_robin_weak_form("base_1", "base_1", u, self.param, self.dz.bounds)
        ce = sim.parse_weak_formulation(rad_pde)
        ss = sim.create_state_space(ce)
        # simulate system
        t, q = sim.simulate_state_space(ss, np.zeros(self.base_1.fractions.shape), self.dt)
        for lbl in extra_labels:
            pi.deregister_base(lbl)
        # check if (x(0,t_end) - 1.) < 0.1
        self.assertLess(np.abs(self.base_1.fractions[0].derive(0)(0) * q[-1, 0]) - 1, 0.1)
    def test_rr_const_trajectory(self):
        """Robin/robin case driven by a constant input trajectory."""
        # TODO if it is only testing ConstantTrajectory should it better be moved to test_visualization ?
        # const trajectory simulation call test
        u = pi.ConstantTrajectory(1)
        # derive state-space system
        rad_pde, extra_labels = pi.parabolic.get_parabolic_robin_weak_form("base_1", "base_1", u, self.param, self.dz.bounds)
        ce = sim.parse_weak_formulation(rad_pde)
        ss = sim.create_state_space(ce)
        # simulate system
        t, q = sim.simulate_state_space(ss, np.zeros(self.base_1.fractions.shape), self.dt)
        # deregister extra labels
        for lbl in extra_labels:
            pi.deregister_base(lbl)
        # TODO add a Test here
    def tearDown(self):
        pi.deregister_base("base_1")
        pi.deregister_base("base_2")
class RadDirichletModalVsWeakFormulationTest(unittest.TestCase):
    """
    Compare the (A, B) pair obtained from the parsed weak formulation with
    the one obtained by modal transformation for the dirichlet/dirichlet
    parabolic problem.
    """
    def test_comparison(self):
        actuation_type = 'dirichlet'
        bound_cond_type = 'dirichlet'
        # [a2, a1, a0, alpha, beta]; boundary params unused for dirichlet
        param = [1., -2., -1., None, None]
        adjoint_param = pi.SecondOrderEigenfunction.get_adjoint_problem(param)
        a2, a1, a0, _, _ = param
        l = 1.
        spatial_disc = 10
        dz = pi.Domain(bounds=(0, l), num=spatial_disc)
        t_end = 1.
        temporal_disc = 50
        dt = pi.Domain(bounds=(0, t_end), num=temporal_disc)
        (omega, eig_values
         ) = pi.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint(
            param=param,
            l=dz.bounds[-1],
            n_roots=spatial_disc)
        norm_fak = np.ones(omega.shape) * np.sqrt(2)
        eig_base = pi.Base([pi.SecondOrderDirichletEigenfunction(omega[i],
                                                                 param,
                                                                 dz.bounds[-1],
                                                                 norm_fak[i])
                            for i in range(spatial_disc)])
        pi.register_base("eig_base", eig_base)
        adjoint_eig_base = pi.Base(
            [pi.SecondOrderDirichletEigenfunction(omega[i],
                                                  adjoint_param,
                                                  dz.bounds[-1],
                                                  norm_fak[i])
             for i in range(spatial_disc)])
        pi.register_base("adjoint_eig_base", adjoint_eig_base)
        # derive initial field variable x(z,0) and weights
        start_state = pi.Function(lambda z: 0., domain=(0, l))
        initial_weights = pi.project_on_base(start_state, adjoint_eig_base)
        # init trajectory
        u = parabolic.RadFeedForward(l,
                                     t_end,
                                     param,
                                     bound_cond_type,
                                     actuation_type)
        # ------------- determine (A,B) with weak-formulation (pyinduct)
        # derive state-space system
        rad_pde = \
            parabolic.get_parabolic_dirichlet_weak_form("eig_base",
                                                        "adjoint_eig_base",
                                                        u, param, dz.bounds)
        ce = sim.parse_weak_formulation(rad_pde)
        ss_weak = sim.create_state_space(ce)
        # ------------- determine (A,B) with modal transformation
        a_mat = np.diag(eig_values)
        b_mat = -a2 * np.atleast_2d(
            [fraction(l) for fraction in adjoint_eig_base.derive(1).fractions]).T
        ss_modal = sim.StateSpace(a_mat, b_mat, input_handle=u)
        # check if ss_modal.(A,B) is close to ss_weak.(A,B)
        np.testing.assert_array_almost_equal(
            np.sort(np.linalg.eigvals(ss_weak.A[1])),
            np.sort(np.linalg.eigvals(ss_modal.A[1])))
        np.testing.assert_array_almost_equal(ss_weak.B[0][1], ss_modal.B[0][1])
        # TODO can the result be tested?
        # display results
        if show_plots:
            t, q = sim.simulate_state_space(ss_modal, initial_weights, dt)
            eval_d = sim.evaluate_approximation("eig_base",
                                                q,
                                                t,
                                                dz,
                                                spat_order=0)
            win2 = pi.PgSurfacePlot(eval_d)
            pi.show(show_mpl=False)
        pi.deregister_base("eig_base")
        pi.deregister_base("adjoint_eig_base")
class RadRobinModalVsWeakFormulationTest(unittest.TestCase):
    """
    Compare the (A, B) pair obtained from the parsed weak formulation with
    the one obtained by modal transformation for the robin/robin parabolic
    problem.
    """
    def test_comparison(self):
        actuation_type = 'robin'
        bound_cond_type = 'robin'
        # [a2, a1, a0, alpha, beta]
        param = [2., 1.5, -3., -1., -.5]
        adjoint_param = pi.SecondOrderEigenfunction.get_adjoint_problem(param)
        a2, a1, a0, alpha, beta = param
        l = 1.
        spatial_disc = 10
        dz = pi.Domain(bounds=(0, l), num=spatial_disc)
        t_end = 1.
        temporal_disc = 50
        dt = pi.Domain(bounds=(0, t_end), num=temporal_disc)
        n = 10
        eig_freq, eig_val = parabolic.compute_rad_robin_eigenfrequencies(param,
                                                                         l,
                                                                         n)
        init_eig_base = pi.Base(
            [pi.SecondOrderRobinEigenfunction(om, param, dz.bounds[-1])
             for om in eig_freq])
        init_adjoint_eig_base = pi.Base(
            [pi.SecondOrderRobinEigenfunction(om, adjoint_param, dz.bounds[-1])
             for om in eig_freq])
        # normalize eigenfunctions and adjoint eigenfunctions
        eig_base, adjoint_eig_base = pi.normalize_base(init_eig_base,
                                                       init_adjoint_eig_base)
        # register bases
        pi.register_base("eig_base", eig_base)
        pi.register_base("adjoint_eig_base", adjoint_eig_base)
        # derive initial field variable x(z,0) and weights
        start_state = pi.Function(lambda z: 0., domain=(0, l))
        initial_weights = pi.project_on_base(start_state, adjoint_eig_base)
        # init trajectory
        u = parabolic.RadFeedForward(l, t_end, param, bound_cond_type, actuation_type)
        # determine pair (A, B) by weak-formulation (pyinduct)
        rad_pde, extra_labels = parabolic.get_parabolic_robin_weak_form("eig_base", "adjoint_eig_base", u, param, dz.bounds)
        ce = sim.parse_weak_formulation(rad_pde)
        ss_weak = sim.create_state_space(ce)
        # determine pair (A, B) by modal transformation
        a_mat = np.diag(np.real_if_close(eig_val))
        b_mat = a2 * np.atleast_2d([fraction(l) for fraction in adjoint_eig_base.fractions]).T
        ss_modal = sim.StateSpace(a_mat, b_mat, input_handle=u)
        # check if ss_modal.(A,B) is close to ss_weak.(A,B)
        np.testing.assert_array_almost_equal(np.sort(np.linalg.eigvals(ss_weak.A[1])), np.sort(np.linalg.eigvals(ss_modal.A[1])),
                                             decimal=5)
        np.testing.assert_array_almost_equal(ss_weak.B[0][1], ss_modal.B[0][1])
        t_end, q = sim.simulate_state_space(ss_modal, initial_weights, dt)
        eval_d = sim.evaluate_approximation("eig_base", q, t_end, dz,
                                            spat_order=1)
        # display results
        if show_plots:
            win1 = pi.PgAnimatedPlot([eval_d], title="Test")
            win2 = pi.PgSurfacePlot(eval_d)
            pi.show(show_mpl=False)
        pi.deregister_base(extra_labels[0])
        pi.deregister_base(extra_labels[1])
        pi.deregister_base("eig_base")
        pi.deregister_base("adjoint_eig_base")
class EvaluateApproximationTestCase(unittest.TestCase):
    """Evaluation of weight trajectories over plain and composed bases."""
    def setUp(self):
        self.node_cnt = 5
        self.time_step = 1e-1
        self.dates = pi.Domain((0, 10), step=self.time_step)
        self.spat_dom = pi.Domain((0, 1), num=50)
        # create bases functions
        self.nodes = pi.Domain(self.spat_dom.bounds, num=self.node_cnt)
        self.fe_funcs = pi.LagrangeSecondOrder.cure_interval(self.nodes)
        # create a slow rising, nearly horizontal line
        self.weights = np.array(list(range(
            self.node_cnt * self.dates.points.size))).reshape(
            (self.dates.points.size, len(self.nodes)))
        self.p = None
    def test_eval_simple(self):
        pi.register_base("fe_base", self.fe_funcs)
        eval_data = sim.evaluate_approximation("fe_base",
                                               self.weights,
                                               self.dates,
                                               self.spat_dom,
                                               1)
        pi.deregister_base("fe_base")
        if show_plots:
            p = pi.PgAnimatedPlot(eval_data)
            pi.show(show_mpl=False)
    def test_eval_composed(self):
        # wrap every fem function into a ComposedFunctionVector with its
        # boundary value at z=0 as scalar part
        c_base = pi.Base([pi.ComposedFunctionVector(f, f(0))
                          for f in self.fe_funcs])
        pi.register_base("fe_comp_base", c_base)
        ev = sim.evaluate_approximation("fe_comp_base",
                                        self.weights,
                                        self.dates,
                                        self.spat_dom,
                                        0)
        pi.deregister_base("fe_comp_base")
        # split the results into separate ED instances
        evs = [pi.EvalData(ev.input_data[:-1], ev.output_data[..., i])
               for i in range(ev.output_data.shape[-1])]
        if show_plots:
            p = pi.PgAnimatedPlot(evs)
            pi.show(show_mpl=False)
class SetDominantLabel(unittest.TestCase):
    """Tests for ``sim.set_dominant_labels``.

    One second-order Lagrange base is registered under the three labels
    ``base_1`` .. ``base_3``; every test assembles a set of weak
    formulations and checks whether a unique dominant label can (or
    cannot) be assigned to each parsed canonical equation.
    """

    def setUp(self):
        self.limits = (0, 1)
        spat_dom = pi.Domain(bounds=self.limits, num=100)
        nodes = pi.Domain(spat_dom.bounds, num=3)
        shapes = pi.LagrangeSecondOrder.cure_interval(nodes)

        # one (base, field variable, test function) triple per system
        for idx in (1, 2, 3):
            lbl = "base_{}".format(idx)
            pi.register_base(lbl, shapes)
            setattr(self, "x{}".format(idx), pi.FieldVariable(lbl))
            setattr(self, "psi_{}".format(idx), pi.TestFunction(lbl))

    def test_valid(self):
        # every system has exactly one highest temporal order -> resolvable
        wf1 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=1), self.psi_1),
                limits=self.limits)],
            name="sys_1")
        wf2 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x2.derive(temp_order=1), self.psi_2),
                limits=self.limits)],
            name="sys_2")
        wf3 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x3.derive(temp_order=1), self.psi_3),
                limits=self.limits),
             pi.ScalarTerm(pi.Product(self.x3(0), self.psi_3(0)))],
            name="sys_3")

        ces = sim.parse_weak_formulations([wf1, wf2, wf3])
        sim.set_dominant_labels(ces)
        for num, ce in enumerate(ces, start=1):
            self.assertEqual("base_{}".format(num), ce.dominant_lbl)

    def test_non_valid_algebraic(self):
        # sys_1 carries no temporal derivative at all -> purely algebraic
        wf1 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=0), self.psi_1),
                limits=self.limits),
             pi.ScalarTerm(pi.Product(self.x2(0), self.psi_1(0)))],
            name="sys_1")
        wf2 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x2.derive(temp_order=1), self.psi_2),
                limits=self.limits),
             pi.ScalarTerm(pi.Product(self.x2(0), self.psi_2(0)))],
            name="sys_2")

        ces = sim.parse_weak_formulations([wf1, wf2])
        self.assertRaises(ValueError, sim.set_dominant_labels, ces)

    def test_non_valid_max_order_uniqueness(self):
        # base_1 appears with the maximal order (4) in two systems
        wf1 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=4), self.psi_1),
                limits=self.limits)],
            name="sys_1")
        wf2 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=4), self.psi_1),
                limits=self.limits),
             pi.IntegralTerm(
                pi.Product(self.x2.derive(temp_order=1), self.psi_2),
                limits=self.limits)],
            name="sys_2")
        wf3 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x3.derive(temp_order=1), self.psi_3),
                limits=self.limits),
             pi.ScalarTerm(pi.Product(self.x3(0), self.psi_3(0)))],
            name="sys_3")

        ces = sim.parse_weak_formulations([wf1, wf2, wf3])
        self.assertRaises(ValueError, sim.set_dominant_labels, ces)

    def test_non_valid_not_enough_labels(self):
        # two systems but only one base involved -> no unique assignment
        wf1 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=4), self.psi_1),
                limits=self.limits)],
            name="sys_1")
        wf2 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=4), self.psi_1),
                limits=self.limits)],
            name="sys_2")

        ces = sim.parse_weak_formulations([wf1, wf2])
        self.assertRaises(ValueError, sim.set_dominant_labels, ces)

    def test_wrong_dominant_labels(self):
        # user-supplied dominant labels are swapped -> warning expected
        wf1 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x1.derive(temp_order=4), self.psi_1),
                limits=self.limits)],
            name="sys_1", dominant_lbl="base_2")
        wf2 = pi.WeakFormulation(
            [pi.IntegralTerm(
                pi.Product(self.x2.derive(temp_order=4), self.psi_1),
                limits=self.limits)],
            name="sys_2", dominant_lbl="base_1")

        ces = sim.parse_weak_formulations([wf1, wf2])
        self.assertWarns(UserWarning, sim.set_dominant_labels, ces)

    def tearDown(self):
        for idx in (1, 2, 3):
            pi.deregister_base("base_{}".format(idx))
class SimulationInputVectorTestCase(unittest.TestCase):
    """Behavioral tests for ``sim.SimulationInputVector``."""

    def setUp(self):
        # five dummy inputs, each one echoing its own index
        self.inputs = np.array(
            [CorrectInput(output=i, der_order=i) for i in range(5)])

    def test_init(self):
        # construction from an empty iterable
        vec = sim.SimulationInputVector([])
        self.assertTrue(vec._input_vector == [])

        # construction from a single input object
        vec = sim.SimulationInputVector(self.inputs[1])
        self.assertEqual(vec._input_vector, [self.inputs[1]])

        # construction from an iterable of inputs
        vec = sim.SimulationInputVector(self.inputs)
        self.assertTrue(all(vec._input_vector == self.inputs))

    def test_iter(self):
        vec = sim.SimulationInputVector(self.inputs[:2])
        it = iter(vec)
        self.assertEqual(next(it), self.inputs[0])
        self.assertEqual(next(it), self.inputs[1])
        with self.assertRaises(StopIteration):
            next(it)

    def test_getitem(self):
        vec = sim.SimulationInputVector(self.inputs)

        # scalar indexing
        self.assertEqual(vec[1], self.inputs[1])

        # slice indexing
        self.assertTrue(all(vec[2:4] == self.inputs[2:4]))

    def test_append(self):
        vec = sim.SimulationInputVector([])
        vec.append(self.inputs[:2])
        self.assertTrue(all(vec._input_vector == self.inputs[:2]))
        vec.append(self.inputs[2:])
        self.assertTrue(all(vec._input_vector == self.inputs))

    def test_output(self):
        call_args = dict(time=1, weights=[1, 2, 3], weight_lbl="test")

        # an empty vector must still be callable
        sim.SimulationInputVector([])(**call_args)

        # a populated vector returns the collected single outputs
        vec = sim.SimulationInputVector(self.inputs)
        expected = [inp(**call_args) for inp in self.inputs]
        actual = vec(**call_args)
        self.assertTrue(
            all(all(a == b) for a, b in zip(expected, actual))
        )
| [
"pyinduct.SimulationInputVector",
"numpy.sqrt",
"pyinduct.EvalData",
"pyinduct.register_base",
"numpy.hstack",
"pyinduct.ConstantTrajectory",
"pyinduct.FieldVariable",
"pyinduct.Base",
"pyinduct.ConstantFunction",
"numpy.array",
"pyinduct.simulation.create_state_space",
"pyinduct.ScalarFunctio... | [((759, 768), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (765, 768), True, 'import numpy as np\n'), ((780, 800), 'numpy.isclose', 'np.isclose', (['(t % 2)', '(0)'], {}), '(t % 2, 0)\n', (790, 800), True, 'import numpy as np\n'), ((1926, 1992), 'pyinduct.SmoothTransition', 'pi.SmoothTransition', ([], {'states': '(0, 1)', 'interval': '(0, 1)', 'method': '"""poly"""'}), "(states=(0, 1), interval=(0, 1), method='poly')\n", (1945, 1992), True, 'import pyinduct as pi\n'), ((2098, 2164), 'pyinduct.SmoothTransition', 'pi.SmoothTransition', ([], {'states': '(1, 0)', 'interval': '(1, 2)', 'method': '"""poly"""'}), "(states=(1, 0), interval=(1, 2), method='poly')\n", (2117, 2164), True, 'import pyinduct as pi\n'), ((2612, 2624), 'numpy.eye', 'np.eye', (['(2)', '(2)'], {}), '(2, 2)\n', (2618, 2624), True, 'import numpy as np\n'), ((2637, 2657), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (2645, 2657), True, 'import numpy as np\n'), ((2721, 2737), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (2729, 2737), True, 'import numpy as np\n'), ((2751, 2808), 'pyinduct.simulation.StateSpace', 'sim.StateSpace', (['{(1): a}', '{(0): {(1): b}}'], {'input_handle': 'u'}), '({(1): a}, {(0): {(1): b}}, input_handle=u)\n', (2765, 2808), True, 'import pyinduct.simulation as sim\n'), ((2993, 3005), 'numpy.eye', 'np.eye', (['(2)', '(2)'], {}), '(2, 2)\n', (2999, 3005), True, 'import numpy as np\n'), ((3018, 3038), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (3026, 3038), True, 'import numpy as np\n'), ((3082, 3098), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (3090, 3098), True, 'import numpy as np\n'), ((3112, 3148), 'pyinduct.simulation.StateSpace', 'sim.StateSpace', (['a', 'b'], {'input_handle': 'u'}), '(a, b, input_handle=u)\n', (3126, 3148), True, 'import pyinduct.simulation as sim\n'), ((3221, 3247), 'pyinduct.Domain', 'pi.Domain', (['(0, 10)'], {'num': '(11)'}), '((0, 10), num=11)\n', 
(3230, 3247), True, 'import pyinduct as pi\n'), ((3272, 3299), 'pyinduct.Domain', 'pi.Domain', (['(-1, 11)'], {'num': '(13)'}), '((-1, 11), num=13)\n', (3281, 3299), True, 'import pyinduct as pi\n'), ((3314, 3354), 'pyinduct.simulation.simulate_state_space', 'sim.simulate_state_space', (['ss', 'ic', 'domain'], {}), '(ss, ic, domain)\n', (3338, 3354), True, 'import pyinduct.simulation as sim\n'), ((4693, 4712), 'pyinduct.simulation.CanonicalForm', 'sim.CanonicalForm', ([], {}), '()\n', (4710, 4712), True, 'import pyinduct.simulation as sim\n'), ((4784, 4793), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (4790, 4793), True, 'import numpy as np\n'), ((5085, 5095), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (5091, 5095), True, 'import numpy as np\n'), ((7224, 7248), 'pyinduct.ConstantTrajectory', 'pi.ConstantTrajectory', (['(7)'], {}), '(7)\n', (7245, 7248), True, 'import pyinduct as pi\n'), ((7342, 7376), 'pyinduct.SimulationInputVector', 'pi.SimulationInputVector', (['[u1, u2]'], {}), '([u1, u2])\n', (7366, 7376), True, 'import pyinduct as pi\n'), ((7561, 7601), 'pyinduct.SimulationInputVector', 'pi.SimulationInputVector', (['[u1_dt, u2_dt]'], {}), '([u1_dt, u2_dt])\n', (7585, 7601), True, 'import pyinduct as pi\n'), ((7641, 7657), 'pyinduct.Input', 'pi.Input', (['self.u'], {}), '(self.u)\n', (7649, 7657), True, 'import pyinduct as pi\n'), ((7686, 7715), 'pyinduct.Input', 'pi.Input', (['self.u_vec'], {'index': '(0)'}), '(self.u_vec, index=0)\n', (7694, 7715), True, 'import pyinduct as pi\n'), ((7743, 7772), 'pyinduct.Input', 'pi.Input', (['self.u_vec'], {'index': '(1)'}), '(self.u_vec, index=1)\n', (7751, 7772), True, 'import pyinduct as pi\n'), ((7798, 7826), 'pyinduct.Input', 'pi.Input', (['self.u_dt'], {'order': '(1)'}), '(self.u_dt, order=1)\n', (7806, 7826), True, 'import pyinduct as pi\n'), ((7858, 7899), 'pyinduct.Input', 'pi.Input', (['self.u_vec_dt'], {'index': '(0)', 'order': '(1)'}), '(self.u_vec_dt, index=0, order=1)\n', (7866, 7899), True, 
'import pyinduct as pi\n'), ((7930, 7971), 'pyinduct.Input', 'pi.Input', (['self.u_vec_dt'], {'index': '(1)', 'order': '(1)'}), '(self.u_vec_dt, index=1, order=1)\n', (7938, 7971), True, 'import pyinduct as pi\n'), ((8225, 8265), 'pyinduct.register_base', 'pi.register_base', (['"""heavyside_base"""', 'base'], {}), "('heavyside_base', base)\n", (8241, 8265), True, 'import pyinduct as pi\n'), ((8310, 8334), 'pyinduct.Domain', 'pi.Domain', (['(0, 1)'], {'num': '(3)'}), '((0, 1), num=3)\n', (8319, 8334), True, 'import pyinduct as pi\n'), ((8367, 8409), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['nodes'], {}), '(nodes)\n', (8402, 8409), True, 'import pyinduct as pi\n'), ((8418, 8477), 'pyinduct.register_base', 'pi.register_base', (['"""distributed_base"""', 'self.distributed_base'], {}), "('distributed_base', self.distributed_base)\n", (8434, 8477), True, 'import pyinduct as pi\n'), ((8629, 8647), 'pyinduct.Base', 'pi.Base', (['fractions'], {}), '(fractions)\n', (8636, 8647), True, 'import pyinduct as pi\n'), ((8656, 8709), 'pyinduct.register_base', 'pi.register_base', (['"""composed_base"""', 'self.composed_base'], {}), "('composed_base', self.composed_base)\n", (8672, 8709), True, 'import pyinduct as pi\n'), ((8802, 8851), 'pyinduct.register_base', 'pi.register_base', (['"""lumped_base"""', 'self.lumped_base'], {}), "('lumped_base', self.lumped_base)\n", (8818, 8851), True, 'import pyinduct as pi\n'), ((8904, 8939), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""distributed_base"""'], {}), "('distributed_base')\n", (8919, 8939), True, 'import pyinduct as pi\n'), ((9180, 9212), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""composed_base"""'], {}), "('composed_base')\n", (9195, 9212), True, 'import pyinduct as pi\n'), ((9516, 9551), 'pyinduct.ScalarFunction', 'pi.ScalarFunction', (['"""heavyside_base"""'], {}), "('heavyside_base')\n", (9533, 9551), True, 'import pyinduct as pi\n'), ((9618, 9654), 
'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""distributed_base"""'], {}), "('distributed_base')\n", (9634, 9654), True, 'import pyinduct as pi\n'), ((10025, 10058), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""composed_base"""'], {}), "('composed_base')\n", (10041, 10058), True, 'import pyinduct as pi\n'), ((10227, 10295), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""distributed_base"""'], {'weight_label': '"""special_weights"""'}), "('distributed_base', weight_label='special_weights')\n", (10243, 10295), True, 'import pyinduct as pi\n'), ((10363, 10394), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""lumped_base"""'], {}), "('lumped_base')\n", (10379, 10394), True, 'import pyinduct as pi\n'), ((12836, 12870), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['self.test_funcs_at1'], {}), '(self.test_funcs_at1)\n', (12849, 12870), True, 'import pyinduct as pi\n'), ((13109, 13148), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['self.comp_test_funcs_at1'], {}), '(self.comp_test_funcs_at1)\n', (13122, 13148), True, 'import pyinduct as pi\n'), ((13362, 13395), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['self.field_var_at1'], {}), '(self.field_var_at1)\n', (13375, 13395), True, 'import pyinduct as pi\n'), ((13429, 13465), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['self.field_var_dz_at1'], {}), '(self.field_var_dz_at1)\n', (13442, 13465), True, 'import pyinduct as pi\n'), ((13500, 13537), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['self.field_var_ddt_at1'], {}), '(self.field_var_ddt_at1)\n', (13513, 13537), True, 'import pyinduct as pi\n'), ((13564, 13610), 'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.field_var'], {'limits': '(0, 1)'}), '(self.field_var, limits=(0, 1))\n', (13579, 13610), True, 'import pyinduct as pi\n'), ((13641, 13689), 'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.field_var'], {'limits': '(0, 0.5)'}), '(self.field_var, limits=(0, 0.5))\n', (13656, 13689), True, 'import pyinduct as pi\n'), ((13717, 13759), 
'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.field_var_dz', '(0, 1)'], {}), '(self.field_var_dz, (0, 1))\n', (13732, 13759), True, 'import pyinduct as pi\n'), ((13789, 13832), 'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.field_var_ddt', '(0, 1)'], {}), '(self.field_var_ddt, (0, 1))\n', (13804, 13832), True, 'import pyinduct as pi\n'), ((13869, 13907), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['self.comp_field_var_at1'], {}), '(self.comp_field_var_at1)\n', (13882, 13907), True, 'import pyinduct as pi\n'), ((13939, 13990), 'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.comp_field_var'], {'limits': '(0, 1)'}), '(self.comp_field_var, limits=(0, 1))\n', (13954, 13990), True, 'import pyinduct as pi\n'), ((14070, 14121), 'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.comp_field_var'], {'limits': '(0, 1)'}), '(self.comp_field_var, limits=(0, 1))\n', (14085, 14121), True, 'import pyinduct as pi\n'), ((16880, 16937), 'pyinduct.IntegralTerm', 'pi.IntegralTerm', (['self.odd_weight_field_var'], {'limits': '(0, 1)'}), '(self.odd_weight_field_var, limits=(0, 1))\n', (16895, 16937), True, 'import pyinduct as pi\n'), ((32595, 32622), 'numpy.array', 'np.array', (['[[0, 0.25, 0.25]]'], {}), '([[0, 0.25, 0.25]])\n', (32603, 32622), True, 'import numpy as np\n'), ((32871, 32935), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["terms1['E'][0][1]", 'desired'], {}), "(terms1['E'][0][1], desired)\n", (32907, 32935), True, 'import numpy as np\n'), ((33193, 33257), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["terms2['E'][0][1]", 'desired'], {}), "(terms2['E'][0][1], desired)\n", (33229, 33257), True, 'import numpy as np\n'), ((33277, 33302), 'numpy.array', 'np.array', (['[[1], [0], [0]]'], {}), '([[1], [0], [0]])\n', (33285, 33302), True, 'import numpy as np\n'), ((33555, 33619), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["terms1['E'][0][1]", 
'desired'], {}), "(terms1['E'][0][1], desired)\n", (33591, 33619), True, 'import numpy as np\n'), ((33881, 33945), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["terms2['E'][0][1]", 'desired'], {}), "(terms2['E'][0][1], desired)\n", (33917, 33945), True, 'import numpy as np\n'), ((35989, 36016), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""baseN22"""'], {}), "('baseN22')\n", (36005, 36016), True, 'import pyinduct as pi\n'), ((36030, 36056), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""baseN22"""'], {}), "('baseN22')\n", (36045, 36056), True, 'import pyinduct as pi\n'), ((36267, 36296), 'pyinduct.parse_weak_formulation', 'pi.parse_weak_formulation', (['wf'], {}), '(wf)\n', (36292, 36296), True, 'import pyinduct as pi\n'), ((36468, 36508), 'scipy.linalg.block_diag', 'block_diag', (['scal_prod1', 'scal_prod1', '(1)', '(1)'], {}), '(scal_prod1, scal_prod1, 1, 1)\n', (36478, 36508), False, 'from scipy.linalg import block_diag\n'), ((36621, 36734), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['scal_prod_mat[:N, :N]', "cf.dynamic_forms['baseN22'].matrices['E'][0][1]"], {}), "(scal_prod_mat[:N, :N], cf.\n dynamic_forms['baseN22'].matrices['E'][0][1])\n", (36657, 36734), True, 'import numpy as np\n'), ((36948, 37056), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['prod_mat[:N, :N]', "cf.dynamic_forms['baseN22'].matrices['E'][1][1]"], {}), "(prod_mat[:N, :N], cf.dynamic_forms[\n 'baseN22'].matrices['E'][1][1])\n", (36984, 37056), True, 'import numpy as np\n'), ((37094, 37123), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""baseN22"""'], {}), "('baseN22')\n", (37112, 37123), True, 'import pyinduct as pi\n'), ((37621, 37657), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""heavyside_base"""'], {}), "('heavyside_base')\n", (37639, 37657), True, 'import pyinduct as pi\n'), ((37666, 37704), 'pyinduct.deregister_base', 
'pi.deregister_base', (['"""distributed_base"""'], {}), "('distributed_base')\n", (37684, 37704), True, 'import pyinduct as pi\n'), ((37713, 37748), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""composed_base"""'], {}), "('composed_base')\n", (37731, 37748), True, 'import pyinduct as pi\n'), ((37757, 37790), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""lumped_base"""'], {}), "('lumped_base')\n", (37775, 37790), True, 'import pyinduct as pi\n'), ((37921, 37946), 'pyinduct.Domain', 'pi.Domain', (['(0, 1)'], {'num': '(10)'}), '((0, 1), num=10)\n', (37930, 37946), True, 'import pyinduct as pi\n'), ((37990, 38021), 'pyinduct.Domain', 'pi.Domain', (['(0, 1)'], {'num': 'node_cnt'}), '((0, 1), num=node_cnt)\n', (37999, 38021), True, 'import pyinduct as pi\n'), ((38041, 38089), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['spat_domain'], {}), '(spat_domain)\n', (38076, 38089), True, 'import pyinduct as pi\n'), ((38098, 38136), 'pyinduct.register_base', 'pi.register_base', (['"""swm_base"""', 'lag_base'], {}), "('swm_base', lag_base)\n", (38114, 38136), True, 'import pyinduct as pi\n'), ((38295, 38323), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""swm_base"""'], {}), "('swm_base')\n", (38311, 38323), True, 'import pyinduct as pi\n'), ((38448, 38475), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""swm_base"""'], {}), "('swm_base')\n", (38463, 38475), True, 'import pyinduct as pi\n'), ((38936, 38989), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[int1, s1, int2, s2]'], {'name': '"""swm"""'}), "([int1, s1, int2, s2], name='swm')\n", (38955, 38989), True, 'import pyinduct.simulation as sim\n'), ((39008, 39045), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['string_eq'], {}), '(string_eq)\n', (39034, 39045), True, 'import pyinduct.simulation as sim\n'), ((39064, 39078), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (39072, 39078), True, 'import 
numpy as np\n'), ((39137, 39168), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['self.ce'], {}), '(self.ce)\n', (39159, 39168), True, 'import pyinduct.simulation as sim\n'), ((40030, 40061), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['self.ce'], {}), '(self.ce)\n', (40052, 40061), True, 'import pyinduct.simulation as sim\n'), ((40077, 40132), 'pyinduct.simulation.simulate_state_space', 'sim.simulate_state_space', (['ss', 'self.ic', 'self.time_domain'], {}), '(ss, self.ic, self.time_domain)\n', (40101, 40132), True, 'import pyinduct.simulation as sim\n'), ((40312, 40383), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['t.points', 'self.time_domain.points'], {}), '(t.points, self.time_domain.points)\n', (40348, 40383), True, 'import numpy as np\n'), ((40417, 40447), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""swm_base"""'], {}), "('swm_base')\n", (40435, 40447), True, 'import pyinduct as pi\n'), ((40692, 40720), 'copy.copy', 'copy.copy', (['self.example_data'], {}), '(self.example_data)\n', (40701, 40720), False, 'import copy\n'), ((40856, 40889), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': 'z_bounds', 'num': '(9)'}), '(bounds=z_bounds, num=9)\n', (40865, 40889), True, 'import pyinduct as pi\n'), ((40970, 41017), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(t_start, t_end)', 'step': 't_step'}), '(bounds=(t_start, t_end), step=t_step)\n', (40979, 41017), True, 'import pyinduct as pi\n'), ((41254, 41318), 'pyinduct.hyperbolic.feedforward.FlatString', 'hff.FlatString', (['(0)', 'self.y_end', 'z_start', 'z_end', '(0)', '(5)', 'self.params'], {}), '(0, self.y_end, z_start, z_end, 0, 5, self.params)\n', (41268, 41318), True, 'import pyinduct.hyperbolic.feedforward as hff\n'), ((41975, 42008), 'pyinduct.Domain', 'pi.Domain', (['self.dz.bounds'], {'num': '(11)'}), '(self.dz.bounds, num=11)\n', (41984, 42008), True, 'import pyinduct as pi\n'), ((42028, 42071), 
'pyinduct.LagrangeSecondOrder.cure_interval', 'pi.LagrangeSecondOrder.cure_interval', (['nodes'], {}), '(nodes)\n', (42064, 42071), True, 'import pyinduct as pi\n'), ((42080, 42118), 'pyinduct.register_base', 'pi.register_base', (['"""fem_base"""', 'fem_base'], {}), "('fem_base', fem_base)\n", (42096, 42118), True, 'import pyinduct as pi\n'), ((42140, 42168), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""fem_base"""'], {}), "('fem_base')\n", (42156, 42168), True, 'import pyinduct as pi\n'), ((42293, 42320), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""fem_base"""'], {}), "('fem_base')\n", (42308, 42320), True, 'import pyinduct as pi\n'), ((43021, 43079), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[int1, s1, int2, s2]'], {'name': '"""fem_test"""'}), "([int1, s1, int2, s2], name='fem_test')\n", (43040, 43079), True, 'import pyinduct.simulation as sim\n'), ((43098, 43136), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['string_pde'], {}), '(string_pde)\n', (43124, 43136), True, 'import pyinduct.simulation as sim\n'), ((43150, 43181), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['self.cf'], {}), '(self.cf)\n', (43172, 43181), True, 'import pyinduct.simulation as sim\n'), ((43389, 43430), 'pyinduct.simulation.simulate_state_space', 'sim.simulate_state_space', (['ss', 'q0', 'self.dt'], {}), '(ss, q0, self.dt)\n', (43413, 43430), True, 'import pyinduct.simulation as sim\n'), ((43891, 43921), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""fem_base"""'], {}), "('fem_base')\n", (43909, 43921), True, 'import pyinduct as pi\n'), ((46703, 46723), 'pyinduct.Base', 'pi.Base', (['eig_vectors'], {}), '(eig_vectors)\n', (46710, 46723), True, 'import pyinduct as pi\n'), ((46779, 46817), 'pyinduct.normalize_base', 'pi.normalize_base', (['composed_modal_base'], {}), '(composed_modal_base)\n', (46796, 46817), True, 'import pyinduct as pi\n'), ((46946, 47012), 'pyinduct.register_base', 
'pi.register_base', (['"""norm_modal_base"""', 'norm_mod_base'], {'overwrite': '(True)'}), "('norm_modal_base', norm_mod_base, overwrite=True)\n", (46962, 47012), True, 'import pyinduct as pi\n'), ((49199, 49247), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['terms'], {'name': '"""swm_lib-modal"""'}), "(terms, name='swm_lib-modal')\n", (49218, 49247), True, 'import pyinduct.simulation as sim\n'), ((49288, 49376), 'pyinduct.simulation.simulate_system', 'sim.simulate_system', (['modal_pde', 'self.ic', 'self.dt', 'self.dz'], {'derivative_orders': '(1, 0)'}), '(modal_pde, self.ic, self.dt, self.dz, derivative_orders\n =(1, 0))\n', (49307, 49376), True, 'import pyinduct.simulation as sim\n'), ((49606, 49643), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""norm_modal_base"""'], {}), "('norm_modal_base')\n", (49624, 49643), True, 'import pyinduct as pi\n'), ((50366, 50432), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {}), '([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\n', (50374, 50432), True, 'import numpy as np\n'), ((50543, 50573), 'numpy.array', 'np.array', (['[[0], [0], [1], [1]]'], {}), '([[0], [0], [1], [1]])\n', (50551, 50573), True, 'import numpy as np\n'), ((50598, 50630), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(-1, 1)', 'num': '(2)'}), '(bounds=(-1, 1), num=2)\n', (50607, 50630), True, 'import pyinduct as pi\n'), ((51654, 51696), 'pyinduct.create_state_space', 'pi.create_state_space', (['canonical_equations'], {}), '(canonical_equations)\n', (51675, 51696), True, 'import pyinduct as pi\n'), ((51706, 51776), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['state_space_form.A[1]', 'a_desired'], {}), '(state_space_form.A[1], a_desired)\n', (51742, 51776), True, 'import numpy as np\n'), ((51785, 51858), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['state_space_form.B[0][1]', 'b_desired'], {}), 
'(state_space_form.B[0][1], b_desired)\n', (51821, 51858), True, 'import numpy as np\n'), ((52124, 52158), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, l1)', 'num': '(100)'}), '(bounds=(0, l1), num=100)\n', (52133, 52158), True, 'import pyinduct as pi\n'), ((52178, 52213), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(l1, l2)', 'num': '(100)'}), '(bounds=(l1, l2), num=100)\n', (52187, 52213), True, 'import pyinduct as pi\n'), ((52233, 52268), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(l2, l3)', 'num': '(100)'}), '(bounds=(l2, l3), num=100)\n', (52242, 52268), True, 'import pyinduct as pi\n'), ((52288, 52323), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(l3, l4)', 'num': '(100)'}), '(bounds=(l3, l4), num=100)\n', (52297, 52323), True, 'import pyinduct as pi\n'), ((52404, 52451), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(t_start, t_end)', 'step': 't_step'}), '(bounds=(t_start, t_end), step=t_step)\n', (52413, 52451), True, 'import pyinduct as pi\n'), ((53257, 53290), 'pyinduct.Domain', 'pi.Domain', (['self.dz1.bounds'], {'num': '(3)'}), '(self.dz1.bounds, num=3)\n', (53266, 53290), True, 'import pyinduct as pi\n'), ((53308, 53341), 'pyinduct.Domain', 'pi.Domain', (['self.dz2.bounds'], {'num': '(3)'}), '(self.dz2.bounds, num=3)\n', (53317, 53341), True, 'import pyinduct as pi\n'), ((53359, 53392), 'pyinduct.Domain', 'pi.Domain', (['self.dz3.bounds'], {'num': '(3)'}), '(self.dz3.bounds, num=3)\n', (53368, 53392), True, 'import pyinduct as pi\n'), ((53410, 53444), 'pyinduct.Domain', 'pi.Domain', (['self.dz4.bounds'], {'num': '(15)'}), '(self.dz4.bounds, num=15)\n', (53419, 53444), True, 'import pyinduct as pi\n'), ((53462, 53505), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['nodes1'], {}), '(nodes1)\n', (53497, 53505), True, 'import pyinduct as pi\n'), ((53522, 53565), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['nodes2'], {}), '(nodes2)\n', (53557, 
53565), True, 'import pyinduct as pi\n'), ((53582, 53625), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['nodes3'], {}), '(nodes3)\n', (53617, 53625), True, 'import pyinduct as pi\n'), ((53642, 53685), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['nodes4'], {}), '(nodes4)\n', (53677, 53685), True, 'import pyinduct as pi\n'), ((53695, 53728), 'pyinduct.register_base', 'pi.register_base', (['"""base_1"""', 'base1'], {}), "('base_1', base1)\n", (53711, 53728), True, 'import pyinduct as pi\n'), ((53737, 53770), 'pyinduct.register_base', 'pi.register_base', (['"""base_2"""', 'base2'], {}), "('base_2', base2)\n", (53753, 53770), True, 'import pyinduct as pi\n'), ((53779, 53812), 'pyinduct.register_base', 'pi.register_base', (['"""base_3"""', 'base3'], {}), "('base_3', base3)\n", (53795, 53812), True, 'import pyinduct as pi\n'), ((53821, 53854), 'pyinduct.register_base', 'pi.register_base', (['"""base_4"""', 'base4'], {}), "('base_4', base4)\n", (53837, 53854), True, 'import pyinduct as pi\n'), ((53902, 53916), 'pyinduct.Input', 'pi.Input', (['traj'], {}), '(traj)\n', (53910, 53916), True, 'import pyinduct as pi\n'), ((53931, 53957), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_1"""'], {}), "('base_1')\n", (53947, 53957), True, 'import pyinduct as pi\n'), ((53974, 53999), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {}), "('base_1')\n", (53989, 53999), True, 'import pyinduct as pi\n'), ((54412, 54438), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_2"""'], {}), "('base_2')\n", (54428, 54438), True, 'import pyinduct as pi\n'), ((54455, 54480), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {}), "('base_2')\n", (54470, 54480), True, 'import pyinduct as pi\n'), ((54899, 54925), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_3"""'], {}), "('base_3')\n", (54915, 54925), True, 'import pyinduct as pi\n'), ((54942, 54967), 
'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_3"""'], {}), "('base_3')\n", (54957, 54967), True, 'import pyinduct as pi\n'), ((55386, 55412), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_4"""'], {}), "('base_4')\n", (55402, 55412), True, 'import pyinduct as pi\n'), ((55429, 55454), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_4"""'], {}), "('base_4')\n", (55444, 55454), True, 'import pyinduct as pi\n'), ((55965, 56030), 'pyinduct.simulate_system', 'pi.simulate_system', (['self.weak_form_1', 'self.ic1', 'self.dt', 'self.dz1'], {}), '(self.weak_form_1, self.ic1, self.dt, self.dz1)\n', (55983, 56030), True, 'import pyinduct as pi\n'), ((56556, 56628), 'pyinduct.simulate_systems', 'pi.simulate_systems', (['weak_forms', 'ics', 'self.dt', 'spat_domains', 'derivatives'], {}), '(weak_forms, ics, self.dt, spat_domains, derivatives)\n', (56575, 56628), True, 'import pyinduct as pi\n'), ((57387, 57459), 'pyinduct.simulate_systems', 'pi.simulate_systems', (['weak_forms', 'ics', 'self.dt', 'spat_domains', 'derivatives'], {}), '(weak_forms, ics, self.dt, spat_domains, derivatives)\n', (57406, 57459), True, 'import pyinduct as pi\n'), ((58493, 58565), 'pyinduct.simulate_systems', 'pi.simulate_systems', (['weak_forms', 'ics', 'self.dt', 'spat_domains', 'derivatives'], {}), '(weak_forms, ics, self.dt, spat_domains, derivatives)\n', (58512, 58565), True, 'import pyinduct as pi\n'), ((58700, 58728), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_1"""'], {}), "('base_1')\n", (58718, 58728), True, 'import pyinduct as pi\n'), ((58737, 58765), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_2"""'], {}), "('base_2')\n", (58755, 58765), True, 'import pyinduct as pi\n'), ((58774, 58802), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_3"""'], {}), "('base_3')\n", (58792, 58802), True, 'import pyinduct as pi\n'), ((58811, 58839), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_4"""'], {}), "('base_4')\n", 
(58829, 58839), True, 'import pyinduct as pi\n'), ((59301, 59348), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, self.l)', 'num': 'spatial_disc'}), '(bounds=(0, self.l), num=spatial_disc)\n', (59310, 59348), True, 'import pyinduct as pi\n'), ((59415, 59463), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, self.T)', 'num': 'temporal_disc'}), '(bounds=(0, self.T), num=temporal_disc)\n', (59424, 59463), True, 'import pyinduct as pi\n'), ((59520, 59563), 'pyinduct.Domain', 'pi.Domain', (['self.dz.bounds'], {'num': 'spatial_disc'}), '(self.dz.bounds, num=spatial_disc)\n', (59529, 59563), True, 'import pyinduct as pi\n'), ((59586, 59635), 'pyinduct.LagrangeFirstOrder.cure_interval', 'pi.LagrangeFirstOrder.cure_interval', (['self.nodes_1'], {}), '(self.nodes_1)\n', (59621, 59635), True, 'import pyinduct as pi\n'), ((59644, 59683), 'pyinduct.register_base', 'pi.register_base', (['"""base_1"""', 'self.base_1'], {}), "('base_1', self.base_1)\n", (59660, 59683), True, 'import pyinduct as pi\n'), ((59708, 59751), 'pyinduct.Domain', 'pi.Domain', (['self.dz.bounds'], {'num': 'spatial_disc'}), '(self.dz.bounds, num=spatial_disc)\n', (59717, 59751), True, 'import pyinduct as pi\n'), ((59774, 59824), 'pyinduct.LagrangeSecondOrder.cure_interval', 'pi.LagrangeSecondOrder.cure_interval', (['self.nodes_1'], {}), '(self.nodes_1)\n', (59810, 59824), True, 'import pyinduct as pi\n'), ((59833, 59872), 'pyinduct.register_base', 'pi.register_base', (['"""base_2"""', 'self.base_2'], {}), "('base_2', self.base_2)\n", (59849, 59872), True, 'import pyinduct as pi\n'), ((60099, 60188), 'pyinduct.parabolic.RadFeedForward', 'parabolic.RadFeedForward', (['self.l', 'self.T', 'self.param', 'bound_cond_type', 'actuation_type'], {}), '(self.l, self.T, self.param, bound_cond_type,\n actuation_type)\n', (60123, 60188), True, 'import pyinduct.parabolic as parabolic\n'), ((60351, 60450), 'pyinduct.parabolic.get_parabolic_dirichlet_weak_form', 'parabolic.get_parabolic_dirichlet_weak_form', 
(['"""base_2"""', '"""base_2"""', 'u', 'self.param', 'self.dz.bounds'], {}), "('base_2', 'base_2', u, self.\n param, self.dz.bounds)\n", (60394, 60450), True, 'import pyinduct.parabolic as parabolic\n'), ((60707, 60742), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (60733, 60742), True, 'import pyinduct.simulation as sim\n'), ((60756, 60782), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (60778, 60782), True, 'import pyinduct.simulation as sim\n'), ((60989, 61054), 'pyinduct.simulation.evaluate_approximation', 'sim.evaluate_approximation', (['"""base_1"""', 'q', 't', 'self.dz'], {'spat_order': '(1)'}), "('base_1', q, t, self.dz, spat_order=1)\n", (61015, 61054), True, 'import pyinduct.simulation as sim\n'), ((61694, 61783), 'pyinduct.parabolic.RadFeedForward', 'parabolic.RadFeedForward', (['self.l', 'self.T', 'self.param', 'bound_cond_type', 'actuation_type'], {}), '(self.l, self.T, self.param, bound_cond_type,\n actuation_type)\n', (61718, 61783), True, 'import pyinduct.parabolic as parabolic\n'), ((63446, 63523), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[int1, int2, int3, int4, s1, s2, s3, s4]'], {'name': '"""rad_pde"""'}), "([int1, int2, int3, int4, s1, s2, s3, s4], name='rad_pde')\n", (63465, 63523), True, 'import pyinduct.simulation as sim\n'), ((63537, 63572), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (63563, 63572), True, 'import pyinduct.simulation as sim\n'), ((63586, 63612), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (63608, 63612), True, 'import pyinduct.simulation as sim\n'), ((63958, 64047), 'pyinduct.parabolic.RadFeedForward', 'parabolic.RadFeedForward', (['self.l', 'self.T', 'self.param', 'bound_cond_type', 'actuation_type'], {}), '(self.l, self.T, self.param, bound_cond_type,\n actuation_type)\n', 
(63982, 64047), True, 'import pyinduct.parabolic as parabolic\n'), ((65876, 65948), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[int1, int2, int3, int4, s1, s2, s3, s4]', '"""rad_pde"""'], {}), "([int1, int2, int3, int4, s1, s2, s3, s4], 'rad_pde')\n", (65895, 65948), True, 'import pyinduct.simulation as sim\n'), ((65999, 66034), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (66025, 66034), True, 'import pyinduct.simulation as sim\n'), ((66048, 66074), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (66070, 66074), True, 'import pyinduct.simulation as sim\n'), ((66484, 66573), 'pyinduct.parabolic.RadFeedForward', 'parabolic.RadFeedForward', (['self.l', 'self.T', 'self.param', 'bound_cond_type', 'actuation_type'], {}), '(self.l, self.T, self.param, bound_cond_type,\n actuation_type)\n', (66508, 66573), True, 'import pyinduct.parabolic as parabolic\n'), ((66639, 66733), 'pyinduct.parabolic.get_parabolic_robin_weak_form', 'parabolic.get_parabolic_robin_weak_form', (['"""base_1"""', '"""base_1"""', 'u', 'self.param', 'self.dz.bounds'], {}), "('base_1', 'base_1', u, self.param,\n self.dz.bounds)\n", (66678, 66733), True, 'import pyinduct.parabolic as parabolic\n'), ((66743, 66778), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (66769, 66778), True, 'import pyinduct.simulation as sim\n'), ((66792, 66818), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (66814, 66818), True, 'import pyinduct.simulation as sim\n'), ((67350, 67374), 'pyinduct.ConstantTrajectory', 'pi.ConstantTrajectory', (['(1)'], {}), '(1)\n', (67371, 67374), True, 'import pyinduct as pi\n'), ((67444, 67542), 'pyinduct.parabolic.get_parabolic_robin_weak_form', 'pi.parabolic.get_parabolic_robin_weak_form', (['"""base_1"""', '"""base_1"""', 'u', 'self.param', 
'self.dz.bounds'], {}), "('base_1', 'base_1', u, self.\n param, self.dz.bounds)\n", (67486, 67542), True, 'import pyinduct as pi\n'), ((67551, 67586), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (67577, 67586), True, 'import pyinduct.simulation as sim\n'), ((67600, 67626), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (67622, 67626), True, 'import pyinduct.simulation as sim\n'), ((67915, 67943), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_1"""'], {}), "('base_1')\n", (67933, 67943), True, 'import pyinduct as pi\n'), ((67952, 67980), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_2"""'], {}), "('base_2')\n", (67970, 67980), True, 'import pyinduct as pi\n'), ((68237, 68291), 'pyinduct.SecondOrderEigenfunction.get_adjoint_problem', 'pi.SecondOrderEigenfunction.get_adjoint_problem', (['param'], {}), '(param)\n', (68284, 68291), True, 'import pyinduct as pi\n'), ((68380, 68422), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, l)', 'num': 'spatial_disc'}), '(bounds=(0, l), num=spatial_disc)\n', (68389, 68422), True, 'import pyinduct as pi\n'), ((68483, 68530), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, t_end)', 'num': 'temporal_disc'}), '(bounds=(0, t_end), num=temporal_disc)\n', (68492, 68530), True, 'import pyinduct as pi\n'), ((68572, 68685), 'pyinduct.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint', 'pi.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint', ([], {'param': 'param', 'l': 'dz.bounds[-1]', 'n_roots': 'spatial_disc'}), '(param=param, l=dz.\n bounds[-1], n_roots=spatial_disc)\n', (68628, 68685), True, 'import pyinduct as pi\n'), ((69143, 69181), 'pyinduct.register_base', 'pi.register_base', (['"""eig_base"""', 'eig_base'], {}), "('eig_base', eig_base)\n", (69159, 69181), True, 'import pyinduct as pi\n'), ((69524, 69578), 'pyinduct.register_base', 'pi.register_base', (['"""adjoint_eig_base"""', 
'adjoint_eig_base'], {}), "('adjoint_eig_base', adjoint_eig_base)\n", (69540, 69578), True, 'import pyinduct as pi\n'), ((69661, 69702), 'pyinduct.Function', 'pi.Function', (['(lambda z: 0.0)'], {'domain': '(0, l)'}), '(lambda z: 0.0, domain=(0, l))\n', (69672, 69702), True, 'import pyinduct as pi\n'), ((69728, 69777), 'pyinduct.project_on_base', 'pi.project_on_base', (['start_state', 'adjoint_eig_base'], {}), '(start_state, adjoint_eig_base)\n', (69746, 69777), True, 'import pyinduct as pi\n'), ((69817, 69891), 'pyinduct.parabolic.RadFeedForward', 'parabolic.RadFeedForward', (['l', 't_end', 'param', 'bound_cond_type', 'actuation_type'], {}), '(l, t_end, param, bound_cond_type, actuation_type)\n', (69841, 69891), True, 'import pyinduct.parabolic as parabolic\n'), ((70181, 70281), 'pyinduct.parabolic.get_parabolic_dirichlet_weak_form', 'parabolic.get_parabolic_dirichlet_weak_form', (['"""eig_base"""', '"""adjoint_eig_base"""', 'u', 'param', 'dz.bounds'], {}), "('eig_base', 'adjoint_eig_base',\n u, param, dz.bounds)\n", (70224, 70281), True, 'import pyinduct.parabolic as parabolic\n'), ((70403, 70438), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (70429, 70438), True, 'import pyinduct.simulation as sim\n'), ((70457, 70483), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (70479, 70483), True, 'import pyinduct.simulation as sim\n'), ((70567, 70586), 'numpy.diag', 'np.diag', (['eig_values'], {}), '(eig_values)\n', (70574, 70586), True, 'import numpy as np\n'), ((70725, 70769), 'pyinduct.simulation.StateSpace', 'sim.StateSpace', (['a_mat', 'b_mat'], {'input_handle': 'u'}), '(a_mat, b_mat, input_handle=u)\n', (70739, 70769), True, 'import pyinduct.simulation as sim\n'), ((70994, 71065), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['ss_weak.B[0][1]', 'ss_modal.B[0][1]'], {}), '(ss_weak.B[0][1], ss_modal.B[0][1])\n', (71030, 
71065), True, 'import numpy as np\n'), ((71598, 71628), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""eig_base"""'], {}), "('eig_base')\n", (71616, 71628), True, 'import pyinduct as pi\n'), ((71637, 71675), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""adjoint_eig_base"""'], {}), "('adjoint_eig_base')\n", (71655, 71675), True, 'import pyinduct as pi\n'), ((71918, 71972), 'pyinduct.SecondOrderEigenfunction.get_adjoint_problem', 'pi.SecondOrderEigenfunction.get_adjoint_problem', (['param'], {}), '(param)\n', (71965, 71972), True, 'import pyinduct as pi\n'), ((72068, 72110), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, l)', 'num': 'spatial_disc'}), '(bounds=(0, l), num=spatial_disc)\n', (72077, 72110), True, 'import pyinduct as pi\n'), ((72171, 72218), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': '(0, t_end)', 'num': 'temporal_disc'}), '(bounds=(0, t_end), num=temporal_disc)\n', (72180, 72218), True, 'import pyinduct as pi\n'), ((72263, 72320), 'pyinduct.parabolic.compute_rad_robin_eigenfrequencies', 'parabolic.compute_rad_robin_eigenfrequencies', (['param', 'l', 'n'], {}), '(param, l, n)\n', (72307, 72320), True, 'import pyinduct.parabolic as parabolic\n'), ((72863, 72918), 'pyinduct.normalize_base', 'pi.normalize_base', (['init_eig_base', 'init_adjoint_eig_base'], {}), '(init_eig_base, init_adjoint_eig_base)\n', (72880, 72918), True, 'import pyinduct as pi\n'), ((73008, 73046), 'pyinduct.register_base', 'pi.register_base', (['"""eig_base"""', 'eig_base'], {}), "('eig_base', eig_base)\n", (73024, 73046), True, 'import pyinduct as pi\n'), ((73055, 73109), 'pyinduct.register_base', 'pi.register_base', (['"""adjoint_eig_base"""', 'adjoint_eig_base'], {}), "('adjoint_eig_base', adjoint_eig_base)\n", (73071, 73109), True, 'import pyinduct as pi\n'), ((73192, 73233), 'pyinduct.Function', 'pi.Function', (['(lambda z: 0.0)'], {'domain': '(0, l)'}), '(lambda z: 0.0, domain=(0, l))\n', (73203, 73233), True, 'import pyinduct as pi\n'), ((73259, 
73308), 'pyinduct.project_on_base', 'pi.project_on_base', (['start_state', 'adjoint_eig_base'], {}), '(start_state, adjoint_eig_base)\n', (73277, 73308), True, 'import pyinduct as pi\n'), ((73348, 73422), 'pyinduct.parabolic.RadFeedForward', 'parabolic.RadFeedForward', (['l', 't_end', 'param', 'bound_cond_type', 'actuation_type'], {}), '(l, t_end, param, bound_cond_type, actuation_type)\n', (73372, 73422), True, 'import pyinduct.parabolic as parabolic\n'), ((73519, 73615), 'pyinduct.parabolic.get_parabolic_robin_weak_form', 'parabolic.get_parabolic_robin_weak_form', (['"""eig_base"""', '"""adjoint_eig_base"""', 'u', 'param', 'dz.bounds'], {}), "('eig_base', 'adjoint_eig_base', u,\n param, dz.bounds)\n", (73558, 73615), True, 'import pyinduct.parabolic as parabolic\n'), ((73625, 73660), 'pyinduct.simulation.parse_weak_formulation', 'sim.parse_weak_formulation', (['rad_pde'], {}), '(rad_pde)\n', (73651, 73660), True, 'import pyinduct.simulation as sim\n'), ((73679, 73705), 'pyinduct.simulation.create_state_space', 'sim.create_state_space', (['ce'], {}), '(ce)\n', (73701, 73705), True, 'import pyinduct.simulation as sim\n'), ((73928, 73972), 'pyinduct.simulation.StateSpace', 'sim.StateSpace', (['a_mat', 'b_mat'], {'input_handle': 'u'}), '(a_mat, b_mat, input_handle=u)\n', (73942, 73972), True, 'import pyinduct.simulation as sim\n'), ((74228, 74299), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['ss_weak.B[0][1]', 'ss_modal.B[0][1]'], {}), '(ss_weak.B[0][1], ss_modal.B[0][1])\n', (74264, 74299), True, 'import numpy as np\n'), ((74320, 74375), 'pyinduct.simulation.simulate_state_space', 'sim.simulate_state_space', (['ss_modal', 'initial_weights', 'dt'], {}), '(ss_modal, initial_weights, dt)\n', (74344, 74375), True, 'import pyinduct.simulation as sim\n'), ((74393, 74459), 'pyinduct.simulation.evaluate_approximation', 'sim.evaluate_approximation', (['"""eig_base"""', 'q', 't_end', 'dz'], {'spat_order': '(1)'}), "('eig_base', q, 
t_end, dz, spat_order=1)\n", (74419, 74459), True, 'import pyinduct.simulation as sim\n'), ((74703, 74738), 'pyinduct.deregister_base', 'pi.deregister_base', (['extra_labels[0]'], {}), '(extra_labels[0])\n', (74721, 74738), True, 'import pyinduct as pi\n'), ((74747, 74782), 'pyinduct.deregister_base', 'pi.deregister_base', (['extra_labels[1]'], {}), '(extra_labels[1])\n', (74765, 74782), True, 'import pyinduct as pi\n'), ((74791, 74821), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""eig_base"""'], {}), "('eig_base')\n", (74809, 74821), True, 'import pyinduct as pi\n'), ((74830, 74868), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""adjoint_eig_base"""'], {}), "('adjoint_eig_base')\n", (74848, 74868), True, 'import pyinduct as pi\n'), ((75025, 75064), 'pyinduct.Domain', 'pi.Domain', (['(0, 10)'], {'step': 'self.time_step'}), '((0, 10), step=self.time_step)\n', (75034, 75064), True, 'import pyinduct as pi\n'), ((75089, 75114), 'pyinduct.Domain', 'pi.Domain', (['(0, 1)'], {'num': '(50)'}), '((0, 1), num=50)\n', (75098, 75114), True, 'import pyinduct as pi\n'), ((75170, 75220), 'pyinduct.Domain', 'pi.Domain', (['self.spat_dom.bounds'], {'num': 'self.node_cnt'}), '(self.spat_dom.bounds, num=self.node_cnt)\n', (75179, 75220), True, 'import pyinduct as pi\n'), ((75245, 75293), 'pyinduct.LagrangeSecondOrder.cure_interval', 'pi.LagrangeSecondOrder.cure_interval', (['self.nodes'], {}), '(self.nodes)\n', (75281, 75293), True, 'import pyinduct as pi\n'), ((75575, 75617), 'pyinduct.register_base', 'pi.register_base', (['"""fe_base"""', 'self.fe_funcs'], {}), "('fe_base', self.fe_funcs)\n", (75591, 75617), True, 'import pyinduct as pi\n'), ((75638, 75724), 'pyinduct.simulation.evaluate_approximation', 'sim.evaluate_approximation', (['"""fe_base"""', 'self.weights', 'self.dates', 'self.spat_dom', '(1)'], {}), "('fe_base', self.weights, self.dates, self.\n spat_dom, 1)\n", (75664, 75724), True, 'import pyinduct.simulation as sim\n'), ((75916, 75945), 
'pyinduct.deregister_base', 'pi.deregister_base', (['"""fe_base"""'], {}), "('fe_base')\n", (75934, 75945), True, 'import pyinduct as pi\n'), ((76205, 76245), 'pyinduct.register_base', 'pi.register_base', (['"""fe_comp_base"""', 'c_base'], {}), "('fe_comp_base', c_base)\n", (76221, 76245), True, 'import pyinduct as pi\n'), ((76259, 76350), 'pyinduct.simulation.evaluate_approximation', 'sim.evaluate_approximation', (['"""fe_comp_base"""', 'self.weights', 'self.dates', 'self.spat_dom', '(0)'], {}), "('fe_comp_base', self.weights, self.dates, self.\n spat_dom, 0)\n", (76285, 76350), True, 'import pyinduct.simulation as sim\n'), ((76514, 76548), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""fe_comp_base"""'], {}), "('fe_comp_base')\n", (76532, 76548), True, 'import pyinduct as pi\n'), ((76944, 76982), 'pyinduct.Domain', 'pi.Domain', ([], {'bounds': 'self.limits', 'num': '(100)'}), '(bounds=self.limits, num=100)\n', (76953, 76982), True, 'import pyinduct as pi\n'), ((76999, 77030), 'pyinduct.Domain', 'pi.Domain', (['domain.bounds'], {'num': '(3)'}), '(domain.bounds, num=3)\n', (77008, 77030), True, 'import pyinduct as pi\n'), ((77046, 77089), 'pyinduct.LagrangeSecondOrder.cure_interval', 'pi.LagrangeSecondOrder.cure_interval', (['nodes'], {}), '(nodes)\n', (77082, 77089), True, 'import pyinduct as pi\n'), ((77099, 77131), 'pyinduct.register_base', 'pi.register_base', (['"""base_1"""', 'base'], {}), "('base_1', base)\n", (77115, 77131), True, 'import pyinduct as pi\n'), ((77140, 77172), 'pyinduct.register_base', 'pi.register_base', (['"""base_2"""', 'base'], {}), "('base_2', base)\n", (77156, 77172), True, 'import pyinduct as pi\n'), ((77181, 77213), 'pyinduct.register_base', 'pi.register_base', (['"""base_3"""', 'base'], {}), "('base_3', base)\n", (77197, 77213), True, 'import pyinduct as pi\n'), ((77233, 77259), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_1"""'], {}), "('base_1')\n", (77249, 77259), True, 'import pyinduct as pi\n'), ((77281, 
77306), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {}), "('base_1')\n", (77296, 77306), True, 'import pyinduct as pi\n'), ((77325, 77351), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_2"""'], {}), "('base_2')\n", (77341, 77351), True, 'import pyinduct as pi\n'), ((77373, 77398), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {}), "('base_2')\n", (77388, 77398), True, 'import pyinduct as pi\n'), ((77417, 77443), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_3"""'], {}), "('base_3')\n", (77433, 77443), True, 'import pyinduct as pi\n'), ((77465, 77490), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_3"""'], {}), "('base_3')\n", (77480, 77490), True, 'import pyinduct as pi\n'), ((78207, 78275), 'pyinduct.simulation.parse_weak_formulations', 'sim.parse_weak_formulations', (['[weak_form_1, weak_form_2, weak_form_3]'], {}), '([weak_form_1, weak_form_2, weak_form_3])\n', (78234, 78275), True, 'import pyinduct.simulation as sim\n'), ((78327, 78355), 'pyinduct.simulation.set_dominant_labels', 'sim.set_dominant_labels', (['ces'], {}), '(ces)\n', (78350, 78355), True, 'import pyinduct.simulation as sim\n'), ((79058, 79113), 'pyinduct.simulation.parse_weak_formulations', 'sim.parse_weak_formulations', (['[weak_form_1, weak_form_2]'], {}), '([weak_form_1, weak_form_2])\n', (79085, 79113), True, 'import pyinduct.simulation as sim\n'), ((80057, 80125), 'pyinduct.simulation.parse_weak_formulations', 'sim.parse_weak_formulations', (['[weak_form_1, weak_form_2, weak_form_3]'], {}), '([weak_form_1, weak_form_2, weak_form_3])\n', (80084, 80125), True, 'import pyinduct.simulation as sim\n'), ((80708, 80763), 'pyinduct.simulation.parse_weak_formulations', 'sim.parse_weak_formulations', (['[weak_form_1, weak_form_2]'], {}), '([weak_form_1, weak_form_2])\n', (80735, 80763), True, 'import pyinduct.simulation as sim\n'), ((81343, 81398), 'pyinduct.simulation.parse_weak_formulations', 'sim.parse_weak_formulations', 
(['[weak_form_1, weak_form_2]'], {}), '([weak_form_1, weak_form_2])\n', (81370, 81398), True, 'import pyinduct.simulation as sim\n'), ((81500, 81528), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_1"""'], {}), "('base_1')\n", (81518, 81528), True, 'import pyinduct as pi\n'), ((81537, 81565), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_2"""'], {}), "('base_2')\n", (81555, 81565), True, 'import pyinduct as pi\n'), ((81574, 81602), 'pyinduct.deregister_base', 'pi.deregister_base', (['"""base_3"""'], {}), "('base_3')\n", (81592, 81602), True, 'import pyinduct as pi\n'), ((81853, 81882), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['[]'], {}), '([])\n', (81878, 81882), True, 'import pyinduct.simulation as sim\n'), ((81986, 82027), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['self.inputs[1]'], {}), '(self.inputs[1])\n', (82011, 82027), True, 'import pyinduct.simulation as sim\n'), ((82146, 82184), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['self.inputs'], {}), '(self.inputs)\n', (82171, 82184), True, 'import pyinduct.simulation as sim\n'), ((82306, 82348), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['self.inputs[:2]'], {}), '(self.inputs[:2])\n', (82331, 82348), True, 'import pyinduct.simulation as sim\n'), ((82664, 82702), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['self.inputs'], {}), '(self.inputs)\n', (82689, 82702), True, 'import pyinduct.simulation as sim\n'), ((82933, 82962), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['[]'], {}), '([])\n', (82958, 82962), True, 'import pyinduct.simulation as sim\n'), ((83321, 83350), 'pyinduct.simulation.SimulationInputVector', 'sim.SimulationInputVector', (['[]'], {}), '([])\n', (83346, 83350), True, 'import pyinduct.simulation as sim\n'), ((83453, 83491), 'pyinduct.simulation.SimulationInputVector', 
'sim.SimulationInputVector', (['self.inputs'], {}), '(self.inputs)\n', (83478, 83491), True, 'import pyinduct.simulation as sim\n'), ((1090, 1112), 'numpy.ones', 'np.ones', (['(der_order + 1)'], {}), '(der_order + 1)\n', (1097, 1112), True, 'import numpy as np\n'), ((2925, 2950), 'pyinduct.Domain', 'pi.Domain', (['(0, 1)'], {'num': '(10)'}), '((0, 1), num=10)\n', (2934, 2950), True, 'import pyinduct as pi\n'), ((3644, 3675), 'numpy.array_equal', 'np.array_equal', (['ed', 'ed_explicit'], {}), '(ed, ed_explicit)\n', (3658, 3675), True, 'import numpy as np\n'), ((4881, 4927), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['E'][0][1]", 'a'], {}), "(self.cf.matrices['E'][0][1], a)\n", (4895, 4927), True, 'import numpy as np\n'), ((5020, 5070), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['E'][0][1]", '(6 * a)'], {}), "(self.cf.matrices['E'][0][1], 6 * a)\n", (5034, 5070), True, 'import numpy as np\n'), ((5329, 5375), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['E'][2][1]", 'b'], {}), "(self.cf.matrices['E'][2][1], b)\n", (5343, 5375), True, 'import numpy as np\n'), ((5638, 5678), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['f']", 'f'], {}), "(self.cf.matrices['f'], f)\n", (5652, 5678), True, 'import numpy as np\n'), ((5848, 5892), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['f']", '(2 * f)'], {}), "(self.cf.matrices['f'], 2 * f)\n", (5862, 5892), True, 'import numpy as np\n'), ((6074, 6120), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['G'][0][1]", 'c'], {}), "(self.cf.matrices['G'][0][1], c)\n", (6088, 6120), True, 'import numpy as np\n'), ((6761, 6807), 'numpy.array_equal', 'np.array_equal', (["self.cf.matrices['G'][1][1]", 'c'], {}), "(self.cf.matrices['G'][1][1], c)\n", (6775, 6807), True, 'import numpy as np\n'), ((8193, 8215), 'pyinduct.Function', 'pi.Function', (['heavyside'], {}), '(heavyside)\n', (8204, 8215), True, 'import pyinduct as pi\n'), ((8500, 8531), 
'pyinduct.ComposedFunctionVector', 'pi.ComposedFunctionVector', (['f', 's'], {}), '(f, s)\n', (8525, 8531), True, 'import pyinduct as pi\n'), ((10656, 10699), 'pyinduct.Product', 'pi.Product', (['self.test_funcs_at1', 'self.input'], {}), '(self.test_funcs_at1, self.input)\n', (10666, 10699), True, 'import pyinduct as pi\n'), ((10802, 10845), 'pyinduct.Product', 'pi.Product', (['self.input', 'self.test_funcs_at1'], {}), '(self.input, self.test_funcs_at1)\n', (10812, 10845), True, 'import pyinduct as pi\n'), ((10999, 11045), 'pyinduct.Product', 'pi.Product', (['self.test_funcs_dz_at1', 'self.input'], {}), '(self.test_funcs_dz_at1, self.input)\n', (11009, 11045), True, 'import pyinduct as pi\n'), ((11143, 11182), 'pyinduct.Product', 'pi.Product', (['self.test_funcs', 'self.input'], {}), '(self.test_funcs, self.input)\n', (11153, 11182), True, 'import pyinduct as pi\n'), ((11347, 11386), 'pyinduct.Product', 'pi.Product', (['self.input', 'self.test_funcs'], {}), '(self.input, self.test_funcs)\n', (11357, 11386), True, 'import pyinduct as pi\n'), ((12141, 12183), 'pyinduct.Product', 'pi.Product', (['self.test_funcs', 'self.input_dt'], {}), '(self.test_funcs, self.input_dt)\n', (12151, 12183), True, 'import pyinduct as pi\n'), ((12366, 12415), 'pyinduct.Product', 'pi.Product', (['self.test_funcs_at0', 'self.vec_input_1'], {}), '(self.test_funcs_at0, self.vec_input_1)\n', (12376, 12415), True, 'import pyinduct as pi\n'), ((12481, 12530), 'pyinduct.Product', 'pi.Product', (['self.test_funcs_at1', 'self.vec_input_2'], {}), '(self.test_funcs_at1, self.vec_input_2)\n', (12491, 12530), True, 'import pyinduct as pi\n'), ((12600, 12652), 'pyinduct.Product', 'pi.Product', (['self.test_funcs_at0', 'self.vec_input_dt_1'], {}), '(self.test_funcs_at0, self.vec_input_dt_1)\n', (12610, 12652), True, 'import pyinduct as pi\n'), ((12721, 12773), 'pyinduct.Product', 'pi.Product', (['self.test_funcs_at1', 'self.vec_input_dt_2'], {}), '(self.test_funcs_at1, self.vec_input_dt_2)\n', (12731, 
12773), True, 'import pyinduct as pi\n'), ((12916, 12960), 'pyinduct.Product', 'pi.Product', (['self.test_funcs', 'self.test_funcs'], {}), '(self.test_funcs, self.test_funcs)\n', (12926, 12960), True, 'import pyinduct as pi\n'), ((13212, 13266), 'pyinduct.Product', 'pi.Product', (['self.comp_test_funcs', 'self.comp_test_funcs'], {}), '(self.comp_test_funcs, self.comp_test_funcs)\n', (13222, 13266), True, 'import pyinduct as pi\n'), ((14250, 14294), 'pyinduct.Product', 'pi.Product', (['self.field_var_at1', 'self.scalars'], {}), '(self.field_var_at1, self.scalars)\n', (14260, 14294), True, 'import pyinduct as pi\n'), ((14339, 14379), 'pyinduct.Product', 'pi.Product', (['self.field_var', 'self.scalars'], {}), '(self.field_var, self.scalars)\n', (14349, 14379), True, 'import pyinduct as pi\n'), ((14530, 14573), 'pyinduct.Product', 'pi.Product', (['self.field_var', 'self.test_funcs'], {}), '(self.field_var, self.test_funcs)\n', (14540, 14573), True, 'import pyinduct as pi\n'), ((14734, 14777), 'pyinduct.Product', 'pi.Product', (['self.test_funcs', 'self.field_var'], {}), '(self.test_funcs, self.field_var)\n', (14744, 14777), True, 'import pyinduct as pi\n'), ((14963, 15010), 'pyinduct.Product', 'pi.Product', (['self.field_var_at1', 'self.test_funcs'], {}), '(self.field_var_at1, self.test_funcs)\n', (14973, 15010), True, 'import pyinduct as pi\n'), ((15082, 15129), 'pyinduct.Product', 'pi.Product', (['self.field_var', 'self.test_funcs_at1'], {}), '(self.field_var, self.test_funcs_at1)\n', (15092, 15129), True, 'import pyinduct as pi\n'), ((15204, 15255), 'pyinduct.Product', 'pi.Product', (['self.field_var_at1', 'self.test_funcs_at1'], {}), '(self.field_var_at1, self.test_funcs_at1)\n', (15214, 15255), True, 'import pyinduct as pi\n'), ((15318, 15365), 'pyinduct.Product', 'pi.Product', (['self.field_var_ddt', 'self.test_funcs'], {}), '(self.field_var_ddt, self.test_funcs)\n', (15328, 15365), True, 'import pyinduct as pi\n'), ((15442, 15497), 'pyinduct.Product', 
'pi.Product', (['self.field_var_ddt_at0', 'self.test_funcs_at0'], {}), '(self.field_var_ddt_at0, self.test_funcs_at0)\n', (15452, 15497), True, 'import pyinduct as pi\n'), ((15567, 15621), 'pyinduct.Product', 'pi.Product', (['self.field_var_at1', 'self.test_funcs_dz_at1'], {}), '(self.field_var_at1, self.test_funcs_dz_at1)\n', (15577, 15621), True, 'import pyinduct as pi\n'), ((15664, 15711), 'pyinduct.Product', 'pi.Product', (['self.field_var_ddt', 'self.test_funcs'], {}), '(self.field_var_ddt, self.test_funcs)\n', (15674, 15711), True, 'import pyinduct as pi\n'), ((15859, 15908), 'pyinduct.Product', 'pi.Product', (['self.field_var_dz', 'self.test_funcs_dz'], {}), '(self.field_var_dz, self.test_funcs_dz)\n', (15869, 15908), True, 'import pyinduct as pi\n'), ((16067, 16113), 'pyinduct.Product', 'pi.Product', (['self.field_var_dz', 'self.test_funcs'], {}), '(self.field_var_dz, self.test_funcs)\n', (16077, 16113), True, 'import pyinduct as pi\n'), ((16535, 16579), 'pyinduct.Product', 'pi.Product', (['self.scalar_func', 'self.field_var'], {}), '(self.scalar_func, self.field_var)\n', (16545, 16579), True, 'import pyinduct as pi\n'), ((16766, 16810), 'pyinduct.Product', 'pi.Product', (['self.field_var', 'self.scalar_func'], {}), '(self.field_var, self.scalar_func)\n', (16776, 16810), True, 'import pyinduct as pi\n'), ((17175, 17208), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (17190, 17208), True, 'import numpy as np\n'), ((17318, 17344), 'numpy.array', 'np.array', (['[[0], [-2], [2]]'], {}), '([[0], [-2], [2]])\n', (17326, 17344), True, 'import numpy as np\n'), ((17527, 17560), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (17542, 17560), True, 'import numpy as np\n'), ((17670, 17703), 'numpy.array', 'np.array', (['[[0.25], [0.5], [0.25]]'], {}), '([[0.25], [0.5], [0.25]])\n', (17678, 17703), True, 'import numpy as np\n'), ((17891, 17924), 'numpy.iscomplexobj', 
'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (17906, 17924), True, 'import numpy as np\n'), ((18034, 18067), 'numpy.array', 'np.array', (['[[0.25], [0.5], [0.25]]'], {}), '([[0.25], [0.5], [0.25]])\n', (18042, 18067), True, 'import numpy as np\n'), ((18254, 18287), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (18269, 18287), True, 'import numpy as np\n'), ((18397, 18430), 'numpy.array', 'np.array', (['[[0.0], [0.25], [0.25]]'], {}), '([[0.0], [0.25], [0.25]])\n', (18405, 18430), True, 'import numpy as np\n'), ((18631, 18664), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (18646, 18664), True, 'import numpy as np\n'), ((18777, 18808), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (18785, 18808), True, 'import numpy as np\n'), ((19010, 19043), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (19025, 19043), True, 'import numpy as np\n'), ((19156, 19189), 'numpy.array', 'np.array', (['[[0.0], [0.25], [0.25]]'], {}), '([[0.0], [0.25], [0.25]])\n', (19164, 19189), True, 'import numpy as np\n'), ((19448, 19481), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (19463, 19481), True, 'import numpy as np\n'), ((19591, 19625), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [0, 1]]'], {}), '([[1, 0], [0, 0], [0, 1]])\n', (19599, 19625), True, 'import numpy as np\n'), ((19969, 20002), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][1][1]"], {}), "(terms['G'][1][1])\n", (19984, 20002), True, 'import numpy as np\n'), ((20112, 20145), 'numpy.array', 'np.array', (['[[0.25], [0.5], [0.25]]'], {}), '([[0.25], [0.5], [0.25]])\n', (20120, 20145), True, 'import numpy as np\n'), ((20429, 20462), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][1][1]"], {}), "(terms['G'][1][1])\n", (20444, 20462), True, 'import numpy as np\n'), 
((20572, 20606), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [0, 1]]'], {}), '([[1, 0], [0, 0], [0, 1]])\n', (20580, 20606), True, 'import numpy as np\n'), ((20935, 20962), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['f']"], {}), "(terms['f'])\n", (20950, 20962), True, 'import numpy as np\n'), ((21066, 21091), 'numpy.array', 'np.array', (['[[0], [0], [1]]'], {}), '([[0], [0], [1]])\n', (21074, 21091), True, 'import numpy as np\n'), ((21276, 21303), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['f']"], {}), "(terms['f'])\n", (21291, 21303), True, 'import numpy as np\n'), ((21407, 21444), 'numpy.array', 'np.array', (['[[1 / 6], [1 / 3], [1 / 6]]'], {}), '([[1 / 6], [1 / 3], [1 / 6]])\n', (21415, 21444), True, 'import numpy as np\n'), ((22844, 22877), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (22859, 22877), True, 'import numpy as np\n'), ((22987, 23008), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (22995, 23008), True, 'import numpy as np\n'), ((23219, 23252), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][2][1]"], {}), "(terms['E'][2][1])\n", (23234, 23252), True, 'import numpy as np\n'), ((23362, 23383), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (23370, 23383), True, 'import numpy as np\n'), ((23593, 23626), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (23608, 23626), True, 'import numpy as np\n'), ((23736, 23758), 'numpy.array', 'np.array', (['[[0, -2, 2]]'], {}), '([[0, -2, 2]])\n', (23744, 23758), True, 'import numpy as np\n'), ((23960, 23993), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (23975, 23993), True, 'import numpy as np\n'), ((24103, 24132), 'numpy.array', 'np.array', (['[[0.25, 0.5, 0.25]]'], {}), '([[0.25, 0.5, 0.25]])\n', (24111, 24132), True, 'import numpy as np\n'), ((24336, 24369), 'numpy.iscomplexobj', 'np.iscomplexobj', 
(["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (24351, 24369), True, 'import numpy as np\n'), ((24479, 24506), 'numpy.array', 'np.array', (['[[0.25, 0.25, 0]]'], {}), '([[0.25, 0.25, 0]])\n', (24487, 24506), True, 'import numpy as np\n'), ((24709, 24742), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (24724, 24742), True, 'import numpy as np\n'), ((24852, 24874), 'numpy.array', 'np.array', (['[[-1, 0, 1]]'], {}), '([[-1, 0, 1]])\n', (24860, 24874), True, 'import numpy as np\n'), ((25080, 25113), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][2][1]"], {}), "(terms['E'][2][1])\n", (25095, 25113), True, 'import numpy as np\n'), ((25223, 25252), 'numpy.array', 'np.array', (['[[0.25, 0.5, 0.25]]'], {}), '([[0.25, 0.5, 0.25]])\n', (25231, 25252), True, 'import numpy as np\n'), ((26552, 26585), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (26567, 26585), True, 'import numpy as np\n'), ((26695, 26738), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 1], [0, 0, 2]]'], {}), '([[0, 0, 0], [0, 0, 1], [0, 0, 2]])\n', (26703, 26738), True, 'import numpy as np\n'), ((27052, 27085), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (27067, 27085), True, 'import numpy as np\n'), ((27195, 27250), 'numpy.array', 'np.array', (['[[0, 0, 0], [0.25, 0.5, 0.25], [0.5, 1, 0.5]]'], {}), '([[0, 0, 0], [0.25, 0.5, 0.25], [0.5, 1, 0.5]])\n', (27203, 27250), True, 'import numpy as np\n'), ((27561, 27594), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (27576, 27594), True, 'import numpy as np\n'), ((27704, 27779), 'numpy.array', 'np.array', (['[[1 / 6, 1 / 12, 0], [1 / 12, 1 / 3, 1 / 12], [0, 1 / 12, 1 / 6]]'], {}), '([[1 / 6, 1 / 12, 0], [1 / 12, 1 / 3, 1 / 12], [0, 1 / 12, 1 / 6]])\n', (27712, 27779), True, 'import numpy as np\n'), ((28102, 28135), 'numpy.iscomplexobj', 'np.iscomplexobj', 
(["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (28117, 28135), True, 'import numpy as np\n'), ((28245, 28320), 'numpy.array', 'np.array', (['[[1 / 6, 1 / 12, 0], [1 / 12, 1 / 3, 1 / 12], [0, 1 / 12, 1 / 6]]'], {}), '([[1 / 6, 1 / 12, 0], [1 / 12, 1 / 3, 1 / 12], [0, 1 / 12, 1 / 6]])\n', (28253, 28320), True, 'import numpy as np\n'), ((28639, 28672), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (28654, 28672), True, 'import numpy as np\n'), ((28782, 28833), 'numpy.array', 'np.array', (['[[0, 0, 0.25], [0, 0, 0.5], [0, 0, 0.25]]'], {}), '([[0, 0, 0.25], [0, 0, 0.5], [0, 0, 0.25]])\n', (28790, 28833), True, 'import numpy as np\n'), ((29151, 29184), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (29166, 29184), True, 'import numpy as np\n'), ((29294, 29345), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0.25, 0.5, 0.25]]'], {}), '([[0, 0, 0], [0, 0, 0], [0.25, 0.5, 0.25]])\n', (29302, 29345), True, 'import numpy as np\n'), ((29668, 29701), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (29683, 29701), True, 'import numpy as np\n'), ((29811, 29854), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 1]])\n', (29819, 29854), True, 'import numpy as np\n'), ((30201, 30234), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][2][1]"], {}), "(terms['E'][2][1])\n", (30216, 30234), True, 'import numpy as np\n'), ((30344, 30419), 'numpy.array', 'np.array', (['[[1 / 6, 1 / 12, 0], [1 / 12, 1 / 3, 1 / 12], [0, 1 / 12, 1 / 6]]'], {}), '([[1 / 6, 1 / 12, 0], [1 / 12, 1 / 3, 1 / 12], [0, 1 / 12, 1 / 6]])\n', (30352, 30419), True, 'import numpy as np\n'), ((30746, 30779), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][2][1]"], {}), "(terms['E'][2][1])\n", (30761, 30779), True, 'import numpy as np\n'), ((30889, 30932), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, 0], 
[0, 0, 0]]'], {}), '([[1, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (30897, 30932), True, 'import numpy as np\n'), ((31243, 31276), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (31258, 31276), True, 'import numpy as np\n'), ((31386, 31433), 'numpy.array', 'np.array', (['[[2, -2, 0], [-2, 4, -2], [0, -2, 2]]'], {}), '([[2, -2, 0], [-2, 4, -2], [0, -2, 2]])\n', (31394, 31433), True, 'import numpy as np\n'), ((31755, 31788), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (31770, 31788), True, 'import numpy as np\n'), ((31898, 31956), 'numpy.array', 'np.array', (['[[-0.5, 0.5, 0], [-0.5, 0, 0.5], [0, -0.5, 0.5]]'], {}), '([[-0.5, 0.5, 0], [-0.5, 0, 0.5], [0, -0.5, 0.5]])\n', (31906, 31956), True, 'import numpy as np\n'), ((32277, 32310), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (32292, 32310), True, 'import numpy as np\n'), ((32420, 32464), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, -2], [0, 0, 2]]'], {}), '([[0, 0, 0], [0, 0, -2], [0, 0, 2]])\n', (32428, 32464), True, 'import numpy as np\n'), ((32828, 32861), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (32843, 32861), True, 'import numpy as np\n'), ((33150, 33183), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (33165, 33183), True, 'import numpy as np\n'), ((33512, 33545), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (33527, 33545), True, 'import numpy as np\n'), ((33838, 33871), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['E'][0][1]"], {}), "(terms['E'][0][1])\n", (33853, 33871), True, 'import numpy as np\n'), ((34127, 34160), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (34142, 34160), True, 'import numpy as np\n'), ((34270, 34295), 'numpy.array', 'np.array', (['[[0], [0], [1]]'], 
{}), '([[0], [0], [1]])\n', (34278, 34295), True, 'import numpy as np\n'), ((34486, 34519), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['G'][0][1]"], {}), "(terms['G'][0][1])\n", (34501, 34519), True, 'import numpy as np\n'), ((34629, 34654), 'numpy.array', 'np.array', (['[[0], [0], [1]]'], {}), '([[0], [0], [1]])\n', (34637, 34654), True, 'import numpy as np\n'), ((34789, 34866), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[self.alternating_weights_term, self.field_int]'], {'name': '""""""'}), "([self.alternating_weights_term, self.field_int], name='')\n", (34808, 34866), True, 'import pyinduct.simulation as sim\n'), ((36354, 36369), 'pyinduct.Base', 'pi.Base', (['funcs1'], {}), '(funcs1)\n', (36361, 36369), True, 'import pyinduct as pi\n'), ((36427, 36442), 'pyinduct.Base', 'pi.Base', (['funcs1'], {}), '(funcs1)\n', (36434, 36442), True, 'import pyinduct as pi\n'), ((36783, 36811), 'numpy.diag', 'np.diag', (['[1, 0, 1, 0, 0]', '(-1)'], {}), '([1, 0, 1, 0, 0], -1)\n', (36790, 36811), True, 'import numpy as np\n'), ((36814, 36840), 'numpy.diag', 'np.diag', (['([0] * 4 + [1] * 2)'], {}), '([0] * 4 + [1] * 2)\n', (36821, 36840), True, 'import numpy as np\n'), ((38582, 38612), 'pyinduct.Product', 'pi.Product', (['field_var_ddt', 'psi'], {}), '(field_var_ddt, psi)\n', (38592, 38612), True, 'import pyinduct as pi\n'), ((38761, 38793), 'pyinduct.Product', 'pi.Product', (['field_var_dz', 'psi_dz'], {}), '(field_var_dz, psi_dz)\n', (38771, 38793), True, 'import pyinduct as pi\n'), ((39296, 39460), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [-2.25, 3, -\n 0.75, 0, 0, 0], [7.5, -18, 10.5, 0, 0, 0], [-3.75, 21, -17.25, 0, 0, 0]]'], {}), '([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [-\n 2.25, 3, -0.75, 0, 0, 0], [7.5, -18, 10.5, 0, 0, 0], [-3.75, 21, -17.25,\n 0, 0, 0]])\n', (39304, 39460), True, 'import numpy as np\n'), ((39695, 39747), 'numpy.array', 'np.array', (['[[0], [0], [0], 
[0.125], [-1.75], [6.875]]'], {}), '([[0], [0], [0], [0.125], [-1.75], [6.875]])\n', (39703, 39747), True, 'import numpy as np\n'), ((42427, 42457), 'pyinduct.Product', 'pi.Product', (['field_var_ddt', 'psi'], {}), '(field_var_ddt, psi)\n', (42437, 42457), True, 'import pyinduct as pi\n'), ((42708, 42740), 'pyinduct.Product', 'pi.Product', (['field_var_dz', 'psi_dz'], {}), '(field_var_dz, psi_dz)\n', (42718, 42740), True, 'import pyinduct as pi\n'), ((43999, 44066), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['eval_data[:2]'], {'title': '"""fem approx and derivative"""'}), "(eval_data[:2], title='fem approx and derivative')\n", (44016, 44066), True, 'import pyinduct as pi\n'), ((44122, 44152), 'pyinduct.PgSurfacePlot', 'pi.PgSurfacePlot', (['eval_data[0]'], {}), '(eval_data[0])\n', (44138, 44152), True, 'import pyinduct as pi\n'), ((44165, 44188), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (44172, 44188), True, 'import pyinduct as pi\n'), ((46850, 46910), 'numpy.array', 'np.array', (['[vec.func for vec in norm_comp_mod_base.fractions]'], {}), '([vec.func for vec in norm_comp_mod_base.fractions])\n', (46858, 46910), True, 'import numpy as np\n'), ((47788, 47811), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (47795, 47811), True, 'import pyinduct as pi\n'), ((49440, 49510), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['eval_data[0:2]'], {'title': '"""modal approx and derivative"""'}), "(eval_data[0:2], title='modal approx and derivative')\n", (49457, 49510), True, 'import pyinduct as pi\n'), ((49530, 49560), 'pyinduct.PgSurfacePlot', 'pi.PgSurfacePlot', (['eval_data[0]'], {}), '(eval_data[0])\n', (49546, 49560), True, 'import pyinduct as pi\n'), ((49573, 49596), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (49580, 49596), True, 'import pyinduct as pi\n'), ((49707, 49774), 'numpy.isclose', 'np.isclose', (['eval_data[0].output_data[-1, 0]', 
'self.y_end'], {'atol': '(0.001)'}), '(eval_data[0].output_data[-1, 0], self.y_end, atol=0.001)\n', (49717, 49774), True, 'import numpy as np\n'), ((50888, 50914), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_1"""'], {}), "('base_1')\n", (50904, 50914), True, 'import pyinduct as pi\n'), ((50941, 50967), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""base_2"""'], {}), "('base_2')\n", (50957, 50967), True, 'import pyinduct as pi\n'), ((51002, 51026), 'pyinduct.ConstantTrajectory', 'pi.ConstantTrajectory', (['(0)'], {}), '(0)\n', (51023, 51026), True, 'import pyinduct as pi\n'), ((51534, 51565), 'pyinduct.parse_weak_formulation', 'pi.parse_weak_formulation', (['form'], {}), '(form)\n', (51559, 51565), True, 'import pyinduct as pi\n'), ((56072, 56098), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['results'], {}), '(results)\n', (56089, 56098), True, 'import pyinduct as pi\n'), ((56111, 56134), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (56118, 56134), True, 'import pyinduct as pi\n'), ((56671, 56693), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['res'], {}), '(res)\n', (56688, 56693), True, 'import pyinduct as pi\n'), ((56706, 56729), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (56713, 56729), True, 'import pyinduct as pi\n'), ((57502, 57524), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['res'], {}), '(res)\n', (57519, 57524), True, 'import pyinduct as pi\n'), ((57537, 57560), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (57544, 57560), True, 'import pyinduct as pi\n'), ((58608, 58630), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['res'], {}), '(res)\n', (58625, 58630), True, 'import pyinduct as pi\n'), ((58643, 58666), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (58650, 58666), True, 'import pyinduct as pi\n'), ((60894, 60921), 'numpy.zeros', 'np.zeros', (['self.base_2.shape'], {}), 
'(self.base_2.shape)\n', (60902, 60921), True, 'import numpy as np\n'), ((61300, 61341), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['[eval_d]'], {'title': '"""Test"""'}), "([eval_d], title='Test')\n", (61317, 61341), True, 'import pyinduct as pi\n'), ((61361, 61385), 'pyinduct.PgSurfacePlot', 'pi.PgSurfacePlot', (['eval_d'], {}), '(eval_d)\n', (61377, 61385), True, 'import pyinduct as pi\n'), ((61398, 61421), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (61405, 61421), True, 'import pyinduct as pi\n'), ((63684, 63711), 'numpy.zeros', 'np.zeros', (['self.base_2.shape'], {}), '(self.base_2.shape)\n', (63692, 63711), True, 'import numpy as np\n'), ((66146, 66183), 'numpy.zeros', 'np.zeros', (['self.base_1.fractions.shape'], {}), '(self.base_1.fractions.shape)\n', (66154, 66183), True, 'import numpy as np\n'), ((66890, 66927), 'numpy.zeros', 'np.zeros', (['self.base_1.fractions.shape'], {}), '(self.base_1.fractions.shape)\n', (66898, 66927), True, 'import numpy as np\n'), ((66984, 67007), 'pyinduct.deregister_base', 'pi.deregister_base', (['lbl'], {}), '(lbl)\n', (67002, 67007), True, 'import pyinduct as pi\n'), ((67698, 67735), 'numpy.zeros', 'np.zeros', (['self.base_1.fractions.shape'], {}), '(self.base_1.fractions.shape)\n', (67706, 67735), True, 'import numpy as np\n'), ((67826, 67849), 'pyinduct.deregister_base', 'pi.deregister_base', (['lbl'], {}), '(lbl)\n', (67844, 67849), True, 'import pyinduct as pi\n'), ((68737, 68757), 'numpy.ones', 'np.ones', (['omega.shape'], {}), '(omega.shape)\n', (68744, 68757), True, 'import numpy as np\n'), ((68760, 68770), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (68767, 68770), True, 'import numpy as np\n'), ((71177, 71232), 'pyinduct.simulation.simulate_state_space', 'sim.simulate_state_space', (['ss_modal', 'initial_weights', 'dt'], {}), '(ss_modal, initial_weights, dt)\n', (71201, 71232), True, 'import pyinduct.simulation as sim\n'), ((71254, 71316), 
'pyinduct.simulation.evaluate_approximation', 'sim.evaluate_approximation', (['"""eig_base"""', 'q', 't', 'dz'], {'spat_order': '(0)'}), "('eig_base', q, t, dz, spat_order=0)\n", (71280, 71316), True, 'import pyinduct.simulation as sim\n'), ((71528, 71552), 'pyinduct.PgSurfacePlot', 'pi.PgSurfacePlot', (['eval_d'], {}), '(eval_d)\n', (71544, 71552), True, 'import pyinduct as pi\n'), ((71565, 71588), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (71572, 71588), True, 'import pyinduct as pi\n'), ((73787, 73812), 'numpy.real_if_close', 'np.real_if_close', (['eig_val'], {}), '(eig_val)\n', (73803, 73812), True, 'import numpy as np\n'), ((74572, 74613), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['[eval_d]'], {'title': '"""Test"""'}), "([eval_d], title='Test')\n", (74589, 74613), True, 'import pyinduct as pi\n'), ((74633, 74657), 'pyinduct.PgSurfacePlot', 'pi.PgSurfacePlot', (['eval_d'], {}), '(eval_d)\n', (74649, 74657), True, 'import pyinduct as pi\n'), ((74670, 74693), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (74677, 74693), True, 'import pyinduct as pi\n'), ((75985, 76013), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['eval_data'], {}), '(eval_data)\n', (76002, 76013), True, 'import pyinduct as pi\n'), ((76026, 76049), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (76033, 76049), True, 'import pyinduct as pi\n'), ((76620, 76675), 'pyinduct.EvalData', 'pi.EvalData', (['ev.input_data[:-1]', 'ev.output_data[..., i]'], {}), '(ev.input_data[:-1], ev.output_data[..., i])\n', (76631, 76675), True, 'import pyinduct as pi\n'), ((76772, 76794), 'pyinduct.PgAnimatedPlot', 'pi.PgAnimatedPlot', (['evs'], {}), '(evs)\n', (76789, 76794), True, 'import pyinduct as pi\n'), ((76807, 76830), 'pyinduct.show', 'pi.show', ([], {'show_mpl': '(False)'}), '(show_mpl=False)\n', (76814, 76830), True, 'import pyinduct as pi\n'), ((6340, 6357), 'numpy.hstack', 'np.hstack', 
(['(c, c)'], {}), '((c, c))\n', (6349, 6357), True, 'import numpy as np\n'), ((7028, 7045), 'numpy.hstack', 'np.hstack', (['(c, c)'], {}), '((c, c))\n', (7037, 7045), True, 'import numpy as np\n'), ((8769, 8791), 'pyinduct.ConstantFunction', 'pi.ConstantFunction', (['(1)'], {}), '(1)\n', (8788, 8791), True, 'import pyinduct as pi\n'), ((11590, 11635), 'pyinduct.Product', 'pi.Product', (['self.scalar_func', 'self.test_funcs'], {}), '(self.scalar_func, self.test_funcs)\n', (11600, 11635), True, 'import pyinduct as pi\n'), ((11786, 11831), 'pyinduct.Product', 'pi.Product', (['self.scalar_func', 'self.test_funcs'], {}), '(self.scalar_func, self.test_funcs)\n', (11796, 11831), True, 'import pyinduct as pi\n'), ((11984, 12029), 'pyinduct.Product', 'pi.Product', (['self.scalar_func', 'self.test_funcs'], {}), '(self.scalar_func, self.test_funcs)\n', (11994, 12029), True, 'import pyinduct as pi\n'), ((21792, 21819), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['f']"], {}), "(terms['f'])\n", (21807, 21819), True, 'import numpy as np\n'), ((21931, 21967), 'numpy.array', 'np.array', (['[[0, 0], [0, 0.5], [1, 1]]'], {}), '([[0, 0], [0, 0.5], [1, 1]])\n', (21939, 21967), True, 'import numpy as np\n'), ((22290, 22317), 'numpy.iscomplexobj', 'np.iscomplexobj', (["terms['f']"], {}), "(terms['f'])\n", (22305, 22317), True, 'import numpy as np\n'), ((22429, 22481), 'numpy.array', 'np.array', (['[[1 / 6 + 0], [1 / 3 + 0.25], [1 / 6 + 1]]'], {}), '([[1 / 6 + 0], [1 / 3 + 0.25], [1 / 6 + 1]])\n', (22437, 22481), True, 'import numpy as np\n'), ((35044, 35081), 'pyinduct.ConstantFunction', 'pi.ConstantFunction', (['(0)'], {'domain': '(0, 1)'}), '(0, domain=(0, 1))\n', (35063, 35081), True, 'import pyinduct as pi\n'), ((35146, 35167), 'pyinduct.Domain', 'pi.Domain', (['(0, 1)', 'nf'], {}), '((0, 1), nf)\n', (35155, 35167), True, 'import pyinduct as pi\n'), ((35909, 35922), 'pyinduct.Base', 'pi.Base', (['base'], {}), '(base)\n', (35916, 35922), True, 'import pyinduct as pi\n'), 
((38884, 38900), 'pyinduct.Input', 'pi.Input', (['self.u'], {}), '(self.u)\n', (38892, 38900), True, 'import pyinduct as pi\n'), ((42884, 42900), 'pyinduct.Input', 'pi.Input', (['self.u'], {}), '(self.u)\n', (42892, 42900), True, 'import pyinduct as pi\n'), ((43549, 43685), 'pyinduct.simulation.evaluate_approximation', 'sim.evaluate_approximation', (['"""fem_base"""', 'q[:, der_idx * fem_base.fractions.size:(der_idx + 1) * fem_base.fractions.size]', 't', 'self.dz'], {}), "('fem_base', q[:, der_idx * fem_base.fractions.\n size:(der_idx + 1) * fem_base.fractions.size], t, self.dz)\n", (43575, 43685), True, 'import pyinduct.simulation as sim\n'), ((45517, 45540), 'numpy.arange', 'np.arange', (['(0)', '(1000.0)', '(2)'], {}), '(0, 1000.0, 2)\n', (45526, 45540), True, 'import numpy as np\n'), ((50712, 50762), 'pyinduct.ConstantFunction', 'pi.ConstantFunction', (['(1)'], {'domain': 'dummy_domain.bounds'}), '(1, domain=dummy_domain.bounds)\n', (50731, 50762), True, 'import pyinduct as pi\n'), ((50821, 50871), 'pyinduct.ConstantFunction', 'pi.ConstantFunction', (['(1)'], {'domain': 'dummy_domain.bounds'}), '(1, domain=dummy_domain.bounds)\n', (50840, 50871), True, 'import pyinduct as pi\n'), ((51146, 51163), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['x2'], {}), '(x2)\n', (51159, 51163), True, 'import pyinduct as pi\n'), ((51177, 51193), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['u'], {}), '(u)\n', (51190, 51193), True, 'import pyinduct as pi\n'), ((51389, 51405), 'pyinduct.ScalarTerm', 'pi.ScalarTerm', (['u'], {}), '(u)\n', (51402, 51405), True, 'import pyinduct as pi\n'), ((61848, 61898), 'pyinduct.TemporalDerivedFieldVariable', 'pi.TemporalDerivedFieldVariable', (['"""base_2"""'], {'order': '(1)'}), "('base_2', order=1)\n", (61879, 61898), True, 'import pyinduct as pi\n'), ((61942, 61976), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(0)'}), "('base_2', order=0)\n", (61957, 61976), True, 'import pyinduct as pi\n'), ((62037, 62086), 
'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_2"""'], {'order': '(0)'}), "('base_2', order=0)\n", (62067, 62086), True, 'import pyinduct as pi\n'), ((62130, 62164), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(2)'}), "('base_2', order=2)\n", (62145, 62164), True, 'import pyinduct as pi\n'), ((62235, 62284), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_2"""'], {'order': '(1)'}), "('base_2', order=1)\n", (62265, 62284), True, 'import pyinduct as pi\n'), ((62328, 62362), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(0)'}), "('base_2', order=0)\n", (62343, 62362), True, 'import pyinduct as pi\n'), ((62433, 62482), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_2"""'], {'order': '(0)'}), "('base_2', order=0)\n", (62463, 62482), True, 'import pyinduct as pi\n'), ((62526, 62560), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(0)'}), "('base_2', order=0)\n", (62541, 62560), True, 'import pyinduct as pi\n'), ((62661, 62727), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_2"""'], {'order': '(1)', 'location': 'self.l'}), "('base_2', order=1, location=self.l)\n", (62691, 62727), True, 'import pyinduct as pi\n'), ((62767, 62818), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(0)', 'location': 'self.l'}), "('base_2', order=0, location=self.l)\n", (62782, 62818), True, 'import pyinduct as pi\n'), ((62869, 62930), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_2"""'], {'order': '(0)', 'location': '(0)'}), "('base_2', order=0, location=0)\n", (62899, 62930), True, 'import pyinduct as pi\n'), ((62970, 63016), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(0)', 'location': '(0)'}), "('base_2', order=0, location=0)\n", (62985, 63016), True, 'import 
pyinduct as pi\n'), ((63079, 63140), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_2"""'], {'order': '(0)', 'location': '(0)'}), "('base_2', order=0, location=0)\n", (63109, 63140), True, 'import pyinduct as pi\n'), ((63180, 63226), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(1)', 'location': '(0)'}), "('base_2', order=1, location=0)\n", (63195, 63226), True, 'import pyinduct as pi\n'), ((63277, 63288), 'pyinduct.Input', 'pi.Input', (['u'], {}), '(u)\n', (63285, 63288), True, 'import pyinduct as pi\n'), ((63328, 63379), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_2"""'], {'order': '(1)', 'location': 'self.l'}), "('base_2', order=1, location=self.l)\n", (63343, 63379), True, 'import pyinduct as pi\n'), ((64308, 64358), 'pyinduct.TemporalDerivedFieldVariable', 'pi.TemporalDerivedFieldVariable', (['"""base_1"""'], {'order': '(1)'}), "('base_1', order=1)\n", (64339, 64358), True, 'import pyinduct as pi\n'), ((64402, 64436), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(0)'}), "('base_1', order=0)\n", (64417, 64436), True, 'import pyinduct as pi\n'), ((64497, 64546), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_1"""'], {'order': '(1)'}), "('base_1', order=1)\n", (64527, 64546), True, 'import pyinduct as pi\n'), ((64590, 64624), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(1)'}), "('base_1', order=1)\n", (64605, 64624), True, 'import pyinduct as pi\n'), ((64694, 64743), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_1"""'], {'order': '(0)'}), "('base_1', order=0)\n", (64724, 64743), True, 'import pyinduct as pi\n'), ((64787, 64821), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(1)'}), "('base_1', order=1)\n", (64802, 64821), True, 'import pyinduct as pi\n'), ((64891, 64940), 'pyinduct.SpatialDerivedFieldVariable', 
'pi.SpatialDerivedFieldVariable', (['"""base_1"""'], {'order': '(0)'}), "('base_1', order=0)\n", (64921, 64940), True, 'import pyinduct as pi\n'), ((64984, 65018), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(0)'}), "('base_1', order=0)\n", (64999, 65018), True, 'import pyinduct as pi\n'), ((65119, 65185), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_1"""'], {'order': '(0)', 'location': 'self.l'}), "('base_1', order=0, location=self.l)\n", (65149, 65185), True, 'import pyinduct as pi\n'), ((65225, 65276), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(0)', 'location': 'self.l'}), "('base_1', order=0, location=self.l)\n", (65240, 65276), True, 'import pyinduct as pi\n'), ((65327, 65393), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_1"""'], {'order': '(0)', 'location': 'self.l'}), "('base_1', order=0, location=self.l)\n", (65357, 65393), True, 'import pyinduct as pi\n'), ((65433, 65484), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(0)', 'location': 'self.l'}), "('base_1', order=0, location=self.l)\n", (65448, 65484), True, 'import pyinduct as pi\n'), ((65546, 65607), 'pyinduct.SpatialDerivedFieldVariable', 'pi.SpatialDerivedFieldVariable', (['"""base_1"""'], {'order': '(1)', 'location': '(0)'}), "('base_1', order=1, location=0)\n", (65576, 65607), True, 'import pyinduct as pi\n'), ((65647, 65693), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(0)', 'location': '(0)'}), "('base_1', order=0, location=0)\n", (65662, 65693), True, 'import pyinduct as pi\n'), ((65743, 65754), 'pyinduct.Input', 'pi.Input', (['u'], {}), '(u)\n', (65751, 65754), True, 'import pyinduct as pi\n'), ((65794, 65845), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""base_1"""'], {'order': '(0)', 'location': 'self.l'}), "('base_1', order=0, location=self.l)\n", (65809, 65845), True, 'import pyinduct as 
pi\n'), ((68799, 68884), 'pyinduct.SecondOrderDirichletEigenfunction', 'pi.SecondOrderDirichletEigenfunction', (['omega[i]', 'param', 'dz.bounds[-1]', 'norm_fak[i]'], {}), '(omega[i], param, dz.bounds[-1],\n norm_fak[i])\n', (68835, 68884), True, 'import pyinduct as pi\n'), ((69232, 69325), 'pyinduct.SecondOrderDirichletEigenfunction', 'pi.SecondOrderDirichletEigenfunction', (['omega[i]', 'adjoint_param', 'dz.bounds[-1]', 'norm_fak[i]'], {}), '(omega[i], adjoint_param, dz.bounds[-1],\n norm_fak[i])\n', (69268, 69325), True, 'import pyinduct as pi\n'), ((70897, 70928), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['ss_weak.A[1]'], {}), '(ss_weak.A[1])\n', (70914, 70928), True, 'import numpy as np\n'), ((70951, 70983), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['ss_modal.A[1]'], {}), '(ss_modal.A[1])\n', (70968, 70983), True, 'import numpy as np\n'), ((72514, 72572), 'pyinduct.SecondOrderRobinEigenfunction', 'pi.SecondOrderRobinEigenfunction', (['om', 'param', 'dz.bounds[-1]'], {}), '(om, param, dz.bounds[-1])\n', (72546, 72572), True, 'import pyinduct as pi\n'), ((72662, 72728), 'pyinduct.SecondOrderRobinEigenfunction', 'pi.SecondOrderRobinEigenfunction', (['om', 'adjoint_param', 'dz.bounds[-1]'], {}), '(om, adjoint_param, dz.bounds[-1])\n', (72694, 72728), True, 'import pyinduct as pi\n'), ((74087, 74118), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['ss_weak.A[1]'], {}), '(ss_weak.A[1])\n', (74104, 74118), True, 'import numpy as np\n'), ((74129, 74161), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['ss_modal.A[1]'], {}), '(ss_modal.A[1])\n', (74146, 74161), True, 'import numpy as np\n'), ((17051, 17101), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term2'], {'name': '"""test"""'}), "(self.input_term2, name='test')\n", (17070, 17101), True, 'import pyinduct.simulation as sim\n'), ((17403, 17453), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term3'], {'name': '"""test"""'}), "(self.input_term3, 
name='test')\n", (17422, 17453), True, 'import pyinduct.simulation as sim\n'), ((17759, 17817), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term3_swapped'], {'name': '"""test"""'}), "(self.input_term3_swapped, name='test')\n", (17778, 17817), True, 'import pyinduct.simulation as sim\n'), ((18123, 18180), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term3_scaled'], {'name': '"""test"""'}), "(self.input_term3_scaled, name='test')\n", (18142, 18180), True, 'import pyinduct.simulation as sim\n'), ((18489, 18557), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term3_scaled_first_half'], {'name': '"""test"""'}), "(self.input_term3_scaled_first_half, name='test')\n", (18508, 18557), True, 'import pyinduct.simulation as sim\n'), ((18867, 18936), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term3_scaled_second_half'], {'name': '"""test"""'}), "(self.input_term3_scaled_second_half, name='test')\n", (18886, 18936), True, 'import pyinduct.simulation as sim\n'), ((19259, 19354), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[self.input_term_vectorial1, self.input_term_vectorial2]'], {'name': '"""test"""'}), "([self.input_term_vectorial1, self.input_term_vectorial2\n ], name='test')\n", (19278, 19354), True, 'import pyinduct.simulation as sim\n'), ((19818, 19870), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term_dt'], {'name': '"""test"""'}), "(self.input_term_dt, name='test')\n", (19837, 19870), True, 'import pyinduct.simulation as sim\n'), ((20234, 20335), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['[self.input_term_vectorial_dt1, self.input_term_vectorial_dt2]'], {'name': '"""test"""'}), "([self.input_term_vectorial_dt1, self.\n input_term_vectorial_dt2], name='test')\n", (20253, 20335), True, 'import pyinduct.simulation as sim\n'), ((20813, 20861), 
'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.func_term'], {'name': '"""test"""'}), "(self.func_term, name='test')\n", (20832, 20861), True, 'import pyinduct.simulation as sim\n'), ((21150, 21202), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.func_term_int'], {'name': '"""test"""'}), "(self.func_term_int, name='test')\n", (21169, 21202), True, 'import pyinduct.simulation as sim\n'), ((34003, 34053), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term1'], {'name': '"""test"""'}), "(self.input_term1, name='test')\n", (34022, 34053), True, 'import pyinduct.simulation as sim\n'), ((34354, 34412), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.input_term1_swapped'], {'name': '"""test"""'}), "(self.input_term1_swapped, name='test')\n", (34373, 34412), True, 'import pyinduct.simulation as sim\n'), ((35749, 35834), 'pyinduct.ComposedFunctionVector', 'pi.ComposedFunctionVector', (['[f[i] for f in sel_funcs]', '[s[i] for s in sel_scalars]'], {}), '([f[i] for f in sel_funcs], [s[i] for s in\n sel_scalars])\n', (35774, 35834), True, 'import pyinduct as pi\n'), ((36119, 36137), 'pyinduct.Product', 'pi.Product', (['fv', 'tf'], {}), '(fv, tf)\n', (36129, 36137), True, 'import pyinduct as pi\n'), ((44553, 44562), 'numpy.sin', 'np.sin', (['w'], {}), '(w)\n', (44559, 44562), True, 'import numpy as np\n'), ((44703, 44719), 'numpy.cos', 'np.cos', (['(freq * z)'], {}), '(freq * z)\n', (44709, 44719), True, 'import numpy as np\n'), ((47900, 47949), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""norm_modal_base"""'], {'order': '(2, 0)'}), "('norm_modal_base', order=(2, 0))\n", (47916, 47949), True, 'import pyinduct as pi\n'), ((47995, 48029), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""norm_modal_base"""'], {}), "('norm_modal_base')\n", (48010, 48029), True, 'import pyinduct as pi\n'), ((48156, 48217), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""norm_modal_base"""'], 
{'order': '(2, 0)', 'location': '(0)'}), "('norm_modal_base', order=(2, 0), location=0)\n", (48172, 48217), True, 'import pyinduct as pi\n'), ((48240, 48286), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""norm_modal_base"""'], {'location': '(0)'}), "('norm_modal_base', location=0)\n", (48255, 48286), True, 'import pyinduct as pi\n'), ((48363, 48379), 'pyinduct.Input', 'pi.Input', (['self.u'], {}), '(self.u)\n', (48371, 48379), True, 'import pyinduct as pi\n'), ((48423, 48469), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""norm_modal_base"""'], {'location': '(1)'}), "('norm_modal_base', location=1)\n", (48438, 48469), True, 'import pyinduct as pi\n'), ((48537, 48584), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""norm_modal_base"""'], {'location': '(1)'}), "('norm_modal_base', location=1)\n", (48553, 48584), True, 'import pyinduct as pi\n'), ((48618, 48673), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""norm_modal_base"""'], {'order': '(1)', 'location': '(1)'}), "('norm_modal_base', order=1, location=1)\n", (48633, 48673), True, 'import pyinduct as pi\n'), ((48750, 48797), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""norm_modal_base"""'], {'location': '(0)'}), "('norm_modal_base', location=0)\n", (48766, 48797), True, 'import pyinduct as pi\n'), ((48841, 48896), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""norm_modal_base"""'], {'order': '(1)', 'location': '(0)'}), "('norm_modal_base', order=1, location=0)\n", (48856, 48896), True, 'import pyinduct as pi\n'), ((49002, 49037), 'pyinduct.FieldVariable', 'pi.FieldVariable', (['"""norm_modal_base"""'], {}), "('norm_modal_base')\n", (49018, 49037), True, 'import pyinduct as pi\n'), ((49083, 49126), 'pyinduct.TestFunction', 'pi.TestFunction', (['"""norm_modal_base"""'], {'order': '(2)'}), "('norm_modal_base', order=2)\n", (49098, 49126), True, 'import pyinduct as pi\n'), ((6601, 6617), 'numpy.zeros_like', 'np.zeros_like', (['c'], {}), '(c)\n', (6614, 6617), True, 'import numpy as np\n'), 
((21657, 21710), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.comp_func_term'], {'name': '"""test"""'}), "(self.comp_func_term, name='test')\n", (21676, 21710), True, 'import pyinduct.simulation as sim\n'), ((22151, 22208), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.comp_func_term_int'], {'name': '"""test"""'}), "(self.comp_func_term_int, name='test')\n", (22170, 22208), True, 'import pyinduct.simulation as sim\n'), ((22696, 22749), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_term_at1'], {'name': '"""test"""'}), "(self.field_term_at1, name='test')\n", (22715, 22749), True, 'import pyinduct.simulation as sim\n'), ((23067, 23124), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_term_ddt_at1'], {'name': '"""test"""'}), "(self.field_term_ddt_at1, name='test')\n", (23086, 23124), True, 'import pyinduct.simulation as sim\n'), ((23442, 23498), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_term_dz_at1'], {'name': '"""test"""'}), "(self.field_term_dz_at1, name='test')\n", (23461, 23498), True, 'import pyinduct.simulation as sim\n'), ((23817, 23865), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_int'], {'name': '"""test"""'}), "(self.field_int, name='test')\n", (23836, 23865), True, 'import pyinduct.simulation as sim\n'), ((24188, 24241), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_int_half'], {'name': '"""test"""'}), "(self.field_int_half, name='test')\n", (24207, 24241), True, 'import pyinduct.simulation as sim\n'), ((24563, 24614), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_dz_int'], {'name': '"""test"""'}), "(self.field_dz_int, name='test')\n", (24582, 24614), True, 'import pyinduct.simulation as sim\n'), ((24933, 24985), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.field_ddt_int'], {'name': '"""test"""'}), 
"(self.field_ddt_int, name='test')\n", (24952, 24985), True, 'import pyinduct.simulation as sim\n'), ((26402, 26457), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_term_fs_at1'], {'name': '"""test"""'}), "(self.prod_term_fs_at1, name='test')\n", (26421, 26457), True, 'import pyinduct.simulation as sim\n'), ((26907, 26957), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_fs'], {'name': '"""test"""'}), "(self.prod_int_fs, name='test')\n", (26926, 26957), True, 'import pyinduct.simulation as sim\n'), ((27415, 27466), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_f_f'], {'name': '"""test"""'}), "(self.prod_int_f_f, name='test')\n", (27434, 27466), True, 'import pyinduct.simulation as sim\n'), ((27948, 28007), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_f_f_swapped'], {'name': '"""test"""'}), "(self.prod_int_f_f_swapped, name='test')\n", (27967, 28007), True, 'import pyinduct.simulation as sim\n'), ((28489, 28544), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_f_at1_f'], {'name': '"""test"""'}), "(self.prod_int_f_at1_f, name='test')\n", (28508, 28544), True, 'import pyinduct.simulation as sim\n'), ((29001, 29056), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_f_f_at1'], {'name': '"""test"""'}), "(self.prod_int_f_f_at1, name='test')\n", (29020, 29056), True, 'import pyinduct.simulation as sim\n'), ((29513, 29573), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_term_f_at1_f_at1'], {'name': '"""test"""'}), "(self.prod_term_f_at1_f_at1, name='test')\n", (29532, 29573), True, 'import pyinduct.simulation as sim\n'), ((30052, 30106), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_fddt_f'], {'name': '"""test"""'}), "(self.prod_int_fddt_f, name='test')\n", (30071, 30106), True, 'import pyinduct.simulation as sim\n'), ((30588, 
30651), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_term_fddt_at0_f_at0'], {'name': '"""test"""'}), "(self.prod_term_fddt_at0_f_at0, name='test')\n", (30607, 30651), True, 'import pyinduct.simulation as sim\n'), ((31101, 31148), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.spat_int'], {'name': '"""test"""'}), "(self.spat_int, name='test')\n", (31120, 31148), True, 'import pyinduct.simulation as sim\n'), ((31602, 31660), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.spat_int_asymmetric'], {'name': '"""test"""'}), "(self.spat_int_asymmetric, name='test')\n", (31621, 31660), True, 'import pyinduct.simulation as sim\n'), ((32119, 32182), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_term_f_at1_dphi_at1'], {'name': '"""test"""'}), "(self.prod_term_f_at1_dphi_at1, name='test')\n", (32138, 32182), True, 'import pyinduct.simulation as sim\n'), ((32680, 32733), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_sf_fv'], {'name': '"""test"""'}), "(self.prod_int_sf_fv, name='test')\n", (32699, 32733), True, 'import pyinduct.simulation as sim\n'), ((32994, 33055), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_int_sf_fv_swapped'], {'name': '"""test"""'}), "(self.prod_int_sf_fv_swapped, name='test')\n", (33013, 33055), True, 'import pyinduct.simulation as sim\n'), ((33360, 33422), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_term_tf_at0_lv_at0'], {'name': '"""test"""'}), "(self.prod_term_tf_at0_lv_at0, name='test')\n", (33379, 33422), True, 'import pyinduct.simulation as sim\n'), ((33678, 33748), 'pyinduct.simulation.WeakFormulation', 'sim.WeakFormulation', (['self.prod_term_tf_at0_lv_at0_swapped'], {'name': '"""test"""'}), "(self.prod_term_tf_at0_lv_at0_swapped, name='test')\n", (33697, 33748), True, 'import pyinduct.simulation as sim\n'), ((43256, 43298), 'pyinduct.project_on_base', 
'pi.project_on_base', (['self.ic[idx]', 'fem_base'], {}), '(self.ic[idx], fem_base)\n', (43274, 43298), True, 'import pyinduct as pi\n'), ((44585, 44594), 'numpy.cos', 'np.cos', (['w'], {}), '(w)\n', (44591, 44594), True, 'import numpy as np\n'), ((44769, 44785), 'numpy.sin', 'np.sin', (['(freq * z)'], {}), '(freq * z)\n', (44775, 44785), True, 'import numpy as np\n'), ((44853, 44869), 'numpy.sin', 'np.sin', (['(freq * z)'], {}), '(freq * z)\n', (44859, 44869), True, 'import numpy as np\n'), ((47161, 47183), 'numpy.vectorize', 'np.vectorize', (['vec.func'], {}), '(vec.func)\n', (47173, 47183), True, 'import numpy as np\n'), ((47314, 47332), 'numpy.vectorize', 'np.vectorize', (['func'], {}), '(func)\n', (47326, 47332), True, 'import numpy as np\n'), ((44928, 44944), 'numpy.cos', 'np.cos', (['(freq * z)'], {}), '(freq * z)\n', (44934, 44944), True, 'import numpy as np\n'), ((45019, 45035), 'numpy.cos', 'np.cos', (['(freq * z)'], {}), '(freq * z)\n', (45025, 45035), True, 'import numpy as np\n'), ((45098, 45114), 'numpy.sin', 'np.sin', (['(freq * z)'], {}), '(freq * z)\n', (45104, 45114), True, 'import numpy as np\n'), ((47714, 47731), 'numpy.array', 'np.array', (['self.dz'], {}), '(self.dz)\n', (47722, 47731), True, 'import numpy as np\n')] |
"""
===============================================================================
| process_data.py |
===============================================================================
| A collection of scripts to read data from exodus files using the yt library.|
| Note: A good way to generate the functions is to: |
| from functools import partial |
| fxns = [partial(f,*args,**keywords) for args,keywords in ...] |
| |
| This makes independent functions which can be given different args |
| and keywords. |
===============================================================================
"""
#Import modules
import yt
import numpy as np
import functools
def get_dof_coordinate_data(data_set, dof, meshname='connect1'):
    """
    =================================
    |    get_dof_coordinate_data    |
    =================================
    Get the degree of freedom data and the x, y, z coordinates at
    which that data is defined.

    :param data_set: A loaded dataset supporting ``all_data()[meshname, field]``
        access (e.g. a yt exodus dataset).
    :param dof: Name of a degree of freedom (str) or a list of such names.
    :param meshname: Name of the mesh to query, defaults to 'connect1'.
    :returns: A nested list with one entry per element, each containing
        ``(value, x, y, z)`` tuples per vertex. When ``dof`` is a list,
        ``value`` is a tuple with one entry per requested dof.
    :raises TypeError: If ``dof`` is neither a string nor a list.
    """
    # Query the data container once and reuse it for every field lookup.
    all_data = data_set.all_data()
    if isinstance(dof, str):
        dof_data = [[float(d) for d in data] for data in all_data[meshname, dof]]
    elif isinstance(dof, list):
        _dof_data = [[[float(d) for d in data] for data in all_data[meshname, _dof]] for _dof in dof]
        dof_data = [zip(*v) for v in zip(*_dof_data)]
    else:
        # Bug fix: an unsupported dof type previously fell through to a
        # NameError on dof_data; fail early with a clear message instead.
        raise TypeError("dof must be a str or a list of str, got {}".format(type(dof).__name__))
    coord_x = all_data[meshname, 'vertex_x']
    coord_y = all_data[meshname, 'vertex_y']
    coord_z = all_data[meshname, 'vertex_z']
    return [[(d, float(_x), float(_y), float(_z)) for d, _x, _y, _z in zip(data, x, y, z)]
            for data, x, y, z in zip(dof_data, coord_x, coord_y, coord_z)]
def evaluate_functions_at_coordinates(list_of_functions, coordinates):
    """
    ===========================================
    |    evaluate_functions_at_coordinates    |
    ===========================================
    Evaluate each function at every coordinate point.
    Functions should be of the form v = f(x, y, z).

    :param list_of_functions: callables taking (x, y, z)
    :param coordinates: iterable of (x, y, z) triples
    :returns: list with one tuple of function values per coordinate
    """
    results = []
    for x, y, z in coordinates:
        results.append(tuple(fxn(x, y, z) for fxn in list_of_functions))
    return results
def evaluate_manufactured_solution_result_at_step(filename,dof,list_of_functions,step=-1,meshname='connect1',rtol=1e-5):
    """
    =======================================================
    |    evaluate_manufactured_solution_result_at_step    |
    =======================================================
    Evaluate the manufactured solution result and return a true-false
    statement if the solution has converged at a given step. Defaults
    to the last step of the simulation.

    :param filename: path of the result file to load with yt
    :param dof: dof name (str) or list of dof names to compare
    :param list_of_functions: manufactured-solution functions f(x, y, z)
    :param step: which output step to load (-1 == last step)
    :param meshname: mesh to query, defaults to 'connect1'
    :param rtol: relative tolerance for the comparison
    :return: True when all values agree within rtol, False otherwise
    """
    # Bug fix: ``step`` was previously ignored -- yt.load was always called
    # with the hardcoded value -1, so only the final step could be checked.
    data_set = yt.load(filename, step=step)
    simulation_results = get_dof_coordinate_data(data_set, dof, meshname=meshname)
    flat_simulation_results = [item for sublist in simulation_results for item in sublist]
    manufactured_solution = evaluate_functions_at_coordinates(list_of_functions, [sr[1:] for sr in flat_simulation_results])
    if(len(flat_simulation_results) != len(manufactured_solution)):
        print("Error: there aren't as many simulation results as manufactured solutions")
        return False
    result = all(np.allclose(a, b, rtol=rtol)
                 for a, b in zip([r[0] for r in flat_simulation_results], manufactured_solution))
    if not result:
        # Report the worst per-component mismatch to aid debugging.
        print("Result failed. Computing maximum differences...\n")
        diffs = np.array([np.array(a) - np.array(b)
                          for a, b in zip([r[0] for r in flat_simulation_results], manufactured_solution)])
        print("maximum abs differences: {0}".format(np.max(np.abs(diffs), axis=0)))
    return result
### UTILITY FUNCTIONS ###
def const_fxn(x, y, z, v=0.):
    """Constant function: returns ``v`` regardless of the coordinates."""
    del x, y, z  # coordinates accepted only for interface compatibility
    return v
def linear_fxn(x, y, z, a=0., b=0., c=0., d=0.):
    """Affine function of the coordinates: ``a*x + b*y + c*z + d``."""
    value = a * x
    value += b * y
    value += c * z
    value += d
    return value
def generate_linear_functions(n,bounds=[1.0,-1.0],seed=123):
    """
    ===================================
    |    generate_linear_functions    |
    ===================================
    Generate ``n`` linear functions with random coefficients drawn from
    the interval defined by ``bounds`` (RNG seeded with ``seed``).

    :returns: tuple of (list of callables f(x, y, z), list of coefficient
        arrays [a, b, c, d], list of human-readable expression strings)
    """
    np.random.seed(seed)
    span = bounds[1] - bounds[0]
    coefs = []
    for _ in range(n):
        coefs.append(span * np.random.rand(4) + bounds[0])
    strings = ["{0}*x+{1}*y+{2}*z+{3}".format(coef[0], coef[1], coef[2], coef[3])
               for coef in coefs]
    fxns = [functools.partial(linear_fxn, a=coef[0], b=coef[1], c=coef[2], d=coef[3])
            for coef in coefs]
    return fxns, coefs, strings
def generate_random_phis(stretch_scale_bounds = [0.5,2.0], theta_bounds = [-0.5*np.pi,0.5*np.pi],seed=123):
    """
    ==============================
    |    generate_random_phis    |
    ==============================
    Generate a random value of phi that will be physical: a random
    positive diagonal stretch composed with random rotations about the
    x, y and z axes (Rz applied first, then Ry, then Rx to the stretch).

    :param stretch_scale_bounds: [min, max] bounds for the stretch factors
    :param theta_bounds: [min, max] bounds for the rotation angles (radians)
    :param seed: seed for numpy's RNG so the result is reproducible
    :return: a (3, 3) numpy array for phi
    """
    np.random.seed(seed)
    # Bug fix: the stretch previously referenced undefined names ``a`` and
    # ``b`` (NameError at runtime); use stretch_scale_bounds instead.
    lo, hi = stretch_scale_bounds[0], stretch_scale_bounds[1]
    S = np.diag((hi - lo) * np.random.rand(3) + lo)
    thetas = (theta_bounds[1] - theta_bounds[0]) * np.random.rand(3) + theta_bounds[0]
    Rx = np.array([[1,                 0,                  0],
                   [0, np.cos(thetas[0]), -np.sin(thetas[0])],
                   [0, np.sin(thetas[0]),  np.cos(thetas[0])]])
    Ry = np.array([[ np.cos(thetas[1]), 0, np.sin(thetas[1])],
                   [                 0, 1,                 0],
                   [-np.sin(thetas[1]), 0, np.cos(thetas[1])]])
    Rz = np.array([[np.cos(thetas[2]), -np.sin(thetas[2]), 0],
                   [np.sin(thetas[2]),  np.cos(thetas[2]), 0],
                   [                0,                  0, 1]])
    # Bug fix: the original used elementwise ``*`` (not matrix products,
    # which produces a non-physical matrix) and never returned the result.
    phi = np.dot(Rx, np.dot(Ry, np.dot(Rz, S)))
    return phi
def rotate_matrix(A,thetas):
    """
    =======================
    |    rotate_matrix    |
    =======================
    Rotate the given matrix by the provided angles. The rotations are
    applied in order: x first, then y, then z.

    :param A: (3, 3) matrix to rotate
    :param thetas: [theta_x, theta_y, theta_z] rotation angles in radians
    :return: Rz @ Ry @ Rx @ A
    """
    cx, sx = np.cos(thetas[0]), np.sin(thetas[0])
    cy, sy = np.cos(thetas[1]), np.sin(thetas[1])
    cz, sz = np.cos(thetas[2]), np.sin(thetas[2])
    Rx = np.array([[1,  0,   0],
                   [0, cx, -sx],
                   [0, sx,  cx]])
    Ry = np.array([[ cy, 0, sy],
                   [  0, 1,  0],
                   [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0],
                   [sz,  cz, 0],
                   [ 0,   0, 1]])
    return np.dot(Rz, np.dot(Ry, np.dot(Rx, A)))
def form_linear_phi_eqns(stretch_scale_bounds = [0.5,2.0],theta_bounds = [-.5*np.pi,0.5*np.pi],seed=123):
    """
    ==============================
    |    form_linear_phi_eqns    |
    ==============================
    Form equations that result in a linear stretch of phi.

    :param stretch_scale_bounds: [min, max] bounds for the random stretch factors
    :param theta_bounds: [min, max] bounds for the random rotation angles (radians)
    :param seed: seed for numpy's RNG so the result is reproducible
    :returns: tuple of (list of 9 callables f(x, y, z), list of 9
        coefficient 4-tuples, list of 9 expression strings)
    """
    np.random.seed(seed)
    terms = [np.diag((stretch_scale_bounds[1]-stretch_scale_bounds[0])*np.random.rand(3) + stretch_scale_bounds[0])*np.eye(3) for _ in range(4)]
    thetas = (theta_bounds[1] - theta_bounds[0])*np.random.rand(3) + theta_bounds[0]
    #Compute the total rotation matrix
    R = rotate_matrix(np.eye(3),thetas)
    #Compute the rotation terms
    rotated_terms = [np.dot(R,t) for t in terms]
    #Compute the coefficients.
    # Bug fix: materialize the zip as a list -- in Python 3 zip() returns a
    # one-shot iterator, so the ``strings`` comprehension below exhausted it
    # and both the returned function list and ``coefs`` came back empty.
    coefs = list(zip(*[(rt[0,0],rt[1,1],rt[2,2],rt[1,2],rt[0,2],rt[0,1],rt[2,1],rt[2,0],rt[1,0])\
                       for rt in rotated_terms]))
    strings = ["{0}*x+{1}*y+{2}*z+{3}".format(coef[0],coef[1],coef[2],coef[3]) for coef in coefs]
    return [functools.partial(linear_fxn,a=coef[0],b=coef[1],c=coef[2],d=coef[3]) for coef in coefs],coefs,strings
| [
"numpy.abs",
"numpy.eye",
"numpy.allclose",
"numpy.random.rand",
"numpy.array",
"numpy.dot",
"yt.load",
"numpy.random.seed",
"functools.partial",
"numpy.cos",
"numpy.sin"
] | [((2892, 2918), 'yt.load', 'yt.load', (['filename'], {'step': '(-1)'}), '(filename, step=-1)\n', (2899, 2918), False, 'import yt\n'), ((4270, 4290), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4284, 4290), True, 'import numpy as np\n'), ((4883, 4903), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4897, 4903), True, 'import numpy as np\n'), ((7185, 7205), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7199, 7205), True, 'import numpy as np\n'), ((4951, 4960), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4957, 4960), True, 'import numpy as np\n'), ((7499, 7508), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7505, 7508), True, 'import numpy as np\n'), ((7571, 7583), 'numpy.dot', 'np.dot', (['R', 't'], {}), '(R, t)\n', (7577, 7583), True, 'import numpy as np\n'), ((3424, 3452), 'numpy.allclose', 'np.allclose', (['a', 'b'], {'rtol': 'rtol'}), '(a, b, rtol=rtol)\n', (3435, 3452), True, 'import numpy as np\n'), ((4487, 4560), 'functools.partial', 'functools.partial', (['linear_fxn'], {'a': 'coef[0]', 'b': 'coef[1]', 'c': 'coef[2]', 'd': 'coef[3]'}), '(linear_fxn, a=coef[0], b=coef[1], c=coef[2], d=coef[3])\n', (4504, 4560), False, 'import functools\n'), ((5009, 5026), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (5023, 5026), True, 'import numpy as np\n'), ((6877, 6890), 'numpy.dot', 'np.dot', (['Rx', 'A'], {}), '(Rx, A)\n', (6883, 6890), True, 'import numpy as np\n'), ((7322, 7331), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7328, 7331), True, 'import numpy as np\n'), ((7401, 7418), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (7415, 7418), True, 'import numpy as np\n'), ((7878, 7951), 'functools.partial', 'functools.partial', (['linear_fxn'], {'a': 'coef[0]', 'b': 'coef[1]', 'c': 'coef[2]', 'd': 'coef[3]'}), '(linear_fxn, a=coef[0], b=coef[1], c=coef[2], d=coef[3])\n', (7895, 7951), False, 'import functools\n'), ((4328, 4345), 'numpy.random.rand', 
'np.random.rand', (['(4)'], {}), '(4)\n', (4342, 4345), True, 'import numpy as np\n'), ((5167, 5184), 'numpy.cos', 'np.cos', (['thetas[0]'], {}), '(thetas[0])\n', (5173, 5184), True, 'import numpy as np\n'), ((5248, 5265), 'numpy.sin', 'np.sin', (['thetas[0]'], {}), '(thetas[0])\n', (5254, 5265), True, 'import numpy as np\n'), ((5268, 5285), 'numpy.cos', 'np.cos', (['thetas[0]'], {}), '(thetas[0])\n', (5274, 5285), True, 'import numpy as np\n'), ((5311, 5328), 'numpy.cos', 'np.cos', (['thetas[1]'], {}), '(thetas[1])\n', (5317, 5328), True, 'import numpy as np\n'), ((5350, 5367), 'numpy.sin', 'np.sin', (['thetas[1]'], {}), '(thetas[1])\n', (5356, 5367), True, 'import numpy as np\n'), ((5512, 5529), 'numpy.cos', 'np.cos', (['thetas[1]'], {}), '(thetas[1])\n', (5518, 5529), True, 'import numpy as np\n'), ((5555, 5572), 'numpy.cos', 'np.cos', (['thetas[2]'], {}), '(thetas[2])\n', (5561, 5572), True, 'import numpy as np\n'), ((5636, 5653), 'numpy.sin', 'np.sin', (['thetas[2]'], {}), '(thetas[2])\n', (5642, 5653), True, 'import numpy as np\n'), ((5655, 5672), 'numpy.cos', 'np.cos', (['thetas[2]'], {}), '(thetas[2])\n', (5661, 5672), True, 'import numpy as np\n'), ((6231, 6248), 'numpy.cos', 'np.cos', (['thetas[0]'], {}), '(thetas[0])\n', (6237, 6248), True, 'import numpy as np\n'), ((6312, 6329), 'numpy.sin', 'np.sin', (['thetas[0]'], {}), '(thetas[0])\n', (6318, 6329), True, 'import numpy as np\n'), ((6332, 6349), 'numpy.cos', 'np.cos', (['thetas[0]'], {}), '(thetas[0])\n', (6338, 6349), True, 'import numpy as np\n'), ((6375, 6392), 'numpy.cos', 'np.cos', (['thetas[1]'], {}), '(thetas[1])\n', (6381, 6392), True, 'import numpy as np\n'), ((6414, 6431), 'numpy.sin', 'np.sin', (['thetas[1]'], {}), '(thetas[1])\n', (6420, 6431), True, 'import numpy as np\n'), ((6576, 6593), 'numpy.cos', 'np.cos', (['thetas[1]'], {}), '(thetas[1])\n', (6582, 6593), True, 'import numpy as np\n'), ((6619, 6636), 'numpy.cos', 'np.cos', (['thetas[2]'], {}), '(thetas[2])\n', (6625, 6636), True, 
'import numpy as np\n'), ((6700, 6717), 'numpy.sin', 'np.sin', (['thetas[2]'], {}), '(thetas[2])\n', (6706, 6717), True, 'import numpy as np\n'), ((6719, 6736), 'numpy.cos', 'np.cos', (['thetas[2]'], {}), '(thetas[2])\n', (6725, 6736), True, 'import numpy as np\n'), ((3648, 3659), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (3656, 3659), True, 'import numpy as np\n'), ((3660, 3671), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (3668, 3671), True, 'import numpy as np\n'), ((3811, 3824), 'numpy.abs', 'np.abs', (['diffs'], {}), '(diffs)\n', (3817, 3824), True, 'import numpy as np\n'), ((5187, 5204), 'numpy.sin', 'np.sin', (['thetas[0]'], {}), '(thetas[0])\n', (5193, 5204), True, 'import numpy as np\n'), ((5473, 5490), 'numpy.sin', 'np.sin', (['thetas[1]'], {}), '(thetas[1])\n', (5479, 5490), True, 'import numpy as np\n'), ((5574, 5591), 'numpy.sin', 'np.sin', (['thetas[2]'], {}), '(thetas[2])\n', (5580, 5591), True, 'import numpy as np\n'), ((6251, 6268), 'numpy.sin', 'np.sin', (['thetas[0]'], {}), '(thetas[0])\n', (6257, 6268), True, 'import numpy as np\n'), ((6537, 6554), 'numpy.sin', 'np.sin', (['thetas[1]'], {}), '(thetas[1])\n', (6543, 6554), True, 'import numpy as np\n'), ((6638, 6655), 'numpy.sin', 'np.sin', (['thetas[2]'], {}), '(thetas[2])\n', (6644, 6655), True, 'import numpy as np\n'), ((4927, 4944), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (4941, 4944), True, 'import numpy as np\n'), ((7277, 7294), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (7291, 7294), True, 'import numpy as np\n')] |
import numpy as np
class NetworkInput(object):
    """Base class for feeding batches of (images, labels) to a network.

    Subclasses must implement :meth:`next_batch`; :meth:`next_batch_cached`
    wraps it with a simple prefetch cache that fetches ``cache_factor``
    batches at once.
    """

    def __init__(self, path, input_shape, num_labels):
        self.path = path                # location of the underlying data
        self.num_labels = num_labels    # number of distinct class labels
        self.batch_start = 0
        self.epochs_completed = 0
        self.input_shape = input_shape
        self.cache = np.array([])      # prefetched (images, labels) pair
        self.cache_iterator = 0         # next sub-batch to serve from the cache
        self.cache_factor = 10          # batches prefetched per next_batch call

    def next_batch(self, batch_size):
        """Return the next (images, labels) batch; must be overridden.

        :param batch_size: number of samples to return
        :raises NotImplementedError: always, in this base class
        """
        # Bug fix: ``raise NotImplemented`` is a TypeError in Python 3
        # (NotImplemented is not an exception); raise the proper
        # NotImplementedError instead.
        raise NotImplementedError

    def create_label_vector(self, label):
        """Return a one-hot vector of length ``num_labels`` for ``label``."""
        v = np.zeros(self.num_labels)
        v[label] = 1
        return v

    def next_batch_cached(self, batch_size):
        """Serve one batch from the prefetch cache, refilling the cache via
        :meth:`next_batch` every ``cache_factor`` calls.

        :param batch_size: number of samples per served batch
        :return: (images, labels) slices of the cached super-batch
        """
        if self.cache_iterator == 0:
            self.cache = self.next_batch(batch_size * self.cache_factor)
        result_images = self.cache[0][self.cache_iterator * batch_size : (self.cache_iterator + 1) * batch_size]
        result_labels = self.cache[1][self.cache_iterator * batch_size : (self.cache_iterator + 1) * batch_size]
        self.cache_iterator = (self.cache_iterator + 1) % self.cache_factor
        return result_images, result_labels
| [
"numpy.array",
"numpy.zeros"
] | [((288, 300), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (296, 300), True, 'import numpy as np\n'), ((487, 512), 'numpy.zeros', 'np.zeros', (['self.num_labels'], {}), '(self.num_labels)\n', (495, 512), True, 'import numpy as np\n')] |
from torchvision.transforms.functional import InterpolationMode
import os
from PIL import Image
import numpy as np
from collections import OrderedDict
from tqdm.auto import tqdm
import torchvision
import torch
def to_numpy(x):
    """Convert ``x`` (e.g. a PIL image) to a float32 numpy array."""
    return np.array(x).astype(np.float32)
def get_image_transform(transform):
    """Insert a PIL->float32-numpy conversion ahead of any trailing ToTensor.

    Fix for this issue: https://github.com/pytorch/vision/issues/2194

    :param transform: a torchvision transform (Compose, ToTensor or None)
    :return: the adjusted transform; returned unchanged when no trailing
        ToTensor is involved
    """
    # Bug fix: the original condition built the tuple
    # ``(transform.transforms[-1], torchvision.transforms.ToTensor)``, which
    # is always truthy, so EVERY Compose was rewrapped. An isinstance()
    # check on the last transform was clearly intended.
    if transform is not None and isinstance(transform, torchvision.transforms.Compose) and isinstance(transform.transforms[-1], torchvision.transforms.ToTensor):
        transform = torchvision.transforms.Compose([
            *transform.transforms[:-1],
            torchvision.transforms.Lambda(to_numpy),
            torchvision.transforms.ToTensor()
        ])
    elif isinstance(transform, torchvision.transforms.ToTensor):
        transform = torchvision.transforms.Compose([
            torchvision.transforms.Lambda(to_numpy),
            torchvision.transforms.ToTensor()
        ])
    return transform
def unproject(cam2world, intrinsic, depth):
    """Lift a depth map to homogeneous coordinates via the cam2world transform.

    :param cam2world: (bs, 4, 4) camera-to-world matrices, applied to the
        points as row vectors
    :param intrinsic: (bs, 3, 3) pinhole intrinsics
    :param depth: (bs, 1, H, W) depth maps
    :return: (bs, H, W, 4) homogeneous coordinates after the transform
    """
    bs, _, H, W = depth.shape
    # pixel-coordinate grid of the source image
    ys = torch.linspace(0, H - 1, H).type_as(depth).int()
    xs = torch.linspace(0, W - 1, W).type_as(depth).int()
    grid_x, grid_y = torch.meshgrid(xs, ys)
    grid_x = torch.transpose(grid_x, 0, 1).repeat(bs, 1, 1)
    grid_y = torch.transpose(grid_y, 0, 1).repeat(bs, 1, 1)
    # broadcast the intrinsics to image dimensions
    fx = intrinsic[:, 0, 0].unsqueeze(1).unsqueeze(1).expand_as(grid_x)
    cx = intrinsic[:, 0, 2].unsqueeze(1).unsqueeze(1).expand_as(grid_x)
    fy = intrinsic[:, 1, 1].unsqueeze(1).unsqueeze(1).expand_as(grid_y)
    cy = intrinsic[:, 1, 2].unsqueeze(1).unsqueeze(1).expand_as(grid_y)
    depth = depth.squeeze()
    # inverse projection (K^-1): pixel coordinates -> camera-space point cloud
    cam_x = (grid_x - cx) / fx * depth
    cam_y = (grid_y - cy) / fy * depth
    # assemble homogeneous (x, y, z, 1) vectors
    coords = torch.zeros(bs, H, W, 4).type_as(depth).float()
    coords[:, :, :, 0] = cam_x
    coords[:, :, :, 1] = cam_y
    coords[:, :, :, 2] = depth
    coords[:, :, :, 3] = 1
    # apply the extrinsic transform (row-vector convention via bmm)
    coords = coords.view(bs, -1, 4)
    coords = torch.bmm(coords, cam2world)
    return coords.view(bs, H, W, 4)
def reproject(cam2world_src, cam2world_tar, W, H, intrinsic, depth_src, depth_tar, color_tar, mask_tar):
    """Warp the target view's color and mask into the source view.

    The source depth map is unprojected to a 3D point cloud, transformed
    into the target camera frame, projected onto the target image plane,
    and the target color/mask are sampled at the resulting coordinates.
    Pixels are masked out when (a) the source depth is zero, (b) the
    reprojected coordinate falls outside the target image, or (c) the
    reprojected depth disagrees with the target depth by more than 0.1
    at all four neighboring pixels (occlusion check).

    :param cam2world_src: (bs, 4, 4) source camera-to-world matrices
    :param cam2world_tar: (bs, 4, 4) target camera-to-world matrices
    :param W: image width
    :param H: image height
    :param intrinsic: (bs, 3, 3) pinhole intrinsics
    :param depth_src: source-view depth; squeezed to (bs, H, W) below
        (assumes the extra dims are singletons -- TODO confirm for bs == 1)
    :param depth_tar: target-view depth, 4D (bs, 1, H, W) as required by
        grid_sample
    :param color_tar: target-view color, 4D (bs, C, H, W)
    :param mask_tar: target-view validity mask, (bs, H, W)
    :return: (color_tar_to_src, mask) -- the warped color (zeroed where
        invalid) and the boolean validity mask of shape (bs, H, W)
    """
    # get batch_size
    bs = mask_tar.shape[0]
    # calculate src2tar extrinsic matrix
    world2cam_tar = torch.inverse(cam2world_tar)
    # transposed so it can right-multiply row vectors in the bmm below
    src2tar = torch.transpose(torch.bmm(world2cam_tar, cam2world_src), 1, 2)
    # create meshgrid with image dimensions (== pixel coordinates of source image)
    y = torch.linspace(0, H - 1, H).type_as(color_tar).int()
    x = torch.linspace(0, W - 1, W).type_as(color_tar).int()
    xx, yy = torch.meshgrid(x, y)
    xx = torch.transpose(xx, 0, 1).repeat(bs, 1, 1)
    yy = torch.transpose(yy, 0, 1).repeat(bs, 1, 1)
    # get intrinsics and depth in correct format to match image dimensions
    fx = intrinsic[:,0,0].unsqueeze(1).unsqueeze(1).expand_as(xx)
    cx = intrinsic[:,0,2].unsqueeze(1).unsqueeze(1).expand_as(xx)
    fy = intrinsic[:,1,1].unsqueeze(1).unsqueeze(1).expand_as(yy)
    cy = intrinsic[:,1,2].unsqueeze(1).unsqueeze(1).expand_as(yy)
    depth_src = depth_src.squeeze()
    # inverse projection (K_inv) on pixel coordinates --> 3D point-cloud
    x = (xx - cx) / fx * depth_src
    y = (yy - cy) / fy * depth_src
    # combine each point into an (x,y,z,1) vector
    coords = torch.zeros(bs, H, W, 4).type_as(color_tar).float()
    coords[:, :, :, 0] = x
    coords[:, :, :, 1] = y
    coords[:, :, :, 2] = depth_src
    coords[:, :, :, 3] = 1
    # extrinsic view projection to target view
    coords = coords.view(bs, -1, 4)
    coords = torch.bmm(coords, src2tar)
    coords = coords.view(bs, H, W, 4)
    # projection (K) on 3D point-cloud --> pixel coordinates
    # (the 1e-8 guards against division by zero for points at z == 0)
    z_tar = coords[:, :, :, 2]
    x = coords[:, :, :, 0] / (1e-8 + z_tar) * fx + cx
    y = coords[:, :, :, 1] / (1e-8 + z_tar) * fy + cy
    # mask invalid pixel coordinates because of invalid source depth
    mask0 = (depth_src == 0)
    # mask invalid pixel coordinates after projection:
    # these coordinates are not visible in target view (out of screen bounds)
    mask1 = (x < 0) + (y < 0) + (x >= W - 1) + (y >= H - 1)
    # create 4 target pixel coordinates which map to the nearest integer coordinate
    # (left, top, right, bottom)
    lx = torch.floor(x).float()
    ly = torch.floor(y).float()
    rx = (lx + 1).float()
    ry = (ly + 1).float()
    def make_grid(x, y):
        """
        converts pixel coordinates from [0..W] or [0..H] to [-1..1] and stacks them together.
        :param x: x pixel coordinates with shape NxHxW
        :param y: y pixel coordinates with shape NxHxW
        :return: (x,y) pixel coordinate grid with shape NxHxWx2
        """
        x = (2.0 * x / W) - 1.0
        y = (2.0 * y / H) - 1.0
        grid = torch.stack((x, y), dim=3)
        return grid
    # combine to (x,y) pixel coordinates: (top-left, ..., bottom-right)
    ll = make_grid(lx, ly)
    lr = make_grid(lx, ry)
    rl = make_grid(rx, ly)
    rr = make_grid(rx, ry)
    # calculate difference between depth in target view after reprojection and gt depth in target view
    z_tar = z_tar.unsqueeze(1)
    sample_z1 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, ll,
                                                                    mode="nearest",
                                                                    padding_mode='border',
                                                                    align_corners=True))
    sample_z2 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, lr,
                                                                    mode="nearest",
                                                                    padding_mode='border',
                                                                    align_corners=True))
    sample_z3 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, rl,
                                                                    mode="nearest",
                                                                    padding_mode='border',
                                                                    align_corners=True))
    sample_z4 = torch.abs(z_tar - torch.nn.functional.grid_sample(depth_tar, rr,
                                                                    mode="nearest",
                                                                    padding_mode='border',
                                                                    align_corners=True))
    # mask invalid pixel coordinates because of too high difference in depth
    # (a pixel survives only if at least one of its 4 neighbors agrees)
    mask2 = torch.minimum(torch.minimum(sample_z1, sample_z2), torch.minimum(sample_z3, sample_z4)) > 0.1
    mask2 = mask2.int().squeeze()
    # combine all masks
    mask_remap = (1 - (mask0 + mask1 + mask2 > 0).int()).float().unsqueeze(1)
    # create (x,y) pixel coordinate grid with reprojected float coordinates
    map_x = x.float()
    map_y = y.float()
    map = make_grid(map_x, map_y)
    # warp target rgb/mask to the new pixel coordinates based on the reprojection
    # also mask the results
    color_tar_to_src = torch.nn.functional.grid_sample(color_tar, map,
                                                    mode="bilinear",
                                                    padding_mode='border',
                                                    align_corners=True)
    mask_tar = mask_tar.float().unsqueeze(1)
    mask = torch.nn.functional.grid_sample(mask_tar, map,
                                           mode="bilinear",
                                           padding_mode='border',
                                           align_corners=True)
    # 0.99 threshold rejects pixels whose bilinear sample mixed in any
    # invalid neighbor
    mask = (mask > 0.99) * mask_remap
    mask = mask.bool()
    color_tar_to_src *= mask
    return color_tar_to_src, mask.squeeze(1)
| [
"torch.nn.functional.grid_sample",
"torch.stack",
"torch.floor",
"torchvision.transforms.Lambda",
"torch.transpose",
"numpy.array",
"torch.meshgrid",
"torch.linspace",
"torch.bmm",
"torchvision.transforms.ToTensor",
"torch.minimum",
"torch.zeros",
"torch.inverse"
] | [((240, 251), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (248, 251), True, 'import numpy as np\n'), ((1320, 1340), 'torch.meshgrid', 'torch.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1334, 1340), False, 'import torch\n'), ((2278, 2306), 'torch.bmm', 'torch.bmm', (['coords', 'cam2world'], {}), '(coords, cam2world)\n', (2287, 2306), False, 'import torch\n'), ((2581, 2609), 'torch.inverse', 'torch.inverse', (['cam2world_tar'], {}), '(cam2world_tar)\n', (2594, 2609), False, 'import torch\n'), ((2906, 2926), 'torch.meshgrid', 'torch.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2920, 2926), False, 'import torch\n'), ((3880, 3906), 'torch.bmm', 'torch.bmm', (['coords', 'src2tar'], {}), '(coords, src2tar)\n', (3889, 3906), False, 'import torch\n'), ((7399, 7510), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['color_tar', 'map'], {'mode': '"""bilinear"""', 'padding_mode': '"""border"""', 'align_corners': '(True)'}), "(color_tar, map, mode='bilinear',\n padding_mode='border', align_corners=True)\n", (7430, 7510), False, 'import torch\n'), ((7761, 7871), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['mask_tar', 'map'], {'mode': '"""bilinear"""', 'padding_mode': '"""border"""', 'align_corners': '(True)'}), "(mask_tar, map, mode='bilinear',\n padding_mode='border', align_corners=True)\n", (7792, 7871), False, 'import torch\n'), ((2640, 2679), 'torch.bmm', 'torch.bmm', (['world2cam_tar', 'cam2world_src'], {}), '(world2cam_tar, cam2world_src)\n', (2649, 2679), False, 'import torch\n'), ((5070, 5096), 'torch.stack', 'torch.stack', (['(x, y)'], {'dim': '(3)'}), '((x, y), dim=3)\n', (5081, 5096), False, 'import torch\n'), ((1350, 1375), 'torch.transpose', 'torch.transpose', (['xx', '(0)', '(1)'], {}), '(xx, 0, 1)\n', (1365, 1375), False, 'import torch\n'), ((1402, 1427), 'torch.transpose', 'torch.transpose', (['yy', '(0)', '(1)'], {}), '(yy, 0, 1)\n', (1417, 1427), False, 'import torch\n'), ((2936, 2961), 'torch.transpose', 
'torch.transpose', (['xx', '(0)', '(1)'], {}), '(xx, 0, 1)\n', (2951, 2961), False, 'import torch\n'), ((2988, 3013), 'torch.transpose', 'torch.transpose', (['yy', '(0)', '(1)'], {}), '(yy, 0, 1)\n', (3003, 3013), False, 'import torch\n'), ((4566, 4580), 'torch.floor', 'torch.floor', (['x'], {}), '(x)\n', (4577, 4580), False, 'import torch\n'), ((4598, 4612), 'torch.floor', 'torch.floor', (['y'], {}), '(y)\n', (4609, 4612), False, 'import torch\n'), ((5467, 5577), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['depth_tar', 'll'], {'mode': '"""nearest"""', 'padding_mode': '"""border"""', 'align_corners': '(True)'}), "(depth_tar, ll, mode='nearest', padding_mode\n ='border', align_corners=True)\n", (5498, 5577), False, 'import torch\n'), ((5806, 5916), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['depth_tar', 'lr'], {'mode': '"""nearest"""', 'padding_mode': '"""border"""', 'align_corners': '(True)'}), "(depth_tar, lr, mode='nearest', padding_mode\n ='border', align_corners=True)\n", (5837, 5916), False, 'import torch\n'), ((6145, 6255), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['depth_tar', 'rl'], {'mode': '"""nearest"""', 'padding_mode': '"""border"""', 'align_corners': '(True)'}), "(depth_tar, rl, mode='nearest', padding_mode\n ='border', align_corners=True)\n", (6176, 6255), False, 'import torch\n'), ((6484, 6594), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['depth_tar', 'rr'], {'mode': '"""nearest"""', 'padding_mode': '"""border"""', 'align_corners': '(True)'}), "(depth_tar, rr, mode='nearest', padding_mode\n ='border', align_corners=True)\n", (6515, 6594), False, 'import torch\n'), ((6893, 6928), 'torch.minimum', 'torch.minimum', (['sample_z1', 'sample_z2'], {}), '(sample_z1, sample_z2)\n', (6906, 6928), False, 'import torch\n'), ((6930, 6965), 'torch.minimum', 'torch.minimum', (['sample_z3', 'sample_z4'], {}), '(sample_z3, sample_z4)\n', (6943, 6965), 
False, 'import torch\n'), ((664, 703), 'torchvision.transforms.Lambda', 'torchvision.transforms.Lambda', (['to_numpy'], {}), '(to_numpy)\n', (693, 703), False, 'import torchvision\n'), ((717, 750), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (748, 750), False, 'import torchvision\n'), ((892, 931), 'torchvision.transforms.Lambda', 'torchvision.transforms.Lambda', (['to_numpy'], {}), '(to_numpy)\n', (921, 931), False, 'import torchvision\n'), ((945, 978), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (976, 978), False, 'import torchvision\n'), ((1201, 1228), 'torch.linspace', 'torch.linspace', (['(0)', '(H - 1)', 'H'], {}), '(0, H - 1, H)\n', (1215, 1228), False, 'import torch\n'), ((1258, 1285), 'torch.linspace', 'torch.linspace', (['(0)', '(W - 1)', 'W'], {}), '(0, W - 1, W)\n', (1272, 1285), False, 'import torch\n'), ((2021, 2045), 'torch.zeros', 'torch.zeros', (['bs', 'H', 'W', '(4)'], {}), '(bs, H, W, 4)\n', (2032, 2045), False, 'import torch\n'), ((2779, 2806), 'torch.linspace', 'torch.linspace', (['(0)', '(H - 1)', 'H'], {}), '(0, H - 1, H)\n', (2793, 2806), False, 'import torch\n'), ((2840, 2867), 'torch.linspace', 'torch.linspace', (['(0)', '(W - 1)', 'W'], {}), '(0, W - 1, W)\n', (2854, 2867), False, 'import torch\n'), ((3615, 3639), 'torch.zeros', 'torch.zeros', (['bs', 'H', 'W', '(4)'], {}), '(bs, H, W, 4)\n', (3626, 3639), False, 'import torch\n')] |
import os
import sys
import numpy as np
from .config import config
class Model:
    """Weight container for a tag-sequence model.

    The weight vector ``w`` packs node features first
    (``n_feature * n_tag`` entries) followed by tag-transition features
    (``n_tag * n_tag`` entries), so its total length is
    ``n_tag * (n_feature + n_tag)``.
    """

    def __init__(self, n_feature, n_tag):
        self.n_tag = n_tag
        self.n_feature = n_feature
        self.n_transition_feature = n_tag * (n_feature + n_tag)
        if config.random:
            # random initialization in [-1, 1)
            self.w = np.random.random(size=(self.n_transition_feature,)) * 2 - 1
        else:
            self.w = np.zeros(self.n_transition_feature)

    def expand(self, n_feature, n_tag):
        """Grow the weight vector to new feature/tag counts, keeping the
        old node weights at the front and the old edge weights at the tail."""
        new_transition_feature = n_tag * (n_feature + n_tag)
        if config.random:
            new_w = np.random.random(size=(new_transition_feature,)) * 2 - 1
        else:
            new_w = np.zeros(new_transition_feature)
        n_node = self.n_tag * self.n_feature
        n_edge = self.n_tag * self.n_tag
        new_w[:n_node] = self.w[:n_node]
        # NOTE(review): if n_tag changes, the old edge block is copied into
        # the tail of a differently shaped edge block, so the per-pair
        # mapping is not preserved -- confirm this is intended.
        new_w[-n_edge:] = self.w[-n_edge:]
        self.n_tag = n_tag
        self.n_feature = n_feature
        self.n_transition_feature = new_transition_feature
        self.w = new_w

    def _get_node_tag_feature_id(self, feature_id, tag_id):
        # node features are laid out feature-major: one slot per tag
        return feature_id * self.n_tag + tag_id

    def _get_tag_tag_feature_id(self, pre_tag_id, tag_id):
        # edge features live after all node features
        return self.n_feature * self.n_tag + tag_id * self.n_tag + pre_tag_id

    @classmethod
    def load(cls, model_dir=None):
        """Load a model from ``model_dir`` (defaults to ``config.modelDir``).

        Prefers the npz format written by :meth:`save`; falls back to the
        legacy text format and re-saves it as npz.
        """
        if model_dir is None:
            model_dir = config.modelDir
        model_path = os.path.join(model_dir, "weights.npz")
        if os.path.exists(model_path):
            npz = np.load(model_path)
            sizes = npz["sizes"]
            w = npz["w"]
            model = cls.__new__(cls)
            model.n_tag = int(sizes[0])
            model.n_feature = int(sizes[1])
            model.n_transition_feature = model.n_tag * (
                model.n_feature + model.n_tag
            )
            model.w = w
            assert model.w.shape[0] == model.n_transition_feature
            return model
        print(
            "WARNING: weights.npz does not exist, try loading using old format",
            file=sys.stderr,
        )
        model_path = os.path.join(model_dir, "model.txt")
        with open(model_path, encoding="utf-8") as f:
            ary = f.readlines()
        model = cls.__new__(cls)
        model.n_tag = int(ary[0].strip())
        wsize = int(ary[1].strip())
        w = np.zeros(wsize)
        # Bug fix: the legacy file holds 2 header lines followed by ``wsize``
        # weight lines, so the weights occupy ary[2 : wsize + 2]. The old
        # loop ran over range(2, wsize) and silently dropped the last two
        # weights (leaving them zero).
        for i in range(2, wsize + 2):
            w[i - 2] = float(ary[i].strip())
        model.w = w
        model.n_feature = wsize // model.n_tag - model.n_tag
        model.n_transition_feature = wsize
        model.save(model_dir)
        return model

    @classmethod
    def new(cls, model, copy_weight=True):
        """Create a model with the same dimensions as ``model``; copies the
        weights when ``copy_weight`` is True, otherwise zero-initializes."""
        new_model = cls.__new__(cls)
        new_model.n_tag = model.n_tag
        if copy_weight:
            new_model.w = model.w.copy()
        else:
            new_model.w = np.zeros_like(model.w)
        new_model.n_feature = (
            new_model.w.shape[0] // new_model.n_tag - new_model.n_tag
        )
        new_model.n_transition_feature = new_model.w.shape[0]
        return new_model

    def save(self, model_dir=None):
        """Persist ``(n_tag, n_feature)`` and the weights as weights.npz."""
        if model_dir is None:
            model_dir = config.modelDir
        sizes = np.array([self.n_tag, self.n_feature])
        np.savez(
            os.path.join(model_dir, "weights.npz"), sizes=sizes, w=self.w
        )
| [
"os.path.exists",
"numpy.random.random",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.load",
"numpy.zeros_like"
] | [((1407, 1445), 'os.path.join', 'os.path.join', (['model_dir', '"""weights.npz"""'], {}), "(model_dir, 'weights.npz')\n", (1419, 1445), False, 'import os\n'), ((1457, 1483), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (1471, 1483), False, 'import os\n'), ((2092, 2128), 'os.path.join', 'os.path.join', (['model_dir', '"""model.txt"""'], {}), "(model_dir, 'model.txt')\n", (2104, 2128), False, 'import os\n'), ((2339, 2354), 'numpy.zeros', 'np.zeros', (['wsize'], {}), '(wsize)\n', (2347, 2354), True, 'import numpy as np\n'), ((3197, 3235), 'numpy.array', 'np.array', (['[self.n_tag, self.n_feature]'], {}), '([self.n_tag, self.n_feature])\n', (3205, 3235), True, 'import numpy as np\n'), ((394, 429), 'numpy.zeros', 'np.zeros', (['self.n_transition_feature'], {}), '(self.n_transition_feature)\n', (402, 429), True, 'import numpy as np\n'), ((669, 701), 'numpy.zeros', 'np.zeros', (['new_transition_feature'], {}), '(new_transition_feature)\n', (677, 701), True, 'import numpy as np\n'), ((1503, 1522), 'numpy.load', 'np.load', (['model_path'], {}), '(model_path)\n', (1510, 1522), True, 'import numpy as np\n'), ((2852, 2874), 'numpy.zeros_like', 'np.zeros_like', (['model.w'], {}), '(model.w)\n', (2865, 2874), True, 'import numpy as np\n'), ((3266, 3304), 'os.path.join', 'os.path.join', (['model_dir', '"""weights.npz"""'], {}), "(model_dir, 'weights.npz')\n", (3278, 3304), False, 'import os\n'), ((299, 350), 'numpy.random.random', 'np.random.random', ([], {'size': '(self.n_transition_feature,)'}), '(size=(self.n_transition_feature,))\n', (315, 350), True, 'import numpy as np\n'), ((578, 626), 'numpy.random.random', 'np.random.random', ([], {'size': '(new_transition_feature,)'}), '(size=(new_transition_feature,))\n', (594, 626), True, 'import numpy as np\n')] |
import cvxpy as cp
import math
import numpy as np
from collections import OrderedDict
from functools import partial
from multiprocessing import Pool, cpu_count
from scipy.optimize import minimize_scalar
from tqdm.auto import tqdm
def p_num_samples(epsilon, delta, n_x=3, const=None):
    """Compute the number of samples needed to satisfy the specified
    probabilistic guarantees for the p-norm ball reachable set estimate.

    :param epsilon: The accuracy parameter
    :type epsilon: float
    :param delta: The confidence parameter
    :type delta: float
    :param n_x: The state dimension, defaults to 3
    :type n_x: int
    :param const: The constraints placed on the parameters A and b, defaults to None
    :type const: string, optional
    :return: The number of samples needed to satisfy the guarantees
    :rtype: int
    """
    # n_theta counts the free parameters of (A, b) under the constraint.
    if const is None:
        # symmetric A (n_x*(n_x+1)/2) plus b (n_x)
        n_theta = 0.5 * (n_x ** 2 + 3 * n_x)
    elif const == "diagonal":
        n_theta = 2 * n_x
    elif const == "scalar":
        n_theta = 1
    numerator = math.e * (math.log(1 / delta) + n_theta)
    denominator = epsilon * (math.e - 1)
    return math.ceil(numerator / denominator)
def solve_p_norm(sample, n_x=3, p=2, const=None):
    """Solves the scenario relaxation problem for the given sample with p-Norm Balls

    :param sample: Sample from dynamical system (num_samples, n_x)
    :type sample: numpy.ndarray
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param const: The constraints placed on the parameters A and b, defaults to None
    :type const: string, optional
    :return: The values of matrix A and vector b corresponding to the optimal
        p-Norm Ball, as well as the status of the optimizer.
    :rtype: tuple
    """
    if const is None:
        A = cp.Variable((n_x, n_x), symmetric=True)
        b = cp.Variable((n_x, 1))
    elif const == "diagonal":
        a = cp.Variable((n_x, 1))
        A = cp.diag(a)
        b = cp.Variable((n_x, 1))
    elif const == "scalar":
        sigma = cp.Variable()
        A = sigma * np.identity(n_x)
        b = np.zeros((n_x, 1))
    # maximize the log-volume of the ellipsoid {x : ||Ax - b||_p <= 1}
    obj = cp.Minimize(-cp.log_det(A))
    constraints = [cp.pnorm(A @ r.reshape(n_x, 1) - b, p=p) <= 1 for r in sample]
    prob = cp.Problem(obj, constraints)
    prob.solve()
    if const != "scalar":
        return A.value, b.value, prob.status
    # Bug fix: the scalar branch previously returned the raw cvxpy
    # Expression A rather than its solved numeric value, unlike the numeric
    # arrays returned by the other branches; return A.value for a
    # consistent interface (b is already a plain numpy array here).
    return A.value, b, prob.status
def multi_p_norm(samples, p=2, const=None):
    """Computes the p-norm ball reachable set estimates across a series of timesteps
    :param samples: The samples from a dynamic system across time, an array of shape (num_samples, timesteps, state_dim)
    :type samples: numpy.ndarray
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param const: The constraints placed on the parameters A and b, defaults to None
    :type const: string, optional
    :raises ValueError: If ``samples`` is not a 3-D array
    :return: One dict per timestep with keys "A", "b" and "status" describing
        the fitted ball and the solver status
    :rtype: list
    """
    if len(samples.shape) != 3:
        raise ValueError("Samples must be of shape (num_samples, timesteps, state_dim)")
    n_x = samples.shape[2]
    keys = ("A", "b", "status")
    solve_p_norm_map = partial(solve_p_norm, n_x=n_x, p=p, const=const)
    # swapaxes turns the array into an iterable of per-timestep
    # (num_samples, n_x) slices; each is fitted in a worker process.
    # The context manager tears the pool down (the previous version leaked it),
    # and the pool no longer shadows the p-norm order parameter `p`.
    with Pool(cpu_count()) as pool:
        solutions = [
            dict(zip(keys, sol))
            for sol in tqdm(
                pool.imap(solve_p_norm_map, samples.swapaxes(0, 1)),
                total=samples.shape[1],
            )
        ]
    return solutions
def p_norm_cont(arr, axis, default_val, n_x, A_val, b_val, p, minimum=True):
    """Solve for the optimal value that satisfies the p-Norm Ball conditions at the specified axis
    :param arr: Array of shape (n_x - 1,) containing the independent variables of the p-norm condition
    :type arr: numpy.ndarray
    :param axis: The axis of the dependent variable for which to solve for (i.e. z -> axis=2).
    :type axis: int
    :param default_val: The value to return if no solution for the dependent variable is found that satisfies the p-norm conditions
    :type default_val: float
    :param n_x: The state dimension
    :type n_x: int
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param p: The order of p-norm
    :type p: int
    :param minimum: True if optimizing for the minimal value of the dependent variable that satisfies the p-norm conditions, defaults to True
    :type minimum: bool, optional
    :return: The optimal value of the dependent variable at the specified axis,
        or ``default_val`` when the problem is infeasible or the solver fails
    :rtype: float
    """
    vec = cp.Variable((n_x, 1))
    other_dims = list(range(n_x))
    other_dims.remove(axis)
    # Pin every coordinate except `axis` to the supplied grid point.
    constraints = [vec[i][0] == arr[j] for i, j in zip(other_dims, range(n_x - 1))]
    constraints.append(cp.pnorm(A_val @ vec - b_val, p=p) <= 1)
    if minimum:
        obj = cp.Minimize(vec[axis])
    else:
        obj = cp.Maximize(vec[axis])
    prob = cp.Problem(obj, constraints)
    try:
        prob.solve()
    except Exception:
        # Solver failures are treated the same as "no contour at this point".
        # Narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        return default_val
    if prob.status != "optimal":
        return default_val
    return vec.value[axis]
def p_norm_cont_proj(arr, axis, default_val, n_x, A_val, b_val, p):
    """Minimizes the p-Norm value with respect to the coordinate at `axis`.

    All other coordinates are pinned to the values in `arr`; the `axis`
    coordinate is optimized with a scalar minimizer. If even the best value
    leaves the point outside the ball, `default_val` is returned.

    :param arr: Array of shape (n_x - 1,) containing the independent variables of the p-norm condition.
    :type arr: numpy.ndarray
    :param axis: The axis of the dependent variable for which to solve for (i.e. z -> axis=2).
    :type axis: int
    :param default_val: The value to return if no solution satisfying the p-norm conditions is found.
    :type default_val: float
    :param n_x: The state dimension.
    :type n_x: int
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball.
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball.
    :type b_val: numpy.ndarray
    :param p: The order of p-norm.
    :type p: int
    :return: The value at `axis` minimizing the p-norm residual, or `default_val`.
    :rtype: float
    """
    point = np.zeros(n_x)
    free_dims = [d for d in range(n_x) if d != axis]
    for dest, src in zip(free_dims, range(n_x - 1)):
        point[dest] = arr[src]

    def residual_norm(candidate):
        # Ball residual with the axis coordinate set to the candidate value.
        point[axis] = candidate
        return np.linalg.norm(A_val @ point.reshape((n_x, 1)) - b_val, ord=p)

    best = minimize_scalar(residual_norm)
    # Accept the minimizer only if it actually lands inside the ball.
    if residual_norm(best.x) <= 1:
        return best.x
    return default_val
def p_compute_contour_2D(sample, A_val, b_val, cont_axis=2, n_x=3, p=2, grid_n=200):
    """Computes the 3D contour for 2 dimensions based on sample data and the A_val, and b_val corresponding to the optimal p-norm ball.
    :param sample: Sample from dynamical system (num_samples, n_x)
    :type sample: numpy.ndarray
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param cont_axis: The axis for which the contours are to be solved for, defaults to 2
    :type cont_axis: int, optional
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param grid_n: The side length of the cube of points to be used for computing contours, defaults to 200
    :type grid_n: int, optional
    :return: The meshgrid, corresponding computed contour, and the extremum values for the chosen axis
    :rtype: tuple
    """
    x_min, x_max = sample[:, 0].min(), sample[:, 0].max()
    y_min, y_max = sample[:, 1].min(), sample[:, 1].max()
    z_min, z_max = sample[:, 2].min(), sample[:, 2].max()
    # Pad each axis by 40% of the sample spread so the ball is not clipped.
    x = np.linspace(
        x_min - 0.4 * (x_max - x_min), x_max + 0.4 * (x_max - x_min), grid_n
    )
    y = np.linspace(
        y_min - 0.4 * (y_max - y_min), y_max + 0.4 * (y_max - y_min), grid_n
    )
    # Bug fix: this grid was previously anchored at x_min instead of z_min.
    z = np.linspace(
        z_min - 0.4 * (z_max - z_min), z_max + 0.4 * (z_max - z_min), grid_n
    )
    # The two remaining axes span the grid; c_min/c_max bound the solved axis.
    if cont_axis == 2:
        d0, d1 = np.meshgrid(x, y)
        c_min, c_max = z_min, z_max
    elif cont_axis == 1:
        d0, d1 = np.meshgrid(x, z)
        c_min, c_max = y_min, y_max
    elif cont_axis == 0:
        d0, d1 = np.meshgrid(y, z)
        c_min, c_max = x_min, x_max
    d2 = np.array([d0.flatten(), d1.flatten()]).T
    # default_val above c_max flags grid points that fall outside the ball.
    solve_cont_d2 = partial(
        p_norm_cont_proj,
        axis=cont_axis,
        default_val=c_max + 1,
        n_x=n_x,
        A_val=A_val,
        b_val=b_val,
        p=p,
    )
    cont = np.fromiter(map(solve_cont_d2, d2), dtype=np.float64).reshape(grid_n, grid_n)
    return d0, d1, cont, c_min, c_max
def p_compute_contour_3D(
    sample, A_val, b_val, cont_axis=2, n_x=3, p=2, grid_n=200, stretch=0.4
):
    """Computes the 3D contour for 3 dimensions based on sample data and the A_val, and b_val corresponding to the optimal p-norm ball.
    :param sample: Sample from dynamical system (num_samples, n_x)
    :type sample: numpy.ndarray
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param cont_axis: The axis for which the contours are to be solved for, defaults to 2
    :type cont_axis: int, optional
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param grid_n: The side length of the cube of points to be used for computing contours, defaults to 200
    :type grid_n: int, optional
    :param stretch: The factor by which to stretch the grid used to compute the contour, defaults to 0.4
    :type stretch: float, optional
    :return: The meshgrid, the computed lower and upper contours, and the extremum values for the chosen axis
    :rtype: tuple
    """
    x_min, x_max = sample[:, 0].min(), sample[:, 0].max()
    y_min, y_max = sample[:, 1].min(), sample[:, 1].max()
    z_min, z_max = sample[:, 2].min(), sample[:, 2].max()
    # Pad each axis by `stretch` times the sample spread so the ball is not clipped.
    x = np.linspace(
        x_min - stretch * (x_max - x_min), x_max + stretch * (x_max - x_min), grid_n
    )
    y = np.linspace(
        y_min - stretch * (y_max - y_min), y_max + stretch * (y_max - y_min), grid_n
    )
    # Bug fix: this grid was previously anchored at x_min instead of z_min.
    z = np.linspace(
        z_min - stretch * (z_max - z_min), z_max + stretch * (z_max - z_min), grid_n
    )
    # The two remaining axes span the grid; c_min/c_max bound the solved axis.
    if cont_axis == 2:
        d0, d1 = np.meshgrid(x, y)
        c_min, c_max = z_min, z_max
    elif cont_axis == 1:
        d0, d1 = np.meshgrid(x, z)
        c_min, c_max = y_min, y_max
    elif cont_axis == 0:
        d0, d1 = np.meshgrid(y, z)
        c_min, c_max = x_min, x_max
    d2 = np.array([d0.flatten(), d1.flatten()]).T
    # Out-of-range default values flag grid points outside the ball.
    solve_cont_d2_min = partial(
        p_norm_cont,
        axis=cont_axis,
        default_val=c_max + 1,
        n_x=n_x,
        A_val=A_val,
        b_val=b_val,
        p=p,
        minimum=True,
    )
    solve_cont_d2_max = partial(
        p_norm_cont,
        axis=cont_axis,
        default_val=c_min - 1,
        n_x=n_x,
        A_val=A_val,
        b_val=b_val,
        p=p,
        minimum=False,
    )
    cont_min = np.fromiter(map(solve_cont_d2_min, d2), dtype=np.float64).reshape(
        grid_n, grid_n
    )
    cont_max = np.fromiter(map(solve_cont_d2_max, d2), dtype=np.float64).reshape(
        grid_n, grid_n
    )
    return d0, d1, cont_min, cont_max, c_min, c_max
def p_compute_vals(sample, A_val, b_val, p=2, grid_n=200):
    """Computes the values within a p-norm ball in 1 dimension.

    :param sample: The sample from a specific time step, an array of shape (num_samples,)
    :type sample: numpy.ndarray
    :param A_val: The matrix of shape (1, 1) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (1, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param grid_n: The number of points tested for the p-norm ball estimation, defaults to 200
    :type grid_n: int, optional
    :return: The values within the p-norm ball
    :rtype: list
    """
    # sample is a (num_samples,) shaped array; pad its span by 40% on each side.
    lo, hi = sample.min(), sample.max()
    pad = 0.4 * (hi - lo)
    candidates = np.linspace(lo - pad, hi + pad, grid_n)
    inside = []
    for candidate in candidates:
        try:
            within = np.linalg.norm(A_val @ np.array([[candidate]]) - b_val, ord=p) <= 1
        except ValueError:
            # Unsupported matrix norm order: skip this candidate, as before.
            continue
        if within:
            inside.append(candidate)
    return inside
def p_get_dict(i, samples, solution_list, items, grid_n=50):
    """Builds the labeled contour dictionary for one selected timestep.

    Keys are fixed labels and values are the output of p_compute_contour_3D,
    the contour information for a p-norm ball reachable set estimate at the
    timestep ``items[i]``.

    :param i: The index into ``items``
    :type i: int
    :param samples: Array of shape (num_samples, time, 3)
    :type samples: numpy.ndarray
    :param solution_list: List of solutions, each a dict with keys ("A", "b", "status")
    :type solution_list: list
    :param items: Indices of the timesteps at which reachable set estimates were made
    :type items: list
    :param grid_n: The side length of the cube of points used for computing contours, defaults to 50
    :type grid_n: int, optional
    :return: Dict mapping the labels to the contour information
    :rtype: dict
    """
    keys = ("xv", "yv", "z_cont", "z_cont2", "z_min", "z_max")
    timestep = items[i]
    solution = solution_list[timestep]
    contour = p_compute_contour_3D(
        samples[:, timestep, :], solution["A"], solution["b"], grid_n=grid_n
    )
    return dict(zip(keys, contour))
def p_dict_list(
    samples, solution_list, num_parts, num_indices=20, logspace=True, grid_n=50
):
    """Generates a list of dictionaries, each containing the contour information for a p-norm ball reachable set estimate at a given time
    :param samples: Array of shape (num_samples, time, 3)
    :type samples: numpy.ndarray
    :param solution_list: List of solutions where each solution is a dictionary with keys ("A", "b", "status")
    :type solution_list: list
    :param num_parts: The number of total timesteps for the samples
    :type num_parts: int
    :param num_indices: The number of indices corresponding to the number of reachable set estimates made at different times, defaults to 20
    :type num_indices: int, optional
    :param logspace: If True, a logarithmic scale is used for choosing times to compute reachable set estimates, defaults to True
    :type logspace: bool, optional
    :param grid_n: The side length of the cube of points to be used for computing contours, defaults to 50
    :type grid_n: int, optional
    :return: A list of dictionaries, each containing the contour information for a p-norm ball reachable set estimate at a given time
    :rtype: list
    """
    if logspace:
        log_ceil = np.log10(num_parts)
        # OrderedDict.fromkeys deduplicates repeated indices while keeping order.
        items = list(
            OrderedDict.fromkeys(
                [int(i) - 1 for i in np.logspace(0, log_ceil, num_indices)]
            )
        )
    else:
        # Bug fix: the linspace previously ran up to num_parts, producing an
        # out-of-range index for the last timestep (the logspace branch already
        # maps to 0..num_parts-1).
        items = [int(i) for i in np.linspace(0, num_parts - 1, num_indices)]
    get_dict = partial(
        p_get_dict,
        samples=samples,
        solution_list=solution_list,
        items=items,
        grid_n=grid_n,
    )
    # Context manager ensures the worker pool is released (previously leaked).
    with Pool(cpu_count()) as pool:
        dict_list = [
            d
            for d in tqdm(pool.imap(get_dict, np.arange(len(items))), total=len(items))
        ]
    return dict_list
def p_emp_estimate(samples, A_val, b_val, n_x=3, p=2):
    """Computes the ratio of samples within the estimated reachable set for the p-norm ball reachable set estimation
    :param samples: Sample from dynamical system (num_samples, n_x)
    :type samples: numpy.ndarray
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :return: The ratio of samples within the estimated reachability set
    :rtype: float
    """
    # Vectorized: residuals has one column per sample, so a single norm call
    # along axis 0 replaces the per-sample Python loop.  For p in {1, 2, inf}
    # (the only orders np.linalg.norm accepted on the original (n_x, 1) matrix)
    # the result is identical; other p values now work instead of raising.
    residuals = A_val @ samples.reshape(-1, n_x).T - b_val
    distances = np.linalg.norm(residuals, ord=p, axis=0)
    return np.count_nonzero(distances <= 1) / samples.shape[0]
def p_get_reachable_2D(samples, A_val, b_val, p=2, grid_n=50):
    """Collects the grid points inside the p-norm ball for two state variables.

    A utility function for plotting the reachable set estimate across all
    timesteps for two state variables.

    :param samples: Samples of shape (num_samples, 2)
    :type samples: numpy.ndarray
    :param A_val: Matrix of the reachable set estimate, a (2, 2) array
    :type A_val: numpy.ndarray
    :param b_val: Vector of the reachable set estimate, a (2, 1) array
    :type b_val: numpy.ndarray
    :param p: The order of the p-norm ball, defaults to 2
    :type p: int, optional
    :param grid_n: The side length of the square of test points, defaults to 50
    :type grid_n: int, optional
    :return: The x and y values included in the p-norm ball
    :rtype: tuple
    """
    # Build one padded axis per dimension (40% margin on each side).
    axes = []
    for k in range(2):
        lo, hi = samples[:, k].min(), samples[:, k].max()
        pad = 0.4 * (hi - lo)
        axes.append(np.linspace(lo - pad, hi + pad, grid_n))
    g0, g1 = np.meshgrid(axes[0], axes[1])
    points = np.array([g0.flatten(), g1.flatten()]).T
    xs, ys = [], []
    for point in points:
        if np.linalg.norm(A_val @ point.reshape((2, 1)) - b_val, ord=p) <= 1:
            xs.append(point[0])
            ys.append(point[1])
    return xs, ys
def p_get_reachable_3D(samples, A_val, b_val, p=2, grid_n=25):
    """Collects the grid points inside the p-norm ball for three state variables.

    A utility function for plotting the reachable set estimate across all
    timesteps for three state variables.

    :param samples: Samples of shape (num_samples, 3)
    :type samples: numpy.ndarray
    :param A_val: Matrix of the reachable set estimate, a (3, 3) array
    :type A_val: numpy.ndarray
    :param b_val: Vector of the reachable set estimate, a (3, 1) array
    :type b_val: numpy.ndarray
    :param p: The order of the p-norm ball, defaults to 2
    :type p: int, optional
    :param grid_n: The side length of the cube of test points, defaults to 25
    :type grid_n: int, optional
    :return: The x, y and z values included in the p-norm ball
    :rtype: tuple
    """
    # Build one padded axis per dimension (40% margin on each side).
    axes = []
    for k in range(3):
        lo, hi = samples[:, k].min(), samples[:, k].max()
        pad = 0.4 * (hi - lo)
        axes.append(np.linspace(lo - pad, hi + pad, grid_n))
    g0, g1, g2 = np.meshgrid(axes[0], axes[1], axes[2])
    points = np.array([g0.flatten(), g1.flatten(), g2.flatten()]).T
    xs, ys, zs = [], [], []
    for point in points:
        if np.linalg.norm(A_val @ point.reshape((3, 1)) - b_val, ord=p) <= 1:
            xs.append(point[0])
            ys.append(point[1])
            zs.append(point[2])
    return xs, ys, zs
| [
"cvxpy.diag",
"numpy.log10",
"cvxpy.pnorm",
"multiprocessing.cpu_count",
"math.log",
"numpy.array",
"numpy.linalg.norm",
"cvxpy.Maximize",
"cvxpy.Minimize",
"numpy.linspace",
"scipy.optimize.minimize_scalar",
"numpy.meshgrid",
"numpy.logspace",
"numpy.identity",
"cvxpy.Problem",
"cvxpy... | [((2291, 2319), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (2301, 2319), True, 'import cvxpy as cp\n'), ((3229, 3277), 'functools.partial', 'partial', (['solve_p_norm'], {'n_x': 'n_x', 'p': 'p', 'const': 'const'}), '(solve_p_norm, n_x=n_x, p=p, const=const)\n', (3236, 3277), False, 'from functools import partial\n'), ((4816, 4837), 'cvxpy.Variable', 'cp.Variable', (['(n_x, 1)'], {}), '((n_x, 1))\n', (4827, 4837), True, 'import cvxpy as cp\n'), ((5159, 5187), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (5169, 5187), True, 'import cvxpy as cp\n'), ((6410, 6423), 'numpy.zeros', 'np.zeros', (['n_x'], {}), '(n_x)\n', (6418, 6423), True, 'import numpy as np\n'), ((6685, 6703), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['f'], {}), '(f)\n', (6700, 6703), False, 'from scipy.optimize import minimize_scalar\n'), ((8155, 8240), 'numpy.linspace', 'np.linspace', (['(x_min - 0.4 * (x_max - x_min))', '(x_max + 0.4 * (x_max - x_min))', 'grid_n'], {}), '(x_min - 0.4 * (x_max - x_min), x_max + 0.4 * (x_max - x_min),\n grid_n)\n', (8166, 8240), True, 'import numpy as np\n'), ((8259, 8344), 'numpy.linspace', 'np.linspace', (['(y_min - 0.4 * (y_max - y_min))', '(y_max + 0.4 * (y_max - y_min))', 'grid_n'], {}), '(y_min - 0.4 * (y_max - y_min), y_max + 0.4 * (y_max - y_min),\n grid_n)\n', (8270, 8344), True, 'import numpy as np\n'), ((8363, 8448), 'numpy.linspace', 'np.linspace', (['(x_min - 0.4 * (z_max - z_min))', '(z_max + 0.4 * (z_max - z_min))', 'grid_n'], {}), '(x_min - 0.4 * (z_max - z_min), z_max + 0.4 * (z_max - z_min),\n grid_n)\n', (8374, 8448), True, 'import numpy as np\n'), ((8818, 8926), 'functools.partial', 'partial', (['p_norm_cont_proj'], {'axis': 'cont_axis', 'default_val': '(c_max + 1)', 'n_x': 'n_x', 'A_val': 'A_val', 'b_val': 'b_val', 'p': 'p'}), '(p_norm_cont_proj, axis=cont_axis, default_val=c_max + 1, n_x=n_x,\n A_val=A_val, b_val=b_val, p=p)\n', (8825, 8926), 
False, 'from functools import partial\n'), ((10745, 10838), 'numpy.linspace', 'np.linspace', (['(x_min - stretch * (x_max - x_min))', '(x_max + stretch * (x_max - x_min))', 'grid_n'], {}), '(x_min - stretch * (x_max - x_min), x_max + stretch * (x_max -\n x_min), grid_n)\n', (10756, 10838), True, 'import numpy as np\n'), ((10857, 10950), 'numpy.linspace', 'np.linspace', (['(y_min - stretch * (y_max - y_min))', '(y_max + stretch * (y_max - y_min))', 'grid_n'], {}), '(y_min - stretch * (y_max - y_min), y_max + stretch * (y_max -\n y_min), grid_n)\n', (10868, 10950), True, 'import numpy as np\n'), ((10969, 11062), 'numpy.linspace', 'np.linspace', (['(x_min - stretch * (z_max - z_min))', '(z_max + stretch * (z_max - z_min))', 'grid_n'], {}), '(x_min - stretch * (z_max - z_min), z_max + stretch * (z_max -\n z_min), grid_n)\n', (10980, 11062), True, 'import numpy as np\n'), ((11436, 11554), 'functools.partial', 'partial', (['p_norm_cont'], {'axis': 'cont_axis', 'default_val': '(c_max + 1)', 'n_x': 'n_x', 'A_val': 'A_val', 'b_val': 'b_val', 'p': 'p', 'minimum': '(True)'}), '(p_norm_cont, axis=cont_axis, default_val=c_max + 1, n_x=n_x, A_val=\n A_val, b_val=b_val, p=p, minimum=True)\n', (11443, 11554), False, 'from functools import partial\n'), ((11646, 11765), 'functools.partial', 'partial', (['p_norm_cont'], {'axis': 'cont_axis', 'default_val': '(c_min - 1)', 'n_x': 'n_x', 'A_val': 'A_val', 'b_val': 'b_val', 'p': 'p', 'minimum': '(False)'}), '(p_norm_cont, axis=cont_axis, default_val=c_min - 1, n_x=n_x, A_val=\n A_val, b_val=b_val, p=p, minimum=False)\n', (11653, 11765), False, 'from functools import partial\n'), ((12998, 13083), 'numpy.linspace', 'np.linspace', (['(y_min - 0.4 * (y_max - y_min))', '(y_max + 0.4 * (y_max - y_min))', 'grid_n'], {}), '(y_min - 0.4 * (y_max - y_min), y_max + 0.4 * (y_max - y_min),\n grid_n)\n', (13009, 13083), True, 'import numpy as np\n'), ((16195, 16293), 'functools.partial', 'partial', (['p_get_dict'], {'samples': 'samples', 
'solution_list': 'solution_list', 'items': 'items', 'grid_n': 'grid_n'}), '(p_get_dict, samples=samples, solution_list=solution_list, items=\n items, grid_n=grid_n)\n', (16202, 16293), False, 'from functools import partial\n'), ((18615, 18700), 'numpy.linspace', 'np.linspace', (['(x_min - 0.4 * (x_max - x_min))', '(x_max + 0.4 * (x_max - x_min))', 'grid_n'], {}), '(x_min - 0.4 * (x_max - x_min), x_max + 0.4 * (x_max - x_min),\n grid_n)\n', (18626, 18700), True, 'import numpy as np\n'), ((18719, 18804), 'numpy.linspace', 'np.linspace', (['(y_min - 0.4 * (y_max - y_min))', '(y_max + 0.4 * (y_max - y_min))', 'grid_n'], {}), '(y_min - 0.4 * (y_max - y_min), y_max + 0.4 * (y_max - y_min),\n grid_n)\n', (18730, 18804), True, 'import numpy as np\n'), ((18829, 18846), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (18840, 18846), True, 'import numpy as np\n'), ((20283, 20368), 'numpy.linspace', 'np.linspace', (['(x_min - 0.4 * (x_max - x_min))', '(x_max + 0.4 * (x_max - x_min))', 'grid_n'], {}), '(x_min - 0.4 * (x_max - x_min), x_max + 0.4 * (x_max - x_min),\n grid_n)\n', (20294, 20368), True, 'import numpy as np\n'), ((20387, 20472), 'numpy.linspace', 'np.linspace', (['(y_min - 0.4 * (y_max - y_min))', '(y_max + 0.4 * (y_max - y_min))', 'grid_n'], {}), '(y_min - 0.4 * (y_max - y_min), y_max + 0.4 * (y_max - y_min),\n grid_n)\n', (20398, 20472), True, 'import numpy as np\n'), ((20491, 20576), 'numpy.linspace', 'np.linspace', (['(z_min - 0.4 * (z_max - z_min))', '(z_max + 0.4 * (z_max - z_min))', 'grid_n'], {}), '(z_min - 0.4 * (z_max - z_min), z_max + 0.4 * (z_max - z_min),\n grid_n)\n', (20502, 20576), True, 'import numpy as np\n'), ((20609, 20629), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (20620, 20629), True, 'import numpy as np\n'), ((1838, 1877), 'cvxpy.Variable', 'cp.Variable', (['(n_x, n_x)'], {'symmetric': '(True)'}), '((n_x, n_x), symmetric=True)\n', (1849, 1877), True, 'import cvxpy as cp\n'), ((1890, 1911), 
'cvxpy.Variable', 'cp.Variable', (['(n_x, 1)'], {}), '((n_x, 1))\n', (1901, 1911), True, 'import cvxpy as cp\n'), ((3291, 3302), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (3300, 3302), False, 'from multiprocessing import Pool, cpu_count\n'), ((5078, 5100), 'cvxpy.Minimize', 'cp.Minimize', (['vec[axis]'], {}), '(vec[axis])\n', (5089, 5100), True, 'import cvxpy as cp\n'), ((5125, 5147), 'cvxpy.Maximize', 'cp.Maximize', (['vec[axis]'], {}), '(vec[axis])\n', (5136, 5147), True, 'import cvxpy as cp\n'), ((8500, 8517), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (8511, 8517), True, 'import numpy as np\n'), ((11114, 11131), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (11125, 11131), True, 'import numpy as np\n'), ((15920, 15939), 'numpy.log10', 'np.log10', (['num_parts'], {}), '(num_parts)\n', (15928, 15939), True, 'import numpy as np\n'), ((16350, 16361), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (16359, 16361), False, 'from multiprocessing import Pool, cpu_count\n'), ((1954, 1975), 'cvxpy.Variable', 'cp.Variable', (['(n_x, 1)'], {}), '((n_x, 1))\n', (1965, 1975), True, 'import cvxpy as cp\n'), ((1988, 1998), 'cvxpy.diag', 'cp.diag', (['a'], {}), '(a)\n', (1995, 1998), True, 'import cvxpy as cp\n'), ((2011, 2032), 'cvxpy.Variable', 'cp.Variable', (['(n_x, 1)'], {}), '((n_x, 1))\n', (2022, 2032), True, 'import cvxpy as cp\n'), ((2183, 2196), 'cvxpy.log_det', 'cp.log_det', (['A'], {}), '(A)\n', (2193, 2196), True, 'import cvxpy as cp\n'), ((5007, 5041), 'cvxpy.pnorm', 'cp.pnorm', (['(A_val @ vec - b_val)'], {'p': 'p'}), '(A_val @ vec - b_val, p=p)\n', (5015, 5041), True, 'import cvxpy as cp\n'), ((8596, 8613), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (8607, 8613), True, 'import numpy as np\n'), ((11210, 11227), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (11221, 11227), True, 'import numpy as np\n'), ((17386, 17428), 'numpy.linalg.norm', 'np.linalg.norm', 
(['(A_val @ vec - b_val)'], {'ord': 'p'}), '(A_val @ vec - b_val, ord=p)\n', (17400, 17428), True, 'import numpy as np\n'), ((2077, 2090), 'cvxpy.Variable', 'cp.Variable', ([], {}), '()\n', (2088, 2090), True, 'import cvxpy as cp\n'), ((2140, 2158), 'numpy.zeros', 'np.zeros', (['(n_x, 1)'], {}), '((n_x, 1))\n', (2148, 2158), True, 'import numpy as np\n'), ((8692, 8709), 'numpy.meshgrid', 'np.meshgrid', (['y', 'z'], {}), '(y, z)\n', (8703, 8709), True, 'import numpy as np\n'), ((11306, 11323), 'numpy.meshgrid', 'np.meshgrid', (['y', 'z'], {}), '(y, z)\n', (11317, 11323), True, 'import numpy as np\n'), ((16139, 16177), 'numpy.linspace', 'np.linspace', (['(0)', 'num_parts', 'num_indices'], {}), '(0, num_parts, num_indices)\n', (16150, 16177), True, 'import numpy as np\n'), ((1063, 1082), 'math.log', 'math.log', (['(1 / delta)'], {}), '(1 / delta)\n', (1071, 1082), False, 'import math\n'), ((2111, 2127), 'numpy.identity', 'np.identity', (['n_x'], {}), '(n_x)\n', (2122, 2127), True, 'import numpy as np\n'), ((16033, 16070), 'numpy.logspace', 'np.logspace', (['(0)', 'log_ceil', 'num_indices'], {}), '(0, log_ceil, num_indices)\n', (16044, 16070), True, 'import numpy as np\n'), ((13176, 13191), 'numpy.array', 'np.array', (['[[v]]'], {}), '([[v]])\n', (13184, 13191), True, 'import numpy as np\n')] |
from lmfit import Model, Parameter
from copy import deepcopy
import numpy as np
from __code.fitting_functions import kropff_high_lambda, kropff_low_lambda, kropff_bragg_peak_tof
from __code.bragg_edge_peak_fitting_gui_utility import GuiUtility
class KropffFittingJobHandler:
    """Runs the three-stage Kropff Bragg-edge fit over the ROI profiles stored
    in the parent widget's ``fitting_input_dictionary``.

    The stages are fitted in order: high lambda (a0, b0), low lambda
    (ahkl, bhkl, with a0/b0 frozen), then the Bragg peak itself
    (ldahkl, sigma, tau, with all four edge parameters frozen).

    https://lmfit.github.io/lmfit-py/examples/example_Model_interface.html
    """

    def __init__(self, parent=None):
        # parent is the GUI widget holding the data dictionaries and the
        # fit-initialization input fields.
        self.parent = parent
        self.xaxis_to_fit = None
        self.list_yaxis_to_fit = None

    def prepare(self, kropff_tooldbox='high'):
        """Slice the lambda axis and the ROI profiles down to the requested
        fitting range and cache them on the instance.

        :param kropff_tooldbox: 'high', 'low', 'bragg_peak'
        """
        if kropff_tooldbox == 'bragg_peak':
            # The Bragg peak is fitted over the union of the low and high ranges.
            fitting_range = [self.parent.kropff_fitting_range['low'][0],
                             self.parent.kropff_fitting_range['high'][1]]
        else:
            fitting_range = self.parent.kropff_fitting_range[kropff_tooldbox]
        xaxis = self.parent.fitting_input_dictionary['xaxis']['lambda'][0]
        [left_xaxis_index, right_xaxis_index] = self.parent.bragg_edge_range
        full_fitting_xaxis = xaxis[left_xaxis_index: right_xaxis_index]
        # self.xaxis_to_fit = full_fitting_xaxis[fitting_range[0]: fitting_range[1] + 1] * 1e-6  # to convert in s
        self.xaxis_to_fit = full_fitting_xaxis[fitting_range[0]: fitting_range[1] + 1]
        list_yaxis_to_fit = []
        for _key in self.parent.fitting_input_dictionary['rois'].keys():
            _yaxis = self.parent.fitting_input_dictionary['rois'][_key]['profile']
            full_fitting_yaxis = _yaxis[left_xaxis_index: right_xaxis_index]
            list_yaxis_to_fit.append(full_fitting_yaxis[fitting_range[0]: fitting_range[1] + 1])
        self.list_yaxis_to_fit = list_yaxis_to_fit

    def run_kropff_high_lambda(self, update_table_ui=False):
        """Fit a0 and b0 on the high-lambda side of the edge for every ROI and
        store the results under the 'high' key of each ROI's kropff dict."""
        gmodel = Model(kropff_high_lambda, missing='drop', independent_vars=['lda'])
        lda = self.xaxis_to_fit
        o_gui = GuiUtility(parent=self.parent)
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        a0_init = float(str(self.parent.kropff_high_lda_a0_init.text()))
        b0_init = float(str(self.parent.kropff_high_lda_b0_init.text()))
        for _index, yaxis in enumerate(self.list_yaxis_to_fit):
            # The model is fitted against -log of the measured profile.
            yaxis = -np.log(yaxis)
            _result = gmodel.fit(yaxis, lda=lda, a0=a0_init, b0=b0_init)
            a0 = _result.params['a0'].value
            a0_error = _result.params['a0'].stderr
            b0 = _result.params['b0'].value
            b0_error = _result.params['b0'].stderr
            yaxis_fitted = kropff_high_lambda(self.xaxis_to_fit, a0, b0)
            result_dict = {'a0': a0,
                           'b0': b0,
                           'a0_error': a0_error,
                           'b0_error': b0_error,
                           'xaxis_to_fit': lda,
                           'yaxis_fitted': yaxis_fitted}
            self.parent.fitting_input_dictionary['rois'][_index]['fitting']['kropff']['high'] = deepcopy(result_dict)
            if update_table_ui:
                o_gui.update_kropff_high_lambda_table_ui(row=_index,
                                                         a0=a0,
                                                         b0=b0,
                                                         a0_error=a0_error,
                                                         b0_error=b0_error)

    def run_kropff_low_lambda(self, update_table_ui=False):
        """Fit ahkl and bhkl on the low-lambda side, with a0/b0 frozen at the
        values fitted by :meth:`run_kropff_high_lambda`."""
        gmodel = Model(kropff_low_lambda, missing='drop', independent_vars=['lda'])
        lda = self.xaxis_to_fit
        o_gui = GuiUtility(parent=self.parent)
        ahkl_init = float(str(self.parent.kropff_low_lda_ahkl_init.text()))
        bhkl_init = float(str(self.parent.kropff_low_lda_bhkl_init.text()))
        for _row, yaxis in enumerate(self.list_yaxis_to_fit):
            _entry = self.parent.fitting_input_dictionary['rois'][_row]['fitting']['kropff']['high']
            a0 = float(_entry['a0'])
            b0 = float(_entry['b0'])
            yaxis = -np.log(yaxis)
            # a0/b0 are frozen (vary=False): only ahkl and bhkl are fitted here.
            _result = gmodel.fit(yaxis, lda=lda,
                                 a0=Parameter('a0', value=a0, vary=False),
                                 b0=Parameter('b0', value=b0, vary=False),
                                 ahkl=ahkl_init,
                                 bhkl=bhkl_init)
            ahkl = _result.params['ahkl'].value
            ahkl_error = _result.params['ahkl'].stderr
            bhkl = _result.params['bhkl'].value
            bhkl_error = _result.params['bhkl'].stderr
            yaxis_fitted = kropff_low_lambda(lda,
                                              a0, b0, ahkl, bhkl)
            result_dict = {'ahkl': ahkl,
                           'bhkl': bhkl,
                           'ahkl_error': ahkl_error,
                           'bhkl_error': bhkl_error,
                           'xaxis_to_fit': lda,
                           'yaxis_fitted': yaxis_fitted}
            self.parent.fitting_input_dictionary['rois'][_row]['fitting']['kropff']['low'] = deepcopy(result_dict)
            if update_table_ui:
                o_gui.update_kropff_low_lambda_table_ui(row=_row,
                                                        ahkl=ahkl,
                                                        bhkl=bhkl,
                                                        ahkl_error=ahkl_error,
                                                        bhkl_error=bhkl_error)

    def run_bragg_peak(self, update_table_ui=False, list_row_to_fit=None):
        """Fit the Bragg peak parameters (ldahkl, sigma, tau), with the four
        edge parameters frozen at their previously fitted values.

        :param update_table_ui: If True, push the results into the GUI table.
        :param list_row_to_fit: Optional list of row indices to fit; all rows
            are fitted when None.
        """
        gmodel = Model(kropff_bragg_peak_tof, nan_policy='propagate', independent_vars=['lda'])
        lda = self.xaxis_to_fit
        o_gui = GuiUtility(parent=self.parent)
        # NOTE(review): this widget is reached via parent.ui while its siblings
        # hang directly off parent -- confirm that is intentional.
        ldahkl_init = float(str(self.parent.ui.kropff_bragg_peak_ldahkl_init.text()))
        tau_init = float(str(self.parent.kropff_bragg_peak_tau_init.text()))
        sigma_init = float(self.parent.kropff_bragg_peak_sigma_comboBox.currentText())
        for _row, yaxis in enumerate(self.list_yaxis_to_fit):
            # When a row selection is given, skip every row not in it.
            if list_row_to_fit is not None and _row not in list_row_to_fit:
                continue
            _entry_high = self.parent.fitting_input_dictionary['rois'][_row]['fitting']['kropff']['high']
            a0 = float(_entry_high['a0'])
            b0 = float(_entry_high['b0'])
            _entry_low = self.parent.fitting_input_dictionary['rois'][_row]['fitting']['kropff']['low']
            ahkl = float(_entry_low['ahkl'])
            bhkl = float(_entry_low['bhkl'])
            yaxis = -np.log(yaxis)
            _result = gmodel.fit(yaxis,
                                 lda=lda,
                                 a0=Parameter('a0', value=a0, vary=False),
                                 b0=Parameter('b0', value=b0, vary=False),
                                 ahkl=Parameter('ahkl', value=ahkl, vary=False),
                                 bhkl=Parameter('bhkl', value=bhkl, vary=False),
                                 ldahkl=ldahkl_init,
                                 sigma=sigma_init,
                                 tau=tau_init)
            ldahkl = _result.params['ldahkl'].value
            ldahkl_error = _result.params['ldahkl'].stderr
            sigma = _result.params['sigma'].value
            sigma_error = _result.params['sigma'].stderr
            tau = _result.params['tau'].value
            tau_error = _result.params['tau'].stderr
            yaxis_fitted = kropff_bragg_peak_tof(self.xaxis_to_fit,
                                                  a0, b0, ahkl, bhkl,
                                                  ldahkl, sigma, tau)
            result_dict = {'ldahkl': ldahkl,
                           'ldahkl_error': ldahkl_error,
                           'sigma': sigma,
                           'sigma_error': sigma_error,
                           'tau': tau,
                           'tau_error': tau_error,
                           'xaxis_to_fit': lda,
                           'yaxis_fitted': yaxis_fitted}
            self.parent.fitting_input_dictionary['rois'][_row]['fitting']['kropff']['bragg_peak'] = deepcopy(
                result_dict)
            if update_table_ui:
                o_gui.update_kropff_bragg_edge_table_ui(row=_row,
                                                        ldahkl=ldahkl,
                                                        ldahkl_error=ldahkl_error,
                                                        tau=tau,
                                                        tau_error=tau_error,
                                                        sigma=sigma,
                                                        sigma_error=sigma_error)
| [
"lmfit.Parameter",
"lmfit.Model",
"numpy.float",
"__code.fitting_functions.kropff_high_lambda",
"__code.bragg_edge_peak_fitting_gui_utility.GuiUtility",
"__code.fitting_functions.kropff_bragg_peak_tof",
"numpy.log",
"copy.deepcopy",
"__code.fitting_functions.kropff_low_lambda"
] | [((1664, 1731), 'lmfit.Model', 'Model', (['kropff_high_lambda'], {'missing': '"""drop"""', 'independent_vars': "['lda']"}), "(kropff_high_lambda, missing='drop', independent_vars=['lda'])\n", (1669, 1731), False, 'from lmfit import Model, Parameter\n'), ((1769, 1799), '__code.bragg_edge_peak_fitting_gui_utility.GuiUtility', 'GuiUtility', ([], {'parent': 'self.parent'}), '(parent=self.parent)\n', (1779, 1799), False, 'from __code.bragg_edge_peak_fitting_gui_utility import GuiUtility\n'), ((3025, 3091), 'lmfit.Model', 'Model', (['kropff_low_lambda'], {'missing': '"""drop"""', 'independent_vars': "['lda']"}), "(kropff_low_lambda, missing='drop', independent_vars=['lda'])\n", (3030, 3091), False, 'from lmfit import Model, Parameter\n'), ((3129, 3159), '__code.bragg_edge_peak_fitting_gui_utility.GuiUtility', 'GuiUtility', ([], {'parent': 'self.parent'}), '(parent=self.parent)\n', (3139, 3159), False, 'from __code.bragg_edge_peak_fitting_gui_utility import GuiUtility\n'), ((4820, 4898), 'lmfit.Model', 'Model', (['kropff_bragg_peak_tof'], {'nan_policy': '"""propagate"""', 'independent_vars': "['lda']"}), "(kropff_bragg_peak_tof, nan_policy='propagate', independent_vars=['lda'])\n", (4825, 4898), False, 'from lmfit import Model, Parameter\n'), ((4936, 4966), '__code.bragg_edge_peak_fitting_gui_utility.GuiUtility', 'GuiUtility', ([], {'parent': 'self.parent'}), '(parent=self.parent)\n', (4946, 4966), False, 'from __code.bragg_edge_peak_fitting_gui_utility import GuiUtility\n'), ((2263, 2308), '__code.fitting_functions.kropff_high_lambda', 'kropff_high_lambda', (['self.xaxis_to_fit', 'a0', 'b0'], {}), '(self.xaxis_to_fit, a0, b0)\n', (2281, 2308), False, 'from __code.fitting_functions import kropff_high_lambda, kropff_low_lambda, kropff_bragg_peak_tof\n'), ((2621, 2642), 'copy.deepcopy', 'deepcopy', (['result_dict'], {}), '(result_dict)\n', (2629, 2642), False, 'from copy import deepcopy\n'), ((3465, 3487), 'numpy.float', 'np.float', (["_entry['a0']"], {}), 
"(_entry['a0'])\n", (3473, 3487), True, 'import numpy as np\n'), ((3496, 3518), 'numpy.float', 'np.float', (["_entry['b0']"], {}), "(_entry['b0'])\n", (3504, 3518), True, 'import numpy as np\n'), ((3988, 4030), '__code.fitting_functions.kropff_low_lambda', 'kropff_low_lambda', (['lda', 'a0', 'b0', 'ahkl', 'bhkl'], {}), '(lda, a0, b0, ahkl, bhkl)\n', (4005, 4030), False, 'from __code.fitting_functions import kropff_high_lambda, kropff_low_lambda, kropff_bragg_peak_tof\n'), ((4392, 4413), 'copy.deepcopy', 'deepcopy', (['result_dict'], {}), '(result_dict)\n', (4400, 4413), False, 'from copy import deepcopy\n'), ((5458, 5485), 'numpy.float', 'np.float', (["_entry_high['a0']"], {}), "(_entry_high['a0'])\n", (5466, 5485), True, 'import numpy as np\n'), ((5494, 5521), 'numpy.float', 'np.float', (["_entry_high['b0']"], {}), "(_entry_high['b0'])\n", (5502, 5521), True, 'import numpy as np\n'), ((5628, 5656), 'numpy.float', 'np.float', (["_entry_low['ahkl']"], {}), "(_entry_low['ahkl'])\n", (5636, 5656), True, 'import numpy as np\n'), ((5667, 5695), 'numpy.float', 'np.float', (["_entry_low['bhkl']"], {}), "(_entry_low['bhkl'])\n", (5675, 5695), True, 'import numpy as np\n'), ((6471, 6556), '__code.fitting_functions.kropff_bragg_peak_tof', 'kropff_bragg_peak_tof', (['self.xaxis_to_fit', 'a0', 'b0', 'ahkl', 'bhkl', 'ldahkl', 'sigma', 'tau'], {}), '(self.xaxis_to_fit, a0, b0, ahkl, bhkl, ldahkl, sigma, tau\n )\n', (6492, 6556), False, 'from __code.fitting_functions import kropff_high_lambda, kropff_low_lambda, kropff_bragg_peak_tof\n'), ((7048, 7069), 'copy.deepcopy', 'deepcopy', (['result_dict'], {}), '(result_dict)\n', (7056, 7069), False, 'from copy import deepcopy\n'), ((2012, 2025), 'numpy.log', 'np.log', (['yaxis'], {}), '(yaxis)\n', (2018, 2025), True, 'import numpy as np\n'), ((3532, 3545), 'numpy.log', 'np.log', (['yaxis'], {}), '(yaxis)\n', (3538, 3545), True, 'import numpy as np\n'), ((5709, 5722), 'numpy.log', 'np.log', (['yaxis'], {}), '(yaxis)\n', (5715, 5722), 
True, 'import numpy as np\n'), ((3613, 3650), 'lmfit.Parameter', 'Parameter', (['"""a0"""'], {'value': 'a0', 'vary': '(False)'}), "('a0', value=a0, vary=False)\n", (3622, 3650), False, 'from lmfit import Model, Parameter\n'), ((3679, 3716), 'lmfit.Parameter', 'Parameter', (['"""b0"""'], {'value': 'b0', 'vary': '(False)'}), "('b0', value=b0, vary=False)\n", (3688, 3716), False, 'from lmfit import Model, Parameter\n'), ((5815, 5852), 'lmfit.Parameter', 'Parameter', (['"""a0"""'], {'value': 'a0', 'vary': '(False)'}), "('a0', value=a0, vary=False)\n", (5824, 5852), False, 'from lmfit import Model, Parameter\n'), ((5881, 5918), 'lmfit.Parameter', 'Parameter', (['"""b0"""'], {'value': 'b0', 'vary': '(False)'}), "('b0', value=b0, vary=False)\n", (5890, 5918), False, 'from lmfit import Model, Parameter\n'), ((5949, 5990), 'lmfit.Parameter', 'Parameter', (['"""ahkl"""'], {'value': 'ahkl', 'vary': '(False)'}), "('ahkl', value=ahkl, vary=False)\n", (5958, 5990), False, 'from lmfit import Model, Parameter\n'), ((6021, 6062), 'lmfit.Parameter', 'Parameter', (['"""bhkl"""'], {'value': 'bhkl', 'vary': '(False)'}), "('bhkl', value=bhkl, vary=False)\n", (6030, 6062), False, 'from lmfit import Model, Parameter\n')] |
import numpy as np
from gensim.models import KeyedVectors
from tqdm import tqdm
def load_word2vec(path):
"""
Loads a Word2Vec model using gensim.
Parameters
==========
path : str
Local path to Word2Vec model binary file.
Returns
=======
word2vec_model : gensim model object
Word2Vec model.
"""
word2vec_model = KeyedVectors.load_word2vec_format(path, binary=True)
return word2vec_model
def compute_embeddings(papers, col='tokens_no_stopwords', word2vec_model_path='/data/w2v/PubMed-and-PMC-w2v.bin'):
"""
Computes Word2Vec embeddings for a provided column of a dataframe with pre-trained model.
If word does not exist in the model's vcoabulary,
Parameters
==========
papers : pd.DataFrame
DataFrame where each row represents a paper.
col : str
Column of papers on which to compute embeddings.
Typically the concatenated title and abstract.
word2vec_model_path : str
Local path to word2vec model binary file.
Returns
=======
all_papers_text_embeddings : list
"""
### Load model ###
word2vec_model = load_word2vec(word2vec_model_path)
print("w2v model loaded")
### Compute embeddings for each row in papers ###
papers_text_list = papers[col].tolist()
all_papers_text_embeddings = []
### Loop through all papers ###
for paper_text in tqdm(papers_text_list, desc="papers"):
one_paper_text_embeddings = []
### Split up paper text into tokens ###
for token in paper_text.split():
### If token exists in the model's vocabulary, get the embedding ###
if token in word2vec_model.vocab:
one_paper_text_embeddings.append(word2vec_model[token])
one_paper_text_embeddings = np.average(np.array(one_paper_text_embeddings), axis=0)
all_papers_text_embeddings.append(one_paper_text_embeddings)
return all_papers_text_embeddings
def average_embeddings(papers):
"""
Take the average embedding over title and abstract column
"""
av_embeddings = []
for embeddings in list(papers["all_embeddings"]):
av_emb = np.average(np.array(embeddings), axis=0)
av_embeddings.append(av_emb)
return av_embeddings
| [
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.array",
"tqdm.tqdm"
] | [((369, 421), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['path'], {'binary': '(True)'}), '(path, binary=True)\n', (402, 421), False, 'from gensim.models import KeyedVectors\n'), ((1417, 1454), 'tqdm.tqdm', 'tqdm', (['papers_text_list'], {'desc': '"""papers"""'}), "(papers_text_list, desc='papers')\n", (1421, 1454), False, 'from tqdm import tqdm\n'), ((1834, 1869), 'numpy.array', 'np.array', (['one_paper_text_embeddings'], {}), '(one_paper_text_embeddings)\n', (1842, 1869), True, 'import numpy as np\n'), ((2217, 2237), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (2225, 2237), True, 'import numpy as np\n')] |
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import config as cfg
import soundfile as sf
import glob
import pandas as pd
import random
from sklearn.model_selection import train_test_split
def create_dataloader(mode,type=0, snr=0):
if mode == 'train':
return DataLoader(
dataset=Wave_Dataset(mode, type, snr),
batch_size=cfg.batch,
shuffle=True,
num_workers=0,
pin_memory=True,
drop_last=True,
sampler=None
)
elif mode == 'valid':
return DataLoader(
dataset=Wave_Dataset(mode, type, snr),
batch_size=cfg.batch, shuffle=False, num_workers=0
)
elif mode == 'test':
return DataLoader(
dataset=Wave_Dataset(mode, type, snr),
batch_size=cfg.batch, shuffle=False, num_workers=0
)
# Dataloader for aicup competetion
model = 1
class Wave_Dataset(Dataset):
def __init__(self, mode, type, snr):
# Load Data
# train_point.pkl has less training data (select segment > 32000 (2s))
# best score: 1224.4674(30 epochs for 216_backup)
# Train_point2.pkl has more training data (select segment > 16000 (1s))
#df_all = pd.read_pickle("./dataset/train_point2.pkl")
df_all = pd.read_pickle("./dataset/train_point2.pkl")
df_test = pd.read_pickle("./dataset/dataset_test.pkl")
if mode == 'train':
# Load training data
# df[data]: training file path
# df[label]: label file path
# df[split]: split point (for sounfile longer than 5 sec)
self.mode = 'train'
print('<Training dataset>')
print('Load the data...')
df = df_all
self.df = df[['data', 'label', 'split']].reset_index(drop = True)
if mode == 'valid':
self.mode = 'valid'
# Split 0.02 data for validation,
# use random state to control randomness
train, valid = train_test_split(df_all, test_size = 0.02, random_state=42)
self.df = valid[['data', 'label', 'split']].reset_index(drop = True)
#self.df.to_pickle("./valid.pkl")
elif mode == 'test':
# Load testing data
self.mode = 'test'
print('<Test dataset>')
print('Load the data...')
self.df = df_test[['file', 'data']].reset_index(drop = True)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
max_l = 80000 # 5sec * 16000 (sample rate)
d = self.df.iloc[idx]
if self.mode == 'test':
fn = d['file']
test_path = d['data']
test_path = 'dataset/'+ test_path
inputs, samplerate = sf.read(test_path)
ol = len(inputs) # original test soundfile length
l1 = 0
l2 = 0
l3 = 0
input1 = np.zeros(max_l) #(8000,)
input2 = np.zeros(max_l)# (8000,)
input3 = np.zeros(max_l) #(8000,)
# inputs over 5 min
if ol > max_l:
l1 = max_l
l2 = len(inputs[max_l:])
input1 = inputs[:max_l]
if l2 > max_l:
l2 = max_l
l3 = len(inputs[2*max_l:])
input2 = inputs[max_l:2*max_l]
input3[:l3] = inputs[2*max_l:]
else:
input2[:l2] = inputs[max_l:]
# test file less than or equal 5 min
elif ol <= max_l:
l1 = ol
input1[:ol] = inputs[:ol]
input1 = torch.from_numpy(input1)
input2 = torch.from_numpy(input2)
input3 = torch.from_numpy(input3)
# asser length of model input
try:
assert len(input1) == 80000 and len(input2) == 80000 and len(input3) == 80000
except:
print(len(input1), len(input2))
return fn, input1, input2, input3, ol, l1, l2, l3
# for training&validation data
elif self.mode == 'train' or self.mode == 'valid':
# read soundfiles (.flac)
inputs_path = d['data']
inputs_path = 'dataset/' + inputs_path
# read label sounfiles
targets_path = d['label']
targets_path = 'dataset/'+ targets_path
# retrieve split point for file segmentation
beg = d['split']
inputs, samplerate = sf.read(inputs_path)
inputs = inputs[beg:]
inputs = list(inputs)
# noise file length
noise_l = len(inputs)
targets, samplerate = sf.read(targets_path)
targets = targets[beg:]
targets = list(targets)
# clean file length
clean_l = len(targets)
# check length match, two size should be equal
try:
assert clean_l == noise_l
except:
print(d)
if noise_l < max_l:
#padd with 0
pad = [0] * (max_l - noise_l)
inputs.extend(pad)
targets.extend(pad)
elif noise_l > max_l:
inputs = inputs[:max_l]
targets = targets[:max_l]
inputs = np.array(inputs)
targets = np.array(targets)
# assert model input length == max_len
try:
assert len(inputs) == max_l
except:
print(d)
try:
assert len(targets) == max_l
except:
print(d)
# transform to torch from numpy
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
return inputs, targets
| [
"pandas.read_pickle",
"sklearn.model_selection.train_test_split",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"soundfile.read"
] | [((1338, 1382), 'pandas.read_pickle', 'pd.read_pickle', (['"""./dataset/train_point2.pkl"""'], {}), "('./dataset/train_point2.pkl')\n", (1352, 1382), True, 'import pandas as pd\n'), ((1401, 1445), 'pandas.read_pickle', 'pd.read_pickle', (['"""./dataset/dataset_test.pkl"""'], {}), "('./dataset/dataset_test.pkl')\n", (1415, 1445), True, 'import pandas as pd\n'), ((2059, 2116), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_all'], {'test_size': '(0.02)', 'random_state': '(42)'}), '(df_all, test_size=0.02, random_state=42)\n', (2075, 2116), False, 'from sklearn.model_selection import train_test_split\n'), ((2849, 2867), 'soundfile.read', 'sf.read', (['test_path'], {}), '(test_path)\n', (2856, 2867), True, 'import soundfile as sf\n'), ((3008, 3023), 'numpy.zeros', 'np.zeros', (['max_l'], {}), '(max_l)\n', (3016, 3023), True, 'import numpy as np\n'), ((3054, 3069), 'numpy.zeros', 'np.zeros', (['max_l'], {}), '(max_l)\n', (3062, 3069), True, 'import numpy as np\n'), ((3100, 3115), 'numpy.zeros', 'np.zeros', (['max_l'], {}), '(max_l)\n', (3108, 3115), True, 'import numpy as np\n'), ((3742, 3766), 'torch.from_numpy', 'torch.from_numpy', (['input1'], {}), '(input1)\n', (3758, 3766), False, 'import torch\n'), ((3788, 3812), 'torch.from_numpy', 'torch.from_numpy', (['input2'], {}), '(input2)\n', (3804, 3812), False, 'import torch\n'), ((3834, 3858), 'torch.from_numpy', 'torch.from_numpy', (['input3'], {}), '(input3)\n', (3850, 3858), False, 'import torch\n'), ((4611, 4631), 'soundfile.read', 'sf.read', (['inputs_path'], {}), '(inputs_path)\n', (4618, 4631), True, 'import soundfile as sf\n'), ((4800, 4821), 'soundfile.read', 'sf.read', (['targets_path'], {}), '(targets_path)\n', (4807, 4821), True, 'import soundfile as sf\n'), ((5442, 5458), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (5450, 5458), True, 'import numpy as np\n'), ((5481, 5498), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (5489, 5498), True, 'import numpy 
as np\n'), ((5828, 5852), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (5844, 5852), False, 'import torch\n'), ((5875, 5900), 'torch.from_numpy', 'torch.from_numpy', (['targets'], {}), '(targets)\n', (5891, 5900), False, 'import torch\n')] |
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
import robolib.robogui.pixel_editor as pe
import cv2
import robolib.images.feature_extraction as extr
DEBUG = True
label_labels = ["O", "X"]
labels = np.random.randint(0, 2, size=(1000, 1))
size = 9
data = np.zeros(shape=(1000, size, size, 1))
for la, d in zip(labels, data):
img = np.empty((size, size))
img.fill(-1)
if la == 0:
cv2.ellipse(img, (4, 4), (np.random.randint(2, 5), np.random.randint(2, 5)), 0, 360, 0, 1)
else:
randPointStart = np.random.randint(0, 16)
randPointEnd = np.random.randint(0, 16)
cv2.line(img, (int(randPointStart / 4), randPointStart % 4), (8 - int(randPointEnd / 4), 8 - randPointEnd % 4), 1)
randPointStart = np.random.randint(0, 16)
randPointEnd = np.random.randint(0, 16)
cv2.line(img, (8 - int(randPointStart / 4), randPointStart % 4), (int(randPointEnd / 4), 8 - randPointEnd % 4), 1)
img = extr.resize_image_to_info(img, size, size)
d[:, :, :] = np.reshape(img, (size, size, 1))
if DEBUG:
if pe.show_image(img):
DEBUG = False
model = Sequential()
model.add(Conv2D(9, (3, 3), activation='relu', input_shape=(size, size, 1)))
model.add(Conv2D(9, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), (2, 2)))
# model.add(Conv2D(3, (3, 3), activation='relu'))
# model.add(MaxPooling2D((2, 2), (2, 2)))
model.add(Flatten())
model.add(Dense(9, activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
tbCallback = keras.callbacks.TensorBoard(log_dir="./logs", write_images=True)
one_hot_labels = keras.utils.to_categorical(labels, num_classes=2)
model.fit(data, one_hot_labels, epochs=250, batch_size=80, callbacks=[tbCallback])
while True:
predict_data = pe.get_drawing_input(size, size, size*3, size*3)
if all(1.0 not in row for row in predict_data):
break
# if DEBUG:
pe.show_image(predict_data)
output = model.predict(np.array([predict_data]), 1, 3)
if all(all(n < 0.9 for n in m) for m in output):
print("Don't know, will guess: ")
print(label_labels[np.argmax(output)])
if DEBUG:
print(np.around(output, 5))
| [
"keras.layers.Conv2D",
"numpy.reshape",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"robolib.robogui.pixel_editor.show_image",
"robolib.robogui.pixel_editor.get_drawing_input",
"numpy.argmax",
"keras.utils.to_categorical",
"keras.callbacks.TensorBoard",
"keras.models.Sequential",
"numpy... | [((354, 393), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(1000, 1)'}), '(0, 2, size=(1000, 1))\n', (371, 393), True, 'import numpy as np\n'), ((410, 447), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1000, size, size, 1)'}), '(shape=(1000, size, size, 1))\n', (418, 447), True, 'import numpy as np\n'), ((1285, 1297), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1295, 1297), False, 'from keras.models import Sequential\n'), ((1828, 1892), 'keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': '"""./logs"""', 'write_images': '(True)'}), "(log_dir='./logs', write_images=True)\n", (1855, 1892), False, 'import keras\n'), ((1911, 1960), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['labels'], {'num_classes': '(2)'}), '(labels, num_classes=2)\n', (1937, 1960), False, 'import keras\n'), ((491, 513), 'numpy.empty', 'np.empty', (['(size, size)'], {}), '((size, size))\n', (499, 513), True, 'import numpy as np\n'), ((1110, 1152), 'robolib.images.feature_extraction.resize_image_to_info', 'extr.resize_image_to_info', (['img', 'size', 'size'], {}), '(img, size, size)\n', (1135, 1152), True, 'import robolib.images.feature_extraction as extr\n'), ((1171, 1203), 'numpy.reshape', 'np.reshape', (['img', '(size, size, 1)'], {}), '(img, (size, size, 1))\n', (1181, 1203), True, 'import numpy as np\n'), ((1308, 1373), 'keras.layers.Conv2D', 'Conv2D', (['(9)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(size, size, 1)'}), "(9, (3, 3), activation='relu', input_shape=(size, size, 1))\n", (1314, 1373), False, 'from keras.layers import Conv2D\n'), ((1385, 1421), 'keras.layers.Conv2D', 'Conv2D', (['(9)', '(3, 3)'], {'activation': '"""relu"""'}), "(9, (3, 3), activation='relu')\n", (1391, 1421), False, 'from keras.layers import Conv2D\n'), ((1433, 1461), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)', '(2, 2)'], {}), '((2, 2), (2, 2))\n', (1445, 1461), False, 'from keras.layers 
import MaxPooling2D\n'), ((1565, 1574), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1572, 1574), False, 'from keras.layers import Flatten\n'), ((1586, 1613), 'keras.layers.Dense', 'Dense', (['(9)'], {'activation': '"""relu"""'}), "(9, activation='relu')\n", (1591, 1613), False, 'from keras.layers import Dense\n'), ((1625, 1652), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""relu"""'}), "(2, activation='relu')\n", (1630, 1652), False, 'from keras.layers import Dense\n'), ((1664, 1694), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (1669, 1694), False, 'from keras.layers import Dense\n'), ((2076, 2128), 'robolib.robogui.pixel_editor.get_drawing_input', 'pe.get_drawing_input', (['size', 'size', '(size * 3)', '(size * 3)'], {}), '(size, size, size * 3, size * 3)\n', (2096, 2128), True, 'import robolib.robogui.pixel_editor as pe\n'), ((2216, 2243), 'robolib.robogui.pixel_editor.show_image', 'pe.show_image', (['predict_data'], {}), '(predict_data)\n', (2229, 2243), True, 'import robolib.robogui.pixel_editor as pe\n'), ((682, 706), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (699, 706), True, 'import numpy as np\n'), ((730, 754), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (747, 754), True, 'import numpy as np\n'), ((903, 927), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (920, 927), True, 'import numpy as np\n'), ((951, 975), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (968, 975), True, 'import numpy as np\n'), ((1230, 1248), 'robolib.robogui.pixel_editor.show_image', 'pe.show_image', (['img'], {}), '(img)\n', (1243, 1248), True, 'import robolib.robogui.pixel_editor as pe\n'), ((2272, 2296), 'numpy.array', 'np.array', (['[predict_data]'], {}), '([predict_data])\n', (2280, 2296), True, 'import numpy as np\n'), ((2422, 2439), 'numpy.argmax', 
'np.argmax', (['output'], {}), '(output)\n', (2431, 2439), True, 'import numpy as np\n'), ((2470, 2490), 'numpy.around', 'np.around', (['output', '(5)'], {}), '(output, 5)\n', (2479, 2490), True, 'import numpy as np\n'), ((582, 605), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (599, 605), True, 'import numpy as np\n'), ((607, 630), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (624, 630), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import pcs_aero.args as args
import pcs_aero.models as models
def make_UV(model):
U = (model.m[3] / model.m[0]).T[::-1]
V = (model.m[5] / model.m[0]).T
Y, X = np.mgrid[0:model.N, 0:model.N]
return X, Y, U, V
if __name__ == "__main__":
parser = args.ModelArgParser(description="Create a streamplot of some "
"simulation.")
parser.add_argument('file_name', type=str, help='File name to save as.')
args, model = parser.parse_args()
name = args.file_name
X, Y, U, V = make_UV(model)
o = model.obstacle_mask[:, ::-1].T
omask = np.ma.masked_where(~o, np.ones(o.shape))
U = np.ma.array(U, mask=o)
V = np.ma.array(V, mask=o)
speed = model.velocity.T[::-1]
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([])
plt.yticks([])
plt.title('Stream velocities for {shape} with $\\theta={theta:3}$'.format(
shape=model.obstacle['name'],
theta=model.obstacle['theta']))
strm = plt.streamplot(
X, Y, U, V, density=2, linewidth=1, color=speed, cmap='YlOrRd')
cbar = plt.colorbar(strm.lines)
plt.imshow(omask, cmap='binary', alpha=1, vmin=0, vmax=1)
plt.savefig(name)
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.ma.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.streamplot",
"matplotlib.pyplot.yticks",
"pcs_aero.args.ModelArgParser"
] | [((344, 418), 'pcs_aero.args.ModelArgParser', 'args.ModelArgParser', ([], {'description': '"""Create a streamplot of some simulation."""'}), "(description='Create a streamplot of some simulation.')\n", (363, 418), True, 'import pcs_aero.args as args\n'), ((733, 755), 'numpy.ma.array', 'np.ma.array', (['U'], {'mask': 'o'}), '(U, mask=o)\n', (744, 755), True, 'import numpy as np\n'), ((764, 786), 'numpy.ma.array', 'np.ma.array', (['V'], {'mask': 'o'}), '(V, mask=o)\n', (775, 786), True, 'import numpy as np\n'), ((828, 843), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (838, 843), True, 'import matplotlib.pyplot as plt\n'), ((848, 863), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (858, 863), True, 'import matplotlib.pyplot as plt\n'), ((868, 882), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (878, 882), True, 'import matplotlib.pyplot as plt\n'), ((887, 901), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (897, 901), True, 'import matplotlib.pyplot as plt\n'), ((1070, 1148), 'matplotlib.pyplot.streamplot', 'plt.streamplot', (['X', 'Y', 'U', 'V'], {'density': '(2)', 'linewidth': '(1)', 'color': 'speed', 'cmap': '"""YlOrRd"""'}), "(X, Y, U, V, density=2, linewidth=1, color=speed, cmap='YlOrRd')\n", (1084, 1148), True, 'import matplotlib.pyplot as plt\n'), ((1169, 1193), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['strm.lines'], {}), '(strm.lines)\n', (1181, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1256), 'matplotlib.pyplot.imshow', 'plt.imshow', (['omask'], {'cmap': '"""binary"""', 'alpha': '(1)', 'vmin': '(0)', 'vmax': '(1)'}), "(omask, cmap='binary', alpha=1, vmin=0, vmax=1)\n", (1209, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1279), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (1273, 1279), True, 'import matplotlib.pyplot as plt\n'), ((706, 722), 'numpy.ones', 'np.ones', (['o.shape'], {}), '(o.shape)\n', 
(713, 722), True, 'import numpy as np\n')] |
from Strategy.gridSearch import GridSearch
import numpy as np
def plot_gridSearch_results(manager, ticker):
ps = [round(i, 3) for i in np.arange(0.001, 0.033, 0.004)] # 8 options
sharePers = [round(j, 2) for j in np.arange(0.01, 1.0, 0.05)] # 12 options
grid = GridSearch(manager, ticker, ps, sharePers)
returns = grid.get_grid_search_data('return')
returns = returns.pivot(index='p', columns='sharePer', values='return')
grid.plot_heatmap(returns, 'Return', 'RdYlGn', vmin=-15, vmax=30)
mdds = grid.get_grid_search_data('MDD')
mdds = mdds.pivot(index='p', columns='sharePer', values='MDD')
grid.plot_heatmap(mdds, 'Drawdown', "Purples", vmin=min(mdds), vmax=max(mdds))
| [
"Strategy.gridSearch.GridSearch",
"numpy.arange"
] | [((277, 319), 'Strategy.gridSearch.GridSearch', 'GridSearch', (['manager', 'ticker', 'ps', 'sharePers'], {}), '(manager, ticker, ps, sharePers)\n', (287, 319), False, 'from Strategy.gridSearch import GridSearch\n'), ((141, 171), 'numpy.arange', 'np.arange', (['(0.001)', '(0.033)', '(0.004)'], {}), '(0.001, 0.033, 0.004)\n', (150, 171), True, 'import numpy as np\n'), ((224, 250), 'numpy.arange', 'np.arange', (['(0.01)', '(1.0)', '(0.05)'], {}), '(0.01, 1.0, 0.05)\n', (233, 250), True, 'import numpy as np\n')] |
import numpy as np
from scipy import io
from behavioral_syntax.utilities.loading_data import loading_data
#directory = '/Users/cyrilrocke/Documents/c_elegans/data/raw_data/'
#files = os.listdir(directory+'/test1/20_videos')
g = io.loadmat('/Users/cyrilrocke/Documents/c_elegans/data/postures')
postures = g.get('postures')
#pos_x = np.load(directory+'/test1/features/pos_x.npy')
#pos_y = np.load(directory+'/test1/features/pos_y.npy')
all_postures = []
def posture_seq(directory,postures,sampling_fraction):
"""posture_seq grabs samples locomotion files from a directory and
converts them to strings of posture_sequences
Input:
directory = the directory containing locomotion files
postures = the mat file or numpy array of template postures
sampling_fraction = the fraction of files you want to sample
Output:
all_postures = a list of posture_sequences(of type string)
"""
num_postures = len(postures)
angle_data = loading_data(directory,sampling_fraction)[0]
i = 0
while i < len(angle_data):
if len(angle_data[i][1]) > 1000:
#get angles for the skeletons
angles, m_a = angle_data[i]
#X, Y = MA2skel(angles, m_a, 1)
#initialize Vars and posture_sequence:
#Vars = np.zeros(len(X))
posture_sequence = ''
for i in range(len(angles)):
distances = [np.inf]*num_postures
for j in range(num_postures):
distances[j] = np.linalg.norm(angles[i]-postures[:,j])
val = min(distances)
#angle_err[i] = val
ind = distances.index(val)
#Vars[i] = np.corrcoef(angles[i],postures[:,ind])[0][1]**2
posture_sequence = posture_sequence + ' ' + str(ind)
all_postures.append(posture_sequence)
i+=1
else:
i+=1
return all_postures
| [
"scipy.io.loadmat",
"behavioral_syntax.utilities.loading_data.loading_data",
"numpy.linalg.norm"
] | [((231, 296), 'scipy.io.loadmat', 'io.loadmat', (['"""/Users/cyrilrocke/Documents/c_elegans/data/postures"""'], {}), "('/Users/cyrilrocke/Documents/c_elegans/data/postures')\n", (241, 296), False, 'from scipy import io\n'), ((1000, 1042), 'behavioral_syntax.utilities.loading_data.loading_data', 'loading_data', (['directory', 'sampling_fraction'], {}), '(directory, sampling_fraction)\n', (1012, 1042), False, 'from behavioral_syntax.utilities.loading_data import loading_data\n'), ((1608, 1650), 'numpy.linalg.norm', 'np.linalg.norm', (['(angles[i] - postures[:, j])'], {}), '(angles[i] - postures[:, j])\n', (1622, 1650), True, 'import numpy as np\n')] |
# coding: utf8
import os
import copy
import pytest
import numpy as np
import numpy.testing as npt
from scipy.io import wavfile
import openturns as ot
from mock import patch
from batman.visualization import (HdrBoxplot, Kiviat3D, Tree, pdf,
sensitivity_indices, corr_cov, reshow,
response_surface, doe, doe_ascii, pairplot,
mesh_2D, cusunoro, moment_independent)
from batman.visualization.density import ecdf
from batman.surrogate import SurrogateModel
from batman.space import Space
from batman.functions import (Ishigami, db_Mascaret, el_nino)
import matplotlib.pyplot as plt
try:
import matplotlib.animation as manimation
manimation.writers['ffmpeg']
have_ffmpeg = True
except (RuntimeError, KeyError):
have_ffmpeg = False
dataset = el_nino()
labels, data = dataset.space, dataset.data
# dataset_tahiti = tahiti()
# labels_tahiti, data_tahiti = dataset_tahiti.space, dataset_tahiti.data
class TestHdr:
@pytest.fixture(scope="session")
def hdr(self):
np.random.seed(123456)
ot.RandomGenerator.SetSeed(123456)
return HdrBoxplot(data)
def test_hdr_basic(self, hdr, tmp, seed):
print('Data shape: ', data.shape)
assert len(hdr.extra_quantiles) == 0
median_t = [24.27, 25.67, 25.98, 25.05, 23.76, 22.40,
21.31, 20.43, 20.20, 20.47, 21.17, 22.37]
npt.assert_almost_equal(hdr.median, median_t, decimal=2)
quant = np.vstack([hdr.outliers, hdr.hdr_90, hdr.hdr_50])
quant_t = np.vstack([[27.20, 28.16, 29.00, 28.94, 28.27, 27.24,
25.84, 24.01, 22.37, 22.24, 22.38, 23.26],
[23.94, 26.16, 27.07, 26.50, 26.40, 25.92,
25.36, 24.70, 24.52, 24.67, 25.76, 27.02],
[28.01, 28.83, 29.12, 28.23, 27.18, 25.33,
23.41, 22.11, 21.25, 21.56, 21.64, 23.01],
[25.63, 26.99, 27.63, 27.11, 26.10, 24.65,
23.55, 22.50, 22.13, 22.51, 23.37, 24.54],
[23.04, 24.58, 24.71, 23.41, 21.98, 20.74,
19.85, 19.09, 18.85, 19.04, 19.58, 20.80],
[24.85, 26.15, 26.56, 25.78, 24.58, 23.20,
22.11, 21.17, 20.93, 21.25, 22.00, 23.23],
[23.67, 25.14, 25.46, 24.28, 22.94, 21.62,
20.59, 19.75, 19.51, 19.73, 20.37, 21.54]])
npt.assert_almost_equal(quant, quant_t, decimal=0)
figs, axs = hdr.plot(fname=os.path.join(tmp, 'hdr_boxplot.pdf'),
labels=labels,
x_common=np.linspace(1, 12, 12),
xlabel='Month of the year (-)',
flabel='Water surface temperature (C)')
assert len(figs) == 3
assert len(axs) == 3
fig = reshow(figs[2])
plt.plot([0, 10], [25, 25])
axs[2].plot([0, 6], [4, -3])
fig.savefig(os.path.join(tmp, 'hdr_boxplot_change_sample.pdf'))
fig = reshow(figs[1])
axs[1][0].plot([0, 6], [4, -3])
fig.savefig(os.path.join(tmp, 'hdr_boxplot_change_scatter.pdf'))
@pytest.mark.xfail(raises=AssertionError, reason='Global optimization')
@patch("matplotlib.pyplot.show")
def test_hdr_alpha(self, mock_show, seed):
hdr = HdrBoxplot(data, alpha=[0.7])
extra_quant_t = np.vstack([[25.1, 26.4, 26.9, 26.3, 25.2, 23.9,
22.7, 21.8, 21.5, 21.8, 22.5, 23.7],
[23.4, 25.0, 25.1, 24.0, 22.6, 21.3,
20.3, 19.5, 19.2, 19.5, 20.0, 21.2]])
npt.assert_almost_equal(hdr.extra_quantiles, extra_quant_t, decimal=1)
hdr.plot()
hdr.plot(samples=10)
@pytest.mark.xfail(raises=AssertionError, reason='Global optimization')
@patch("matplotlib.pyplot.show")
def test_hdr_multiple_alpha(self, mock_show, seed):
    """Two extra alpha levels produce four extra quantile curves."""
    boxplot = HdrBoxplot(data, alpha=[0.4, 0.92])
    expected_extra = [
        [26., 27., 28., 27., 26., 25., 24., 23., 22., 23., 23., 25.],
        [23., 25., 25., 23., 22., 21., 20., 19., 19., 19., 20., 21.],
        [25., 26., 26., 26., 24., 23., 22., 21., 21., 21., 22., 23.],
        [24., 25., 26., 25., 23., 22., 21., 20., 20., 23., 21., 25.],
    ]
    npt.assert_almost_equal(boxplot.extra_quantiles,
                            np.vstack(expected_extra), decimal=0)
    boxplot.plot()
def test_hdr_threshold(self, seed):
    """Outlier years detected with a tightened detection threshold."""
    boxplot = HdrBoxplot(data, alpha=[0.8], threshold=0.93)
    # Rows of `data` that match an outlier curve in every column.
    outlier_mask = np.all(np.isin(data, boxplot.outliers), axis=1)
    npt.assert_equal([[1982], [1983], [1997], [1998]], labels[outlier_mask])
def test_hdr_outliers_method(self, seed):
    """The isolation-forest outlier detector flags the same El Niño years."""
    boxplot = HdrBoxplot(data, threshold=0.93, outliers_method='forest')
    outlier_mask = np.all(np.isin(data, boxplot.outliers), axis=1)
    npt.assert_equal([[1982], [1983], [1997], [1998]], labels[outlier_mask])
def test_hdr_optimize_bw(self, seed):
    """The median curve is unchanged when bandwidth optimization is enabled."""
    boxplot = HdrBoxplot(data, optimize=True)
    expected_median = [24.27, 25.67, 25.98, 25.05, 23.76, 22.40,
                       21.31, 20.43, 20.20, 20.47, 21.17, 22.37]
    npt.assert_almost_equal(boxplot.median, expected_median, decimal=2)
@patch("matplotlib.pyplot.show")
def test_hdr_variance(self, mock_show):
    """Median computed on a variance-truncated (90%) decomposition."""
    boxplot = HdrBoxplot(data, variance=0.9)
    expected_median = [24.37, 25.74, 26.02, 25.07, 23.76, 22.40,
                       21.31, 20.44, 20.23, 20.52, 21.24, 22.44]
    npt.assert_almost_equal(boxplot.median, expected_median, decimal=2)
    boxplot.plot()
@patch("matplotlib.pyplot.show")
def test_hdr_plot_data(self, mock_show, hdr):
    """Plotting accepts raw sample data together with explicit labels."""
    hdr.plot(samples=data, labels=labels.tolist())
@pytest.mark.skipif(not have_ffmpeg, reason='ffmpeg not available')
def test_hdr_fhops(self, hdr, tmp):
    """Render functional-HOPs animations through every supported input mode."""
    # Full data set with explicit axis metadata.
    hdr.f_hops(x_common=np.linspace(1, 12, 12),
               labels=labels,
               xlabel='Month of the year (-)',
               flabel='Water surface temperature (C)',
               fname=os.path.join(tmp, 'f-HOPs.mp4'))
    # Sub-sampled and raw-data variants deliberately overwrite the same file.
    hdr.f_hops(samples=10, fname=os.path.join(tmp, 'f-HOPs.mp4'))
    hdr.f_hops(samples=data, fname=os.path.join(tmp, 'f-HOPs.mp4'))
    # Fresh boxplot built with the forest outlier detector.
    hdr = HdrBoxplot(data, outliers_method='forest')
    hdr.f_hops(fname=os.path.join(tmp, 'f-HOPs.mp4'))
def test_hdr_sound(self, hdr, tmp):
    """Sonification output length matches requested samples and frame rate."""
    hdr.sound(fname=os.path.join(tmp, 'song-fHOPs-samples.wav'),
              samples=5, distance=False)
    _, song = wavfile.read(os.path.join(tmp, 'song-fHOPs-samples.wav'))
    # 5 samples x 44.1 kHz x 400 ms (presumably the default frame duration
    # in ms — TODO confirm against HdrBoxplot.sound).
    assert song.shape[0] == 5 * 44100 * 400 / 1000.0
    hdr.sound(fname=os.path.join(tmp, 'song-fHOPs-data.wav'),
              samples=data)
    frame_rate = 1000
    hdr.sound(frame_rate=frame_rate, fname=os.path.join(tmp, 'song-fHOPs.wav'))
    _, song = wavfile.read(os.path.join(tmp, 'song-fHOPs.wav'))
    # One frame per data row at the requested frame rate.
    assert song.shape[0] == data.shape[0] * 44100 * frame_rate / 1000.0
def test_hdr_sample(self, hdr):
    """Sampling returns curves of the right shape and reproduces fixed points."""
    drawn = hdr.sample(10)
    assert drawn.shape == (10, 12)
    # Sampling at explicit reduced-space coordinates is deterministic.
    drawn = hdr.sample([[0, 0], [-1, 3]])
    expected = [[24.39, 25.85, 26.23, 25.38, 24.18, 22.86,
                 21.77, 20.85, 20.57, 20.85, 21.55, 22.73],
                [25.41, 26.54, 26.94, 26.18, 24.65, 22.79,
                 21.35, 20.09, 19.54, 19.74, 20.15, 21.27]]
    npt.assert_almost_equal(drawn, expected, decimal=2)
# @pytest.mark.skipif(not have_ffmpeg, reason='ffmpeg not available')
# @patch("matplotlib.pyplot.show")
# def test_hdr_tahiti(self, mock_show, tmp):
# hdr = HdrBoxplot(data_tahiti)
# print('Data tahiti shape: ', data_tahiti.shape)
# labels_pos = np.all(np.isin(data_tahiti, hdr.outliers), axis=1)
# outliers = labels_tahiti[labels_pos]
# npt.assert_equal([1975, 1983, 1998, 2010], outliers)
# hdr.plot(fname=os.path.join(tmp, 'hdr_boxplot.pdf'))
# hdr.f_hops(samples=10, fname=os.path.join(tmp, 'f-HOPs.mp4'))
# hdr.sound(fname=os.path.join(tmp, 'song-fHOPs.wav'))
class TestKiviat:
    """Tests for the 3D Kiviat (radar) visualization and its Tree variant."""

    @pytest.fixture(scope="session")
    def kiviat_data(self):
        """Small two-point Kiviat instance shared across the session."""
        sample = [[30, 4000], [15, 5000]]
        data = [[12], [15]]
        plabels = ['Ks', 'Q', '-']
        bounds = [[15.0, 2500.0], [60.0, 6000.0]]
        kiviat = Kiviat3D(sample, data, bounds=bounds, plabels=plabels)
        return kiviat

    @patch("matplotlib.pyplot.show")
    def test_kiviat_plot(self, mock_show, tmp):
        """Exercise constructor and plot option combinations."""
        sample = [[30, 4000], [15, 5000]]
        data = [[12], [15]]
        functional_data = [[12, 300], [15, 347]]
        # Single-parameter sample.
        kiviat = Kiviat3D([[30], [15]], data)
        kiviat.plot()
        kiviat = Kiviat3D(sample, data)
        kiviat.plot(fill=False, ticks_nbr=12)
        # Functional output with every stack/colorbar ordering combination.
        kiviat = Kiviat3D(sample, functional_data)
        kiviat = Kiviat3D(sample, functional_data, stack_order='qoi', cbar_order='hdr')
        kiviat = Kiviat3D(sample, functional_data, stack_order='hdr', cbar_order='qoi')
        kiviat = Kiviat3D(sample, functional_data, stack_order=1, cbar_order='hdr')
        kiviat = Kiviat3D(sample, functional_data, idx=1, cbar_order='hdr',
                          range_cbar=[0, 1])
        kiviat.plot(fname=os.path.join(tmp, 'kiviat.pdf'))

    @pytest.mark.skipif(not have_ffmpeg, reason='ffmpeg not available')
    def test_kiviat_fhops(self, kiviat_data, tmp):
        """Animated Kiviat (f-HOPs) rendering, filled and unfilled."""
        kiviat_data.f_hops(frame_rate=40, ticks_nbr=30,
                           fname=os.path.join(tmp, 'kiviat_fill.mp4'))
        kiviat_data.f_hops(fname=os.path.join(tmp, 'kiviat.mp4'), fill=False)

    @patch("matplotlib.pyplot.show")
    @pytest.mark.skipif(not have_ffmpeg, reason='ffmpeg not available')
    def test_tree(self, mock_show, tmp):
        """Tree visualization: static plot and animation."""
        sample = [[30, 4000], [15, 5000], [20, 4500]]
        functional_data = [[12, 300], [15, 347], [14, 320]]
        tree = Tree(sample, functional_data,
                    bounds=[[10.0, 2500.0], [60.0, 6000.0]])
        tree.plot(fname=os.path.join(tmp, 'tree.pdf'),
                  flabel='Water level (m)')
        tree.f_hops(fname=os.path.join(tmp, 'tree.mp4'))

    def test_connectivity(self):
        """Mesh connectivity of the closed Kiviat surface; bad sizes raise."""
        connectivity = Kiviat3D.mesh_connectivity(6, 3)
        connectivity_t = np.array([[4, 0, 1, 3, 4],
                                   [4, 1, 2, 4, 5],
                                   [4, 2, 0, 5, 3]], dtype=int)
        npt.assert_equal(connectivity, connectivity_t)
        # 6 points cannot be partitioned into 4 parameters.
        with pytest.raises(ValueError):
            Kiviat3D.mesh_connectivity(6, 4)
        connectivity = Kiviat3D.mesh_connectivity(8, 4)
        connectivity_t = np.array([[4, 0, 1, 4, 5],
                                   [4, 1, 2, 5, 6],
                                   [4, 2, 3, 6, 7],
                                   [4, 3, 0, 7, 4]], dtype=int)
        npt.assert_equal(connectivity, connectivity_t)
class TestPdf:
    """Tests for probability-density-function plots."""

    def test_pdf_1D(self, tmp):
        """1D PDF of a single output column."""
        pdf(data[:10, 5].reshape(-1, 1), fname=os.path.join(tmp, 'pdf.pdf'))

    @patch("matplotlib.pyplot.show")
    def test_pdf_surrogate(self, mock_show, ishigami_data):
        """PDF propagated through a fitted RBF surrogate model."""
        dist = ot.ComposedDistribution(ishigami_data.dists)
        surrogate = SurrogateModel('rbf', ishigami_data.space.corners,
                                   ishigami_data.space.plabels)
        surrogate.fit(ishigami_data.space, ishigami_data.target_space)
        settings = {
            "dist": dist,
            "model": surrogate,
            "method": 'kriging',
            "bounds": ishigami_data.space.corners
        }
        pdf(settings)

    @patch("matplotlib.pyplot.show")
    def test_pdf_nD(self, mock_show, tmp):
        """nD PDF map; the returned figure stays editable after reshow."""
        fig_pdf = pdf(data, xdata=np.linspace(1, 12, 12),
                      range_cbar=[0, 0.5], ticks_nbr=6,
                      fname=os.path.join(tmp, 'pdf_nd.pdf'))
        reshow(fig_pdf)
        plt.plot([0, 10], [25, 25])
        plt.show()
        plt.close()

    def test_pdf_nD_moments(self, tmp):
        """nD PDF with the moments overlay enabled."""
        pdf(data, xlabel='s', flabel='Y', moments=True,
            fname=os.path.join(tmp, 'pdf_nd_moments.pdf'))

    def test_pdf_dotplot(self, tmp):
        """1D PDF rendered as a dotplot."""
        pdf(data[:10, 5].reshape(-1, 1), dotplot=True,
            fname=os.path.join(tmp, 'pdf_dotplot.pdf'))
class TestSensitivity:
    """Tests for Sobol' sensitivity-index plots."""

    @patch("matplotlib.pyplot.show")
    def test_sobols_aggregated(self, mock_show, tmp):
        """Aggregated first/total indices: bar, polar and single-series plots."""
        fun = Ishigami()
        indices = [fun.s_first, fun.s_total]
        fig = sensitivity_indices(indices, conf=0.05)
        # The returned figure can be re-displayed and annotated.
        fig = reshow(fig[0])
        plt.plot([0, 10], [0.5, 0.5])
        fig.show()
        sensitivity_indices(indices, plabels=['x1', 't', 'y'],
                            fname=os.path.join(tmp, 'sobol.pdf'))
        sensitivity_indices(indices, polar=True, conf=[[0.2, 0.1, 0.1], [0.1, 0.1, 0.1]])
        sensitivity_indices([indices[0]])

    @patch("matplotlib.pyplot.show")
    def test_sobols_map(self, mock_show, tmp):
        """Indices plotted as a map along the curvilinear abscissa."""
        fun = db_Mascaret()
        indices = [fun.s_first, fun.s_total, fun.s_first_full, fun.s_total_full]
        sensitivity_indices(indices)
        sensitivity_indices(indices, plabels=['Ks', 'Q'], xdata=fun.x,
                            fname=os.path.join(tmp, 'sobol_map.pdf'))
class TestResponseSurface:
    """Tests for response-surface plots in 1 to 4 dimensions."""

    @patch("matplotlib.pyplot.show")
    def test_response_surface_1D(self, mock_show, tmp):
        """1D surface from a function and from precomputed sample data."""
        def fun(x):
            return x ** 2
        bounds = [[-7], [10]]
        path = os.path.join(tmp, 'rs_1D.pdf')
        response_surface(bounds=bounds, fun=fun, fname=path)
        xdata = np.linspace(0, 1, 10)
        # Vector-valued variant of the test function (closes over xdata).
        def fun(x):
            return (xdata * x) ** 2
        sample = np.array(range(5)).reshape(-1, 1)
        data = fun(sample)
        response_surface(bounds=bounds, sample=sample, data=data, xdata=xdata)

    @pytest.mark.xfail(raises=ValueError)
    @patch("matplotlib.pyplot.show")
    def test_response_surface_2D_scalar(self, mock_show, tmp, branin_data, seed):
        """2D scalar surface; the second call is expected to raise ValueError."""
        space = branin_data.space
        bounds = [[-7, 0], [10, 15]]
        path = os.path.join(tmp, 'rs_2D_vector.pdf')
        response_surface(bounds=bounds, sample=space, data=branin_data.target_space)
        response_surface(bounds=bounds, fun=branin_data.func, doe=space, resampling=4,
                         fname=path, feat_order=[2, 1])

    @patch("matplotlib.pyplot.show")
    def test_response_surface_2D_vector(self, mock_show, tmp, mascaret_data):
        """2D surface with a vector (curve) output."""
        space = mascaret_data.space
        data = mascaret_data.target_space
        xdata = mascaret_data.func.x
        bounds = [[15.0, 2500.0], [60, 6000.0]]
        order = [1, 2]
        path = os.path.join(tmp, 'rs_2D_vector.pdf')
        response_surface(bounds=bounds, sample=space, data=data, xdata=xdata, fname=path)
        response_surface(bounds=bounds, fun=mascaret_data.func, xdata=xdata,
                         plabels=['Ks', 'Q'], feat_order=order, flabel='Z')

    @pytest.mark.skipif(not have_ffmpeg, reason='ffmpeg not available')
    def test_response_surface_3D(self, ishigami_data, tmp):
        """3D surface rendered as an animation with iso-contours."""
        space = ishigami_data.space
        fun = ishigami_data.func
        bounds = [[-4, -4, -4], [4, 4, 4]]
        order = [1, 2, 3]
        path = os.path.join(tmp, 'rs_3D_vector')
        response_surface(bounds=bounds, fun=fun, doe=space, resampling=30,
                         contours=[-20, 0, 20], fname=path, feat_order=order)

    @pytest.mark.skipif(not have_ffmpeg, reason='ffmpeg not available')
    def test_response_surface_4D(self, g_function_data, tmp):
        """4D surface with explicit per-axis discretization."""
        space = g_function_data.space
        fun = g_function_data.func
        bounds = g_function_data.space.corners
        order = [2, 3, 4, 1]
        path = os.path.join(tmp, 'rs_4D_vector')
        response_surface(bounds=bounds, fun=fun, doe=space, resampling=10,
                         axis_disc=[2, 15, 15, 15], fname=path, feat_order=order)
class Test2Dmesh:
    """Tests for the 2D mesh visualization."""

    def test_2D_mesh(self, tmp):
        """Render a single-variable mesh with a lower colour bound."""
        datadir = os.path.join(os.path.dirname(__file__), 'data')
        mesh_2D(fname=os.path.join(datadir, 'data_Garonne.csv'),
                fformat='csv', xlabel='x label',
                flabels=['Variable'], vmins=[2.5],
                output_path=os.path.join(tmp, 'garonne_2D.pdf'))

    def test_2D_mesh_add_var(self, tmp, mascaret_data):
        """Overlay per-point variables and check the size-mismatch guard."""
        datadir = os.path.join(os.path.dirname(__file__), 'data')
        fname = os.path.join(datadir, 'data_2D_mesh.csv')
        var_sobol = [[0.1, 0.2], [0.3, 0.2], [0.88, 0.2], [0.9, 1.0], [0.1, 0.12]]
        mesh_2D(fname=fname, fformat='csv', xlabel='x label', var=var_sobol,
                flabels=['Ks', 'Q'], output_path=os.path.join(tmp, 'data_2D.pdf'))
        # One entry short of the mesh size: must raise.
        var_sobol = [[0.1, 0.2], [0.3, 0.2], [0.88, 0.2], [0.9, 1.0]]
        with pytest.raises(ValueError):
            mesh_2D(fname=fname, var=var_sobol)
class TestDoe:
    """Tests for design-of-experiments visualizations."""

    @patch("matplotlib.pyplot.show")
    def test_doe(self, mock_show, mascaret_data):
        """Plain DOE scatter plot on a 2D space."""
        doe(mascaret_data.space)

    def test_doe_3D(self, ishigami_data, tmp):
        """3D DOE plot; the returned figure stays editable after reshow."""
        fig, ax = doe(ishigami_data.space, fname=os.path.join(tmp, 'DOE.pdf'))
        fig = reshow(fig)
        ax[0].plot([0, 6], [4, -3])
        fig.savefig(os.path.join(tmp, 'DOE_change.pdf'))

    def test_doe_mufi(self, ishigami_data, tmp):
        """Multifidelity DOE rendering."""
        doe(ishigami_data.space, multifidelity=True,
            fname=os.path.join(tmp, 'DOE_mufi.pdf'))

    def test_doe_ascii(self, ishigami_data, tmp):
        """ASCII DOE output with and without explicit bounds."""
        doe_ascii(ishigami_data.space, plabels=['a', 'b', 'c'],
                  bounds=[[-np.pi, -np.pi, -np.pi], [np.pi, np.pi, np.pi]])
        doe_ascii([[0.2, 0.8], [0.3, 0.7]], fname=os.path.join(tmp, 'DOE_ascii.pdf'))

    def test_pairplot(self, ishigami_data, tmp):
        """Pairwise scatter matrix of samples against outputs."""
        pairplot(ishigami_data.space, ishigami_data.target_space,
                 plabels=['x1', 'x2', 'x3'],
                 fname=os.path.join(tmp, 'pairplot.pdf'))
# NOTE(review): no `self` parameter — this looks like a module-level test,
# not a TestDoe method; confirm placement against the original file layout.
@patch("matplotlib.pyplot.show")
def test_corr_cov(mock_show, mascaret_data, tmp):
    """Correlation/covariance maps of an LHS design run through the model."""
    model = mascaret_data.func
    dist = ot.ComposedDistribution(mascaret_data.dists)
    doe_sample = np.array(ot.LHSExperiment(dist, 500).generate())
    outputs = model(doe_sample)
    corr_cov(outputs, doe_sample, model.x, interpolation='lanczos', plabels=['Ks', 'Q'])
    corr_cov(outputs, doe_sample, model.x, fname=os.path.join(tmp, 'corr_cov.pdf'))
class TestDensity:
    """Tests for density-based sensitivity measures and their plots."""

    def test_cusunoro(self, ishigami_data, tmp):
        """CUSUNORO curves and the associated sensitivity estimates."""
        outputs = ishigami_data.target_space.flatten()
        inputs = ishigami_data.space
        cuso = cusunoro(inputs, outputs, plabels=['x1', 'x2', 'x3'],
                        fname=os.path.join(tmp, 'cusunoro.pdf'))
        npt.assert_almost_equal(cuso[2], [0.328, 0.353, 0.018], decimal=3)

    def test_ecdf(self):
        """Empirical CDF sorts the data and spaces probabilities evenly."""
        values = np.array([1, 3, 6, 10, 2])
        xs, ys = ecdf(values)
        npt.assert_equal(xs, [1, 2, 3, 6, 10])
        npt.assert_equal(ys, [0, 0.25, 0.5, 0.75, 1.])

    def test_moment_independant(self, ishigami_data, tmp):
        """Moment-independent indices on a large OLHS re-sampling."""
        ishigami_data_ = copy.deepcopy(ishigami_data)
        ishigami_data_.space.max_points_nb = 5000
        X = ishigami_data_.space.sampling(5000, 'olhs')
        Y = ishigami_data_.func(X).flatten()
        momi = moment_independent(X, Y, plabels=['x1', 'x2', 'x3'],
                                  fname=os.path.join(tmp, 'moment_independent.pdf'))
        npt.assert_almost_equal(momi[2]['Kolmogorov'], [0.236, 0.377, 0.107], decimal=2)
        npt.assert_almost_equal(momi[2]['Kuiper'], [0.257, 0.407, 0.199], decimal=2)
        npt.assert_almost_equal(momi[2]['Delta'], [0.211, 0.347, 0.162], decimal=2)
        npt.assert_almost_equal(momi[2]['Sobol'], [0.31, 0.421, 0.002], decimal=2)
        # Cramer indices on a lognormal toy case over a Gaussian design.
        space = Space(corners=[[-5, -5], [5, 5]], sample=5000)
        space.sampling(dists=['Normal(0, 1)', 'Normal(0, 1)'])
        Y = [np.exp(x_i[0] + 2 * x_i[1]) for x_i in space]
        X = np.array(space)
        momi = moment_independent(X, Y,
                                  fname=os.path.join(tmp, 'moment_independent.pdf'))
        npt.assert_almost_equal(momi[2]['Cramer'], [0.113, 0.572], decimal=2)
| [
"batman.space.Space",
"numpy.testing.assert_equal",
"openturns.LHSExperiment",
"numpy.isin",
"openturns.ComposedDistribution",
"batman.visualization.Kiviat3D.mesh_connectivity",
"numpy.array",
"openturns.RandomGenerator.SetSeed",
"batman.visualization.reshow",
"batman.functions.db_Mascaret",
"co... | [((855, 864), 'batman.functions.el_nino', 'el_nino', ([], {}), '()\n', (862, 864), False, 'from batman.functions import Ishigami, db_Mascaret, el_nino\n'), ((18165, 18196), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (18170, 18196), False, 'from mock import patch\n'), ((1033, 1064), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1047, 1064), False, 'import pytest\n'), ((3355, 3425), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'AssertionError', 'reason': '"""Global optimization"""'}), "(raises=AssertionError, reason='Global optimization')\n", (3372, 3425), False, 'import pytest\n'), ((3431, 3462), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (3436, 3462), False, 'from mock import patch\n'), ((3978, 4048), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'AssertionError', 'reason': '"""Global optimization"""'}), "(raises=AssertionError, reason='Global optimization')\n", (3995, 4048), False, 'import pytest\n'), ((4054, 4085), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (4059, 4085), False, 'from mock import patch\n'), ((5498, 5529), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (5503, 5529), False, 'from mock import patch\n'), ((5834, 5865), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (5839, 5865), False, 'from mock import patch\n'), ((5977, 6043), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_ffmpeg)'], {'reason': '"""ffmpeg not available"""'}), "(not have_ffmpeg, reason='ffmpeg not available')\n", (5995, 6043), False, 'import pytest\n'), ((8426, 8457), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (8440, 8457), False, 'import pytest\n'), ((8741, 8772), 'mock.patch', 'patch', 
(['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (8746, 8772), False, 'from mock import patch\n'), ((9594, 9660), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_ffmpeg)'], {'reason': '"""ffmpeg not available"""'}), "(not have_ffmpeg, reason='ffmpeg not available')\n", (9612, 9660), False, 'import pytest\n'), ((9923, 9954), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (9928, 9954), False, 'from mock import patch\n'), ((9960, 10026), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_ffmpeg)'], {'reason': '"""ffmpeg not available"""'}), "(not have_ffmpeg, reason='ffmpeg not available')\n", (9978, 10026), False, 'import pytest\n'), ((11308, 11339), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (11313, 11339), False, 'from mock import patch\n'), ((11866, 11897), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (11871, 11897), False, 'from mock import patch\n'), ((12551, 12582), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (12556, 12582), False, 'from mock import patch\n'), ((13114, 13145), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (13119, 13145), False, 'from mock import patch\n'), ((13515, 13546), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (13520, 13546), False, 'from mock import patch\n'), ((14046, 14082), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), '(raises=ValueError)\n', (14063, 14082), False, 'import pytest\n'), ((14088, 14119), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (14093, 14119), False, 'from mock import patch\n'), ((14560, 14591), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (14565, 14591), 
False, 'from mock import patch\n'), ((15158, 15224), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_ffmpeg)'], {'reason': '"""ffmpeg not available"""'}), "(not have_ffmpeg, reason='ffmpeg not available')\n", (15176, 15224), False, 'import pytest\n'), ((15631, 15697), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_ffmpeg)'], {'reason': '"""ffmpeg not available"""'}), "(not have_ffmpeg, reason='ffmpeg not available')\n", (15649, 15697), False, 'import pytest\n'), ((17147, 17178), 'mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (17152, 17178), False, 'from mock import patch\n'), ((18288, 18332), 'openturns.ComposedDistribution', 'ot.ComposedDistribution', (['mascaret_data.dists'], {}), '(mascaret_data.dists)\n', (18311, 18332), True, 'import openturns as ot\n'), ((18423, 18499), 'batman.visualization.corr_cov', 'corr_cov', (['data', 'sample', 'func.x'], {'interpolation': '"""lanczos"""', 'plabels': "['Ks', 'Q']"}), "(data, sample, func.x, interpolation='lanczos', plabels=['Ks', 'Q'])\n", (18431, 18499), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((1092, 1114), 'numpy.random.seed', 'np.random.seed', (['(123456)'], {}), '(123456)\n', (1106, 1114), True, 'import numpy as np\n'), ((1123, 1157), 'openturns.RandomGenerator.SetSeed', 'ot.RandomGenerator.SetSeed', (['(123456)'], {}), '(123456)\n', (1149, 1157), True, 'import openturns as ot\n'), ((1173, 1189), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {}), '(data)\n', (1183, 1189), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((1459, 1515), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['hdr.median', 'median_t'], 
{'decimal': '(2)'}), '(hdr.median, median_t, decimal=2)\n', (1482, 1515), True, 'import numpy.testing as npt\n'), ((1533, 1582), 'numpy.vstack', 'np.vstack', (['[hdr.outliers, hdr.hdr_90, hdr.hdr_50]'], {}), '([hdr.outliers, hdr.hdr_90, hdr.hdr_50])\n', (1542, 1582), True, 'import numpy as np\n'), ((1601, 2243), 'numpy.vstack', 'np.vstack', (['[[27.2, 28.16, 29.0, 28.94, 28.27, 27.24, 25.84, 24.01, 22.37, 22.24, 22.38,\n 23.26], [23.94, 26.16, 27.07, 26.5, 26.4, 25.92, 25.36, 24.7, 24.52, \n 24.67, 25.76, 27.02], [28.01, 28.83, 29.12, 28.23, 27.18, 25.33, 23.41,\n 22.11, 21.25, 21.56, 21.64, 23.01], [25.63, 26.99, 27.63, 27.11, 26.1, \n 24.65, 23.55, 22.5, 22.13, 22.51, 23.37, 24.54], [23.04, 24.58, 24.71, \n 23.41, 21.98, 20.74, 19.85, 19.09, 18.85, 19.04, 19.58, 20.8], [24.85, \n 26.15, 26.56, 25.78, 24.58, 23.2, 22.11, 21.17, 20.93, 21.25, 22.0, \n 23.23], [23.67, 25.14, 25.46, 24.28, 22.94, 21.62, 20.59, 19.75, 19.51,\n 19.73, 20.37, 21.54]]'], {}), '([[27.2, 28.16, 29.0, 28.94, 28.27, 27.24, 25.84, 24.01, 22.37, \n 22.24, 22.38, 23.26], [23.94, 26.16, 27.07, 26.5, 26.4, 25.92, 25.36, \n 24.7, 24.52, 24.67, 25.76, 27.02], [28.01, 28.83, 29.12, 28.23, 27.18, \n 25.33, 23.41, 22.11, 21.25, 21.56, 21.64, 23.01], [25.63, 26.99, 27.63,\n 27.11, 26.1, 24.65, 23.55, 22.5, 22.13, 22.51, 23.37, 24.54], [23.04, \n 24.58, 24.71, 23.41, 21.98, 20.74, 19.85, 19.09, 18.85, 19.04, 19.58, \n 20.8], [24.85, 26.15, 26.56, 25.78, 24.58, 23.2, 22.11, 21.17, 20.93, \n 21.25, 22.0, 23.23], [23.67, 25.14, 25.46, 24.28, 22.94, 21.62, 20.59, \n 19.75, 19.51, 19.73, 20.37, 21.54]])\n', (1610, 2243), True, 'import numpy as np\n'), ((2608, 2658), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['quant', 'quant_t'], {'decimal': '(0)'}), '(quant, quant_t, decimal=0)\n', (2631, 2658), True, 'import numpy.testing as npt\n'), ((3044, 3059), 'batman.visualization.reshow', 'reshow', (['figs[2]'], {}), '(figs[2])\n', (3050, 3059), False, 'from batman.visualization import 
HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((3068, 3095), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 10]', '[25, 25]'], {}), '([0, 10], [25, 25])\n', (3076, 3095), True, 'import matplotlib.pyplot as plt\n'), ((3220, 3235), 'batman.visualization.reshow', 'reshow', (['figs[1]'], {}), '(figs[1])\n', (3226, 3235), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((3524, 3553), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'alpha': '[0.7]'}), '(data, alpha=[0.7])\n', (3534, 3553), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((3578, 3747), 'numpy.vstack', 'np.vstack', (['[[25.1, 26.4, 26.9, 26.3, 25.2, 23.9, 22.7, 21.8, 21.5, 21.8, 22.5, 23.7],\n [23.4, 25.0, 25.1, 24.0, 22.6, 21.3, 20.3, 19.5, 19.2, 19.5, 20.0, 21.2]]'], {}), '([[25.1, 26.4, 26.9, 26.3, 25.2, 23.9, 22.7, 21.8, 21.5, 21.8, \n 22.5, 23.7], [23.4, 25.0, 25.1, 24.0, 22.6, 21.3, 20.3, 19.5, 19.2, \n 19.5, 20.0, 21.2]])\n', (3587, 3747), True, 'import numpy as np\n'), ((3853, 3923), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['hdr.extra_quantiles', 'extra_quant_t'], {'decimal': '(1)'}), '(hdr.extra_quantiles, extra_quant_t, decimal=1)\n', (3876, 3923), True, 'import numpy.testing as npt\n'), ((4156, 4191), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'alpha': '[0.4, 0.92]'}), '(data, alpha=[0.4, 0.92])\n', (4166, 4191), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((4704, 4749), 
'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'alpha': '[0.8]', 'threshold': '(0.93)'}), '(data, alpha=[0.8], threshold=0.93)\n', (4714, 4749), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((4861, 4921), 'numpy.testing.assert_equal', 'npt.assert_equal', (['[[1982], [1983], [1997], [1998]]', 'outliers'], {}), '([[1982], [1983], [1997], [1998]], outliers)\n', (4877, 4921), True, 'import numpy.testing as npt\n'), ((4983, 5041), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'threshold': '(0.93)', 'outliers_method': '"""forest"""'}), "(data, threshold=0.93, outliers_method='forest')\n", (4993, 5041), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((5153, 5213), 'numpy.testing.assert_equal', 'npt.assert_equal', (['[[1982], [1983], [1997], [1998]]', 'outliers'], {}), '([[1982], [1983], [1997], [1998]], outliers)\n', (5169, 5213), True, 'import numpy.testing as npt\n'), ((5271, 5302), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'optimize': '(True)'}), '(data, optimize=True)\n', (5281, 5302), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((5435, 5491), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['hdr.median', 'median_t'], {'decimal': '(2)'}), '(hdr.median, median_t, decimal=2)\n', (5458, 5491), True, 'import numpy.testing as npt\n'), ((5588, 5618), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'variance': '(0.9)'}), '(data, variance=0.9)\n', (5598, 5618), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, 
sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((5752, 5808), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['hdr.median', 'median_t'], {'decimal': '(2)'}), '(hdr.median, median_t, decimal=2)\n', (5775, 5808), True, 'import numpy.testing as npt\n'), ((6495, 6537), 'batman.visualization.HdrBoxplot', 'HdrBoxplot', (['data'], {'outliers_method': '"""forest"""'}), "(data, outliers_method='forest')\n", (6505, 6537), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((7700, 7754), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['samples', 'samples_t'], {'decimal': '(2)'}), '(samples, samples_t, decimal=2)\n', (7723, 7754), True, 'import numpy.testing as npt\n'), ((8657, 8711), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'data'], {'bounds': 'bounds', 'plabels': 'plabels'}), '(sample, data, bounds=bounds, plabels=plabels)\n', (8665, 8711), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((8958, 8986), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['[[30], [15]]', 'data'], {}), '([[30], [15]], data)\n', (8966, 8986), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((9027, 9049), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'data'], {}), '(sample, data)\n', (9035, 9049), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((9114, 9147), 
'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'functional_data'], {}), '(sample, functional_data)\n', (9122, 9147), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((9165, 9235), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'functional_data'], {'stack_order': '"""qoi"""', 'cbar_order': '"""hdr"""'}), "(sample, functional_data, stack_order='qoi', cbar_order='hdr')\n", (9173, 9235), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((9253, 9323), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'functional_data'], {'stack_order': '"""hdr"""', 'cbar_order': '"""qoi"""'}), "(sample, functional_data, stack_order='hdr', cbar_order='qoi')\n", (9261, 9323), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((9341, 9407), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'functional_data'], {'stack_order': '(1)', 'cbar_order': '"""hdr"""'}), "(sample, functional_data, stack_order=1, cbar_order='hdr')\n", (9349, 9407), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((9425, 9502), 'batman.visualization.Kiviat3D', 'Kiviat3D', (['sample', 'functional_data'], {'idx': '(1)', 'cbar_order': '"""hdr"""', 'range_cbar': '[0, 1]'}), "(sample, functional_data, idx=1, cbar_order='hdr', range_cbar=[0, 1])\n", (9433, 9502), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, 
doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((10197, 10267), 'batman.visualization.Tree', 'Tree', (['sample', 'functional_data'], {'bounds': '[[10.0, 2500.0], [60.0, 6000.0]]'}), '(sample, functional_data, bounds=[[10.0, 2500.0], [60.0, 6000.0]])\n', (10201, 10267), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((10501, 10533), 'batman.visualization.Kiviat3D.mesh_connectivity', 'Kiviat3D.mesh_connectivity', (['(6)', '(3)'], {}), '(6, 3)\n', (10527, 10533), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((10559, 10631), 'numpy.array', 'np.array', (['[[4, 0, 1, 3, 4], [4, 1, 2, 4, 5], [4, 2, 0, 5, 3]]'], {'dtype': 'int'}), '([[4, 0, 1, 3, 4], [4, 1, 2, 4, 5], [4, 2, 0, 5, 3]], dtype=int)\n', (10567, 10631), True, 'import numpy as np\n'), ((10710, 10756), 'numpy.testing.assert_equal', 'npt.assert_equal', (['connectivity', 'connectivity_t'], {}), '(connectivity, connectivity_t)\n', (10726, 10756), True, 'import numpy.testing as npt\n'), ((10867, 10899), 'batman.visualization.Kiviat3D.mesh_connectivity', 'Kiviat3D.mesh_connectivity', (['(8)', '(4)'], {}), '(8, 4)\n', (10893, 10899), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((10925, 11019), 'numpy.array', 'np.array', (['[[4, 0, 1, 4, 5], [4, 1, 2, 5, 6], [4, 2, 3, 6, 7], [4, 3, 0, 7, 4]]'], {'dtype': 'int'}), '([[4, 0, 1, 4, 5], [4, 1, 2, 5, 6], [4, 2, 3, 6, 7], [4, 3, 0, 7, 4\n ]], dtype=int)\n', (10933, 11019), True, 'import numpy as np\n'), ((11128, 11174), 'numpy.testing.assert_equal', 'npt.assert_equal', (['connectivity', 
'connectivity_t'], {}), '(connectivity, connectivity_t)\n', (11144, 11174), True, 'import numpy.testing as npt\n'), ((11415, 11459), 'openturns.ComposedDistribution', 'ot.ComposedDistribution', (['ishigami_data.dists'], {}), '(ishigami_data.dists)\n', (11438, 11459), True, 'import openturns as ot\n'), ((11480, 11559), 'batman.surrogate.SurrogateModel', 'SurrogateModel', (['"""rbf"""', 'ishigami_data.space.corners', 'ishigami_data.space.plabels'], {}), "('rbf', ishigami_data.space.corners, ishigami_data.space.plabels)\n", (11494, 11559), False, 'from batman.surrogate import SurrogateModel\n'), ((11846, 11859), 'batman.visualization.pdf', 'pdf', (['settings'], {}), '(settings)\n', (11849, 11859), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((12124, 12139), 'batman.visualization.reshow', 'reshow', (['fig_pdf'], {}), '(fig_pdf)\n', (12130, 12139), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((12148, 12175), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 10]', '[25, 25]'], {}), '([0, 10], [25, 25])\n', (12156, 12175), True, 'import matplotlib.pyplot as plt\n'), ((12184, 12194), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12192, 12194), True, 'import matplotlib.pyplot as plt\n'), ((12203, 12214), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12212, 12214), True, 'import matplotlib.pyplot as plt\n'), ((12651, 12661), 'batman.functions.Ishigami', 'Ishigami', ([], {}), '()\n', (12659, 12661), False, 'from batman.functions import Ishigami, db_Mascaret, el_nino\n'), ((12721, 12760), 'batman.visualization.sensitivity_indices', 'sensitivity_indices', (['indices'], {'conf': '(0.05)'}), '(indices, conf=0.05)\n', (12740, 12760), False, 'from 
batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((12775, 12789), 'batman.visualization.reshow', 'reshow', (['fig[0]'], {}), '(fig[0])\n', (12781, 12789), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((12798, 12827), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 10]', '[0.5, 0.5]'], {}), '([0, 10], [0.5, 0.5])\n', (12806, 12827), True, 'import matplotlib.pyplot as plt\n'), ((12984, 13070), 'batman.visualization.sensitivity_indices', 'sensitivity_indices', (['indices'], {'polar': '(True)', 'conf': '[[0.2, 0.1, 0.1], [0.1, 0.1, 0.1]]'}), '(indices, polar=True, conf=[[0.2, 0.1, 0.1], [0.1, 0.1, \n 0.1]])\n', (13003, 13070), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((13074, 13107), 'batman.visualization.sensitivity_indices', 'sensitivity_indices', (['[indices[0]]'], {}), '([indices[0]])\n', (13093, 13107), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((13207, 13220), 'batman.functions.db_Mascaret', 'db_Mascaret', ([], {}), '()\n', (13218, 13220), False, 'from batman.functions import Ishigami, db_Mascaret, el_nino\n'), ((13310, 13338), 'batman.visualization.sensitivity_indices', 'sensitivity_indices', (['indices'], {}), '(indices)\n', (13329, 13338), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((13695, 13725), 
'os.path.join', 'os.path.join', (['tmp', '"""rs_1D.pdf"""'], {}), "(tmp, 'rs_1D.pdf')\n", (13707, 13725), False, 'import os\n'), ((13734, 13786), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'fun': 'fun', 'fname': 'path'}), '(bounds=bounds, fun=fun, fname=path)\n', (13750, 13786), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((13804, 13825), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (13815, 13825), True, 'import numpy as np\n'), ((13969, 14039), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'sample': 'sample', 'data': 'data', 'xdata': 'xdata'}), '(bounds=bounds, sample=sample, data=data, xdata=xdata)\n', (13985, 14039), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((14288, 14325), 'os.path.join', 'os.path.join', (['tmp', '"""rs_2D_vector.pdf"""'], {}), "(tmp, 'rs_2D_vector.pdf')\n", (14300, 14325), False, 'import os\n'), ((14334, 14410), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'sample': 'space', 'data': 'branin_data.target_space'}), '(bounds=bounds, sample=space, data=branin_data.target_space)\n', (14350, 14410), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((14419, 14533), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'fun': 'branin_data.func', 'doe': 'space', 'resampling': '(4)', 'fname': 'path', 'feat_order': '[2, 1]'}), '(bounds=bounds, fun=branin_data.func, doe=space, resampling\n =4, fname=path, 
feat_order=[2, 1])\n', (14435, 14533), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((14871, 14908), 'os.path.join', 'os.path.join', (['tmp', '"""rs_2D_vector.pdf"""'], {}), "(tmp, 'rs_2D_vector.pdf')\n", (14883, 14908), False, 'import os\n'), ((14917, 15003), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'sample': 'space', 'data': 'data', 'xdata': 'xdata', 'fname': 'path'}), '(bounds=bounds, sample=space, data=data, xdata=xdata, fname\n =path)\n', (14933, 15003), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((15007, 15130), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'fun': 'mascaret_data.func', 'xdata': 'xdata', 'plabels': "['Ks', 'Q']", 'feat_order': 'order', 'flabel': '"""Z"""'}), "(bounds=bounds, fun=mascaret_data.func, xdata=xdata,\n plabels=['Ks', 'Q'], feat_order=order, flabel='Z')\n", (15023, 15130), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((15438, 15471), 'os.path.join', 'os.path.join', (['tmp', '"""rs_3D_vector"""'], {}), "(tmp, 'rs_3D_vector')\n", (15450, 15471), False, 'import os\n'), ((15480, 15604), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'fun': 'fun', 'doe': 'space', 'resampling': '(30)', 'contours': '[-20, 0, 20]', 'fname': 'path', 'feat_order': 'order'}), '(bounds=bounds, fun=fun, doe=space, resampling=30, contours\n =[-20, 0, 20], fname=path, feat_order=order)\n', (15496, 15604), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, 
sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((15924, 15957), 'os.path.join', 'os.path.join', (['tmp', '"""rs_4D_vector"""'], {}), "(tmp, 'rs_4D_vector')\n", (15936, 15957), False, 'import os\n'), ((15966, 16093), 'batman.visualization.response_surface', 'response_surface', ([], {'bounds': 'bounds', 'fun': 'fun', 'doe': 'space', 'resampling': '(10)', 'axis_disc': '[2, 15, 15, 15]', 'fname': 'path', 'feat_order': 'order'}), '(bounds=bounds, fun=fun, doe=space, resampling=10,\n axis_disc=[2, 15, 15, 15], fname=path, feat_order=order)\n', (15982, 16093), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((16251, 16292), 'os.path.join', 'os.path.join', (['datadir', '"""data_Garonne.csv"""'], {}), "(datadir, 'data_Garonne.csv')\n", (16263, 16292), False, 'import os\n'), ((16308, 16343), 'os.path.join', 'os.path.join', (['tmp', '"""garonne_2D.pdf"""'], {}), "(tmp, 'garonne_2D.pdf')\n", (16320, 16343), False, 'import os\n'), ((16371, 16482), 'batman.visualization.mesh_2D', 'mesh_2D', ([], {'fname': 'fname', 'fformat': '"""csv"""', 'xlabel': '"""x label"""', 'flabels': "['Variable']", 'vmins': '[vmin]', 'output_path': 'path'}), "(fname=fname, fformat='csv', xlabel='x label', flabels=['Variable'],\n vmins=[vmin], output_path=path)\n", (16378, 16482), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((16634, 16675), 'os.path.join', 'os.path.join', (['datadir', '"""data_2D_mesh.csv"""'], {}), "(datadir, 'data_2D_mesh.csv')\n", (16646, 16675), False, 'import os\n'), ((16774, 16806), 'os.path.join', 'os.path.join', (['tmp', '"""data_2D.pdf"""'], {}), "(tmp, 'data_2D.pdf')\n", (16786, 16806), False, 
'import os\n'), ((16845, 16952), 'batman.visualization.mesh_2D', 'mesh_2D', ([], {'fname': 'fname', 'fformat': '"""csv"""', 'xlabel': '"""x label"""', 'var': 'var_sobol', 'flabels': 'flabels', 'output_path': 'path'}), "(fname=fname, fformat='csv', xlabel='x label', var=var_sobol,\n flabels=flabels, output_path=path)\n", (16852, 16952), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((17237, 17261), 'batman.visualization.doe', 'doe', (['mascaret_data.space'], {}), '(mascaret_data.space)\n', (17240, 17261), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((17404, 17415), 'batman.visualization.reshow', 'reshow', (['fig'], {}), '(fig)\n', (17410, 17415), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((17724, 17842), 'batman.visualization.doe_ascii', 'doe_ascii', (['ishigami_data.space'], {'plabels': "['a', 'b', 'c']", 'bounds': '[[-np.pi, -np.pi, -np.pi], [np.pi, np.pi, np.pi]]'}), "(ishigami_data.space, plabels=['a', 'b', 'c'], bounds=[[-np.pi, -\n np.pi, -np.pi], [np.pi, np.pi, np.pi]])\n", (17733, 17842), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((18860, 18926), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['cuso[2]', '[0.328, 0.353, 0.018]'], {'decimal': '(3)'}), '(cuso[2], [0.328, 0.353, 0.018], decimal=3)\n', (18883, 18926), True, 'import numpy.testing as npt\n'), ((18968, 18994), 'numpy.array', 'np.array', (['[1, 3, 6, 10, 2]'], {}), 
'([1, 3, 6, 10, 2])\n', (18976, 18994), True, 'import numpy as np\n'), ((19012, 19022), 'batman.visualization.density.ecdf', 'ecdf', (['data'], {}), '(data)\n', (19016, 19022), False, 'from batman.visualization.density import ecdf\n'), ((19031, 19069), 'numpy.testing.assert_equal', 'npt.assert_equal', (['xs', '[1, 2, 3, 6, 10]'], {}), '(xs, [1, 2, 3, 6, 10])\n', (19047, 19069), True, 'import numpy.testing as npt\n'), ((19078, 19125), 'numpy.testing.assert_equal', 'npt.assert_equal', (['ys', '[0, 0.25, 0.5, 0.75, 1.0]'], {}), '(ys, [0, 0.25, 0.5, 0.75, 1.0])\n', (19094, 19125), True, 'import numpy.testing as npt\n'), ((19210, 19238), 'copy.deepcopy', 'copy.deepcopy', (['ishigami_data'], {}), '(ishigami_data)\n', (19223, 19238), False, 'import copy\n'), ((19553, 19638), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["momi[2]['Kolmogorov']", '[0.236, 0.377, 0.107]'], {'decimal': '(2)'}), "(momi[2]['Kolmogorov'], [0.236, 0.377, 0.107], decimal=2\n )\n", (19576, 19638), True, 'import numpy.testing as npt\n'), ((19642, 19718), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["momi[2]['Kuiper']", '[0.257, 0.407, 0.199]'], {'decimal': '(2)'}), "(momi[2]['Kuiper'], [0.257, 0.407, 0.199], decimal=2)\n", (19665, 19718), True, 'import numpy.testing as npt\n'), ((19727, 19802), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["momi[2]['Delta']", '[0.211, 0.347, 0.162]'], {'decimal': '(2)'}), "(momi[2]['Delta'], [0.211, 0.347, 0.162], decimal=2)\n", (19750, 19802), True, 'import numpy.testing as npt\n'), ((19811, 19885), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["momi[2]['Sobol']", '[0.31, 0.421, 0.002]'], {'decimal': '(2)'}), "(momi[2]['Sobol'], [0.31, 0.421, 0.002], decimal=2)\n", (19834, 19885), True, 'import numpy.testing as npt\n'), ((19920, 19966), 'batman.space.Space', 'Space', ([], {'corners': '[[-5, -5], [5, 5]]', 'sample': '(5000)'}), '(corners=[[-5, -5], [5, 5]], sample=5000)\n', (19925, 
19966), False, 'from batman.space import Space\n'), ((20101, 20116), 'numpy.array', 'np.array', (['space'], {}), '(space)\n', (20109, 20116), True, 'import numpy as np\n'), ((20252, 20321), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["momi[2]['Cramer']", '[0.113, 0.572]'], {'decimal': '(2)'}), "(momi[2]['Cramer'], [0.113, 0.572], decimal=2)\n", (20275, 20321), True, 'import numpy.testing as npt\n'), ((3153, 3203), 'os.path.join', 'os.path.join', (['tmp', '"""hdr_boxplot_change_sample.pdf"""'], {}), "(tmp, 'hdr_boxplot_change_sample.pdf')\n", (3165, 3203), False, 'import os\n'), ((3296, 3347), 'os.path.join', 'os.path.join', (['tmp', '"""hdr_boxplot_change_scatter.pdf"""'], {}), "(tmp, 'hdr_boxplot_change_scatter.pdf')\n", (3308, 3347), False, 'import os\n'), ((4593, 4617), 'numpy.vstack', 'np.vstack', (['extra_quant_t'], {}), '(extra_quant_t)\n', (4602, 4617), True, 'import numpy as np\n'), ((4778, 4805), 'numpy.isin', 'np.isin', (['data', 'hdr.outliers'], {}), '(data, hdr.outliers)\n', (4785, 4805), True, 'import numpy as np\n'), ((5070, 5097), 'numpy.isin', 'np.isin', (['data', 'hdr.outliers'], {}), '(data, hdr.outliers)\n', (5077, 5097), True, 'import numpy as np\n'), ((6782, 6825), 'os.path.join', 'os.path.join', (['tmp', '"""song-fHOPs-samples.wav"""'], {}), "(tmp, 'song-fHOPs-samples.wav')\n", (6794, 6825), False, 'import os\n'), ((7125, 7160), 'os.path.join', 'os.path.join', (['tmp', '"""song-fHOPs.wav"""'], {}), "(tmp, 'song-fHOPs.wav')\n", (7137, 7160), False, 'import os\n'), ((10771, 10796), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10784, 10796), False, 'import pytest\n'), ((10810, 10842), 'batman.visualization.Kiviat3D.mesh_connectivity', 'Kiviat3D.mesh_connectivity', (['(6)', '(4)'], {}), '(6, 4)\n', (10836, 10842), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, 
moment_independent\n'), ((16200, 16225), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (16215, 16225), False, 'import os\n'), ((16583, 16608), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (16598, 16608), False, 'import os\n'), ((17049, 17074), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17062, 17074), False, 'import pytest\n'), ((17088, 17123), 'batman.visualization.mesh_2D', 'mesh_2D', ([], {'fname': 'fname', 'var': 'var_sobol'}), '(fname=fname, var=var_sobol)\n', (17095, 17123), False, 'from batman.visualization import HdrBoxplot, Kiviat3D, Tree, pdf, sensitivity_indices, corr_cov, reshow, response_surface, doe, doe_ascii, pairplot, mesh_2D, cusunoro, moment_independent\n'), ((17472, 17507), 'os.path.join', 'os.path.join', (['tmp', '"""DOE_change.pdf"""'], {}), "(tmp, 'DOE_change.pdf')\n", (17484, 17507), False, 'import os\n'), ((18541, 18574), 'os.path.join', 'os.path.join', (['tmp', '"""corr_cov.pdf"""'], {}), "(tmp, 'corr_cov.pdf')\n", (18553, 18574), False, 'import os\n'), ((20043, 20070), 'numpy.exp', 'np.exp', (['(x_i[0] + 2 * x_i[1])'], {}), '(x_i[0] + 2 * x_i[1])\n', (20049, 20070), True, 'import numpy as np\n'), ((2695, 2731), 'os.path.join', 'os.path.join', (['tmp', '"""hdr_boxplot.pdf"""'], {}), "(tmp, 'hdr_boxplot.pdf')\n", (2707, 2731), False, 'import os\n'), ((2815, 2837), 'numpy.linspace', 'np.linspace', (['(1)', '(12)', '(12)'], {}), '(1, 12, 12)\n', (2826, 2837), True, 'import numpy as np\n'), ((6112, 6134), 'numpy.linspace', 'np.linspace', (['(1)', '(12)', '(12)'], {}), '(1, 12, 12)\n', (6123, 6134), True, 'import numpy as np\n'), ((6305, 6336), 'os.path.join', 'os.path.join', (['tmp', '"""f-HOPs.mp4"""'], {}), "(tmp, 'f-HOPs.mp4')\n", (6317, 6336), False, 'import os\n'), ((6375, 6406), 'os.path.join', 'os.path.join', (['tmp', '"""f-HOPs.mp4"""'], {}), "(tmp, 'f-HOPs.mp4')\n", (6387, 6406), False, 'import os\n'), ((6447, 6478), 'os.path.join', 'os.path.join', 
(['tmp', '"""f-HOPs.mp4"""'], {}), "(tmp, 'f-HOPs.mp4')\n", (6459, 6478), False, 'import os\n'), ((6563, 6594), 'os.path.join', 'os.path.join', (['tmp', '"""f-HOPs.mp4"""'], {}), "(tmp, 'f-HOPs.mp4')\n", (6575, 6594), False, 'import os\n'), ((6661, 6704), 'os.path.join', 'os.path.join', (['tmp', '"""song-fHOPs-samples.wav"""'], {}), "(tmp, 'song-fHOPs-samples.wav')\n", (6673, 6704), False, 'import os\n'), ((6909, 6949), 'os.path.join', 'os.path.join', (['tmp', '"""song-fHOPs-data.wav"""'], {}), "(tmp, 'song-fHOPs-data.wav')\n", (6921, 6949), False, 'import os\n'), ((7057, 7092), 'os.path.join', 'os.path.join', (['tmp', '"""song-fHOPs.wav"""'], {}), "(tmp, 'song-fHOPs.wav')\n", (7069, 7092), False, 'import os\n'), ((9555, 9586), 'os.path.join', 'os.path.join', (['tmp', '"""kiviat.pdf"""'], {}), "(tmp, 'kiviat.pdf')\n", (9567, 9586), False, 'import os\n'), ((9801, 9837), 'os.path.join', 'os.path.join', (['tmp', '"""kiviat_fill.mp4"""'], {}), "(tmp, 'kiviat_fill.mp4')\n", (9813, 9837), False, 'import os\n'), ((9872, 9903), 'os.path.join', 'os.path.join', (['tmp', '"""kiviat.mp4"""'], {}), "(tmp, 'kiviat.mp4')\n", (9884, 9903), False, 'import os\n'), ((10312, 10341), 'os.path.join', 'os.path.join', (['tmp', '"""tree.pdf"""'], {}), "(tmp, 'tree.pdf')\n", (10324, 10341), False, 'import os\n'), ((10413, 10442), 'os.path.join', 'os.path.join', (['tmp', '"""tree.mp4"""'], {}), "(tmp, 'tree.mp4')\n", (10425, 10442), False, 'import os\n'), ((11272, 11300), 'os.path.join', 'os.path.join', (['tmp', '"""pdf.pdf"""'], {}), "(tmp, 'pdf.pdf')\n", (11284, 11300), False, 'import os\n'), ((11975, 11997), 'numpy.linspace', 'np.linspace', (['(1)', '(12)', '(12)'], {}), '(1, 12, 12)\n', (11986, 11997), True, 'import numpy as np\n'), ((12083, 12114), 'os.path.join', 'os.path.join', (['tmp', '"""pdf_nd.pdf"""'], {}), "(tmp, 'pdf_nd.pdf')\n", (12095, 12114), False, 'import os\n'), ((12330, 12369), 'os.path.join', 'os.path.join', (['tmp', '"""pdf_nd_moments.pdf"""'], {}), "(tmp, 
'pdf_nd_moments.pdf')\n", (12342, 12369), False, 'import os\n'), ((12482, 12518), 'os.path.join', 'os.path.join', (['tmp', '"""pdf_dotplot.pdf"""'], {}), "(tmp, 'pdf_dotplot.pdf')\n", (12494, 12518), False, 'import os\n'), ((12944, 12974), 'os.path.join', 'os.path.join', (['tmp', '"""sobol.pdf"""'], {}), "(tmp, 'sobol.pdf')\n", (12956, 12974), False, 'import os\n'), ((13444, 13478), 'os.path.join', 'os.path.join', (['tmp', '"""sobol_map.pdf"""'], {}), "(tmp, 'sobol_map.pdf')\n", (13456, 13478), False, 'import os\n'), ((17359, 17387), 'os.path.join', 'os.path.join', (['tmp', '"""DOE.pdf"""'], {}), "(tmp, 'DOE.pdf')\n", (17371, 17387), False, 'import os\n'), ((17630, 17663), 'os.path.join', 'os.path.join', (['tmp', '"""DOE_mufi.pdf"""'], {}), "(tmp, 'DOE_mufi.pdf')\n", (17642, 17663), False, 'import os\n'), ((17907, 17941), 'os.path.join', 'os.path.join', (['tmp', '"""DOE_ascii.pdf"""'], {}), "(tmp, 'DOE_ascii.pdf')\n", (17919, 17941), False, 'import os\n'), ((18127, 18160), 'os.path.join', 'os.path.join', (['tmp', '"""pairplot.pdf"""'], {}), "(tmp, 'pairplot.pdf')\n", (18139, 18160), False, 'import os\n'), ((18355, 18382), 'openturns.LHSExperiment', 'ot.LHSExperiment', (['dist', '(500)'], {}), '(dist, 500)\n', (18371, 18382), True, 'import openturns as ot\n'), ((18816, 18849), 'os.path.join', 'os.path.join', (['tmp', '"""cusunoro.pdf"""'], {}), "(tmp, 'cusunoro.pdf')\n", (18828, 18849), False, 'import os\n'), ((19499, 19542), 'os.path.join', 'os.path.join', (['tmp', '"""moment_independent.pdf"""'], {}), "(tmp, 'moment_independent.pdf')\n", (19511, 19542), False, 'import os\n'), ((20198, 20241), 'os.path.join', 'os.path.join', (['tmp', '"""moment_independent.pdf"""'], {}), "(tmp, 'moment_independent.pdf')\n", (20210, 20241), False, 'import os\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.figure import Figure
import glob
import os
from typing import Tuple
from scipy.stats import wasserstein_distance
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial.distance import pdist, squareform
import pathlib
import json
import math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as ticker
from scipy.spatial import ConvexHull
import pandas as pd
def getFileList(path:str, extension:str) -> list:
    """Return the sorted list of files in ``path`` whose names end with ``extension``.

    Args:
        path (str): directory to scan
        extension (str): file-name suffix to match (e.g. ``'.png'``)

    Returns:
        list: sorted list of path strings of the form ``'{path}/{name}'``
    """
    # endswith() instead of a substring test, so e.g. '.png' does not also
    # match 'photo.png.bak' somewhere in the middle of the name.
    fileList = ['{0}/{1}'.format(path, fn) for fn in os.listdir(path)
                if fn.endswith(extension)]
    fileList.sort()
    return fileList
def gammaAdjust(im:np.ndarray, gamma:float) -> np.ndarray:
    """Apply gamma correction to an image via a 256-entry lookup table.

    Args:
        im (numpy.ndarray): OpenCV image
        gamma (float): gamma value (>1 brightens, <1 darkens)

    Returns:
        numpy.ndarray: gamma-corrected OpenCV image
    """
    # Build the lookup table in one vectorized expression; astype("uint8")
    # truncates exactly like the original per-element conversion.
    levels = np.arange(256, dtype=np.float64) / 255.0
    table = (levels ** (1.0 / gamma) * 255).astype("uint8")
    return cv2.LUT(im, table)
def rgb2hsv0(img:np.ndarray) -> np.ndarray:
    """Fast RGB to HSV conversion using numpy arrays.

    Converts OpenCV's packed 8-bit HSV ranges (H: 0-179, S/V: 0-255) into
    H: 0-360 degrees and S/V: 0-100 percent.

    Args:
        img (numpy.ndarray): numpy array representation of cv2 RGB image

    Returns:
        numpy.ndarray: HSV image packed in a 3D numpy array (h, s, v on the last axis)
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Bug fix: cast to float before scaling. hsv is uint8, so doubling any
    # stored hue above 127 silently wrapped around (e.g. 150 -> 300 % 256 = 44).
    h = hsv[:,:,0].astype(np.float64) * 2
    s = hsv[:,:,1] / 2.55
    v = hsv[:,:,2] / 2.55
    return np.array([h, s, v]).transpose(1, -1, 0)
def hsvFilter(img:np.ndarray, hSlice=(0,360), sSlice=(0,100), vSlice=(0,100)) -> np.ndarray:
    """Zero out pixels whose HSV components fall outside the given ranges.

    Args:
        img (numpy.ndarray): cv2 RGB image
        hSlice (tuple): kept hue range [min, max)
        sSlice (tuple): kept saturation range [min, max)
        vSlice (tuple): kept value range [min, max)

    Returns:
        numpy.ndarray: copy of ``img`` with out-of-range pixels set to 0
    """
    out = img.copy()
    hsv = rgb2hsv0(img)
    fullRanges = ((0, 360), (0, 100), (0, 100))
    for channel, (window, full) in enumerate(zip((hSlice, sSlice, vSlice), fullRanges)):
        # Skip the mask entirely when the whole component range is requested.
        if window[0] == full[0] and window[1] == full[1]:
            continue
        keep = np.logical_and(hsv[..., channel] >= window[0],
                              hsv[..., channel] < window[1])
        out[np.logical_not(keep)] = 0
    return out
def hsvHistogramScatter(img, bins:list) -> dict:
    """Gets RGB image, converts it to HSV, bins coordinates
    and returns this binned coordinate list in a dict.
    This function generates histograms for fast plotting.

    Args:
        img (numpy.ndarray): cv2 image
        bins (list): bin size list in the form of [h, s, v]

    Returns:
        dict: keys -> 3D bin-origin coordinates as tuples, values -> pixel counts
    """
    hsv = rgb2hsv0(img)
    # Snap every pixel down to the origin of its (h, s, v) bin.
    binned = np.floor(hsv / np.array(bins)) * np.array(bins)
    counts = {}
    rows, cols = binned.shape[0], binned.shape[1]
    for r in range(rows):
        for c in range(cols):
            key = (binned[r, c, 0], binned[r, c, 1], binned[r, c, 2])
            counts[key] = counts.get(key, 0) + 1
    return counts
def hsvHistogramCubic(img, bins:list) -> np.ndarray:
    """Generates 3D HSV histogram of an image. Given HSV ranges are binned and
    the histogram is returned as a matrix in the form of 3D numpy array.
    This form of histogram is suitable for distance matrix calculations by using EMD

    Args:
        img (numpy.ndarray): cv2 image
        bins (list): bin size list for h, s and v components

    Returns:
        tuple: (3D histogram matrix, hue bin origins, saturation bin origins,
        value bin origins)
    """
    hRange = (0, 360)
    sRange = (0, 100)
    # Bug fix: rgb2hsv0 scales the value component to 0-100, so the
    # histogram must cover that range (was (0, 20)).
    vRange = (0, 100)
    hsv = rgb2hsv0(img)
    hbins = int((hRange[1] - hRange[0]) / bins[0])
    sbins = int((sRange[1] - sRange[0]) / bins[1])
    vbins = int((vRange[1] - vRange[0]) / bins[2])
    histogram = np.zeros((hbins, sbins, vbins), dtype=int)
    hsvb = np.floor(hsv / np.array(bins))
    for i in range(0, hsvb.shape[0]):
        for j in range(0, hsvb.shape[1]):
            # min() clamps samples that land exactly on the top edge
            # into the last bin.
            h = min(int(hsvb[i, j, 0]), hbins - 1)
            s = min(int(hsvb[i, j, 1]), sbins - 1)
            # Bug fix: the value component is channel 2, not channel 1
            # (the original re-read the saturation channel here).
            v = min(int(hsvb[i, j, 2]), vbins - 1)
            histogram[h, s, v] += 1
    hr = [h for h in range(hRange[0], hRange[1], bins[0])]
    sr = [s for s in range(sRange[0], sRange[1], bins[1])]
    vr = [v for v in range(vRange[0], vRange[1], bins[2])]
    return (histogram, hr, sr, vr)
def plotHistogram3D(hist:dict, axs:mpl.axes, rot=(10,0)) -> (list, list, list):
    """Scatter-plot a 3D HSV scatter histogram on the given 3D axes.

    Args:
        hist (dict): keys -> (h, s, v) tuples, values -> pixel counts
        axs (mpl.axes): 3D matplotlib axes to draw on
        rot (tuple): (elevation, azimuth) view angles in degrees

    Returns:
        tuple: (hue, saturation, value) coordinate lists that were plotted
    """
    hues = [coord[0] for coord in hist]
    sats = [coord[1] for coord in hist]
    vals = [coord[2] for coord in hist]
    # Map hue (0-360) onto the colormap's 0-100 scale; size each dot by
    # the logarithm of its pixel count.
    colors = [h * 100 / 360 for h in hues]
    sizes = [math.log(count) * 32 for count in hist.values()]
    axs.scatter(hues, sats, vals, s=sizes, c=colors, cmap='hsv')
    axs.set_xlabel('hue', fontsize=20, rotation=0)
    axs.set_ylabel('saturation', fontsize=20, rotation=0)
    axs.set_zlabel('value', fontsize=20, rotation=60)
    axs.view_init(*rot)
    return (hues, sats, vals)
def filterScatterHist(scatterHist:dict, hSlice=(0,360), sSlice=(0,255), vSlice=(0,255), cSlice=(0,1000000000)) -> dict:
    """Return the subset of a scatter histogram within the given HSV/count windows.

    Args:
        scatterHist (dict): keys -> (h, s, v) tuples, values -> counts
        hSlice (tuple): half-open [min, max) hue window
        sSlice (tuple): half-open [min, max) saturation window
        vSlice (tuple): half-open [min, max) value window
        cSlice (tuple): half-open [min, max) window on the counts

    Returns:
        dict: the filtered histogram
    """
    def _inside(value, window):
        # Half-open interval test: min inclusive, max exclusive.
        return window[0] <= value < window[1]

    return {
        coord: count
        for coord, count in scatterHist.items()
        if _inside(coord[0], hSlice)
        and _inside(coord[1], sSlice)
        and _inside(coord[2], vSlice)
        and _inside(count, cSlice)
    }
def hist3Dpanel(img:np.ndarray, fig:Figure, bins=[18,5,1]) -> dict:
    """Draw a 4-panel figure: the image plus its 3D HSV histogram from three angles.

    Args:
        img (numpy.ndarray): cv2 RGB image
        fig (Figure): matplotlib figure to draw into
        bins (list): HSV bin sizes for the histogram

    Returns:
        dict: the scatter histogram of ``img``
    """
    # Top-left panel: a gamma-brightened preview of the image itself.
    imageAxes = fig.add_subplot(221)
    imageAxes.imshow(gammaAdjust(img, 2.5))
    scatterHist = hsvHistogramScatter(img, bins)
    # Remaining panels: the same histogram viewed from three rotations.
    for position, rotation in ((224, (5, 45 + 270)), (222, (5, 270)), (223, (5, 0))):
        plotHistogram3D(scatterHist, fig.add_subplot(position, projection='3d'), rot=rotation)
    plt.show()
    return scatterHist
def convexHull(scatterHist:dict, fig:Figure, pointSize=(18,5,1), hSlice=(0,360), sSlice=(0,255), vSlice=(0,255), cmin=0) -> (float, float):
    """Compute the convex-hull volume of the occupied HSV bins and plot them.

    Each kept bin contributes two corner points (its origin and the opposite
    corner offset by ``pointSize``) so even thin histograms span a volume.

    Args:
        scatterHist (dict): keys -> (h, s, v) tuples, values -> counts
        fig (Figure): matplotlib figure to draw four views into
        pointSize (tuple): extent of one bin along h, s and v
        hSlice (tuple): hue window passed to the filter
        sSlice (tuple): saturation window passed to the filter
        vSlice (tuple): value window passed to the filter
        cmin (int): minimum pixel count for a bin to be kept

    Returns:
        tuple: (convex hull volume, total pixel count of the kept bins)
    """
    kept = filterScatterHist(scatterHist, hSlice, sSlice, vSlice, cSlice=(cmin, 1E12))
    corners = []
    totalPixels = 0
    for binOrigin, count in kept.items():
        corners.append(list(binOrigin))
        # The opposite corner of the bin, shifted by the bin extent.
        corners.append([coord + extent for coord, extent in zip(binOrigin, pointSize)])
        totalPixels += count
    # 'QJ' joggles the input so degenerate (coplanar) point sets still hull.
    hull = ConvexHull(np.array(corners), qhull_options='QJ')
    firstAxes = fig.add_subplot(221, projection='3d')
    hc, sc, vc = plotHistogram3D(kept, firstAxes, rot=(0, 270))
    firstAxes.plot(hc, sc, vc)
    for position, rotation in ((222, (0, 0)), (223, (90, 270)), (224, (10, 300))):
        plotHistogram3D(kept, fig.add_subplot(position, projection='3d'), rot=rotation)
    return (hull.volume, totalPixels)
def batchConvexHull(scatterHistSet:dict, outdir:str, pointSize=(18,5,1), hSlice=(0,360), sSlice=(0,255), vSlice=(0,255), cmin=10) -> pd.DataFrame:
    """Run convexHull() over a set of scatter histograms and save the figures.

    Args:
        scatterHistSet (dict): image name -> scatter histogram
        outdir (str): directory for the per-image hull figures
        pointSize (tuple): bin extent forwarded to convexHull()
        hSlice (tuple): hue window forwarded to convexHull()
        sSlice (tuple): saturation window forwarded to convexHull()
        vSlice (tuple): value window forwarded to convexHull()
        cmin (int): minimum bin count forwarded to convexHull(); the default
            of 10 matches the previously hard-coded value

    Returns:
        pd.DataFrame: one row per image with columns name, V_ch, pixels
    """
    volNdist = []
    for k, v in scatterHistSet.items():
        fig = plt.figure()
        fig.set_size_inches(w=24, h=24)
        # Bug fix: forward the caller-supplied cmin instead of ignoring the
        # parameter and hard-coding cmin=10 here.
        ch, p = convexHull(v, fig, pointSize, hSlice, sSlice, vSlice, cmin=cmin)
        fig.suptitle('{0} Vch:{1} pix:{2}'.format(k, ch, p), fontsize=32)
        fig.savefig('{0}/{1}_convexHull.png'.format(outdir, k))
        plt.show()
        plt.close(fig)
        volNdist.append({'name': k, 'V_ch': ch, 'pixels': p})
    return pd.DataFrame(volNdist)
def processImage(fname:str, outpath:str, bins=[18,5,1]) -> (dict, np.ndarray):
    """Load one image, plot and save its HSV histogram panel, and return
    both its cubic and scatter histograms.

    Args:
        fname (str): path to the image file
        outpath (str): directory to save the histogram figure into
        bins (list): HSV bin sizes

    Returns:
        tuple: (cubic histogram matrix, scatter histogram dict)
    """
    image = cv2.imread(fname)
    title = '{0}'.format(pathlib.Path(fname).stem)
    print('processing: {0}'.format(title))
    panelFig:Figure = plt.figure()
    panelFig.set_size_inches(w=24, h=24)
    panelFig.suptitle(title, fontsize=32)
    scatterHist = hist3Dpanel(image, panelFig, bins)
    panelFig.savefig('{0}/{1}_histograms.png'.format(outpath, title))
    plt.show()
    plt.close(panelFig)
    # Only the histogram matrix itself is needed; the bin-edge lists are dropped.
    cubicHist = hsvHistogramCubic(image, bins)[0]
    return cubicHist, scatterHist
def batchProcessImages(sourcepath:str, ext:str, outpath:str, bins=None) -> Tuple[dict, dict]:
    """Process every image with the given extension under ``sourcepath``.

    Args:
        sourcepath (str): directory containing the images
        ext (str): file extension to select (e.g. '.png')
        outpath (str): directory to save the per-image figures into
        bins (list, optional): HSV bin sizes forwarded to processImage();
            defaults to [18, 5, 1] (the processImage default)

    Returns:
        Tuple[dict, dict]: (cubic histograms, scatter histograms), both
        keyed by image file stem. (The original annotation said ``dict``
        but a 2-tuple was always returned.)
    """
    if bins is None:
        bins = [18, 5, 1]
    hhCubic = {}
    hhScatter = {}
    for f in getFileList(sourcepath, ext):
        print(f)
        cubicHist, scatterHist = processImage(f, outpath, bins)
        stem = pathlib.Path(f).stem
        hhCubic[stem] = cubicHist
        hhScatter[stem] = scatterHist
    return hhCubic, hhScatter
def emd(hist1:np.ndarray, hist2:np.ndarray) -> float:
    """Calculates earth movers distance between two histograms
    (either 1D or 2D). If the histograms are multi-dimensional,
    they are flattened first. Wasserstein distance between the
    histograms is returned.

    Args:
        hist1 (numpy.ndarray): Histogram1 (1D/2D)
        hist2 (numpy.ndarray): Histogram2 (1D/2D)

    Returns:
        float: Wasserstein distance
    """
    flat1 = hist1.flatten() if hist1.ndim > 1 else hist1
    flat2 = hist2.flatten() if hist2.ndim > 1 else hist2
    return wasserstein_distance(flat1, flat2)
def pairwiseEmd(histograms:list) -> list:
    """Compute the condensed pairwise EMD matrix of a list of histograms.

    Args:
        histograms (list): histograms as numpy arrays

    Returns:
        list: condensed distance list (upper triangle, row by row) — the
        ordering expected by scipy.spatial.distance.squareform
    """
    condensed = []
    count = len(histograms)
    for left in range(count):
        condensed.extend(
            emd(histograms[left], histograms[right])
            for right in range(left + 1, count)
        )
    return condensed
def heatmap(histograms:dict, outpath:str):
    """Cluster histograms by pairwise EMD and save a heatmap and a dendrogram.

    The heatmap rows and columns are reordered to follow the dendrogram
    leaf order, so clusters appear as blocks along the diagonal.

    Args:
        histograms (dict): image name -> histogram (numpy array)
        outpath (str): directory to save heatmap.png and dendrogram.png into
    """
    names = []
    hists = []
    for k, v in histograms.items():
        names.append(k)
        hists.append(v)
    e = pairwiseEmd(hists)
    Y = linkage(e, method='single', metric='braycurtis')
    sqdist = squareform(e)
    # heatmap
    fig = plt.figure()
    hmsize = len(names)
    fig.set_size_inches(w=hmsize, h=hmsize)
    leftDendAxs = fig.add_axes([0, 0, 0.15, 0.9])  # [left, bottom, width, height]
    Z = dendrogram(Y, labels=names, orientation='left')
    # Reorder both axes of the distance matrix to match the leaf order.
    ssqdist = sqdist[:, Z['leaves']]
    ssqdist = ssqdist[Z['leaves'], :]
    rightHeatmapAxs = fig.add_axes([0.15, 0, 0.9, 0.9])
    # Bug fix: matplotlib only accepts 'upper'/'lower' for origin;
    # 'bottom' raises a ValueError. 'lower' keeps row 0 at the bottom.
    rightHeatmapAxs.matshow(ssqdist, origin='lower', cmap='afmhot')
    rightHeatmapAxs.set_xticklabels([''] + Z['ivl'], rotation=90, size='30')
    rightHeatmapAxs.yaxis.set_ticks_position('right')
    rightHeatmapAxs.set_yticklabels([''] + Z['ivl'], size='30')
    rightHeatmapAxs.xaxis.set_major_locator(ticker.MultipleLocator(1))
    rightHeatmapAxs.yaxis.set_major_locator(ticker.MultipleLocator(1))
    fig.savefig('{0}/heatmap.png'.format(outpath))
    plt.show()
    plt.close(fig)
    # dendrogram
    fig = plt.figure()
    fig.set_size_inches(w=hmsize, h=hmsize / 2)
    Z = dendrogram(Y, labels=names, leaf_font_size=30, leaf_rotation=90)
    fig.savefig('{0}/dendrogram.png'.format(outpath))
    plt.show()
    plt.close(fig)
| [
"numpy.logical_not",
"math.log",
"numpy.array",
"numpy.arange",
"os.listdir",
"pathlib.Path",
"matplotlib.pyplot.close",
"scipy.stats.wasserstein_distance",
"scipy.cluster.hierarchy.linkage",
"pandas.DataFrame",
"cv2.LUT",
"scipy.spatial.distance.squareform",
"cv2.cvtColor",
"cv2.imread",
... | [((814, 830), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (824, 830), False, 'import os\n'), ((1338, 1356), 'cv2.LUT', 'cv2.LUT', (['im', 'table'], {}), '(im, table)\n', (1345, 1356), False, 'import cv2\n'), ((1630, 1666), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (1642, 1666), False, 'import cv2\n'), ((6163, 6173), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6171, 6173), True, 'import matplotlib.pyplot as plt\n'), ((7722, 7744), 'pandas.DataFrame', 'pd.DataFrame', (['volNdist'], {}), '(volNdist)\n', (7734, 7744), True, 'import pandas as pd\n'), ((7835, 7852), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (7845, 7852), False, 'import cv2\n'), ((7970, 7982), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7980, 7982), True, 'import matplotlib.pyplot as plt\n'), ((8176, 8186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8184, 8186), True, 'import matplotlib.pyplot as plt\n'), ((8191, 8205), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8200, 8205), True, 'import matplotlib.pyplot as plt\n'), ((9211, 9245), 'scipy.stats.wasserstein_distance', 'wasserstein_distance', (['hist1', 'hist2'], {}), '(hist1, hist2)\n', (9231, 9245), False, 'from scipy.stats import wasserstein_distance\n'), ((9681, 9729), 'scipy.cluster.hierarchy.linkage', 'linkage', (['e'], {'method': '"""single"""', 'metric': '"""braycurtis"""'}), "(e, method='single', metric='braycurtis')\n", (9688, 9729), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((9743, 9756), 'scipy.spatial.distance.squareform', 'squareform', (['e'], {}), '(e)\n', (9753, 9756), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((9781, 9793), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9791, 9793), True, 'import matplotlib.pyplot as plt\n'), ((9952, 9999), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['Y'], {'labels': 'names', 
'orientation': '"""left"""'}), "(Y, labels=names, orientation='left')\n", (9962, 9999), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((10586, 10596), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10594, 10596), True, 'import matplotlib.pyplot as plt\n'), ((10601, 10615), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10610, 10615), True, 'import matplotlib.pyplot as plt\n'), ((10643, 10655), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10653, 10655), True, 'import matplotlib.pyplot as plt\n'), ((10710, 10774), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['Y'], {'labels': 'names', 'leaf_font_size': '(30)', 'leaf_rotation': '(90)'}), '(Y, labels=names, leaf_font_size=30, leaf_rotation=90)\n', (10720, 10774), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((10833, 10843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10841, 10843), True, 'import matplotlib.pyplot as plt\n'), ((10848, 10862), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10857, 10862), True, 'import matplotlib.pyplot as plt\n'), ((1992, 2057), 'numpy.logical_and', 'np.logical_and', (['(hsv[..., 0] >= hSlice[0])', '(hsv[..., 0] < hSlice[1])'], {}), '(hsv[..., 0] >= hSlice[0], hsv[..., 0] < hSlice[1])\n', (2006, 2057), True, 'import numpy as np\n'), ((2156, 2221), 'numpy.logical_and', 'np.logical_and', (['(hsv[..., 1] >= sSlice[0])', '(hsv[..., 1] < sSlice[1])'], {}), '(hsv[..., 1] >= sSlice[0], hsv[..., 1] < sSlice[1])\n', (2170, 2221), True, 'import numpy as np\n'), ((2320, 2385), 'numpy.logical_and', 'np.logical_and', (['(hsv[..., 2] >= vSlice[0])', '(hsv[..., 2] < vSlice[1])'], {}), '(hsv[..., 2] >= vSlice[0], hsv[..., 2] < vSlice[1])\n', (2334, 2385), True, 'import numpy as np\n'), ((2931, 2945), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (2939, 2945), True, 'import numpy as np\n'), ((4686, 4705), 'numpy.array', 'np.array', (['histogram'], {}), 
'(histogram)\n', (4694, 4705), True, 'import numpy as np\n'), ((6651, 6667), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (6659, 6667), True, 'import numpy as np\n'), ((7333, 7345), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7343, 7345), True, 'import matplotlib.pyplot as plt\n'), ((7615, 7625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7623, 7625), True, 'import matplotlib.pyplot as plt\n'), ((7634, 7648), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7643, 7648), True, 'import matplotlib.pyplot as plt\n'), ((10433, 10458), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (10455, 10458), True, 'import matplotlib.ticker as ticker\n'), ((10504, 10529), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (10526, 10529), True, 'import matplotlib.ticker as ticker\n'), ((1753, 1772), 'numpy.array', 'np.array', (['[h, s, v]'], {}), '([h, s, v])\n', (1761, 1772), True, 'import numpy as np\n'), ((2072, 2092), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (2086, 2092), True, 'import numpy as np\n'), ((2236, 2256), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (2250, 2256), True, 'import numpy as np\n'), ((2400, 2420), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (2414, 2420), True, 'import numpy as np\n'), ((4115, 4129), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (4123, 4129), True, 'import numpy as np\n'), ((7881, 7900), 'pathlib.Path', 'pathlib.Path', (['fname'], {}), '(fname)\n', (7893, 7900), False, 'import pathlib\n'), ((2913, 2927), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (2921, 2927), True, 'import numpy as np\n'), ((5012, 5023), 'math.log', 'math.log', (['v'], {}), '(v)\n', (5020, 5023), False, 'import math\n'), ((8538, 8553), 'pathlib.Path', 'pathlib.Path', (['f'], {}), '(f)\n', (8550, 8553), False, 'import pathlib\n'), ((8590, 8605), 
'pathlib.Path', 'pathlib.Path', (['f'], {}), '(f)\n', (8602, 8605), False, 'import pathlib\n'), ((1291, 1308), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (1300, 1308), True, 'import numpy as np\n')] |
import random, os, glob, sys, shutil
import numpy as np
import torch
import logging
import json
import transformers
def add_filehandler_for_logger(output_path, logger, out_name="train"):
    """Attach an appending file handler writing to ``<output_path>/<out_name>.log``.

    The handler logs timestamped records including module and function names.
    """
    log_file = os.path.join(output_path, f"{out_name}.log")
    handler = logging.FileHandler(log_file, mode="a")
    handler.setFormatter(logging.Formatter(
        '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'))
    logger.addHandler(handler)
def set_seed(seed, n_gpu):
    """Seed the python, numpy and torch RNGs for reproducibility.

    When ``n_gpu > 0`` all CUDA devices are seeded as well.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def get_existing_cks(output_path, best_ck=False, return_best_ck=True):
    """Scan ``output_path`` for existing checkpoint sub-directories.

    When ``best_ck`` is True, any directory whose name starts with "best" is
    deleted from disk and dropped from the listing (making room for a new best
    checkpoint).  When ``return_best_ck`` is False, "best" checkpoints are
    merely excluded from the returned mapping.

    Returns ``(sorted_indices, index2path)`` where each index is the trailing
    integer (epoch/step number) parsed from the directory name.
    """
    names = []
    for entry in os.listdir(output_path):
        if os.path.isdir(os.path.join(output_path, entry)):
            names.append(entry)
    if best_ck:
        # purge previously saved "best" checkpoints from disk and from the list
        for best_name in [n for n in names if n.startswith("best")]:
            names.remove(best_name)
            shutil.rmtree(os.path.join(output_path, best_name))
    index2path = {}
    for name in names:
        if not return_best_ck and name.startswith("best"):
            continue
        ck_index = int(os.path.basename(name).split("_")[-1])
        index2path[ck_index] = os.path.join(output_path, name)
    # indices here refer to the epoch/step numbers
    return sorted(index2path), index2path
def save_ck(args, logger, model, tokenizer, steps=0, tag="epoch", best_ck=False):
    """Persist model weights and tokenizer to a new checkpoint directory.

    At most ``args.keep_ck_num`` checkpoints are kept: when the limit is
    reached the oldest (lowest-index) one is removed first.  ``best_ck``
    selects the "best_ck_at_..." naming scheme and (inside
    ``get_existing_cks``) purges older "best" checkpoints.
    """
    sorted_indices, index2path = get_existing_cks(args.output_path, best_ck=best_ck)
    if len(sorted_indices) >= args.keep_ck_num:
        logger.info(
            f"there are already {len(sorted_indices)} checkpoints saved that will be more than keep_ck_num={args.keep_ck_num}")
        logger.info(f"hence, remove the oldest one: {index2path[sorted_indices[0]]}")
        # remove the oldest checkpoint, i.e., the one with the lowest epoch number
        shutil.rmtree(index2path[sorted_indices[0]])
    # Build the checkpoint path once instead of repeating the f-string in every branch.
    ck_name = f"best_ck_at_{tag}_{steps}" if best_ck else f"ck_at_{tag}_{steps}"
    ck_path = os.path.join(args.output_path, ck_name)
    if best_ck:
        # NOTE: previous log message appended a stale ".h5" suffix although
        # save_pretrained writes a directory, not an .h5 file.
        logger.info(f'save best model weights and tokenizer to {ck_path}')
    else:
        logger.info(f'save model weights and tokenizer to {ck_path}')
    tokenizer.save_pretrained(ck_path)
    # Unwrap DataParallel so the underlying model's save_pretrained is used.
    target = model.module if isinstance(model, torch.nn.DataParallel) else model
    target.save_pretrained(ck_path)
def save_and_check_if_early_stop(eval_score, args, logger, model, tokenizer, steps=0, tag="epoch"):
    """Log the evaluation result, checkpoint the model, and report early stopping.

    Updates ``args.best``/``args.wait`` when ``eval_score`` improves and saves a
    "best" checkpoint; a regular checkpoint is always saved.  Returns True when
    ``args.wait`` has reached ``args.patience`` (training should stop).
    Only ``args.eval_on == "acc"`` is supported.
    """
    logger.info("\n")
    logger.info(f"*******eval at {tag} = {steps}*********")
    logger.info(f"val_{args.eval_on}: {eval_score}")
    if args.eval_on != "acc":
        raise ValueError("not support yet")
    if eval_score >= args.best:
        args.wait = 0
        args.best = eval_score
        logger.info(f"so far the best check point at {tag}={steps} based on eval_on {args.eval_on}")
        save_ck(args, logger, model, tokenizer, steps=steps, tag=tag, best_ck=True)
    else:
        args.wait += 1
    logger.info(f"best so far({args.eval_on}): {args.best}")
    logger.info(f"early stop count: {args.wait}/{args.patience}")
    save_ck(args, logger, model, tokenizer, steps=steps, tag=tag, best_ck=False)
    if args.wait >= args.patience:
        logger.info("run out of patience, early stop")
        return True
    return False
def check_output_path(output_path, force=False):
    """Ensure ``output_path`` exists as a fresh directory.

    If it already exists it is wiped and recreated — silently when ``force``
    is True, otherwise only after interactive confirmation (the process exits
    when the user declines).
    """
    if not os.path.isdir(output_path):
        print(f"{output_path} not found, create it now")
        os.makedirs(output_path, exist_ok=True)
        return
    if force:
        print(f"{output_path} exists, remove it as force=True")
        shutil.rmtree(output_path)
        os.makedirs(output_path, exist_ok=True)
        return
    answer = input(
        "Output directory ({}) already exists and is not empty, you wanna remove it before start training? (y/n)".format(
            output_path))
    if answer.lower() == "y":
        shutil.rmtree(output_path)
        os.makedirs(output_path, exist_ok=True)
    else:
        sys.exit(0)
def get_scheduler(optimizer, scheduler: str, warmup_steps: int, num_total: int):
    """Build a transformers learning-rate scheduler for ``optimizer`` by name."""
    assert scheduler in ["constantlr", "warmuplinear", "warmupconstant", "warmupcosine",
                         "warmupcosinewithhardrestarts"], (
        'scheduler should be one of ["constantlr","warmupconstant","warmupcosine","warmupcosinewithhardrestarts"]')
    # Schedules without a training-step horizon:
    if scheduler == 'constantlr':
        return transformers.get_constant_schedule(optimizer)
    if scheduler == 'warmupconstant':
        return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
    # Remaining schedules all share the same (warmup, total) signature.
    factories = {
        'warmuplinear': transformers.get_linear_schedule_with_warmup,
        'warmupcosine': transformers.get_cosine_schedule_with_warmup,
        'warmupcosinewithhardrestarts': transformers.get_cosine_with_hard_restarts_schedule_with_warmup,
    }
    return factories[scheduler](optimizer, num_warmup_steps=warmup_steps,
                                num_training_steps=num_total)
def get_optimizer(args, optim_groups):
    """Create a torch optimizer for ``optim_groups`` according to ``args.optimizer``.

    Hyper-parameters are read from ``args`` with defaults and written back onto
    ``args`` so they are captured later (e.g. by ``write_args``).  Supported
    optimizers: adamw (default), adam, adagrad, adadelta, sgd.
    """
    args.optimizer = args.__dict__.pop("optimizer", "adamw").lower()
    assert args.optimizer in ["adamw", "adam", "adagrad", "adadelta", "sgd"], (
        'optimizer now only supports ["adamw", "adam", "adagrad", "adadelta", "sgd"]')
    if args.optimizer == 'adam':
        args.adam_eps = args.__dict__.pop("adam_eps", 1e-8)
        args.adam_betas = args.__dict__.pop("adam_betas", (0.9, 0.95))
        return torch.optim.Adam(optim_groups, lr=args.lr, eps=args.adam_eps, betas=args.adam_betas)
    elif args.optimizer == 'adagrad':
        args.adagrad_lr_decay = args.__dict__.pop("adagrad_lr_decay", 0)
        args.adagrad_eps = args.__dict__.pop("adagrad_eps", 1e-10)
        # Bug fix: the eps argument previously popped "adagrad_eps" a second
        # time, deleting the attribute that was just stored on args.
        return torch.optim.Adagrad(optim_groups, lr=args.lr, lr_decay=args.adagrad_lr_decay,
                                   eps=args.adagrad_eps)
    elif args.optimizer == 'adadelta':
        args.adadelta_eps = args.__dict__.pop("adadelta_eps", 1e-10)
        return torch.optim.Adadelta(optim_groups, lr=args.lr, eps=args.adadelta_eps)
    elif args.optimizer == 'sgd':
        args.sgd_momentum = args.__dict__.pop("sgd_momentum", 0)
        return torch.optim.SGD(optim_groups, lr=args.lr, momentum=args.sgd_momentum)
    else:
        args.adamw_eps = args.__dict__.pop("adamw_eps", 1e-8)
        args.adamw_betas = args.__dict__.pop("adamw_betas", (0.9, 0.95))
        return torch.optim.AdamW(optim_groups, lr=args.lr, eps=args.adamw_eps, betas=args.adamw_betas)
def write_args(args):
    """Dump ``args.__dict__`` as pretty-printed JSON to ``<output_path>/args.json``.

    The same JSON text is also echoed to stdout for quick inspection.
    """
    # Serialize once and reuse for both the console echo and the file write
    # (previously json.dumps was computed twice).
    serialized = json.dumps(args.__dict__, indent=2)
    with open(os.path.join(args.output_path, "args.json"), "w") as f:
        print(serialized)
        f.write(serialized)
def print_model_state_dict(model, logger):
    """Log every tensor name in the model's state dict together with its shape."""
    state = model.state_dict()
    for name, tensor in state.items():
        logger.info(f"{name}\t{tensor.size()}")
def count_params(model, logger, print_details=False):
    """Log total / trainable / non-trainable parameter counts of ``model``.

    For ``torch.nn.Sequential`` models the counts are accumulated per
    sub-module; with ``print_details`` each (sub-)module and its state dict
    are logged as well.
    """
    total_count = 0
    trainable_count = 0
    if isinstance(model, torch.nn.Sequential):
        for sub_module in model._modules.values():
            if print_details:
                print_model_state_dict(sub_module, logger)
                logger.info(sub_module)
            trainable_count += sum(p.numel() for p in sub_module.parameters() if p.requires_grad)
            total_count += sum(p.numel() for p in sub_module.parameters())
    else:
        if print_details:
            print_model_state_dict(model, logger)
            logger.info(model)
        total_count = sum(p.numel() for p in model.parameters())
        trainable_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f' Model Total params: {total_count}')
    logger.info(f' Model Trainable params: {trainable_count}')
    logger.info(f' Model Non-trainable params: {total_count - trainable_count}')
| [
"transformers.get_constant_schedule_with_warmup",
"sys.exit",
"os.listdir",
"transformers.get_constant_schedule",
"json.dumps",
"os.path.isdir",
"numpy.random.seed",
"torch.optim.Adadelta",
"torch.optim.SGD",
"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",
"torch.cuda.manual_s... | [((207, 314), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s"""'], {}), "(\n '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'\n )\n", (224, 314), False, 'import logging\n'), ((509, 526), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (520, 526), False, 'import random, os, glob, sys, shutil\n'), ((531, 551), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (545, 551), True, 'import numpy as np\n'), ((556, 579), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (573, 579), False, 'import torch\n'), ((3990, 4016), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (4003, 4016), False, 'import random, os, glob, sys, shutil\n'), ((343, 387), 'os.path.join', 'os.path.join', (['output_path', 'f"""{out_name}.log"""'], {}), "(output_path, f'{out_name}.log')\n", (355, 387), False, 'import random, os, glob, sys, shutil\n'), ((606, 638), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (632, 638), False, 'import torch\n'), ((1809, 1853), 'shutil.rmtree', 'shutil.rmtree', (['index2path[sorted_indices[0]]'], {}), '(index2path[sorted_indices[0]])\n', (1822, 1853), False, 'import random, os, glob, sys, shutil\n'), ((4653, 4692), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (4664, 4692), False, 'import random, os, glob, sys, shutil\n'), ((5089, 5134), 'transformers.get_constant_schedule', 'transformers.get_constant_schedule', (['optimizer'], {}), '(optimizer)\n', (5123, 5134), False, 'import transformers\n'), ((6558, 6647), 'torch.optim.Adam', 'torch.optim.Adam', (['optim_groups'], {'lr': 'args.lr', 'eps': 'args.adam_eps', 'betas': 'args.adam_betas'}), '(optim_groups, lr=args.lr, eps=args.adam_eps, betas=args.\n adam_betas)\n', (6574, 6647), False, 'import torch\n'), ((747, 
770), 'os.listdir', 'os.listdir', (['output_path'], {}), '(output_path)\n', (757, 770), False, 'import random, os, glob, sys, shutil\n'), ((1195, 1229), 'os.path.join', 'os.path.join', (['output_path', 'each_ck'], {}), '(output_path, each_ck)\n', (1207, 1229), False, 'import random, os, glob, sys, shutil\n'), ((2137, 2196), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""best_ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'best_ck_at_{tag}_{steps}')\n", (2149, 2196), False, 'import random, os, glob, sys, shutil\n'), ((2636, 2690), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'ck_at_{tag}_{steps}')\n", (2648, 2690), False, 'import random, os, glob, sys, shutil\n'), ((4116, 4142), 'shutil.rmtree', 'shutil.rmtree', (['output_path'], {}), '(output_path)\n', (4129, 4142), False, 'import random, os, glob, sys, shutil\n'), ((4155, 4194), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (4166, 4194), False, 'import random, os, glob, sys, shutil\n'), ((5190, 5283), 'transformers.get_constant_schedule_with_warmup', 'transformers.get_constant_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps'}), '(optimizer, num_warmup_steps=\n warmup_steps)\n', (5236, 5283), False, 'import transformers\n'), ((7656, 7699), 'os.path.join', 'os.path.join', (['args.output_path', '"""args.json"""'], {}), "(args.output_path, 'args.json')\n", (7668, 7699), False, 'import random, os, glob, sys, shutil\n'), ((7726, 7761), 'json.dumps', 'json.dumps', (['args.__dict__'], {'indent': '(2)'}), '(args.__dict__, indent=2)\n', (7736, 7761), False, 'import json\n'), ((7779, 7814), 'json.dumps', 'json.dumps', (['args.__dict__'], {'indent': '(2)'}), '(args.__dict__, indent=2)\n', (7789, 7814), False, 'import json\n'), ((788, 819), 'os.path.join', 'os.path.join', (['output_path', 'name'], {}), '(output_path, name)\n', (800, 819), False, 'import 
random, os, glob, sys, shutil\n'), ((977, 1006), 'os.path.join', 'os.path.join', (['output_path', 'ex'], {}), '(output_path, ex)\n', (989, 1006), False, 'import random, os, glob, sys, shutil\n'), ((2292, 2351), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""best_ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'best_ck_at_{tag}_{steps}')\n", (2304, 2351), False, 'import random, os, glob, sys, shutil\n'), ((2401, 2460), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""best_ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'best_ck_at_{tag}_{steps}')\n", (2413, 2460), False, 'import random, os, glob, sys, shutil\n'), ((2786, 2840), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'ck_at_{tag}_{steps}')\n", (2798, 2840), False, 'import random, os, glob, sys, shutil\n'), ((2890, 2944), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'ck_at_{tag}_{steps}')\n", (2902, 2944), False, 'import random, os, glob, sys, shutil\n'), ((4449, 4475), 'shutil.rmtree', 'shutil.rmtree', (['output_path'], {}), '(output_path)\n', (4462, 4475), False, 'import random, os, glob, sys, shutil\n'), ((4492, 4531), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (4503, 4531), False, 'import random, os, glob, sys, shutil\n'), ((4566, 4577), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4574, 4577), False, 'import random, os, glob, sys, shutil\n'), ((5332, 5453), 'transformers.get_linear_schedule_with_warmup', 'transformers.get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'num_total'}), '(optimizer, num_warmup_steps=\n warmup_steps, num_training_steps=num_total)\n', (5376, 5453), False, 'import transformers\n'), ((7117, 7186), 'torch.optim.Adadelta', 'torch.optim.Adadelta', (['optim_groups'], {'lr': 'args.lr', 'eps': 
'args.adadelta_eps'}), '(optim_groups, lr=args.lr, eps=args.adadelta_eps)\n', (7137, 7186), False, 'import torch\n'), ((2037, 2099), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""best_ck_at_{tag}_{steps}.h5"""'], {}), "(args.output_path, f'best_ck_at_{tag}_{steps}.h5')\n", (2049, 2099), False, 'import random, os, glob, sys, shutil\n'), ((2544, 2598), 'os.path.join', 'os.path.join', (['args.output_path', 'f"""ck_at_{tag}_{steps}"""'], {}), "(args.output_path, f'ck_at_{tag}_{steps}')\n", (2556, 2598), False, 'import random, os, glob, sys, shutil\n'), ((5562, 5683), 'transformers.get_cosine_schedule_with_warmup', 'transformers.get_cosine_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'num_total'}), '(optimizer, num_warmup_steps=\n warmup_steps, num_training_steps=num_total)\n', (5606, 5683), False, 'import transformers\n'), ((7301, 7370), 'torch.optim.SGD', 'torch.optim.SGD', (['optim_groups'], {'lr': 'args.lr', 'momentum': 'args.sgd_momentum'}), '(optim_groups, lr=args.lr, momentum=args.sgd_momentum)\n', (7316, 7370), False, 'import torch\n'), ((7531, 7623), 'torch.optim.AdamW', 'torch.optim.AdamW', (['optim_groups'], {'lr': 'args.lr', 'eps': 'args.adamw_eps', 'betas': 'args.adamw_betas'}), '(optim_groups, lr=args.lr, eps=args.adamw_eps, betas=args.\n adamw_betas)\n', (7548, 7623), False, 'import torch\n'), ((5808, 5947), 'transformers.get_cosine_with_hard_restarts_schedule_with_warmup', 'transformers.get_cosine_with_hard_restarts_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'num_total'}), '(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=num_total)\n', (5871, 5947), False, 'import transformers\n'), ((1150, 1175), 'os.path.basename', 'os.path.basename', (['each_ck'], {}), '(each_ck)\n', (1166, 1175), False, 'import random, os, glob, sys, shutil\n')] |
# setup.py
# only if building in place: ``python setup.py build_ext --inplace``
import os
import sys
import platform
import glob
from setuptools import setup, find_packages
# NOTE(review): this import deliberately shadows setuptools' setup/Extension
# with the numpy.distutils versions, which drive the C extension build below.
from numpy.distutils.core import setup, Extension
# Make numpy.distutils append (rather than replace) user-supplied compiler flags.
os.environ['NPY_DISTUTILS_APPEND_FLAGS'] = '1'
#if os.name == 'nt': # Windows.
#    extra_compile_args = ['/TC', '/D', 'ANSI'] # for msvs
#    # TODO: Not with Anaconda MINGW
#else:
#extra_compile_args = ''
# Root directory of the C sources, relative to this file.
froot = 'pyframe3dd' + os.sep + 'src' + os.sep
# Native extension wrapping the Frame3DD C sources.
pyframeExt = Extension('pyframe3dd._pyframe3dd', sources=[froot+'py_HPGmatrix.c',
                                                             froot+'HPGutil.c',
                                                             froot+'NRutil.c',
                                                             froot+'coordtrans.c',
                                                             froot+'preframe.c',
                                                             froot+'py_eig.c',
                                                             froot+'py_frame3dd.c',
                                                             froot+'py_io.c',
                                                             froot+'py_main.c'])
setup(
    name='pyFrame3DD',
    version='1.1.1',
    description='Python bindings to Frame3DD',
    author='NREL WISDEM Team',
    author_email='<EMAIL>',
    #package_dir={'': 'src'},
    #py_modules=['pyframe3dd'],
    package_data={'pyframe3dd': []},
    packages=['pyframe3dd'],
    license='Apache License, Version 2.0',
    ext_modules=[pyframeExt],
    zip_safe=False
)
| [
"numpy.distutils.core.Extension",
"numpy.distutils.core.setup"
] | [((496, 752), 'numpy.distutils.core.Extension', 'Extension', (['"""pyframe3dd._pyframe3dd"""'], {'sources': "[froot + 'py_HPGmatrix.c', froot + 'HPGutil.c', froot + 'NRutil.c', froot +\n 'coordtrans.c', froot + 'preframe.c', froot + 'py_eig.c', froot +\n 'py_frame3dd.c', froot + 'py_io.c', froot + 'py_main.c']"}), "('pyframe3dd._pyframe3dd', sources=[froot + 'py_HPGmatrix.c', \n froot + 'HPGutil.c', froot + 'NRutil.c', froot + 'coordtrans.c', froot +\n 'preframe.c', froot + 'py_eig.c', froot + 'py_frame3dd.c', froot +\n 'py_io.c', froot + 'py_main.c'])\n", (505, 752), False, 'from numpy.distutils.core import setup, Extension\n'), ((1099, 1391), 'numpy.distutils.core.setup', 'setup', ([], {'name': '"""pyFrame3DD"""', 'version': '"""1.1.1"""', 'description': '"""Python bindings to Frame3DD"""', 'author': '"""NREL WISDEM Team"""', 'author_email': '"""<EMAIL>"""', 'package_data': "{'pyframe3dd': []}", 'packages': "['pyframe3dd']", 'license': '"""Apache License, Version 2.0"""', 'ext_modules': '[pyframeExt]', 'zip_safe': '(False)'}), "(name='pyFrame3DD', version='1.1.1', description=\n 'Python bindings to Frame3DD', author='NREL WISDEM Team', author_email=\n '<EMAIL>', package_data={'pyframe3dd': []}, packages=['pyframe3dd'],\n license='Apache License, Version 2.0', ext_modules=[pyframeExt],\n zip_safe=False)\n", (1104, 1391), False, 'from numpy.distutils.core import setup, Extension\n')] |
import math
import numpy as np
from time import time
from srauv_settings import SETTINGS
# Minimum time between thruster direction changes (smoothing / motor protection).
thruster_min_change_time = 0.5 # 500ms
# Baseline thruster speed magnitude; fractional speeds are derived from this.
base_speed = 20 # a guess
# Select the TFLite runtime: Coral Edge TPU when configured, else TensorFlow.
if SETTINGS["hardware"]["coral"] is True:
    from pycoral.utils import edgetpu
else:
    import tensorflow as tf
class AutoPilot:
    """Neural-network autopilot for the SRAUV.

    Feeds vehicle telemetry (positions, target, IMU readings) into a TFLite
    policy model ('pilot.tflite') and converts the model's per-thruster action
    logits into signed thruster speed commands.  Direction changes are
    rate-limited by ``_thruster_safety`` to avoid rapid thruster reversals.
    """
    def __init__(self, tel_msg: dict):
        """Load the TFLite model and initialize telemetry / timing state.

        ``tel_msg`` is the shared telemetry dict; it is kept by reference, so
        later reads in the observation builders see live values.
        """
        if SETTINGS["hardware"]["coral"] is True:
            self.interpreter = edgetpu.make_interpreter('pilot.tflite')
        else:
            self.interpreter = tf.lite.Interpreter(model_path='pilot.tflite')
        # for smoothing logic
        # One (last_change_time, last_direction) pair per thruster, consumed
        # by _thruster_safety to rate-limit direction flips.
        self.thruster_timers = [(time(), 0), (time(), 0), (time(), 0),
                                (time(), 0), (time(), 0), (time(), 0)] # TODO: -1 len for new model
        # for velocity calc logic
        self.velocity_timer = 0
        self.last_pos_x = tel_msg['pos_x']
        self.last_pos_y = tel_msg['pos_y']
        self.last_pos_z = tel_msg['pos_z']
        self.tel_msg = tel_msg
        # Vectorized exp used to turn the model's log-probabilities into probabilities.
        self.exp = np.vectorize(math.exp)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        # All-ones action mask matching input tensor 0 (no actions masked out).
        self.action_masks = np.array(np.ones(self.input_details[0]['shape']), dtype=np.float32)
    def _collect_observations_accel(self):
        """Build the observation vector using raw IMU linear accelerations.

        Layout (12 floats): tag flag, current xyz, heading, target xyz,
        linear accel xyz, gyro_y — reshaped to input tensor 1's shape.
        """
        return np.reshape(np.array([
            self.tel_msg['tag_dict']['recent'][0],
            self.tel_msg['pos_x'],
            self.tel_msg['pos_y'],
            self.tel_msg['pos_z'],
            self.tel_msg['heading'],
            self.tel_msg['target_pos_x'],
            self.tel_msg['target_pos_y'],
            self.tel_msg['target_pos_z'],
            self.tel_msg['imu_dict']['linear_accel_x'],
            self.tel_msg['imu_dict']['linear_accel_y'],
            self.tel_msg['imu_dict']['linear_accel_z'],
            self.tel_msg['imu_dict']['gyro_y']
        ], dtype=np.float32), self.input_details[1]['shape'])
    def _collect_observations_vel(self):
        """Build the observation vector using finite-difference velocities.

        Velocities are estimated from position deltas since the previous call
        and clamped to +/-0.15 (units presumably m/s — TODO confirm) to reject
        position-estimate jumps.  First call reports zero velocity.
        """
        curr_time = time()
        vel_x = 0
        vel_y = 0
        vel_z = 0
        if self.velocity_timer != 0:
            vel_x = (self.tel_msg['pos_x'] - self.last_pos_x)/(curr_time - self.velocity_timer)
            vel_y = (self.tel_msg['pos_y'] - self.last_pos_y)/(curr_time - self.velocity_timer)
            vel_z = (self.tel_msg['pos_z'] - self.last_pos_z)/(curr_time - self.velocity_timer)
            # Clamp each component to +/-0.15, preserving sign.
            if abs(vel_x) > 0.15: vel_x = 0.15 if vel_x > 0 else -0.15
            if abs(vel_y) > 0.15: vel_y = 0.15 if vel_y > 0 else -0.15
            if abs(vel_z) > 0.15: vel_z = 0.15 if vel_z > 0 else -0.15
        # update params for next run
        self.velocity_timer = curr_time
        self.last_pos_x = self.tel_msg['pos_x']
        self.last_pos_y = self.tel_msg['pos_y']
        self.last_pos_z = self.tel_msg['pos_z']
        return np.reshape(np.array([
            self.tel_msg['tag_dict']['recent'][0],
            self.tel_msg['pos_x'],
            self.tel_msg['pos_y'],
            self.tel_msg['pos_z'],
            self.tel_msg['heading'],
            self.tel_msg['target_pos_x'],
            self.tel_msg['target_pos_y'],
            self.tel_msg['target_pos_z'], #TODO: add goal heading next for new model
            vel_x,
            vel_y,
            vel_z,
            self.tel_msg['imu_dict']['gyro_y']
        ], dtype=np.float32), self.input_details[1]['shape'])
    def _thruster_safety(self, dir_thrust):
        """Rate-limit thruster direction changes.

        A thruster may only switch to a new commanded value after
        ``thruster_min_change_time`` seconds have elapsed since its last
        change; otherwise the previous value is kept.  Mutates and returns
        ``dir_thrust``.
        """
        curr_time = time()
        for i in range(0, len(dir_thrust)):
            if dir_thrust[i] != self.thruster_timers[i][1]:
                if (self.thruster_timers[i][0] + thruster_min_change_time) < curr_time:
                    # been long enough, its okay to swap dir
                    self.thruster_timers[i] = (curr_time, dir_thrust[i])
                else:
                    # keep old thrust dir, hasnt been long enough to swap
                    dir_thrust[i] = self.thruster_timers[i][1]
        return dir_thrust
    def get_action(self):
        """Run the model and return six signed thruster speeds.

        Each thruster's 7-way categorical output maps to a speed level:
        0..2 = reverse full/half/quarter, 3 = stop, 4..6 = forward
        quarter/half/full of ``base_speed``.  Results pass through
        ``_thruster_safety`` before being returned.
        """
        self.interpreter.set_tensor(self.input_details[0]['index'], self.action_masks)
        self.interpreter.set_tensor(self.input_details[1]['index'], self._collect_observations_vel())
        self.interpreter.invoke()
        # 129 is the model-specific output tensor index — TODO confirm against
        # the converted model (129 or 111 for new model).
        actions = self.exp(self.interpreter.get_tensor(129)[0]) # 129 or 111 for new model
        if not actions.any():
            # Model produced all-zero output; command all thrusters to stop.
            return [0, 0, 0, 0, 0, 0]
        #TODO: remove thrust 0 for new model
        # argmax over each thruster's 7-way slice of the flat action vector.
        thruster0 = actions[0:7].argmax(axis=0)
        thruster1 = actions[7:14].argmax(axis=0)
        thruster2 = actions[14:21].argmax(axis=0)
        thruster3 = actions[21:28].argmax(axis=0)
        verts = actions[28:35].argmax(axis=0)
        # print(f'thruster0: {thruster0} {actions[0:7]} {actions[0:7].sum()}')
        # print(f'thruster1: {thruster1} {actions[7:14]} {actions[7:14].sum()}')
        # print(f'thruster2: {thruster2} {actions[14:21]} {actions[14:21].sum()}')
        # print(f'thruster3: {thruster3} {actions[21:28]} {actions[21:28].sum()}')
        # print(f'verts: {verts} {actions[28:35]} {actions[28:35].sum()}')
        # print(actions)
        # Both vertical thrusters share the single "verts" action.
        dir_thrust = [thruster0, thruster1, thruster2, thruster3, verts, verts]
        for i in range(0, len(dir_thrust)):
            spd = 0
            if dir_thrust[i] == 0:
                spd = -base_speed
            elif dir_thrust[i] == 1:
                spd = -base_speed / 2
            elif dir_thrust[i] == 2:
                spd = -base_speed / 4
            elif dir_thrust[i] == 4:
                spd = base_speed / 4
            elif dir_thrust[i] == 5:
                spd = base_speed / 2
            elif dir_thrust[i] == 6:
                spd = base_speed
            dir_thrust[i] = spd
        return self._thruster_safety(dir_thrust)
    def get_action_old(self):
        """Legacy action decoder returning four direction strings.

        Maps the model's longitudinal / lateral / vertical / yaw 3-way outputs
        to symbolic commands ('fwd', 'lat_right', 'rot_left', 'up', ... or '_'
        for no motion), then applies ``_thruster_safety``.
        """
        self.interpreter.set_tensor(self.input_details[0]['index'], self.action_masks)
        self.interpreter.set_tensor(self.input_details[1]['index'], self._collect_observations_vel())
        self.interpreter.invoke()
        # 111 is the legacy model's output tensor index — TODO confirm.
        actions = self.exp(self.interpreter.get_tensor(111)[0]) # 111 0r 105
        if not actions.any():
            return ['_', '_', '_', '_']
        longitudinal = actions[0:3].argmax(axis=0)
        laterial = actions[3:6].argmax(axis=0)
        vertical = actions[6:9].argmax(axis=0)
        yaw = actions[9:12].argmax(axis=0)
        # print(f'longitudinal: {longitudinal} {actions[0:3]} {actions[0:3].sum()}')
        # print(f'laterial: {laterial} {actions[3:6]} {actions[3:6].sum()}')
        # print(f'vertical: {vertical} {actions[6:9]} {actions[6:9].sum()}')
        # print(f'yaw: {yaw} {actions[9:12]} {actions[9:12].sum()}')
        # print(actions)
        dir_thrust = []
        if longitudinal == 1:
            dir_thrust.append('fwd')
        elif longitudinal == 2:
            dir_thrust.append('rev')
        else:
            dir_thrust.append('_')
        if laterial == 1:
            dir_thrust.append('lat_right')
        elif laterial == 2:
            dir_thrust.append('lat_left')
        else:
            dir_thrust.append('_')
        if yaw == 1:
            dir_thrust.append('rot_right')
        elif yaw == 2:
            dir_thrust.append('rot_left')
        else:
            dir_thrust.append('_')
        if vertical == 1:
            dir_thrust.append('up')
        elif vertical == 2:
            dir_thrust.append('down')
        else:
            dir_thrust.append('_')
        return self._thruster_safety(dir_thrust)
| [
"tensorflow.lite.Interpreter",
"numpy.ones",
"pycoral.utils.edgetpu.make_interpreter",
"numpy.array",
"time.time",
"numpy.vectorize"
] | [((1006, 1028), 'numpy.vectorize', 'np.vectorize', (['math.exp'], {}), '(math.exp)\n', (1018, 1028), True, 'import numpy as np\n'), ((2048, 2054), 'time.time', 'time', ([], {}), '()\n', (2052, 2054), False, 'from time import time\n'), ((3539, 3545), 'time.time', 'time', ([], {}), '()\n', (3543, 3545), False, 'from time import time\n'), ((411, 451), 'pycoral.utils.edgetpu.make_interpreter', 'edgetpu.make_interpreter', (['"""pilot.tflite"""'], {}), "('pilot.tflite')\n", (435, 451), False, 'from pycoral.utils import edgetpu\n'), ((497, 543), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""pilot.tflite"""'}), "(model_path='pilot.tflite')\n", (516, 543), True, 'import tensorflow as tf\n'), ((1246, 1285), 'numpy.ones', 'np.ones', (["self.input_details[0]['shape']"], {}), "(self.input_details[0]['shape'])\n", (1253, 1285), True, 'import numpy as np\n'), ((1375, 1816), 'numpy.array', 'np.array', (["[self.tel_msg['tag_dict']['recent'][0], self.tel_msg['pos_x'], self.tel_msg\n ['pos_y'], self.tel_msg['pos_z'], self.tel_msg['heading'], self.tel_msg\n ['target_pos_x'], self.tel_msg['target_pos_y'], self.tel_msg[\n 'target_pos_z'], self.tel_msg['imu_dict']['linear_accel_x'], self.\n tel_msg['imu_dict']['linear_accel_y'], self.tel_msg['imu_dict'][\n 'linear_accel_z'], self.tel_msg['imu_dict']['gyro_y']]"], {'dtype': 'np.float32'}), "([self.tel_msg['tag_dict']['recent'][0], self.tel_msg['pos_x'],\n self.tel_msg['pos_y'], self.tel_msg['pos_z'], self.tel_msg['heading'],\n self.tel_msg['target_pos_x'], self.tel_msg['target_pos_y'], self.\n tel_msg['target_pos_z'], self.tel_msg['imu_dict']['linear_accel_x'],\n self.tel_msg['imu_dict']['linear_accel_y'], self.tel_msg['imu_dict'][\n 'linear_accel_z'], self.tel_msg['imu_dict']['gyro_y']], dtype=np.float32)\n", (1383, 1816), True, 'import numpy as np\n'), ((2934, 3260), 'numpy.array', 'np.array', (["[self.tel_msg['tag_dict']['recent'][0], self.tel_msg['pos_x'], self.tel_msg\n ['pos_y'], 
self.tel_msg['pos_z'], self.tel_msg['heading'], self.tel_msg\n ['target_pos_x'], self.tel_msg['target_pos_y'], self.tel_msg[\n 'target_pos_z'], vel_x, vel_y, vel_z, self.tel_msg['imu_dict']['gyro_y']]"], {'dtype': 'np.float32'}), "([self.tel_msg['tag_dict']['recent'][0], self.tel_msg['pos_x'],\n self.tel_msg['pos_y'], self.tel_msg['pos_z'], self.tel_msg['heading'],\n self.tel_msg['target_pos_x'], self.tel_msg['target_pos_y'], self.\n tel_msg['target_pos_z'], vel_x, vel_y, vel_z, self.tel_msg['imu_dict'][\n 'gyro_y']], dtype=np.float32)\n", (2942, 3260), True, 'import numpy as np\n'), ((608, 614), 'time.time', 'time', ([], {}), '()\n', (612, 614), False, 'from time import time\n'), ((621, 627), 'time.time', 'time', ([], {}), '()\n', (625, 627), False, 'from time import time\n'), ((634, 640), 'time.time', 'time', ([], {}), '()\n', (638, 640), False, 'from time import time\n'), ((679, 685), 'time.time', 'time', ([], {}), '()\n', (683, 685), False, 'from time import time\n'), ((692, 698), 'time.time', 'time', ([], {}), '()\n', (696, 698), False, 'from time import time\n'), ((705, 711), 'time.time', 'time', ([], {}), '()\n', (709, 711), False, 'from time import time\n')] |
from data_loader import DataLoader
from arguments import Arguments
from const import N_USERS, N_ITEMS
import numpy as np
import torch
import pickle
from secure_aggregation.aggregator import Aggregator
from ldp import LocalDP
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
class Client:
    """A federated-learning client holding one user's interaction data.

    The client reports a randomized mix of (some of) its seen items plus
    sampled unseen items, so the server cannot tell which items the user
    actually interacted with.
    """
    def __init__(self, u_id, data_loader, args):
        """Populate train/test splits and seen/unseen item sets for user ``u_id``."""
        self.u_id = u_id
        self.data_loader = data_loader
        self.train_data = data_loader.get_train_data_by_uid(u_id)
        self.test_data = data_loader.get_test_data_by_uid(u_id)
        self.seen_items = data_loader.get_train_items_by_uid(u_id)
        self.unseen_items = data_loader.get_unseen_items_by_uid(u_id)
        self.args = args
        self.item_generator = LocalDP(self.seen_items)
        self.acceptance_rate_for_pos_items = 1.0
    def commit_train_items(self):
        """Sample the randomized set of item ids this client reports for training."""
        n_pos = self.args.n_pos_per_user
        pos_sample = np.random.choice(self.seen_items, replace=True, size=n_pos)
        neg_sample = np.random.choice(self.unseen_items, replace=True,
                                      size=n_pos * self.args.n_neg_per_pos)
        pos_sample = np.unique(pos_sample)
        neg_sample = np.unique(neg_sample)
        # Randomly select/discard some of the sampled positive items.
        pos_sample = self.select_positive_items_via_randomization(pos_sample)
        return np.concatenate((pos_sample, neg_sample), axis=0)
    def select_positive_items_via_randomization(self, pre_selected_positive_items):
        """Keep each candidate positive item with the adaptive acceptance rate."""
        # Balance the update frequency of seen vs. unseen items.
        rate = min(
            self.args.n_pos_per_user * len(self.seen_items) / len(self.unseen_items),
            1.0
        )
        self.acceptance_rate_for_pos_items = rate
        kept = [i_id for i_id in pre_selected_positive_items if np.random.rand() < rate]
        return np.array(kept)
    def partition_seen_and_unseen_items(self, i_ids):
        """Split ``i_ids`` into (seen, unseen) arrays relative to this client."""
        seen, unseen = [], []
        for i_id in i_ids:
            (seen if i_id in self.seen_items else unseen).append(i_id)
        return np.array(seen), np.array(unseen)
if __name__ == '__main__':
    # Analysis script: builds all clients, then for one user records how often
    # each item id appears in the randomized training commits over many rounds
    # and plots seen vs. unseen request frequencies.
    global_args = Arguments()
    train_data = torch.load('train_data.pt')
    test_data = torch.load('test_data.pt')
    with open('unseen_items.pickle', 'rb') as f:
        unseen_data = pickle.load(f)
    data_loader = DataLoader(global_args, train_data, test_data, unseen_data)
    aggregator = Aggregator()
    clients = []
    for u_id in range(N_USERS):
        args = Arguments()
        # It is more efficient to let all clients and the server have the same model initialization
        # as it is much quicker to converge and more consistent in updating model parameters
        clients.append(Client(u_id, data_loader, args))
        # if len(clients[u_id].seen_items) < 21:
        #     print(u_id)
    # User under examination (others listed were previous candidates).
    examine_u_id = 894 #33, 925, 894, 725, 511, 170, 171, 677, 218, 784, 826
    examine_client = clients[examine_u_id]
    # Keep track of item frequency for each requested item with respect to the selected user
    item_freq = {}
    for t in range(500):
        randomized_items = examine_client.commit_train_items()
        for i_id in randomized_items:
            if i_id not in item_freq:
                item_freq[i_id] = 1
            else:
                item_freq[i_id] += 1
    seen_items, unseen_items = examine_client.partition_seen_and_unseen_items(list(item_freq.keys()))
    n_total_seen_items = len(examine_client.seen_items)
    print('N seen items={}, N unseen items={}'.format(n_total_seen_items, N_ITEMS - n_total_seen_items - 1))
    print('Acceptance rate for positive items {}'.format(examine_client.acceptance_rate_for_pos_items))
    print(seen_items)
    for i_id in item_freq.keys():
        if i_id in seen_items:
            print('Id {} is requested {} times'.format(i_id, item_freq[i_id]))
    # Scatter plot of request frequency per item id, colored by seen/unseen.
    i_ids = np.array(list(item_freq.keys()))
    freqs = np.array(list(item_freq.values()))
    labels = ['Seen' if i_id in seen_items else 'Unseen' for i_id in i_ids]
    df_plot = pd.DataFrame(data={
        'id': i_ids,
        'freq': freqs,
        'type': labels
    })
    ax = sns.scatterplot(data=df_plot, x='id', y='freq', hue='type')
    ax.set(xlabel='Item ID', ylabel='Frequency')
    ax.set_title('Update/Request frequency of seen and unseen items with respect to user {}\n N_SEEN_ITEMS={}, N_UNSEEN_ITEMS={}'.format(examine_u_id, n_total_seen_items, N_ITEMS - n_total_seen_items - 1))
    plt.show()
| [
"numpy.unique",
"numpy.random.rand",
"data_loader.DataLoader",
"numpy.random.choice",
"torch.load",
"pickle.load",
"arguments.Arguments",
"numpy.array",
"secure_aggregation.aggregator.Aggregator",
"seaborn.scatterplot",
"numpy.concatenate",
"pandas.DataFrame",
"ldp.LocalDP",
"matplotlib.py... | [((2539, 2550), 'arguments.Arguments', 'Arguments', ([], {}), '()\n', (2548, 2550), False, 'from arguments import Arguments\n'), ((2568, 2595), 'torch.load', 'torch.load', (['"""train_data.pt"""'], {}), "('train_data.pt')\n", (2578, 2595), False, 'import torch\n'), ((2612, 2638), 'torch.load', 'torch.load', (['"""test_data.pt"""'], {}), "('test_data.pt')\n", (2622, 2638), False, 'import torch\n'), ((2744, 2803), 'data_loader.DataLoader', 'DataLoader', (['global_args', 'train_data', 'test_data', 'unseen_data'], {}), '(global_args, train_data, test_data, unseen_data)\n', (2754, 2803), False, 'from data_loader import DataLoader\n'), ((2822, 2834), 'secure_aggregation.aggregator.Aggregator', 'Aggregator', ([], {}), '()\n', (2832, 2834), False, 'from secure_aggregation.aggregator import Aggregator\n'), ((4449, 4512), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'id': i_ids, 'freq': freqs, 'type': labels}"}), "(data={'id': i_ids, 'freq': freqs, 'type': labels})\n", (4461, 4512), True, 'import pandas as pd\n'), ((4552, 4611), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df_plot', 'x': '"""id"""', 'y': '"""freq"""', 'hue': '"""type"""'}), "(data=df_plot, x='id', y='freq', hue='type')\n", (4567, 4611), True, 'import seaborn as sns\n'), ((4871, 4881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4879, 4881), True, 'import matplotlib.pyplot as plt\n'), ((827, 851), 'ldp.LocalDP', 'LocalDP', (['self.seen_items'], {}), '(self.seen_items)\n', (834, 851), False, 'from ldp import LocalDP\n'), ((960, 1038), 'numpy.random.choice', 'np.random.choice', (['self.seen_items'], {'replace': '(True)', 'size': 'self.args.n_pos_per_user'}), '(self.seen_items, replace=True, size=self.args.n_pos_per_user)\n', (976, 1038), True, 'import numpy as np\n'), ((1065, 1176), 'numpy.random.choice', 'np.random.choice', (['self.unseen_items'], {'replace': '(True)', 'size': '(self.args.n_pos_per_user * self.args.n_neg_per_pos)'}), '(self.unseen_items, 
replace=True, size=self.args.\n n_pos_per_user * self.args.n_neg_per_pos)\n', (1081, 1176), True, 'import numpy as np\n'), ((1196, 1220), 'numpy.unique', 'np.unique', (['seen_item_set'], {}), '(seen_item_set)\n', (1205, 1220), True, 'import numpy as np\n'), ((1247, 1273), 'numpy.unique', 'np.unique', (['unseen_item_set'], {}), '(unseen_item_set)\n', (1256, 1273), True, 'import numpy as np\n'), ((1468, 1524), 'numpy.concatenate', 'np.concatenate', (['(seen_item_set, unseen_item_set)'], {'axis': '(0)'}), '((seen_item_set, unseen_item_set), axis=0)\n', (1482, 1524), True, 'import numpy as np\n'), ((2144, 2168), 'numpy.array', 'np.array', (['selected_items'], {}), '(selected_items)\n', (2152, 2168), True, 'import numpy as np\n'), ((2710, 2724), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2721, 2724), False, 'import pickle\n'), ((2900, 2911), 'arguments.Arguments', 'Arguments', ([], {}), '()\n', (2909, 2911), False, 'from arguments import Arguments\n'), ((2448, 2468), 'numpy.array', 'np.array', (['seen_items'], {}), '(seen_items)\n', (2456, 2468), True, 'import numpy as np\n'), ((2470, 2492), 'numpy.array', 'np.array', (['unseen_items'], {}), '(unseen_items)\n', (2478, 2492), True, 'import numpy as np\n'), ((2030, 2046), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2044, 2046), True, 'import numpy as np\n')] |
"""
Definition of the SqliteCaseReader.
"""
from __future__ import print_function, absolute_import
from copy import deepcopy
import os
import re
import sys
import sqlite3
from collections import OrderedDict
from six import PY2, PY3, reraise
from six.moves import range
import json
import numpy as np
from openmdao.recorders.base_case_reader import BaseCaseReader
from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, \
PromotedToAbsoluteMap, DriverDerivativesCase
from openmdao.recorders.cases import BaseCases
from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array
from openmdao.recorders.sqlite_recorder import blob_to_array, format_version
from openmdao.utils.write_outputs import write_outputs
if PY2:
import cPickle as pickle
if PY3:
import pickle
# Sentinel default for ``out_stream`` arguments: lets the methods distinguish
# "argument omitted" (resolve to sys.stdout at call time) from an explicit
# ``None`` (suppress output entirely).
_DEFAULT_OUT_STREAM = object()
class SqliteCaseReader(BaseCaseReader):
"""
A CaseReader specific to files created with SqliteRecorder.
Parameters
----------
filename : str
The path to the filename containing the recorded data.
Attributes
----------
format_version : int
The version of the format assumed when loading the file.
output2meta : dict
Dictionary mapping output variables to their metadata
input2meta : dict
Dictionary mapping input variables to their metadata
_abs2meta : dict
Dictionary mapping variables to their metadata
_abs2prom : {'input': dict, 'output': dict}
Dictionary mapping absolute names to promoted names.
_prom2abs : {'input': dict, 'output': dict}
Dictionary mapping promoted names to absolute names.
_coordinate_split_re : RegularExpression
Regular expression used for splitting iteration coordinates.
_var_settings : dict
Dictionary mapping absolute variable names to variable settings.
"""
def __init__(self, filename):
"""
Initialize.
Parameters
----------
filename : str
The path to the filename containing the recorded data.
"""
super(SqliteCaseReader, self).__init__(filename)
if filename is not None:
if not is_valid_sqlite3_db(filename):
if not os.path.exists(filename):
raise IOError('File does not exist({0})'.format(filename))
else:
raise IOError('File does not contain a valid '
'sqlite database ({0})'.format(filename))
self._coordinate_split_re = re.compile('\|\\d+\|*')
with sqlite3.connect(self.filename) as con:
cur = con.cursor()
# need to see what columns are in the metadata table before we query it
cursor = cur.execute('select * from metadata')
names = [description[0] for description in cursor.description]
if "var_settings" in names:
cur.execute(
"SELECT format_version, abs2prom, prom2abs, abs2meta, var_settings "
"FROM metadata")
else:
cur.execute(
"SELECT format_version, abs2prom, prom2abs, abs2meta FROM metadata")
row = cur.fetchone()
self.format_version = row[0]
self._abs2prom = None
self._prom2abs = None
self._abs2meta = None
self._var_settings = None
if self.format_version >= 4:
self._var_settings = json.loads(row[4])
if self.format_version >= 3:
self._abs2prom = json.loads(row[1])
self._prom2abs = json.loads(row[2])
self._abs2meta = json.loads(row[3])
for name in self._abs2meta:
if 'lower' in self._abs2meta[name]:
self._abs2meta[name]['lower'] =\
convert_to_np_array(self._abs2meta[name]['lower'])
if 'upper' in self._abs2meta[name]:
self._abs2meta[name]['upper'] =\
convert_to_np_array(self._abs2meta[name]['upper'])
elif self.format_version in (1, 2):
if PY2:
self._abs2prom = pickle.loads(str(row[1])) if row[1] is not None else None
self._prom2abs = pickle.loads(str(row[2])) if row[2] is not None else None
self._abs2meta = pickle.loads(str(row[3])) if row[3] is not None else None
if PY3:
try:
self._abs2prom = pickle.loads(row[1]) if row[1] is not None else None
self._prom2abs = pickle.loads(row[2]) if row[2] is not None else None
self._abs2meta = pickle.loads(row[3]) if row[3] is not None else None
except TypeError:
# Reading in a python 2 pickle recorded pre-OpenMDAO 2.4.
self._abs2prom = pickle.loads(row[1].encode()) if row[1] is not\
None else None
self._prom2abs = pickle.loads(row[2].encode()) if row[2] is not\
None else None
self._abs2meta = pickle.loads(row[3].encode()) if row[3] is not\
None else None
con.close()
self.output2meta = PromotedToAbsoluteMap(self._abs2meta, self._prom2abs,
self._abs2prom, True)
self.input2meta = PromotedToAbsoluteMap(self._abs2meta, self._prom2abs,
self._abs2prom, False)
self._load()
    def _load(self):
        """
        Load data from the sqlite database file.

        Load the metadata from the sqlite file, populating the
        `format_version`, `parameters`, and `unknowns` attributes of this
        CaseReader.

        The `iterations` table is read to load the keys which identify
        the individual cases/iterations from the recorded file.
        """
        # Build one case container per recorded table; each lazily queries the
        # same sqlite file.
        self.driver_cases = DriverCases(self.filename, self.format_version, self._abs2prom,
                                        self._abs2meta, self._prom2abs, self._var_settings)
        self.driver_derivative_cases = DriverDerivativeCases(self.filename, self.format_version,
                                                             self._abs2prom, self._abs2meta,
                                                             self._prom2abs)
        self.system_cases = SystemCases(self.filename, self.format_version, self._abs2prom,
                                        self._abs2meta, self._prom2abs)
        self.solver_cases = SolverCases(self.filename, self.format_version, self._abs2prom,
                                        self._abs2meta, self._prom2abs)
        self.problem_cases = ProblemCases(self.filename, self.format_version, self._abs2prom,
                                          self._abs2meta, self._prom2abs)

        # Accept any recorded version from 1 up to the newest version this
        # reader supports (`format_version` imported from sqlite_recorder).
        if self.format_version in range(1, format_version + 1):
            with sqlite3.connect(self.filename) as con:

                # Read in iterations from Drivers, Systems, Problems, and Solvers
                cur = con.cursor()
                cur.execute("SELECT iteration_coordinate FROM driver_iterations ORDER BY id ASC")
                rows = cur.fetchall()
                self.driver_cases._case_keys = [coord[0] for coord in rows]
                self.driver_cases.num_cases = len(self.driver_cases._case_keys)

                try:
                    cur.execute("SELECT iteration_coordinate FROM driver_derivatives "
                                "ORDER BY id ASC")
                    rows = cur.fetchall()
                    dcase = self.driver_derivative_cases
                    dcase._case_keys = [coord[0] for coord in rows]
                    dcase.num_cases = len(dcase._case_keys)
                except sqlite3.OperationalError as err:
                    # Cases recorded in version 1 won't have a derivatives table.
                    if self.format_version >= 2:
                        reraise(*sys.exc_info())

                cur.execute("SELECT iteration_coordinate FROM system_iterations ORDER BY id ASC")
                rows = cur.fetchall()
                self.system_cases._case_keys = [coord[0] for coord in rows]
                self.system_cases.num_cases = len(self.system_cases._case_keys)

                cur.execute("SELECT iteration_coordinate FROM solver_iterations ORDER BY id ASC")
                rows = cur.fetchall()
                self.solver_cases._case_keys = [coord[0] for coord in rows]
                self.solver_cases.num_cases = len(self.solver_cases._case_keys)

                try:
                    cur.execute("SELECT case_name FROM problem_cases ORDER BY id ASC")
                    rows = cur.fetchall()
                    self.problem_cases._case_keys = [coord[0] for coord in rows]
                    self.problem_cases.num_cases = len(self.problem_cases._case_keys)
                except sqlite3.OperationalError as err:
                    # Cases recorded in some early iterations of version 1 won't have a problem
                    # table.
                    if self.format_version >= 2:
                        reraise(*sys.exc_info())

                # Read in metadata for Drivers, Systems, and Solvers
                cur.execute("SELECT model_viewer_data FROM driver_metadata")
                row = cur.fetchone()
                if row is not None:
                    if self.format_version >= 3:
                        self.driver_metadata = json.loads(row[0])
                    elif self.format_version in (1, 2):
                        if PY2:
                            self.driver_metadata = pickle.loads(str(row[0]))
                        if PY3:
                            self.driver_metadata = pickle.loads(row[0])

                # NOTE(review): assumes self.system_metadata and
                # self.solver_metadata dicts are initialized by the base
                # class — confirm in BaseCaseReader.
                cur.execute("SELECT id, scaling_factors, component_metadata FROM system_metadata")
                for row in cur:
                    # 'id' here is the system pathname key, not the sqlite
                    # rowid (shadows the builtin; kept for compatibility).
                    id = row[0]
                    self.system_metadata[id] = {}

                    if PY2:
                        self.system_metadata[id]['scaling_factors'] = pickle.loads(str(row[1]))
                        self.system_metadata[id]['component_options'] = pickle.loads(str(row[2]))
                    if PY3:
                        self.system_metadata[id]['scaling_factors'] = pickle.loads(row[1])
                        self.system_metadata[id]['component_options'] = pickle.loads(row[2])

                cur.execute("SELECT id, solver_options, solver_class FROM solver_metadata")
                for row in cur:
                    id = row[0]
                    if PY2:
                        solver_options = pickle.loads(str(row[1]))
                    if PY3:
                        solver_options = pickle.loads(row[1])
                    solver_class = row[2]
                    self.solver_metadata[id] = {
                        'solver_options': solver_options,
                        'solver_class': solver_class,
                    }
            # the sqlite3 context manager commits but does not close
            con.close()
        else:
            raise ValueError('SQliteCaseReader encountered an unhandled '
                             'format version: {0}'.format(self.format_version))
def load_cases(self):
"""
Load all driver, solver, and system cases into memory.
"""
self.driver_cases.load_cases()
self.solver_cases.load_cases()
self.system_cases.load_cases()
self.problem_cases.load_cases()
def get_cases(self, parent=None, recursive=False):
"""
Allow one to iterate over the driver and solver cases.
Generator giving Driver and/or Solver cases in order.
Parameters
----------
parent : DriverCase or SolverCase or str, optional
Identifies which case's children to return. None indicates Root. Can pass a
driver case, a solver case, or an iteration coordinate identifying a solver
or driver case. Defaults to None.
recursive : bool, optional
If True, will enable iterating over all successors in case hierarchy
rather than just the direct children. Defaults to False.
"""
iter_coord = ''
if parent is not None:
if parent is DriverCase or parent is SolverCase:
iter_coord = parent.iteration_coordinate
elif type(parent) is str:
iter_coord = parent
else:
raise TypeError("parent parameter can only be DriverCase, SolverCase, or string")
driver_iter = []
solver_iter = []
with sqlite3.connect(self.filename) as con:
cur = con.cursor()
cur.execute("SELECT iteration_coordinate FROM driver_iterations "
"ORDER BY counter ASC")
driver_iter = cur.fetchall()
cur.execute("SELECT iteration_coordinate, counter FROM solver_iterations "
"ORDER BY counter ASC")
solver_iter = cur.fetchall()
con.close()
split_iter_coord = self._split_coordinate(iter_coord) if iter_coord is not ''\
else []
# grab an array of possible lengths of coordinates
coord_lengths = [2] # start with 2 because that is the length of driver iteration coords
for s in solver_iter:
s_len = len(self._split_coordinate(s[0]))
if s_len not in coord_lengths:
coord_lengths.append(s_len)
coord_lengths = sorted(coord_lengths)
# grab full set of cases to iterate over
iter_set = self._find_child_cases(iter_coord, split_iter_coord, driver_iter,
solver_iter, recursive, coord_lengths)
# iterate over set of cases
for iteration in iter_set:
if iteration[1] is 'driver':
yield self.driver_cases.get_case(iteration[0])
else:
yield self.solver_cases.get_case(iteration[0])
def _find_child_cases(self, parent_iter_coord, split_parent_iter_coord, driver_iter,
solver_iter, recursive, coord_lengths):
"""
Find all children of a given parent case.
Parameters
----------
parent_iter_coord : str
Iteration coordinate of the parent case. If empty string, assumes root is parent.
split_parent_iter_coord : [str]
The split parent iteration coordinate.
driver_iter : [(str)]
The ordered list of driver iteration coordinates.
solver_iter : [(str)]
The ordered list of solver iteration coordinates.
recursive : bool
If True, will grab all successors recursively. Otherwise, will only return direct
children.
coord_lengths : [int]
Sorted array of possible coordinate lengths. Used to determine if case is child
of another case.
Returns
-------
list of tuples
List of tuples of the form ('iteration_coordinate', 'type of iteration')
"""
ret = []
par_len = len(split_parent_iter_coord)
par_len_idx = coord_lengths.index(par_len if par_len is not 0 else 2)
expected_child_length = coord_lengths[par_len_idx + 1] if par_len_idx <\
len(coord_lengths) - 1 else -1
if parent_iter_coord is '': # CASE: grabbing children of 'root'
if len(driver_iter) > 0: # grabbing all driver cases
for d in driver_iter:
ret.append((d[0], 'driver'))
if recursive:
ret += self._find_child_cases(d[0], self._split_coordinate(d[0]),
driver_iter, solver_iter, recursive,
coord_lengths)
elif len(solver_iter) > 0: # grabbing first layer of solver iterations
# find the iteration coordinate length of the first layer of solver iterations
min_iter_len = -1
if len(coord_lengths) > 1:
min_iter_len = coord_lengths[1]
for s in solver_iter:
split_coord = self._split_coordinate(s[0])
if len(split_coord) is min_iter_len:
ret.append((s[0], 'solver'))
if recursive:
ret += self._find_child_cases(s[0], split_coord, driver_iter,
solver_iter, recursive, coord_lengths)
else: # CASE: grabbing children of a case
for s in solver_iter:
if self._is_case_child(parent_iter_coord, s[0], expected_child_length):
ret.append((s[0], 'solver'))
if recursive:
ret += self._find_child_cases(s[0], self._split_coordinate(s[0]),
driver_iter, solver_iter, recursive,
coord_lengths)
return ret
def _is_case_child(self, parent_coordinate, coordinate, expected_child_length):
"""
Tell if the given case is a child case of the parent.
Parameters
----------
parent_coordinate : str
The iteration coordinate of the potential parent.
coordinate : str
Iteration coordinate of the case we want to test.
expected_child_length : int
Expected length of the split child iteration coordinate
Returns
-------
bool
True if the given coordinate indicates that the case is a child of the
given parent case. False otherwise.
"""
split_coord = self._split_coordinate(coordinate)
if coordinate.startswith(parent_coordinate) and\
len(split_coord) is expected_child_length:
return True
return False
def _split_coordinate(self, coordinate):
"""
Split up an iteration coordinate string based on the iteration index.
Parameters
----------
coordinate : str
The iteration coordinate to split.
Returns
-------
list
coordinate as list of strings.
"""
return self._coordinate_split_re.split(coordinate)
def list_inputs(self,
case=None,
values=True,
units=False,
hierarchical=True,
print_arrays=False,
out_stream=_DEFAULT_OUT_STREAM):
"""
Return and optionally log a list of input names and other optional information.
Also optionally logs the information to a user defined output stream.
Parameters
----------
case : Case, optional
The case whose inputs will be listed. If None, gives all inputs. Defaults to None.
values : bool, optional
When True, display/return input values. Default is True.
units : bool, optional
When True, display/return units. Default is False.
hierarchical : bool, optional
When True, human readable output shows variables in hierarchical format.
print_arrays : bool, optional
When False, in the columnar display, just display norm of any ndarrays with size > 1.
The norm is surrounded by vertical bars to indicate that it is a norm.
When True, also display full values of the ndarray below the row. Format is affected
by the values set with numpy.set_printoptions
Default is False.
out_stream : file-like object
Where to send human readable output. Default is sys.stdout.
Set to None to suppress.
Returns
-------
list
list of input names and other optional information about those inputs
"""
meta = self._abs2meta
if case is None:
sys_vars = self._get_all_sysvars(False)
else:
sys_vars = self._get_case_sysvars(case, False)
inputs = []
if sys_vars is not None and len(sys_vars) > 0:
for name in sys_vars:
outs = {}
if values:
outs['value'] = sys_vars[name]['value']
if units:
outs['units'] = meta[name]['units']
inputs.append((name, outs))
if out_stream == _DEFAULT_OUT_STREAM:
out_stream = sys.stdout
if out_stream:
if sys_vars is None:
out_stream.write('WARNING: No system cases recorded. Make sure the recorder ' +
'is attached to a system object\n')
elif len(sys_vars) is 0:
out_stream.write('WARNING: Inputs not recorded. Make sure your recording ' +
'settings have record_inputs set to True\n')
self._write_outputs('input', None, inputs, hierarchical, print_arrays, out_stream)
return inputs
def list_outputs(self,
case=None,
explicit=True, implicit=True,
values=True,
residuals=False,
residuals_tol=None,
units=False,
shape=False,
bounds=False,
scaling=False,
hierarchical=True,
print_arrays=False,
out_stream=_DEFAULT_OUT_STREAM):
"""
Return and optionally log a list of output names and other optional information.
Also optionally logs the information to a user defined output stream.
Parameters
----------
case : Case, optional
The case whose outputs will be listed. If None, gives all outputs. Defaults to None.
explicit : bool, optional
include outputs from explicit components. Default is True.
implicit : bool, optional
include outputs from implicit components. Default is True.
values : bool, optional
When True, display/return output values. Default is True.
residuals : bool, optional
When True, display/return residual values. Default is False.
residuals_tol : float, optional
If set, limits the output of list_outputs to only variables where
the norm of the resids array is greater than the given 'residuals_tol'.
Default is None.
units : bool, optional
When True, display/return units. Default is False.
shape : bool, optional
When True, display/return the shape of the value. Default is False.
bounds : bool, optional
When True, display/return bounds (lower and upper). Default is False.
scaling : bool, optional
When True, display/return scaling (ref, ref0, and res_ref). Default is False.
hierarchical : bool, optional
When True, human readable output shows variables in hierarchical format.
print_arrays : bool, optional
When False, in the columnar display, just display norm of any ndarrays with size > 1.
The norm is surrounded by vertical bars to indicate that it is a norm.
When True, also display full values of the ndarray below the row. Format is affected
by the values set with numpy.set_printoptions
Default is False.
out_stream : file-like
Where to send human readable output. Default is sys.stdout.
Set to None to suppress.
Returns
-------
list
list of output names and other optional information about those outputs
"""
meta = self._abs2meta
expl_outputs = []
impl_outputs = []
sys_vars = self._get_all_sysvars()
if case is None:
sys_vars = self._get_all_sysvars()
else:
sys_vars = self._get_case_sysvars(case)
if sys_vars is not None and len(sys_vars) > 0:
for name in sys_vars:
if residuals_tol and residuals_vars is not None and\
sys_vars[name]['residuals'] is not 'Not Recorded' and\
np.linalg.norm(sys_vars[name]['residuals']) < residuals_tol:
continue
outs = {}
if values:
outs['value'] = sys_vars[name]['value']
if residuals:
outs['resids'] = sys_vars[name]['residuals']
if units:
outs['units'] = meta[name]['units']
if shape:
outs['shape'] = sys_vars[name]['value'].shape
if bounds:
outs['lower'] = meta[name]['lower']
outs['upper'] = meta[name]['upper']
if scaling:
outs['ref'] = meta[name]['ref']
outs['ref0'] = meta[name]['ref0']
outs['res_ref'] = meta[name]['res_ref']
if meta[name]['explicit']:
expl_outputs.append((name, outs))
else:
impl_outputs.append((name, outs))
if out_stream == _DEFAULT_OUT_STREAM:
out_stream = sys.stdout
if out_stream:
if sys_vars is None:
out_stream.write('WARNING: No system cases recorded. Make sure the recorder ' +
'is attached to a system object\n')
elif len(sys_vars) is 0:
out_stream.write('WARNING: Outputs not recorded. Make sure your recording ' +
'settings have record_outputs set to True\n')
if explicit:
self._write_outputs('output', 'Explicit', expl_outputs, hierarchical, print_arrays,
out_stream)
if implicit:
self._write_outputs('output', 'Implicit', impl_outputs, hierarchical, print_arrays,
out_stream)
if explicit and implicit:
return expl_outputs + impl_outputs
elif explicit:
return expl_outputs
elif implicit:
return impl_outputs
else:
raise RuntimeError('You have excluded both Explicit and Implicit components.')
def _get_case_sysvars(self, case, get_outputs=True):
"""
Get the set of output or input variables and their values for a given case.
Parameters
----------
case : Case
The case whose variables will be returned.
get_outputs : bool, optional
indicates if the returned set should contain outputs. If false, returns inputs.
Returns
-------
dictionary
dictionary of global variable names to their values. None if no system iterations
were recorded.
"""
variables = {}
if get_outputs and case.outputs is None:
return variables
outputs = case.outputs._values if case.outputs is not None else None
residuals = case.residuals._values if case.residuals is not None else None
inputs = case.inputs._values if case.inputs is not None else None
if get_outputs:
for var_name in outputs.dtype.names:
variables[var_name] = {'value': outputs[var_name]}
if residuals is not None and var_name in residuals.dtype.names:
variables[var_name]['residuals'] = residuals[var_name]
else:
variables[var_name]['residuals'] = 'Not Recorded'
elif inputs is not None:
for var_name in inputs.dtype.names:
if var_name not in variables:
variables[var_name] = {'value': inputs[var_name]}
return variables
def _get_all_sysvars(self, get_outputs=True):
"""
Get the set of output or input variables and their values.
Parameters
----------
get_outputs : bool, optional
indicates if the returned set should contain outputs. If false, returns inputs.
Returns
-------
dictionary
dictionary of global variable names to their values. None if no system iterations
were recorded.
"""
coords = self.system_cases._case_keys
# store the iteration coordinates without iteration numbers.
# coord_map intializes each iter_key to False, indicating we haven't
# grabbed values from this system
coord_map = {}
for c in coords:
split_iter = self._split_coordinate(c)
iter_key = ':'.join(split_iter)
coord_map[iter_key] = False
# didn't record any system iterations, return None
if len(coord_map) is 0:
return None
variables = {}
iteration_num = -1
# iterate over cases from end to start, unless we've grabbed values from
# every system
while not self._has_all_values(coord_map):
iteration = self.system_cases._case_keys[iteration_num]
iteration_num -= 1
split_iter = self._split_coordinate(iteration)
iter_key = ':'.join(split_iter)
# if coord_map[iter_key] is False, we haven't grabbed variable values
# from this system
if not coord_map[iter_key]:
coord_map[iter_key] = True
case = self.system_cases.get_case(iteration)
if get_outputs and case.outputs is None:
continue
if not get_outputs and case.inputs is None:
continue
outputs = case.outputs._values if case.outputs is not None else None
residuals = case.residuals._values if case.residuals is not None else None
inputs = case.inputs._values if case.inputs is not None else None
if get_outputs:
for var_name in outputs.dtype.names:
if var_name not in variables:
variables[var_name] = {'value': outputs[var_name]}
if residuals is not None and var_name in residuals.dtype.names:
variables[var_name]['residuals'] = residuals[var_name]
else:
variables[var_name]['residuals'] = 'Not Recorded'
elif inputs is not None:
for var_name in inputs.dtype.names:
if var_name not in variables:
variables[var_name] = {'value': inputs[var_name]}
return variables
def _has_all_values(self, coord_map):
"""
Tell if all variables from every recorded system have been iterated over.
Parameters
----------
coord_map : dict
maps stripped iteration coordinates to a bool indicating whether or not the system(s)
associated with that iteration coordinate have been iterated over.
Returns
-------
bool
True if coord_map is True for each key, False otherwise.
"""
for coord in coord_map:
if not coord_map[coord]:
return False
return True
def _write_outputs(self, in_or_out, comp_type, outputs, hierarchical, print_arrays,
out_stream):
"""
Write table of variable names, values, residuals, and metadata to out_stream.
The output values could actually represent input variables.
In this context, outputs refers to the data that is being logged to an output stream.
Parameters
----------
in_or_out : str, 'input' or 'output'
indicates whether the values passed in are from inputs or output variables.
comp_type : str, 'Explicit' or 'Implicit'
the type of component with the output values.
outputs : list
list of (name, dict of vals and metadata) tuples.
hierarchical : bool
When True, human readable output shows variables in hierarchical format.
print_arrays : bool
When False, in the columnar display, just display norm of any ndarrays with size > 1.
The norm is surrounded by vertical bars to indicate that it is a norm.
When True, also display full values of the ndarray below the row. Format is affected
by the values set with numpy.set_printoptions
Default is False.
out_stream : file-like object
Where to send human readable output.
Set to None to suppress.
"""
if out_stream is None:
return
# Only local metadata but the most complete
meta = self._abs2meta
# Make a dict of outputs. Makes it easier to work with in this method
dict_of_outputs = OrderedDict()
for name, vals in outputs:
dict_of_outputs[name] = vals
allprocs_abs_names = {
'input': dict_of_outputs.keys(),
'output': dict_of_outputs.keys()
}
write_outputs(in_or_out, comp_type, dict_of_outputs, hierarchical, print_arrays, out_stream,
'model', allprocs_abs_names)
class DriverCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a Driver iteration.

    Attributes
    ----------
    _var_settings : dict
        Dictionary mapping absolute variable names to variable settings.
    """

    def __init__(self, filename, format_version, abs2prom, abs2meta, prom2abs, var_settings):
        """
        Initialize.

        Parameters
        ----------
        filename : str
            The name of the recording file from which to instantiate the case reader.
        format_version : int
            The version of the format assumed when loading the file.
        abs2prom : {'input': dict, 'output': dict}
            Dictionary mapping absolute names to promoted names.
        abs2meta : dict
            Dictionary mapping absolute variable names to variable metadata.
        prom2abs : {'input': dict, 'output': dict}
            Dictionary mapping promoted names to absolute names.
        var_settings : dict
            Dictionary mapping absolute variable names to variable settings.
        """
        super(DriverCases, self).__init__(filename, format_version, abs2prom, abs2meta, prom2abs)
        self._var_settings = var_settings

    def _extract_case_from_row(self, row):
        """
        Pull data out of a queried SQLite row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, inputs, outputs)
            Queried SQLite driver table row.

        Returns
        -------
        DriverCase
            Case for associated row.
        """
        idx, counter, iteration_coordinate, timestamp, success, msg, inputs_text, \
            outputs_text, = row

        # Format 3+ stores the vectors as JSON; formats 1-2 stored packed blobs.
        # NOTE(review): an unrecognized format_version leaves inputs_array /
        # outputs_array undefined and would raise NameError below.
        if self.format_version >= 3:
            inputs_array = json_to_np_array(inputs_text)
            outputs_array = json_to_np_array(outputs_text)
        elif self.format_version in (1, 2):
            inputs_array = blob_to_array(inputs_text)
            outputs_array = blob_to_array(outputs_text)

        case = DriverCase(self.filename, counter, iteration_coordinate, timestamp,
                          success, msg, inputs_array, outputs_array,
                          self._prom2abs, self._abs2prom, self._abs2meta, self._var_settings)

        return case

    def load_cases(self):
        """
        Load all driver cases into memory.
        """
        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM driver_iterations")
            rows = cur.fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                self._cases[case.iteration_coordinate] = case

    def get_case(self, case_id, scaled=False):
        """
        Get a case from the database.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.
        scaled : bool
            If True, return variables scaled. Otherwise, return physical values.

        Returns
        -------
        An instance of a Driver Case populated with data from the
        specified case/iteration.
        """
        # check to see if we've already cached this case
        iteration_coordinate = self.get_iteration_coordinate(case_id)
        if iteration_coordinate in self._cases:
            case = self._cases[iteration_coordinate]
        else:
            # Get an unscaled case if does not already exist in _cases
            with sqlite3.connect(self.filename) as con:
                cur = con.cursor()
                cur.execute("SELECT * FROM driver_iterations WHERE "
                            "iteration_coordinate=:iteration_coordinate",
                            {"iteration_coordinate": iteration_coordinate})
                # Initialize the Case object from the iterations data
                row = cur.fetchone()
            # the sqlite3 context manager commits but does not close
            con.close()

            # NOTE(review): if the coordinate is not recorded, `row` is None
            # and _extract_case_from_row will fail unpacking — confirm callers
            # always pass valid coordinates.
            case = self._extract_case_from_row(row)

            # save so we don't query again
            self._cases[case.iteration_coordinate] = case

        if scaled:
            # We have to do some scaling first before we return it
            # Need to make a copy, otherwise we modify the object in the cache
            case = deepcopy(case)
            case.scale()

        return case
class DriverDerivativeCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a Driver derivatives computation.
    """

    def _extract_case_from_row(self, row):
        """
        Pull data out of a queried SQLite row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, totals)
            Queried SQLite driver derivatives table row.

        Returns
        -------
        DriverDerivativesCase
            Case for associated row.
        """
        idx, counter, iteration_coordinate, timestamp, success, msg, totals_blob = row

        # Total derivatives are always stored as a packed blob, regardless of
        # format_version.
        totals_array = blob_to_array(totals_blob)

        case = DriverDerivativesCase(self.filename, counter, iteration_coordinate,
                                     timestamp, success, msg, totals_array,
                                     self._prom2abs, self._abs2prom, self._abs2meta)
        return case

    def load_cases(self):
        """
        Load all driver cases into memory.
        """
        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM driver_derivatives")
            rows = cur.fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                self._cases[case.iteration_coordinate] = case

    def get_case(self, case_id):
        """
        Get a case from the database.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.

        Returns
        -------
        An instance of a Driver Case populated with data from the
        specified case/iteration.
        """
        # check to see if we've already cached this case
        iteration_coordinate = self.get_iteration_coordinate(case_id)
        if iteration_coordinate in self._cases:
            return self._cases[iteration_coordinate]

        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM driver_derivatives WHERE "
                        "iteration_coordinate=:iteration_coordinate",
                        {"iteration_coordinate": iteration_coordinate})
            # Initialize the Case object from the iterations data
            row = cur.fetchone()
        # the sqlite3 context manager commits but does not close
        con.close()

        # NOTE(review): `row` is None for an unrecorded coordinate and
        # _extract_case_from_row will fail unpacking — confirm callers.
        case = self._extract_case_from_row(row)

        # save so we don't query again
        self._cases[case.iteration_coordinate] = case
        return case
class ProblemCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a Driver iteration.
    """

    def _extract_case_from_row(self, row):
        """
        Pull data out of a queried SQLite row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, outputs)
            Queried SQLite problems table row.

        Returns
        -------
        ProblemCase
            Case for associated row.
        """
        idx, counter, case_name, timestamp, success, msg, \
            outputs_text, = row

        # Format 3+ stores outputs as JSON; formats 1-2 stored packed blobs.
        # NOTE(review): an unrecognized format_version leaves outputs_array
        # undefined and would raise NameError below.
        if self.format_version >= 3:
            outputs_array = json_to_np_array(outputs_text)
        elif self.format_version in (1, 2):
            outputs_array = blob_to_array(outputs_text)

        case = ProblemCase(self.filename, counter, case_name, timestamp,
                           success, msg, outputs_array, self._prom2abs,
                           self._abs2prom, self._abs2meta)
        return case

    def load_cases(self):
        """
        Load all problem cases into memory.
        """
        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM problem_cases")
            rows = cur.fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                # NOTE(review): caches under case.iteration_coordinate while
                # get_case caches under case_name — presumably ProblemCase
                # stores the case name as its iteration_coordinate; verify
                # against the Case base class.
                self._cases[case.iteration_coordinate] = case

    def get_case(self, case_name):
        """
        Get a case from the database.

        Parameters
        ----------
        case_name : str
            The string-identifier of the case to be retrieved.

        Returns
        -------
        An instance of a Driver Case populated with data from the
        specified case/iteration.
        """
        # check to see if we've already cached this case
        if case_name in self._cases:
            return self._cases[case_name]

        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM problem_cases WHERE "
                        "case_name=:case_name",
                        {"case_name": case_name})
            # Initialize the Case object from the iterations data
            row = cur.fetchone()
        # the sqlite3 context manager commits but does not close
        con.close()

        case = self._extract_case_from_row(row)

        # save so we don't query again
        self._cases[case_name] = case
        return case
class SystemCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a System iteration.
    """

    def _extract_case_from_row(self, row):
        """
        Pull data out of a queried SQLite row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, inputs, outputs, residuals)
            Queried SQLite system_iterations table row.

        Returns
        -------
        SystemCase
            Case for associated row.

        Raises
        ------
        ValueError
            If the recording was written with an unsupported format version.
        """
        idx, counter, iteration_coordinate, timestamp, success, msg, inputs_text,\
            outputs_text, residuals_text = row

        # format 3+ stores values as JSON, formats 1 and 2 as pickled blobs
        if self.format_version >= 3:
            inputs_array = json_to_np_array(inputs_text)
            outputs_array = json_to_np_array(outputs_text)
            residuals_array = json_to_np_array(residuals_text)
        elif self.format_version in (1, 2):
            inputs_array = blob_to_array(inputs_text)
            outputs_array = blob_to_array(outputs_text)
            residuals_array = blob_to_array(residuals_text)
        else:
            # fail explicitly instead of with a confusing NameError below
            raise ValueError('Unsupported format version: '
                             '{}'.format(self.format_version))

        case = SystemCase(self.filename, counter, iteration_coordinate, timestamp,
                          success, msg, inputs_array, outputs_array, residuals_array,
                          self._prom2abs, self._abs2prom, self._abs2meta)
        return case

    def load_cases(self):
        """
        Load all system cases into memory.
        """
        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM system_iterations")
            rows = cur.fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                self._cases[case.iteration_coordinate] = case

        # the context manager only commits, it does not close the connection
        con.close()

    def get_case(self, case_id):
        """
        Get a case from the database.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.

        Returns
        -------
        SystemCase
            An instance of a System Case populated with data from the
            specified case/iteration.
        """
        # check to see if we've already cached this case
        iteration_coordinate = self.get_iteration_coordinate(case_id)
        if iteration_coordinate in self._cases:
            return self._cases[iteration_coordinate]

        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM system_iterations WHERE "
                        "iteration_coordinate=:iteration_coordinate",
                        {"iteration_coordinate": iteration_coordinate})
            # Initialize the Case object from the iterations data
            row = cur.fetchone()

        # fix: close *after* the with block - closing inside it made the
        # context manager's implicit commit fail on a closed database
        con.close()

        case = self._extract_case_from_row(row)

        # save so we don't query again
        self._cases[case.iteration_coordinate] = case
        return case
class SolverCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a Solver iteration.
    """

    def _extract_case_from_row(self, row):
        """
        Pull data out of a queried SQLite row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, abs_err, rel_err,
               inputs, outputs, residuals)
            Queried SQLite solver_iterations table row.

        Returns
        -------
        SolverCase
            Case for associated row.

        Raises
        ------
        ValueError
            If the recording was written with an unsupported format version.
        """
        idx, counter, iteration_coordinate, timestamp, success, msg, abs_err, rel_err, \
            input_text, output_text, residuals_text = row

        # format 3+ stores values as JSON, formats 1 and 2 as pickled blobs
        if self.format_version >= 3:
            input_array = json_to_np_array(input_text)
            output_array = json_to_np_array(output_text)
            residuals_array = json_to_np_array(residuals_text)
        elif self.format_version in (1, 2):
            input_array = blob_to_array(input_text)
            output_array = blob_to_array(output_text)
            residuals_array = blob_to_array(residuals_text)
        else:
            # fail explicitly instead of with a confusing NameError below
            raise ValueError('Unsupported format version: '
                             '{}'.format(self.format_version))

        case = SolverCase(self.filename, counter, iteration_coordinate, timestamp,
                          success, msg, abs_err, rel_err, input_array, output_array,
                          residuals_array, self._prom2abs, self._abs2prom, self._abs2meta)
        return case

    def load_cases(self):
        """
        Load all solver cases into memory.
        """
        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM solver_iterations")
            rows = cur.fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                self._cases[case.iteration_coordinate] = case

        # the context manager only commits, it does not close the connection
        con.close()

    def get_case(self, case_id):
        """
        Get a case from the database.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.

        Returns
        -------
        SolverCase
            An instance of a Solver Case populated with data from the
            specified case/iteration.
        """
        # check to see if we've already cached this case
        iteration_coordinate = self.get_iteration_coordinate(case_id)
        if iteration_coordinate in self._cases:
            return self._cases[iteration_coordinate]

        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM solver_iterations WHERE "
                        "iteration_coordinate=:iteration_coordinate",
                        {"iteration_coordinate": iteration_coordinate})
            # Initialize the Case object from the iterations data
            row = cur.fetchone()

        # fix: close *after* the with block - closing inside it made the
        # context manager's implicit commit fail on a closed database
        con.close()

        case = self._extract_case_from_row(row)

        # save so we don't query again
        self._cases[iteration_coordinate] = case
        return case
| [
"re.compile",
"sys.exc_info",
"copy.deepcopy",
"pickle.loads",
"numpy.linalg.norm",
"openmdao.recorders.case.PromotedToAbsoluteMap",
"openmdao.utils.record_util.json_to_np_array",
"openmdao.utils.write_outputs.write_outputs",
"os.path.exists",
"openmdao.recorders.case.SolverCase",
"openmdao.reco... | [((2585, 2610), 're.compile', 're.compile', (['"""\\\\|\\\\d+\\\\|*"""'], {}), "('\\\\|\\\\d+\\\\|*')\n", (2595, 2610), False, 'import re\n'), ((5436, 5511), 'openmdao.recorders.case.PromotedToAbsoluteMap', 'PromotedToAbsoluteMap', (['self._abs2meta', 'self._prom2abs', 'self._abs2prom', '(True)'], {}), '(self._abs2meta, self._prom2abs, self._abs2prom, True)\n', (5457, 5511), False, 'from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((5587, 5663), 'openmdao.recorders.case.PromotedToAbsoluteMap', 'PromotedToAbsoluteMap', (['self._abs2meta', 'self._prom2abs', 'self._abs2prom', '(False)'], {}), '(self._abs2meta, self._prom2abs, self._abs2prom, False)\n', (5608, 5663), False, 'from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((33505, 33518), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (33516, 33518), False, 'from collections import OrderedDict\n'), ((33736, 33861), 'openmdao.utils.write_outputs.write_outputs', 'write_outputs', (['in_or_out', 'comp_type', 'dict_of_outputs', 'hierarchical', 'print_arrays', 'out_stream', '"""model"""', 'allprocs_abs_names'], {}), "(in_or_out, comp_type, dict_of_outputs, hierarchical,\n print_arrays, out_stream, 'model', allprocs_abs_names)\n", (33749, 33861), False, 'from openmdao.utils.write_outputs import write_outputs\n'), ((35914, 36101), 'openmdao.recorders.case.DriverCase', 'DriverCase', (['self.filename', 'counter', 'iteration_coordinate', 'timestamp', 'success', 'msg', 'inputs_array', 'outputs_array', 'self._prom2abs', 'self._abs2prom', 'self._abs2meta', 'self._var_settings'], {}), '(self.filename, counter, iteration_coordinate, timestamp, success,\n msg, inputs_array, outputs_array, self._prom2abs, self._abs2prom, self.\n _abs2meta, self._var_settings)\n', (35924, 36101), False, 'from openmdao.recorders.case import DriverCase, 
SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((38890, 38916), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['totals_blob'], {}), '(totals_blob)\n', (38903, 38916), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((38933, 39095), 'openmdao.recorders.case.DriverDerivativesCase', 'DriverDerivativesCase', (['self.filename', 'counter', 'iteration_coordinate', 'timestamp', 'success', 'msg', 'totals_array', 'self._prom2abs', 'self._abs2prom', 'self._abs2meta'], {}), '(self.filename, counter, iteration_coordinate,\n timestamp, success, msg, totals_array, self._prom2abs, self._abs2prom,\n self._abs2meta)\n', (38954, 39095), False, 'from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((41590, 41728), 'openmdao.recorders.case.ProblemCase', 'ProblemCase', (['self.filename', 'counter', 'case_name', 'timestamp', 'success', 'msg', 'outputs_array', 'self._prom2abs', 'self._abs2prom', 'self._abs2meta'], {}), '(self.filename, counter, case_name, timestamp, success, msg,\n outputs_array, self._prom2abs, self._abs2prom, self._abs2meta)\n', (41601, 41728), False, 'from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((44314, 44498), 'openmdao.recorders.case.SystemCase', 'SystemCase', (['self.filename', 'counter', 'iteration_coordinate', 'timestamp', 'success', 'msg', 'inputs_array', 'outputs_array', 'residuals_array', 'self._prom2abs', 'self._abs2prom', 'self._abs2meta'], {}), '(self.filename, counter, iteration_coordinate, timestamp, success,\n msg, inputs_array, outputs_array, residuals_array, self._prom2abs, self\n ._abs2prom, self._abs2meta)\n', (44324, 44498), False, 'from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((47298, 
47498), 'openmdao.recorders.case.SolverCase', 'SolverCase', (['self.filename', 'counter', 'iteration_coordinate', 'timestamp', 'success', 'msg', 'abs_err', 'rel_err', 'input_array', 'output_array', 'residuals_array', 'self._prom2abs', 'self._abs2prom', 'self._abs2meta'], {}), '(self.filename, counter, iteration_coordinate, timestamp, success,\n msg, abs_err, rel_err, input_array, output_array, residuals_array, self\n ._prom2abs, self._abs2prom, self._abs2meta)\n', (47308, 47498), False, 'from openmdao.recorders.case import DriverCase, SystemCase, SolverCase, ProblemCase, PromotedToAbsoluteMap, DriverDerivativesCase\n'), ((2623, 2653), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (2638, 2653), False, 'import sqlite3\n'), ((7105, 7133), 'six.moves.range', 'range', (['(1)', '(format_version + 1)'], {}), '(1, format_version + 1)\n', (7110, 7133), False, 'from six.moves import range\n'), ((12830, 12860), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (12845, 12860), False, 'import sqlite3\n'), ((35655, 35684), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['inputs_text'], {}), '(inputs_text)\n', (35671, 35684), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((35713, 35743), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['outputs_text'], {}), '(outputs_text)\n', (35729, 35743), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((36272, 36302), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (36287, 36302), False, 'import sqlite3\n'), ((38178, 38192), 'copy.deepcopy', 'deepcopy', (['case'], {}), '(case)\n', (38186, 38192), False, 'from copy import deepcopy\n'), ((39289, 39319), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (39304, 39319), False, 'import 
sqlite3\n'), ((40227, 40257), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (40242, 40257), False, 'import sqlite3\n'), ((41443, 41473), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['outputs_text'], {}), '(outputs_text)\n', (41459, 41473), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((41907, 41937), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (41922, 41937), False, 'import sqlite3\n'), ((42728, 42758), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (42743, 42758), False, 'import sqlite3\n'), ((43932, 43961), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['inputs_text'], {}), '(inputs_text)\n', (43948, 43961), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((43990, 44020), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['outputs_text'], {}), '(outputs_text)\n', (44006, 44020), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((44051, 44083), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['residuals_text'], {}), '(residuals_text)\n', (44067, 44083), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((44669, 44699), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (44684, 44699), False, 'import sqlite3\n'), ((45606, 45636), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (45621, 45636), False, 'import sqlite3\n'), ((46923, 46951), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['input_text'], {}), '(input_text)\n', (46939, 46951), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, 
convert_to_np_array\n'), ((46979, 47008), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['output_text'], {}), '(output_text)\n', (46995, 47008), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((47039, 47071), 'openmdao.utils.record_util.json_to_np_array', 'json_to_np_array', (['residuals_text'], {}), '(residuals_text)\n', (47055, 47071), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((47669, 47699), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (47684, 47699), False, 'import sqlite3\n'), ((48606, 48636), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (48621, 48636), False, 'import sqlite3\n'), ((2224, 2253), 'openmdao.utils.record_util.is_valid_sqlite3_db', 'is_valid_sqlite3_db', (['filename'], {}), '(filename)\n', (2243, 2253), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((3536, 3554), 'json.loads', 'json.loads', (['row[4]'], {}), '(row[4])\n', (3546, 3554), False, 'import json\n'), ((3630, 3648), 'json.loads', 'json.loads', (['row[1]'], {}), '(row[1])\n', (3640, 3648), False, 'import json\n'), ((3682, 3700), 'json.loads', 'json.loads', (['row[2]'], {}), '(row[2])\n', (3692, 3700), False, 'import json\n'), ((3734, 3752), 'json.loads', 'json.loads', (['row[3]'], {}), '(row[3])\n', (3744, 3752), False, 'import json\n'), ((7152, 7182), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (7167, 7182), False, 'import sqlite3\n'), ((35815, 35841), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['inputs_text'], {}), '(inputs_text)\n', (35828, 35841), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((35870, 35897), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', 
(['outputs_text'], {}), '(outputs_text)\n', (35883, 35897), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((37414, 37444), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (37429, 37444), False, 'import sqlite3\n'), ((41546, 41573), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['outputs_text'], {}), '(outputs_text)\n', (41559, 41573), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((44155, 44181), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['inputs_text'], {}), '(inputs_text)\n', (44168, 44181), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((44210, 44237), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['outputs_text'], {}), '(outputs_text)\n', (44223, 44237), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((44268, 44297), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['residuals_text'], {}), '(residuals_text)\n', (44281, 44297), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((47142, 47167), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['input_text'], {}), '(input_text)\n', (47155, 47167), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((47195, 47221), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['output_text'], {}), '(output_text)\n', (47208, 47221), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((47252, 47281), 'openmdao.recorders.sqlite_recorder.blob_to_array', 'blob_to_array', (['residuals_text'], {}), '(residuals_text)\n', (47265, 47281), False, 'from openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n'), ((2278, 2302), 'os.path.exists', 'os.path.exists', 
(['filename'], {}), '(filename)\n', (2292, 2302), False, 'import os\n'), ((3939, 3989), 'openmdao.utils.record_util.convert_to_np_array', 'convert_to_np_array', (["self._abs2meta[name]['lower']"], {}), "(self._abs2meta[name]['lower'])\n", (3958, 3989), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((4131, 4181), 'openmdao.utils.record_util.convert_to_np_array', 'convert_to_np_array', (["self._abs2meta[name]['upper']"], {}), "(self._abs2meta[name]['upper'])\n", (4150, 4181), False, 'from openmdao.utils.record_util import is_valid_sqlite3_db, json_to_np_array, convert_to_np_array\n'), ((9725, 9743), 'json.loads', 'json.loads', (['row[0]'], {}), '(row[0])\n', (9735, 9743), False, 'import json\n'), ((10548, 10568), 'pickle.loads', 'pickle.loads', (['row[1]'], {}), '(row[1])\n', (10560, 10568), False, 'import pickle\n'), ((10641, 10661), 'pickle.loads', 'pickle.loads', (['row[2]'], {}), '(row[2])\n', (10653, 10661), False, 'import pickle\n'), ((10983, 11003), 'pickle.loads', 'pickle.loads', (['row[1]'], {}), '(row[1])\n', (10995, 11003), False, 'import pickle\n'), ((24695, 24738), 'numpy.linalg.norm', 'np.linalg.norm', (["sys_vars[name]['residuals']"], {}), "(sys_vars[name]['residuals'])\n", (24709, 24738), True, 'import numpy as np\n'), ((4630, 4650), 'pickle.loads', 'pickle.loads', (['row[1]'], {}), '(row[1])\n', (4642, 4650), False, 'import pickle\n'), ((4724, 4744), 'pickle.loads', 'pickle.loads', (['row[2]'], {}), '(row[2])\n', (4736, 4744), False, 'import pickle\n'), ((4818, 4838), 'pickle.loads', 'pickle.loads', (['row[3]'], {}), '(row[3])\n', (4830, 4838), False, 'import pickle\n'), ((9992, 10012), 'pickle.loads', 'pickle.loads', (['row[0]'], {}), '(row[0])\n', (10004, 10012), False, 'import pickle\n'), ((8209, 8223), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8221, 8223), False, 'import sys\n'), ((9393, 9407), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9405, 9407), False, 'import 
sys\n')] |
"""Hello nematic droplet example."""
import dtmm
import numpy as np
#: pixel size in nm
PIXELSIZE = 200
#: compute box dimensions
NLAYERS, HEIGHT, WIDTH = 60, 96,96
#: illumination wavelengths in nm
WAVELENGTHS = np.linspace(380,780,9)
#: create some experimental data (stack)
optical_data = [dtmm.nematic_droplet_data((NLAYERS, HEIGHT, WIDTH),
radius = 30, profile = "r", no = 1.5, ne = 1.6, nhost = 1.5)]
#: create non-polarized input light
field_data_in = dtmm.illumination_data((HEIGHT, WIDTH), WAVELENGTHS,
pixelsize = PIXELSIZE)
#: transfer input light through stack
field_data_out = dtmm.transfer_field(field_data_in, optical_data, diffraction = 0, betamax = 1)
#: visualize output field
# no diffraction to simulate output of standard jones method
viewer1 = dtmm.field_viewer(field_data_out, diffraction = False)
viewer1.set_parameters(sample = 0, intensity = 2,
polarizer = 0, analyzer = 90)
fig,ax = viewer1.plot()
ax.set_title("diffraction = 0")
field_data_out = dtmm.transfer_field(field_data_in, optical_data, diffraction = 1, betamax = 1)
viewer2 = dtmm.field_viewer(field_data_out)
viewer2.set_parameters(sample = 0, intensity = 2,
polarizer = 0, focus = -14, analyzer = 90)
fig,ax = viewer2.plot()
ax.set_title("diffraction = 1")
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
#limited memory on RTD server requires split diffraction calculation
SPLITDIFF = True
else:
SPLITDIFF = False
field_data_out = dtmm.transfer_field(field_data_in, optical_data, diffraction = 5, betamax = 1, split_diffraction = SPLITDIFF)
viewer3 = dtmm.field_viewer(field_data_out)
viewer3.set_parameters(sample = 0, intensity = 2,
polarizer = 0, focus = -14, analyzer = 90)
fig,ax = viewer3.plot()
ax.set_title("diffraction = 5")
| [
"dtmm.illumination_data",
"dtmm.field_viewer",
"os.environ.get",
"numpy.linspace",
"dtmm.nematic_droplet_data",
"dtmm.transfer_field"
] | [((215, 239), 'numpy.linspace', 'np.linspace', (['(380)', '(780)', '(9)'], {}), '(380, 780, 9)\n', (226, 239), True, 'import numpy as np\n'), ((473, 546), 'dtmm.illumination_data', 'dtmm.illumination_data', (['(HEIGHT, WIDTH)', 'WAVELENGTHS'], {'pixelsize': 'PIXELSIZE'}), '((HEIGHT, WIDTH), WAVELENGTHS, pixelsize=PIXELSIZE)\n', (495, 546), False, 'import dtmm\n'), ((650, 724), 'dtmm.transfer_field', 'dtmm.transfer_field', (['field_data_in', 'optical_data'], {'diffraction': '(0)', 'betamax': '(1)'}), '(field_data_in, optical_data, diffraction=0, betamax=1)\n', (669, 724), False, 'import dtmm\n'), ((827, 879), 'dtmm.field_viewer', 'dtmm.field_viewer', (['field_data_out'], {'diffraction': '(False)'}), '(field_data_out, diffraction=False)\n', (844, 879), False, 'import dtmm\n'), ((1054, 1128), 'dtmm.transfer_field', 'dtmm.transfer_field', (['field_data_in', 'optical_data'], {'diffraction': '(1)', 'betamax': '(1)'}), '(field_data_in, optical_data, diffraction=1, betamax=1)\n', (1073, 1128), False, 'import dtmm\n'), ((1146, 1179), 'dtmm.field_viewer', 'dtmm.field_viewer', (['field_data_out'], {}), '(field_data_out)\n', (1163, 1179), False, 'import dtmm\n'), ((1557, 1664), 'dtmm.transfer_field', 'dtmm.transfer_field', (['field_data_in', 'optical_data'], {'diffraction': '(5)', 'betamax': '(1)', 'split_diffraction': 'SPLITDIFF'}), '(field_data_in, optical_data, diffraction=5, betamax=1,\n split_diffraction=SPLITDIFF)\n', (1576, 1664), False, 'import dtmm\n'), ((1679, 1712), 'dtmm.field_viewer', 'dtmm.field_viewer', (['field_data_out'], {}), '(field_data_out)\n', (1696, 1712), False, 'import dtmm\n'), ((295, 401), 'dtmm.nematic_droplet_data', 'dtmm.nematic_droplet_data', (['(NLAYERS, HEIGHT, WIDTH)'], {'radius': '(30)', 'profile': '"""r"""', 'no': '(1.5)', 'ne': '(1.6)', 'nhost': '(1.5)'}), "((NLAYERS, HEIGHT, WIDTH), radius=30, profile='r',\n no=1.5, ne=1.6, nhost=1.5)\n", (320, 401), False, 'import dtmm\n'), ((1366, 1395), 'os.environ.get', 'os.environ.get', 
(['"""READTHEDOCS"""'], {}), "('READTHEDOCS')\n", (1380, 1395), False, 'import os\n')] |
import sys
import math
import numpy as np
try:
    from scipy.spatial import cKDTree as kd_tree
except ImportError:
    # fix: the fallback imported the very same name, making the try/except
    # pointless; fall back to the pure-python KDTree (same query() API) for
    # scipy builds that ship without the compiled cKDTree
    from scipy.spatial import KDTree as kd_tree
import maya.OpenMaya as om
import logging_util
# import progress_bar
class GeoCache(object):
    """
    Container for cached triangulated world-space geometry.

    Every triangle of the source mesh is stored as its three corner points,
    the normalized face normal, the polygon id it belongs to and the two
    edge vectors AB (p1 - p0) and AC (p2 - p0).
    note: no extra type checking or error handling is done!
    """
    def __init__(self):
        log_lvl = sys._global_spore_dispatcher.spore_globals['LOG_LEVEL']
        self.logger = logging_util.SporeLogger(__name__, log_lvl)

        # per-triangle data, all arrays are kept in sync by the cache setter
        self.p0 = om.MPointArray()
        self.p1 = om.MPointArray()
        self.p2 = om.MPointArray()
        self.normals = om.MVectorArray()
        self.poly_id = om.MIntArray()
        self.AB = om.MVectorArray()
        self.AC = om.MVectorArray()

        # raw mesh points, kept to validate the cache later
        self.poly_verts = om.MPointArray()

        # uv lookup structures, built on demand by create_uv_lookup()
        self.uv_kd_tree = None
        self.neighbor_lookup = {}

        self.mesh = None
        # NOTE(review): the flag starts out True although nothing is cached
        # yet; kept as-is for backwards compatibility - confirm intent
        self.cached = True

        # triangle ids repeated proportionally to their surface area,
        # used for area-weighted random sampling
        self.weighted_ids = []

    def cache_geometry(self, mesh):
        """ cache the given geometry
        :param mesh: the mesh which will be cached
        :type mesh: MDagPath to the mesh """

        self.flush_cache()
        self.mesh = mesh
        self.logger.debug('Cache geometry: {}'.format(mesh.fullPathName()))

        mesh_fn = om.MFnMesh(self.mesh)

        # store verts for validating the cache later
        mesh_fn.getPoints(self.poly_verts)

        # get bounding box in world space
        dag_fn = om.MFnDagNode(self.mesh)
        bb = dag_fn.boundingBox()
        inv_matrix = self.mesh.exclusiveMatrix()
        bb.transformUsing(inv_matrix)

        # initialize triangle data
        tri_points = om.MPointArray()
        vert_ids = om.MIntArray()
        tris_area = []
        smallest_tri = None

        # iterate all faces and append every triangle to the cache
        poly_iter = om.MItMeshPolygon(self.mesh)
        while not poly_iter.isDone():

            poly_index = poly_iter.index()
            poly_iter.getTriangles(tri_points, vert_ids, om.MSpace.kWorld)

            for i in xrange(tri_points.length() / 3):
                p0 = tri_points[i * 3]
                p1 = tri_points[i * 3 + 1]
                p2 = tri_points[i * 3 + 2]
                area, AB, AC, normal = self.get_triangle_area(p0, p1, p2)

                # fix: test for None first - "area < None" is always False in
                # python 2 and a TypeError in python 3
                if smallest_tri is None or area < smallest_tri:
                    smallest_tri = area
                tris_area.append(area)
                self.cache = (p0, p1, p2, normal, poly_index, AB, AC)

            poly_iter.next()

        # build a list where every triangle id appears roughly proportionally
        # to its surface area
        # NOTE(review): assumes no zero-area triangle, otherwise this divides
        # by zero - confirm input meshes are always clean
        probability = [int(math.ceil(area / smallest_tri)) for area in tris_area]
        for idx, chance in enumerate(probability):
            self.weighted_ids.extend([idx] * chance)

        self.cached = True

    def get_triangle_area(self, p0, p1, p2):
        """
        return size of a triangle and the vector p1-p0 and p2-p0
        :param p0: MPoint 1
        :param p1: MPoint 2
        :param p2: MPoint 3
        :return: triangle area, vector AB, vector AC, and the normalized triangle normal
        """

        AB = om.MVector(p1 - p0)
        AC = om.MVector(p2 - p0)
        normal = (AB ^ AC)

        # actually the real surface area is area/2
        # but since all tris are handled the same way it does not make any difference
        # hence I can save computation by omitting area/2
        area = math.sqrt(normal[0] ** 2 + normal[1] ** 2 + normal[2] ** 2)
        normal.normalize()

        return area, AB, AC, normal

    def create_uv_lookup(self):
        """ create a dict with an entry for every vertex and a list of
        neighbouring faces as well as a kd tree to look up close face ids """

        self.logger.debug('Create UV lookup for the current GeoCache')

        util = om.MScriptUtil()
        connected_faces = om.MIntArray()

        mesh_fn = om.MFnMesh(self.mesh)
        num_verts = mesh_fn.numVertices()
        points = np.zeros(shape=(num_verts, 2))

        # walk every vertex, remember its connected faces and its uv position
        vert_iter = om.MItMeshVertex(self.mesh)
        while not vert_iter.isDone():
            index = vert_iter.index()
            vert_iter.getConnectedFaces(connected_faces)
            self.neighbor_lookup[index] = [connected_faces[i] for i in xrange(connected_faces.length())]

            # MScriptUtil dance to read the (u, v) float pair from the API
            util.createFromDouble(0.0, 0.0)
            uv_ptr = util.asFloat2Ptr()
            vert_iter.getUV(uv_ptr)
            u_coord = util.getFloat2ArrayItem(uv_ptr, 0, 0)
            v_coord = util.getFloat2ArrayItem(uv_ptr, 0, 1)
            points[index] = (u_coord, v_coord)

            vert_iter.next()

        self.uv_kd_tree = kd_tree(points)

    def get_close_face_ids(self, u_coord, v_coord):
        """ get a list of neighbour face ids to the give u and v coords """

        distance, index = self.uv_kd_tree.query((u_coord, v_coord), 1)
        return self.neighbor_lookup[index]

    def validate_cache(self):
        """ check if the current cache is valid
        :return: True if the cached points still match the mesh, else False """

        points = om.MPointArray()
        mesh_fn = om.MFnMesh(self.mesh)
        mesh_fn.getPoints(points)

        # fix: this branch means the cache is stale - the original logged
        # "succeded" here by mistake
        if points.length() != self.poly_verts.length():
            self.logger.debug('Validate GeoCache failed')
            return False

        for i in xrange(points.length()):
            if points[i] != self.poly_verts[i]:
                self.logger.debug('Validate GeoCache failed')
                return False

        return True

    ################################################################################################
    # cache property
    ################################################################################################

    @property
    def cache(self):
        """ cache getter
        :return: tuple of entire geo cache:
                 id  content         data type
                 0 - p0            - MPointArray
                 1 - p1            - MPointArray
                 2 - p2            - MPointArray
                 3 - face normal   - MVectorArray
                 4 - polygon id    - MIntArray
                 5 - vector AB     - MVectorArray
                 6 - vector AC     - MVectorArray
        """

        return self.p0,\
               self.p1,\
               self.p2,\
               self.normals,\
               self.poly_id,\
               self.AB,\
               self.AC

    @cache.setter
    def cache(self, triangle):
        """ cache setter
        append one triangle to the end of the current cache
        :param triangle: argument must be of type tuple or list
                         it must consist of the following items in the exact same order:
                         id  content         data type
                         0 - p0            - MPoint
                         1 - p1            - MPoint
                         2 - p2            - MPoint
                         3 - face normal   - MVector
                         4 - polygon id    - int
                         5 - vector AB     - MVector
                         6 - vector AC     - MVector
        note: no error or type checking is done!
        """

        self.p0.append(triangle[0])
        self.p1.append(triangle[1])
        self.p2.append(triangle[2])
        self.normals.append(triangle[3])
        self.poly_id.append(int(triangle[4]))
        self.AB.append(triangle[5])
        self.AC.append(triangle[6])

    def flush_cache(self):
        """ reset all cached triangle data """

        self.logger.debug('Flush GeoCache')
        self.p0 = om.MPointArray()
        self.p1 = om.MPointArray()
        self.p2 = om.MPointArray()
        self.normals = om.MVectorArray()
        self.poly_id = om.MIntArray()
        self.AB = om.MVectorArray()
        self.AC = om.MVectorArray()
        # fix: also reset the weighted ids - otherwise repeated calls to
        # cache_geometry() accumulate indices of triangles that no longer exist
        self.weighted_ids = []
        self.cached = False

    def __len__(self):
        # fix: the original referenced the undefined global "p0"
        return self.p0.length()
| [
"maya.OpenMaya.MVectorArray",
"math.ceil",
"scipy.spatial.cKDTree",
"logging_util.SporeLogger",
"maya.OpenMaya.MScriptUtil",
"math.sqrt",
"maya.OpenMaya.MIntArray",
"maya.OpenMaya.MFnDagNode",
"numpy.zeros",
"maya.OpenMaya.MFnMesh",
"maya.OpenMaya.MItMeshPolygon",
"maya.OpenMaya.MPointArray",
... | [((509, 552), 'logging_util.SporeLogger', 'logging_util.SporeLogger', (['__name__', 'log_lvl'], {}), '(__name__, log_lvl)\n', (533, 552), False, 'import logging_util\n'), ((572, 588), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (586, 588), True, 'import maya.OpenMaya as om\n'), ((607, 623), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (621, 623), True, 'import maya.OpenMaya as om\n'), ((642, 658), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (656, 658), True, 'import maya.OpenMaya as om\n'), ((682, 699), 'maya.OpenMaya.MVectorArray', 'om.MVectorArray', ([], {}), '()\n', (697, 699), True, 'import maya.OpenMaya as om\n'), ((723, 737), 'maya.OpenMaya.MIntArray', 'om.MIntArray', ([], {}), '()\n', (735, 737), True, 'import maya.OpenMaya as om\n'), ((756, 773), 'maya.OpenMaya.MVectorArray', 'om.MVectorArray', ([], {}), '()\n', (771, 773), True, 'import maya.OpenMaya as om\n'), ((792, 809), 'maya.OpenMaya.MVectorArray', 'om.MVectorArray', ([], {}), '()\n', (807, 809), True, 'import maya.OpenMaya as om\n'), ((837, 853), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (851, 853), True, 'import maya.OpenMaya as om\n'), ((1483, 1504), 'maya.OpenMaya.MFnMesh', 'om.MFnMesh', (['self.mesh'], {}), '(self.mesh)\n', (1493, 1504), True, 'import maya.OpenMaya as om\n'), ((1759, 1783), 'maya.OpenMaya.MFnDagNode', 'om.MFnDagNode', (['self.mesh'], {}), '(self.mesh)\n', (1772, 1783), True, 'import maya.OpenMaya as om\n'), ((1962, 1978), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (1976, 1978), True, 'import maya.OpenMaya as om\n'), ((1998, 2012), 'maya.OpenMaya.MIntArray', 'om.MIntArray', ([], {}), '()\n', (2010, 2012), True, 'import maya.OpenMaya as om\n'), ((2105, 2133), 'maya.OpenMaya.MItMeshPolygon', 'om.MItMeshPolygon', (['self.mesh'], {}), '(self.mesh)\n', (2122, 2133), True, 'import maya.OpenMaya as om\n'), ((3555, 3574), 'maya.OpenMaya.MVector', 'om.MVector', (['(p1 - p0)'], 
{}), '(p1 - p0)\n', (3565, 3574), True, 'import maya.OpenMaya as om\n'), ((3588, 3607), 'maya.OpenMaya.MVector', 'om.MVector', (['(p2 - p0)'], {}), '(p2 - p0)\n', (3598, 3607), True, 'import maya.OpenMaya as om\n'), ((3847, 3906), 'math.sqrt', 'math.sqrt', (['(normal[0] ** 2 + normal[1] ** 2 + normal[2] ** 2)'], {}), '(normal[0] ** 2 + normal[1] ** 2 + normal[2] ** 2)\n', (3856, 3906), False, 'import math\n'), ((4243, 4259), 'maya.OpenMaya.MScriptUtil', 'om.MScriptUtil', ([], {}), '()\n', (4257, 4259), True, 'import maya.OpenMaya as om\n'), ((4286, 4300), 'maya.OpenMaya.MIntArray', 'om.MIntArray', ([], {}), '()\n', (4298, 4300), True, 'import maya.OpenMaya as om\n'), ((4320, 4341), 'maya.OpenMaya.MFnMesh', 'om.MFnMesh', (['self.mesh'], {}), '(self.mesh)\n', (4330, 4341), True, 'import maya.OpenMaya as om\n'), ((4401, 4431), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_verts, 2)'}), '(shape=(num_verts, 2))\n', (4409, 4431), True, 'import numpy as np\n'), ((4453, 4480), 'maya.OpenMaya.MItMeshVertex', 'om.MItMeshVertex', (['self.mesh'], {}), '(self.mesh)\n', (4469, 4480), True, 'import maya.OpenMaya as om\n'), ((5065, 5080), 'scipy.spatial.cKDTree', 'kd_tree', (['points'], {}), '(points)\n', (5072, 5080), True, 'from scipy.spatial import cKDTree as kd_tree\n'), ((5427, 5443), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (5441, 5443), True, 'import maya.OpenMaya as om\n'), ((5462, 5483), 'maya.OpenMaya.MFnMesh', 'om.MFnMesh', (['self.mesh'], {}), '(self.mesh)\n', (5472, 5483), True, 'import maya.OpenMaya as om\n'), ((9142, 9158), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (9156, 9158), True, 'import maya.OpenMaya as om\n'), ((9177, 9193), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (9191, 9193), True, 'import maya.OpenMaya as om\n'), ((9212, 9228), 'maya.OpenMaya.MPointArray', 'om.MPointArray', ([], {}), '()\n', (9226, 9228), True, 'import maya.OpenMaya as om\n'), ((9252, 9269), 
'maya.OpenMaya.MVectorArray', 'om.MVectorArray', ([], {}), '()\n', (9267, 9269), True, 'import maya.OpenMaya as om\n'), ((9293, 9307), 'maya.OpenMaya.MIntArray', 'om.MIntArray', ([], {}), '()\n', (9305, 9307), True, 'import maya.OpenMaya as om\n'), ((9326, 9343), 'maya.OpenMaya.MVectorArray', 'om.MVectorArray', ([], {}), '()\n', (9341, 9343), True, 'import maya.OpenMaya as om\n'), ((9362, 9379), 'maya.OpenMaya.MVectorArray', 'om.MVectorArray', ([], {}), '()\n', (9377, 9379), True, 'import maya.OpenMaya as om\n'), ((3057, 3087), 'math.ceil', 'math.ceil', (['(area / smallest_tri)'], {}), '(area / smallest_tri)\n', (3066, 3087), False, 'import math\n')] |
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import trange
import os
from pycocotools.coco import COCO
from pycocotools import mask
from torchvision import transforms
from dataloaders import custom_transforms as tr
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from dataloaders.datasets.coco import COCOSegmentationSampleLoader
class RGBDSegmentation(Dataset):
    """COCO-annotation-backed RGB-D semantic segmentation dataset.

    Reads a COCO ``instances_{split}.json`` file, filters images whose
    ground-truth mask is too small, and delegates per-sample loading to
    :class:`RGBDSegmentationSampleLoader`.
    """
    NUM_CLASSES = 38
    CAT_LIST = list(range(38))

    def __init__(self,
                 cfg,
                 split='train'):
        """
        Args:
            cfg: config node; reads ``cfg.DATASET.ROOT`` and ``cfg.DATASET.MODE``.
            split: dataset split name used to locate annotation/id files.
        """
        super().__init__()
        base_dir = cfg.DATASET.ROOT
        ann_file = os.path.join(base_dir, 'annotations/instances_{}.json'.format(split))
        ids_file = os.path.join(base_dir, 'annotations/{}_ids.pth'.format(split))
        self.img_dir = os.path.join(base_dir, 'images')
        # Depth maps live alongside the RGB images.
        self.depth_dir = self.img_dir
        self.split = split
        self.coco = COCO(ann_file)
        self.mode = cfg.DATASET.MODE
        class_names = [self.coco.cats[i]['name'] for i in self.CAT_LIST]
        self.loader = RGBDSegmentationSampleLoader(cfg, self.coco, split, self.CAT_LIST, class_names)
        # The mask-size filtering pass is expensive, so its result is cached
        # on disk and reused on subsequent runs.
        if os.path.exists(ids_file):
            self.ids = torch.load(ids_file)
        else:
            ids = list(self.coco.imgs.keys())
            self.ids = self._preprocess(ids, ids_file)
        self.coco_id_index = dict(zip(self.ids, range(len(self.ids))))
        self.cfg = cfg

    def __getitem__(self, index):
        """Return one sample dict, with the COCO image id stored under 'id'."""
        img_path, depth_path, img_id = self.get_path(index)
        sample = self.loader.load_sample(img_path, depth_path, img_id)
        sample['id'] = img_id
        return sample

    def get_path(self, index):
        """Resolve ``(image path, depth path, coco image id)`` for an index.

        Raises:
            ValueError: if ``cfg.DATASET.MODE`` is neither 'RGBD' nor 'RGB_HHA'.
        """
        coco = self.coco
        img_id = self.ids[index]
        img_metadata = coco.loadImgs(img_id)[0]
        path = img_metadata['file_name']
        img_path = os.path.join(self.img_dir, path)
        if self.mode == "RGBD":
            depth_path = os.path.join(self.depth_dir, img_metadata['depth_file_name'])
        elif self.mode == 'RGB_HHA':
            depth_path = os.path.join(self.depth_dir, path)
        else:
            # BUG FIX: previously fell through with depth_path unbound and
            # raised UnboundLocalError; fail loudly with a clear message.
            raise ValueError('Unsupported dataset mode: {}'.format(self.mode))
        return img_path, depth_path, img_id

    def __len__(self):
        return len(self.ids)

    def _preprocess(self, ids, ids_file):
        """Keep only images whose mask has > 1000 labelled pixels; cache ids.

        Returns the filtered id list and saves it to ``ids_file``.
        """
        print("Preprocessing mask, this will take a while. " + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            # Local name deliberately avoids shadowing the module-level
            # `pycocotools.mask` import.
            seg_mask = self.loader.gen_seg_mask(cocotarget, img_metadata['height'],
                                                img_metadata['width'])
            # more than 1k pixels
            if (seg_mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        torch.save(new_ids, ids_file)
        return new_ids
class RGBDSegmentationSampleLoader(COCOSegmentationSampleLoader):
    """Sample loader for SUNRGBD-style RGB-D data on top of the COCO loader."""

    def normalizationFactors(self):
        """Set per-channel ``data_mean``/``data_std`` for the current mode.

        Raises:
            NotImplementedError: for the 'RGB_HHA' mode.
            ValueError: for any unrecognized mode.
        """
        if self.mode == "RGBD":
            print('Using RGB-D input')
            # Data mean and std empirically determined from 1000 SUNRGBD samples
            self.data_mean = [0.517, 0.511, 0.485, 0.206]
            self.data_std = [0.269, 0.281, 0.288, 0.159]
        elif self.mode == "RGB":
            print('Using RGB input')
            self.data_mean = [0.517, 0.511, 0.485]
            self.data_std = [0.269, 0.281, 0.288]
        elif self.mode == "RGB_HHA":
            raise NotImplementedError("HHA normalization factors not implemented for SUNRGBD")
        else:
            # BUG FIX: previously fell through silently, leaving
            # data_mean/data_std unset and failing later with AttributeError.
            raise ValueError('Unsupported dataset mode: {}'.format(self.mode))

    def gen_seg_mask(self, target, h, w):
        """Rasterize COCO annotations into an ``(h, w)`` uint8 class-index mask.

        Pixels already labelled by an earlier instance are never overwritten
        (the ``(seg_mask == 0)`` guard makes the first annotation win).
        """
        # Local name deliberately avoids shadowing the module-level
        # `pycocotools.mask` import.
        seg_mask = np.zeros((h, w), dtype=np.uint8)
        coco_mask = self.coco_mask
        for instance in target:
            rle = instance['segmentation']
            m = coco_mask.decode(rle)
            cat = instance['category_id']
            if cat in self.CAT_LIST:
                c = self.CAT_LIST.index(cat)
            else:
                continue
            if len(m.shape) < 3:
                seg_mask[:, :] += (seg_mask == 0) * (m * c)
            else:
                # Multi-part RLE decodes to (h, w, n); collapse parts first.
                seg_mask[:, :] += (seg_mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
        return seg_mask

    def loadDepth(self, depth_path):
        """Load the depth channel as a PIL image.

        'RGBD' mode reads a 16-bit SUNRGBD depth PNG and rescales it to an
        8-bit 'L' image; 'RGB_HHA' mode reads a 3-channel HHA encoding.

        Raises:
            ValueError: for any unrecognized mode.
        """
        if self.mode == 'RGBD':
            _depth_arr = np.asarray(Image.open(depth_path), dtype='uint16')
            # Conversion from SUNRGBD Toolbox readData/read3dPoints.m
            _depth_arr = np.bitwise_or(np.right_shift(_depth_arr, 3), np.left_shift(_depth_arr, 16 - 3))
            _depth_arr = np.asarray(_depth_arr, dtype='float') / 1000.0
            # Clamp depths beyond 8 m before scaling into [0, 255].
            _depth_arr[_depth_arr > 8] = 8
            _depth_arr = _depth_arr / 8. * 255.
            _depth_arr = _depth_arr.astype(np.uint8)
            _depth = Image.fromarray(_depth_arr).convert('L')
        elif self.mode == 'RGB_HHA':
            _depth = Image.open(depth_path).convert('RGB')
        else:
            # BUG FIX: previously fell through with _depth unbound and raised
            # UnboundLocalError on the return below.
            raise ValueError('Unsupported dataset mode: {}'.format(self.mode))
        return _depth
if __name__ == "__main__":
    # Stand-alone smoke test: load the validation split and display a few
    # (image, segmentation map, depth channel) triplets.
    from dataloaders.config.defaults import get_cfg_defaults
    from dataloaders import custom_transforms as tr
    from dataloaders.utils import decode_segmap
    from torch.utils.data import DataLoader
    from torchvision import transforms
    import matplotlib.pyplot as plt
    import argparse
    parser = argparse.ArgumentParser(description="Test SUNRGBD Loader")
    parser.add_argument('config_file', help='config file path')
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    print(cfg)
    coco_val = RGBDSegmentation(cfg, split='val')
    dataloader = DataLoader(coco_val, batch_size=4, shuffle=True, num_workers=0)
    for ii, sample in enumerate(dataloader):
        for jj in range(sample["image"].size()[0]):
            img = sample['image'].numpy()
            gt = sample['label'].numpy()
            tmp = np.array(gt[jj]).astype(np.uint8)
            segmap = decode_segmap(tmp, dataset='coco')
            # Undo the normalization so the image is displayable again.
            # NOTE(review): assumes 4-channel RGBD input — the [:,:,3] plot
            # below would fail for 3-channel RGB mode; confirm with config.
            img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
            img_tmp *= coco_val.loader.data_std
            img_tmp += coco_val.loader.data_mean
            img_tmp *= 255.0
            img_tmp = img_tmp.astype(np.uint8)
            plt.figure()
            plt.title('display')
            plt.subplot(311)
            plt.imshow(img_tmp[:,:,:3])
            plt.subplot(312)
            plt.imshow(segmap)
            plt.subplot(313)
            plt.imshow(img_tmp[:,:,3])
        # Only show the first two batches.
        if ii == 1:
            break
    plt.show(block=True)
| [
"numpy.array",
"numpy.right_shift",
"matplotlib.pyplot.imshow",
"os.path.exists",
"argparse.ArgumentParser",
"pycocotools.coco.COCO",
"numpy.asarray",
"numpy.left_shift",
"dataloaders.config.defaults.get_cfg_defaults",
"torch.save",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.p... | [((5511, 5569), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test SUNRGBD Loader"""'}), "(description='Test SUNRGBD Loader')\n", (5534, 5569), False, 'import argparse\n'), ((5841, 5859), 'dataloaders.config.defaults.get_cfg_defaults', 'get_cfg_defaults', ([], {}), '()\n', (5857, 5859), False, 'from dataloaders.config.defaults import get_cfg_defaults\n'), ((6038, 6101), 'torch.utils.data.DataLoader', 'DataLoader', (['coco_val'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(coco_val, batch_size=4, shuffle=True, num_workers=0)\n', (6048, 6101), False, 'from torch.utils.data import DataLoader\n'), ((6923, 6943), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (6931, 6943), True, 'import matplotlib.pyplot as plt\n'), ((811, 843), 'os.path.join', 'os.path.join', (['base_dir', '"""images"""'], {}), "(base_dir, 'images')\n", (823, 843), False, 'import os\n'), ((929, 943), 'pycocotools.coco.COCO', 'COCO', (['ann_file'], {}), '(ann_file)\n', (933, 943), False, 'from pycocotools.coco import COCO\n'), ((1170, 1194), 'os.path.exists', 'os.path.exists', (['ids_file'], {}), '(ids_file)\n', (1184, 1194), False, 'import os\n'), ((1866, 1898), 'os.path.join', 'os.path.join', (['self.img_dir', 'path'], {}), '(self.img_dir, path)\n', (1878, 1898), False, 'import os\n'), ((3102, 3131), 'torch.save', 'torch.save', (['new_ids', 'ids_file'], {}), '(new_ids, ids_file)\n', (3112, 3131), False, 'import torch\n'), ((3888, 3920), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (3896, 3920), True, 'import numpy as np\n'), ((1219, 1239), 'torch.load', 'torch.load', (['ids_file'], {}), '(ids_file)\n', (1229, 1239), False, 'import torch\n'), ((1957, 2018), 'os.path.join', 'os.path.join', (['self.depth_dir', "img_metadata['depth_file_name']"], {}), "(self.depth_dir, img_metadata['depth_file_name'])\n", (1969, 2018), False, 'import os\n'), 
((6356, 6390), 'dataloaders.utils.decode_segmap', 'decode_segmap', (['tmp'], {'dataset': '"""coco"""'}), "(tmp, dataset='coco')\n", (6369, 6390), False, 'from dataloaders.utils import decode_segmap\n'), ((6413, 6450), 'numpy.transpose', 'np.transpose', (['img[jj]'], {'axes': '[1, 2, 0]'}), '(img[jj], axes=[1, 2, 0])\n', (6425, 6450), True, 'import numpy as np\n'), ((6636, 6648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6646, 6648), True, 'import matplotlib.pyplot as plt\n'), ((6661, 6681), 'matplotlib.pyplot.title', 'plt.title', (['"""display"""'], {}), "('display')\n", (6670, 6681), True, 'import matplotlib.pyplot as plt\n'), ((6694, 6710), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (6705, 6710), True, 'import matplotlib.pyplot as plt\n'), ((6723, 6752), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_tmp[:, :, :3]'], {}), '(img_tmp[:, :, :3])\n', (6733, 6752), True, 'import matplotlib.pyplot as plt\n'), ((6763, 6779), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (6774, 6779), True, 'import matplotlib.pyplot as plt\n'), ((6792, 6810), 'matplotlib.pyplot.imshow', 'plt.imshow', (['segmap'], {}), '(segmap)\n', (6802, 6810), True, 'import matplotlib.pyplot as plt\n'), ((6823, 6839), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (6834, 6839), True, 'import matplotlib.pyplot as plt\n'), ((6852, 6880), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_tmp[:, :, 3]'], {}), '(img_tmp[:, :, 3])\n', (6862, 6880), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2116), 'os.path.join', 'os.path.join', (['self.depth_dir', 'path'], {}), '(self.depth_dir, path)\n', (2094, 2116), False, 'import os\n'), ((4558, 4580), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (4568, 4580), False, 'from PIL import Image, ImageFile\n'), ((4707, 4736), 'numpy.right_shift', 'np.right_shift', (['_depth_arr', '(3)'], {}), '(_depth_arr, 3)\n', (4721, 4736), True, 'import 
numpy as np\n'), ((4738, 4771), 'numpy.left_shift', 'np.left_shift', (['_depth_arr', '(16 - 3)'], {}), '(_depth_arr, 16 - 3)\n', (4751, 4771), True, 'import numpy as np\n'), ((4798, 4835), 'numpy.asarray', 'np.asarray', (['_depth_arr'], {'dtype': '"""float"""'}), "(_depth_arr, dtype='float')\n", (4808, 4835), True, 'import numpy as np\n'), ((5010, 5037), 'PIL.Image.fromarray', 'Image.fromarray', (['_depth_arr'], {}), '(_depth_arr)\n', (5025, 5037), False, 'from PIL import Image, ImageFile\n'), ((6301, 6317), 'numpy.array', 'np.array', (['gt[jj]'], {}), '(gt[jj])\n', (6309, 6317), True, 'import numpy as np\n'), ((5109, 5131), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (5119, 5131), False, 'from PIL import Image, ImageFile\n'), ((4386, 4403), 'numpy.sum', 'np.sum', (['m'], {'axis': '(2)'}), '(m, axis=2)\n', (4392, 4403), True, 'import numpy as np\n')] |
import sys
import os
from datetime import datetime, timedelta
import numpy as np
import xarray as xr

# MET Python-embedding script: read one forecast field from a TTU WRF ensemble
# file and expose `met_data` plus the `attrs` grid dictionary that MET expects.
# Command line: <forecast file path> <variable name> <level|'Surface'> <member>.
path = str(sys.argv[1])
name = str(sys.argv[2])
level = str(sys.argv[3])
member = int(sys.argv[4])
path_wrfref = os.getenv("PATH_WRFREF")
f = xr.open_dataset(path).squeeze()
initialization = datetime.strptime(f.initialization, "%Y-%m-%d %H:%M:%S")
forecast_hour = int(f.forecast_hour)
valid = initialization + timedelta(hours=forecast_hour)
domain = int(f.domain)
# The static reference file supplies the projection/grid metadata.
ref = xr.open_dataset("{}/wrfoutREFd0{}".format(path_wrfref, domain)).squeeze()
if level == "Surface":
    sel = {"member": member}
else:
    sel = {"member": member, "pressure": int(level)}
# Extract the forecast field from the dataset, convert to *DOUBLE* floating point
# precision (float64) as required by MET, and round to avoid adding random noise.
try:
    fcst_field = np.asarray(f[name].sel(sel), dtype=float).round(5)
    # Flip north-south so the array matches MET's row ordering.
    met_data = np.flip(fcst_field, axis=0).copy()
except KeyError as err:
    sys.stderr.write("{}: KeyError: {}".format(sys.argv[0], err))
    sys.exit(1)
# =====
# Create attributes dictionary as specified in MET user's guide under Python embedding
# =====
try:
    xlat = ref.variables['XLAT'].data
except KeyError as err:
    # BUG FIX: this handler previously formatted an undefined name `varkey`,
    # which raised NameError instead of reporting the missing variable.
    sys.stderr.write("{}: KeyError: {}".format(sys.argv[0], err))
    sys.exit(1)
try:
    xlong = ref.variables['XLONG'].data
except KeyError as err:
    # BUG FIX: same undefined-`varkey` problem as above.
    sys.stderr.write("{}: KeyError: {}".format(sys.argv[0], err))
    sys.exit(1)
grid_attrs = {
    'type': 'Lambert Conformal',
    'hemisphere': 'N',
    'name': 'TTU WRF',
    'lat_pin': float(xlat[0, 0]),
    'lon_pin': float(xlong[0, 0]),
    'x_pin': 0.0,
    'y_pin': 0.0,
    'r_km': 6371.2,
    'scale_lat_1': float(ref.attrs['TRUELAT1']),
    'scale_lat_2': float(ref.attrs['TRUELAT2']),
    'lon_orient': float(ref.attrs['STAND_LON']),
    'd_km': float(ref.attrs['DX']) / 1000.,
    'nx': int(ref.attrs['WEST-EAST_GRID_DIMENSION']),
    'ny': int(ref.attrs['SOUTH-NORTH_GRID_DIMENSION']),
}
attrs = {
    'valid': valid.strftime("%Y%m%d_%H"),
    'init': initialization.strftime("%Y%m%d_%H"),
    'lead': str(forecast_hour),
    'accum': '0',
    'name': name,
    'long_name': name,
    'level': level,
    'units': str(f[name].units),
    'grid': grid_attrs,
}
| [
"numpy.flip",
"os.getenv",
"datetime.datetime.strptime",
"sys.exit",
"datetime.timedelta",
"xarray.open_dataset"
] | [((217, 241), 'os.getenv', 'os.getenv', (['"""PATH_WRFREF"""'], {}), "('PATH_WRFREF')\n", (226, 241), False, 'import os\n'), ((297, 353), 'datetime.datetime.strptime', 'datetime.strptime', (['f.initialization', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(f.initialization, '%Y-%m-%d %H:%M:%S')\n", (314, 353), False, 'from datetime import datetime, timedelta\n'), ((416, 446), 'datetime.timedelta', 'timedelta', ([], {'hours': 'forecast_hour'}), '(hours=forecast_hour)\n', (425, 446), False, 'from datetime import datetime, timedelta\n'), ((247, 268), 'xarray.open_dataset', 'xr.open_dataset', (['path'], {}), '(path)\n', (262, 268), True, 'import xarray as xr\n'), ((1045, 1056), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1053, 1056), False, 'import sys\n'), ((1295, 1306), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1303, 1306), False, 'import sys\n'), ((1442, 1453), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1450, 1453), False, 'import sys\n'), ((916, 943), 'numpy.flip', 'np.flip', (['fcst_field'], {'axis': '(0)'}), '(fcst_field, axis=0)\n', (923, 943), True, 'import numpy as np\n')] |
from keras import backend as K
from keras.models import load_model
from keras.preprocessing import image
from keras.optimizers import Adam
import cv2 as cv2
import numpy as np
from matplotlib import pyplot as plt
import argparse
from models.keras_ssd512 import ssd_512
import h5py
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from bounding_box_utils.bounding_box_utils import iou as iou
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
if __name__ == '__main__':
    # Evaluation driver: run SSD512 inference over a CSV-described dataset and
    # report per-image IoU between predictions and ground truth.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights_path', type=str, default='', help='weights_path')
    parser.add_argument('--im_path', default='', type=str, help='im_path batch size')
    parser.add_argument('--test_label_path', default='', type=str, help='path to test labels csv')
    parser.add_argument('--val_label_path', default='', type=str, help='path to val labels csv')
    args = parser.parse_args()
    args_dict = vars(args)
    print('Model params are:')
    for k, v in args_dict.items():
        print(k + ' : ' + str(v))
    ###############################################################################
    # 0: Pre-defined parameters
    ###############################################################################
    # Data params
    batch_size = 1
    img_channels = 3
    # Do not change this value if you're using any of the pre-trained weights.
    mean_color = [123, 117, 104]
    swap_channels = [0, 1, 2]
    img_height = 512
    img_width = 512
    # Model params
    # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
    n_classes = 6
    scales_pascal = [0.07, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05]
    scales_coco = [0.04, 0.1, 0.26, 0.42, 0.58, 0.74, 0.9, 1.06]
    # TODO: rethink about this param Roy
    scales = scales_coco
    aspect_ratios = [[1.0, 2.0, 0.5],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5],
                     [1.0, 2.0, 0.5]]
    two_boxes_for_ar1 = True
    # The space between two adjacent anchor box center points for each predictor layer.
    steps = [8, 16, 32, 64, 128, 256, 512]
    # The offsets of the first anchor box center points from the top and left borders of the image
    # as a fraction of the step size for each predictor layer.
    offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
    clip_boxes = False
    # The variances by which the encoded target coordinates are divided as in the original implementation
    variances = [0.1, 0.1, 0.2, 0.2]
    normalize_coords = True
    ###############################################################################
    # 1: Functions
    ###############################################################################
    def plot_predictions(orig_image, y_pred_thresh, gt_box):
        """Overlay ground-truth boxes (labelled 'GT') and thresholded
        predictions on one image, rescaling predictions from 512x512
        network space to the original image dimensions."""
        colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
        classes = ['background', 'green', 'orange', 'white', 'gray', 'blue', 'red']
        plt.figure(figsize=(20, 12))
        plt.imshow(orig_image)
        current_axis = plt.gca()
        for box in gt_box[0]:
            xmin = box[1]
            ymin = box[2]
            xmax = box[3]
            ymax = box[4]
            color = colors[10]
            label = 'GT'
            current_axis.add_patch(
                plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=False, linewidth=2))
            current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor': color, 'alpha': 1.0})
        for box in y_pred_thresh:
            # Transform the predicted bounding boxes for the 512x512 image to the original image dimensions.
            xmin = box[-4] * orig_image.shape[1] / img_width
            ymin = box[-3] * orig_image.shape[0] / img_height
            xmax = box[-2] * orig_image.shape[1] / img_width
            ymax = box[-1] * orig_image.shape[0] / img_height
            color = colors[int(box[0])]
            label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
            current_axis.add_patch(
                plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=False, linewidth=2))
            current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor': color, 'alpha': 1.0})
    # Reformat the bbox to the desired format and scale to original image size
    # Corners is [class, xmin, ymin, xmax, ymax] or [class, confidence, xmin, ymin, xmax, ymax]
    def reformat_box(corners, scale=True):
        """Convert a corner-format box to [x, y, w, h, class]; when `scale`
        is True, map from 512x512 network space back to the original
        (3648x2736) image size."""
        if len(corners) == 6:
            cls = corners[0]
            corners = corners[2:]
        else:
            cls = corners[0]
            corners = corners[1:]
        xmin = corners[0]
        ymin = corners[1]
        xmax = corners[2]
        ymax = corners[3]
        new_cord = [xmin, ymin, xmax - xmin, ymax - ymin]
        if scale:
            dim = 512
            org_h = 2736
            org_w = 3648
            scale_h = dim / float(org_h)
            scale_w = dim / float(org_w)
            new_cord = [int(new_cord[0] / scale_w),
                        int(new_cord[1] / scale_h),
                        int(new_cord[2] / scale_w),
                        int(new_cord[3] / scale_h)]
        new_cord.append(cls)
        return new_cord
    ###############################################################################
    # 2: Build the Keras model
    ###############################################################################
    K.clear_session()  # Clear previous models from memory.
    print('Building the model')
    model = ssd_512(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='inference',
                    l2_regularization=0.0005,
                    scales=scales,
                    aspect_ratios_per_layer=aspect_ratios,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=mean_color,
                    swap_channels=swap_channels,
                    confidence_thresh=0.5,
                    iou_threshold=0.45,
                    top_k=200,
                    nms_max_output_size=400)
    model.load_weights(args.weights_path, by_name=True)
    # 3: Compile the model so that Keras won't complain the next time you load it.
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
    model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
    ###############################################################################
    # 3: Build the DataGenerator
    ###############################################################################
    test_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
    test_dataset.parse_csv(images_dir=args.im_path,
                           labels_filename=args.test_label_path,
                           input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'],
                           include_classes='all',
                           random_sample=False,
                           ret=False,
                           verbose=True)
    val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
    val_dataset.parse_csv(images_dir=args.im_path,
                          labels_filename=args.val_label_path,
                          input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'],
                          include_classes='all',
                          random_sample=False,
                          ret=False,
                          verbose=True)
    # For the validation generator:
    convert_to_3_channels = ConvertTo3Channels()
    # Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
    predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
                       model.get_layer('fc7_mbox_conf').output_shape[1:3],
                       model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv9_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv10_2_mbox_conf').output_shape[1:3]]
    ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                        img_width=img_width,
                                        n_classes=n_classes,
                                        predictor_sizes=predictor_sizes,
                                        scales=scales,
                                        aspect_ratios_per_layer=aspect_ratios,
                                        two_boxes_for_ar1=two_boxes_for_ar1,
                                        steps=steps,
                                        offsets=offsets,
                                        clip_boxes=clip_boxes,
                                        variances=variances,
                                        matching_type='multi',
                                        pos_iou_threshold=0.5,
                                        neg_iou_limit=0.5,
                                        normalize_coords=normalize_coords)
    # Create the generator handles that will be passed to Keras' `fit_generator()` function.
    test_generator = test_dataset.generate(batch_size=batch_size,
                                           shuffle=False,
                                           transformations=[],
                                           label_encoder=ssd_input_encoder,
                                           returns={'processed_images',
                                                    'processed_labels',
                                                    'filenames'},
                                           keep_images_without_gt=False)
    val_generator = val_dataset.generate(batch_size=batch_size,
                                         shuffle=False,
                                         transformations=[],
                                         label_encoder=ssd_input_encoder,
                                         returns={'processed_images',
                                                  'processed_labels',
                                                  'filenames'},
                                         keep_images_without_gt=False)
    test_dataset_size = test_dataset.get_dataset_size()
    val_dataset_size = val_dataset.get_dataset_size()
    print("Number of images in the test dataset:\t{:>6}".format(test_dataset_size))
    print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
    ###############################################################################
    # 3: Predict !
    ###############################################################################
    ###############################################
    # Choose the data set to predict:
    gen = test_generator
    dataset_size = test_dataset_size
    ###############################################
    im_name_ls = []
    gt_label_ls = []
    # BUG FIX: the input tensor must hold one image per sample of the CHOSEN
    # set (dataset_size). It was previously sized with val_dataset_size, which
    # raises IndexError when the test split is larger than the validation
    # split, and feeds padded all-zero images to the model when it is smaller.
    input_images_tensor = np.zeros([dataset_size, img_height, img_width, img_channels])
    for i in range(dataset_size):
        img, gt_label, im_name = next(gen)
        input_images_tensor[i, :, :, :] = img
        im_name_ls.append(im_name)
        gt_label_ls.append(gt_label)
    y_pred = model.predict(input_images_tensor)
    confidence_threshold = 0.5
    y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]
    # Plot predictions
    # for i in range(dataset_size):
    #     plot_predictions(input_images_tensor[i, :, :, :].astype(np.uint8), y_pred_thresh[i], gt_label_ls[i])
    # Print IOU scores:
    for i in range(dataset_size):
        print('Image name : {}'.format(im_name_ls[i][0]))
        print('Num of gt buses : {}'.format(len(gt_label_ls[i][0])))
        # Keep gt boxes 2-D when there is a single annotation.
        if len(gt_label_ls[i][0]) > 1:
            a = gt_label_ls[i][0][:, 1:]
            gt_cls = gt_label_ls[i][0][:, 0]
        else:
            a = gt_label_ls[i][0][0][1:]
            gt_cls = gt_label_ls[i][0][:, 0]
        if len(y_pred_thresh[i]) == 0:
            print('No IOU')
            continue
        if len(y_pred_thresh[i]) > 1:
            b = y_pred_thresh[i][:, 2:]
            p_cls = y_pred_thresh[i][:, 0]
        else:
            b = y_pred_thresh[i][0][2:]
            p_cls = y_pred_thresh[i][:, 0]
        iou_score = iou(np.array(a), np.array(b), coords='corners', mode='outer_product', border_pixels='half')
        if len(iou_score) == 0:
            print('No IOU')
        else:
            for row in iou_score:
                print('IOU is {} '.format(max(row)))
print('gt class is {} predicted class is {}'.format(gt_cls, p_cls)) | [
"keras.optimizers.Adam",
"matplotlib.pyplot.imshow",
"keras_loss_function.keras_ssd_loss.SSDLoss",
"argparse.ArgumentParser",
"matplotlib.pyplot.gca",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"data_generator.object_detection_2d_photometric_ops.ConvertTo3Channels",
"keras.backend.c... | [((1238, 1263), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1261, 1263), False, 'import argparse\n'), ((5876, 5893), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5891, 5893), True, 'from keras import backend as K\n'), ((5970, 6453), 'models.keras_ssd512.ssd_512', 'ssd_512', ([], {'image_size': '(img_height, img_width, img_channels)', 'n_classes': 'n_classes', 'mode': '"""inference"""', 'l2_regularization': '(0.0005)', 'scales': 'scales', 'aspect_ratios_per_layer': 'aspect_ratios', 'two_boxes_for_ar1': 'two_boxes_for_ar1', 'steps': 'steps', 'offsets': 'offsets', 'clip_boxes': 'clip_boxes', 'variances': 'variances', 'normalize_coords': 'normalize_coords', 'subtract_mean': 'mean_color', 'swap_channels': 'swap_channels', 'confidence_thresh': '(0.5)', 'iou_threshold': '(0.45)', 'top_k': '(200)', 'nms_max_output_size': '(400)'}), "(image_size=(img_height, img_width, img_channels), n_classes=\n n_classes, mode='inference', l2_regularization=0.0005, scales=scales,\n aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=\n two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes,\n variances=variances, normalize_coords=normalize_coords, subtract_mean=\n mean_color, swap_channels=swap_channels, confidence_thresh=0.5,\n iou_threshold=0.45, top_k=200, nms_max_output_size=400)\n", (5977, 6453), False, 'from models.keras_ssd512 import ssd_512\n'), ((6860, 6926), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n', (6864, 6926), False, 'from keras.optimizers import Adam\n'), ((6939, 6974), 'keras_loss_function.keras_ssd_loss.SSDLoss', 'SSDLoss', ([], {'neg_pos_ratio': '(3)', 'alpha': '(1.0)'}), '(neg_pos_ratio=3, alpha=1.0)\n', (6946, 6974), False, 'from keras_loss_function.keras_ssd_loss import SSDLoss\n'), ((7243, 7311), 
'data_generator.object_detection_2d_data_generator.DataGenerator', 'DataGenerator', ([], {'load_images_into_memory': '(False)', 'hdf5_dataset_path': 'None'}), '(load_images_into_memory=False, hdf5_dataset_path=None)\n', (7256, 7311), False, 'from data_generator.object_detection_2d_data_generator import DataGenerator\n'), ((7707, 7775), 'data_generator.object_detection_2d_data_generator.DataGenerator', 'DataGenerator', ([], {'load_images_into_memory': '(False)', 'hdf5_dataset_path': 'None'}), '(load_images_into_memory=False, hdf5_dataset_path=None)\n', (7720, 7775), False, 'from data_generator.object_detection_2d_data_generator import DataGenerator\n'), ((8200, 8220), 'data_generator.object_detection_2d_photometric_ops.ConvertTo3Channels', 'ConvertTo3Channels', ([], {}), '()\n', (8218, 8220), False, 'from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\n'), ((8889, 9289), 'ssd_encoder_decoder.ssd_input_encoder.SSDInputEncoder', 'SSDInputEncoder', ([], {'img_height': 'img_height', 'img_width': 'img_width', 'n_classes': 'n_classes', 'predictor_sizes': 'predictor_sizes', 'scales': 'scales', 'aspect_ratios_per_layer': 'aspect_ratios', 'two_boxes_for_ar1': 'two_boxes_for_ar1', 'steps': 'steps', 'offsets': 'offsets', 'clip_boxes': 'clip_boxes', 'variances': 'variances', 'matching_type': '"""multi"""', 'pos_iou_threshold': '(0.5)', 'neg_iou_limit': '(0.5)', 'normalize_coords': 'normalize_coords'}), "(img_height=img_height, img_width=img_width, n_classes=\n n_classes, predictor_sizes=predictor_sizes, scales=scales,\n aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=\n two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes,\n variances=variances, matching_type='multi', pos_iou_threshold=0.5,\n neg_iou_limit=0.5, normalize_coords=normalize_coords)\n", (8904, 9289), False, 'from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\n'), ((11621, 11686), 'numpy.zeros', 'np.zeros', (['[val_dataset_size, img_height, 
img_width, img_channels]'], {}), '([val_dataset_size, img_height, img_width, img_channels])\n', (11629, 11686), True, 'import numpy as np\n'), ((3764, 3792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)'}), '(figsize=(20, 12))\n', (3774, 3792), True, 'from matplotlib import pyplot as plt\n'), ((3795, 3817), 'matplotlib.pyplot.imshow', 'plt.imshow', (['orig_image'], {}), '(orig_image)\n', (3805, 3817), True, 'from matplotlib import pyplot as plt\n'), ((3836, 3845), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3843, 3845), True, 'from matplotlib import pyplot as plt\n'), ((12788, 12799), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (12796, 12799), True, 'import numpy as np\n'), ((12801, 12812), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (12809, 12812), True, 'import numpy as np\n'), ((4008, 4104), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(xmin, ymin)', '(xmax - xmin)', '(ymax - ymin)'], {'color': 'color', 'fill': '(False)', 'linewidth': '(2)'}), '((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=\n False, linewidth=2)\n', (4021, 4104), True, 'from matplotlib import pyplot as plt\n'), ((4675, 4771), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(xmin, ymin)', '(xmax - xmin)', '(ymax - ymin)'], {'color': 'color', 'fill': '(False)', 'linewidth': '(2)'}), '((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=\n False, linewidth=2)\n', (4688, 4771), True, 'from matplotlib import pyplot as plt\n'), ((3651, 3672), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(21)'], {}), '(0, 1, 21)\n', (3662, 3672), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def plot_train_cnn(csv_path='results/vgg_cc_2.csv', out_path='doc/cnn.png'):
    """Plot train/validation accuracy curves of the CNN model and save a PNG.

    Args:
        csv_path: training-log CSV with 'epoch', 'train_acc' and 'valid_acc'
            columns (defaults preserve the original hard-coded path).
        out_path: path of the saved figure.
    """
    file = pd.read_csv(csv_path)
    epochs = np.array(file['epoch'])
    train_acc = np.array(file['train_acc'])
    valid_acc = np.array(file['valid_acc'])
    plt.title('Training process of CNN method')
    plt.xlabel('epoch number')
    plt.ylabel('accuracy')
    plt.plot(epochs, train_acc, 'b')  # blue: training accuracy
    plt.plot(epochs, valid_acc, 'r')  # red: validation accuracy
    # plt.show()
    plt.savefig(out_path)
def plot_train_dnn(csv_path='results/dnn.csv', out_path='doc/dnn.png'):
    """Plot train/validation accuracy curves of the DNN model and save a PNG.

    Args:
        csv_path: training-log CSV with 'epoch', 'train_acc' and 'valid_acc'
            columns (defaults preserve the original hard-coded path).
        out_path: path of the saved figure.
    """
    file = pd.read_csv(csv_path)
    epochs = np.array(file['epoch'])
    train_acc = np.array(file['train_acc'])
    valid_acc = np.array(file['valid_acc'])
    plt.title('Training process of DNN method')
    plt.xlabel('epoch number')
    plt.ylabel('accuracy')
    plt.plot(epochs, train_acc, 'b')  # blue: training accuracy
    plt.plot(epochs, valid_acc, 'r')  # red: validation accuracy
    # plt.show()
    plt.savefig(out_path)
def plot_cunfusion_matrix():
    """Plot the 7x7 confusion matrix on the validation split and save it.

    Compares ground-truth labels from ``../../data_hw3/train.csv`` against
    predictions from ``predictions/pred_train.csv``, using the last 20% of
    rows as the validation split, and writes the annotated matrix to
    ``doc/confusion.png``.  (The historical 'cunfusion' spelling is kept
    because the script entry point calls this name.)
    """
    train_file = pd.read_csv('../../data_hw3/train.csv')
    pred_file = pd.read_csv('predictions/pred_train.csv')
    ground_truth = np.array(train_file['label'])
    prediction = np.array(pred_file['label'])
    # Evaluate on the validation split: the last 20% of the samples.
    cut = int(0.8 * len(prediction))
    ground_truth = ground_truth[cut:]
    prediction = prediction[cut:]
    # confusion[i, j] = fraction of samples with true label i predicted as j.
    # NOTE: ``np.float`` was removed in NumPy 1.24; use the builtin float.
    confusion = np.zeros((7, 7), dtype=float)
    for label in range(7):
        pred = prediction[ground_truth == label]
        count = len(pred)
        if count == 0:
            # No validation samples for this class: leave the row at zero
            # instead of dividing by zero.
            continue
        for i in range(7):
            confusion[label, i] = np.sum(pred == i) / count
    fig, ax = plt.subplots()
    ax.matshow(confusion)
    for (i, j), value in np.ndenumerate(confusion):
        ax.text(j, i, '{:0.2f}'.format(value), ha='center', va='center')
    plt.savefig('doc/confusion.png')
# Script entry point: regenerate the confusion-matrix figure.
if __name__ == '__main__':
    print('- Plot -')
    plot_cunfusion_matrix() | [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.ndenumerate",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots"
] | [((110, 145), 'pandas.read_csv', 'pd.read_csv', (['"""results/vgg_cc_2.csv"""'], {}), "('results/vgg_cc_2.csv')\n", (121, 145), True, 'import pandas as pd\n'), ((159, 182), 'numpy.array', 'np.array', (["file['epoch']"], {}), "(file['epoch'])\n", (167, 182), True, 'import numpy as np\n'), ((199, 226), 'numpy.array', 'np.array', (["file['train_acc']"], {}), "(file['train_acc'])\n", (207, 226), True, 'import numpy as np\n'), ((243, 270), 'numpy.array', 'np.array', (["file['valid_acc']"], {}), "(file['valid_acc'])\n", (251, 270), True, 'import numpy as np\n'), ((280, 323), 'matplotlib.pyplot.title', 'plt.title', (['"""Training process of CNN method"""'], {}), "('Training process of CNN method')\n", (289, 323), True, 'from matplotlib import pyplot as plt\n'), ((328, 354), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch number"""'], {}), "('epoch number')\n", (338, 354), True, 'from matplotlib import pyplot as plt\n'), ((359, 381), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (369, 381), True, 'from matplotlib import pyplot as plt\n'), ((386, 418), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'train_acc', '"""b"""'], {}), "(epochs, train_acc, 'b')\n", (394, 418), True, 'from matplotlib import pyplot as plt\n'), ((423, 455), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'valid_acc', '"""r"""'], {}), "(epochs, valid_acc, 'r')\n", (431, 455), True, 'from matplotlib import pyplot as plt\n'), ((477, 503), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""doc/cnn.png"""'], {}), "('doc/cnn.png')\n", (488, 503), True, 'from matplotlib import pyplot as plt\n'), ((538, 568), 'pandas.read_csv', 'pd.read_csv', (['"""results/dnn.csv"""'], {}), "('results/dnn.csv')\n", (549, 568), True, 'import pandas as pd\n'), ((582, 605), 'numpy.array', 'np.array', (["file['epoch']"], {}), "(file['epoch'])\n", (590, 605), True, 'import numpy as np\n'), ((622, 649), 'numpy.array', 'np.array', (["file['train_acc']"], {}), 
"(file['train_acc'])\n", (630, 649), True, 'import numpy as np\n'), ((666, 693), 'numpy.array', 'np.array', (["file['valid_acc']"], {}), "(file['valid_acc'])\n", (674, 693), True, 'import numpy as np\n'), ((703, 746), 'matplotlib.pyplot.title', 'plt.title', (['"""Training process of DNN method"""'], {}), "('Training process of DNN method')\n", (712, 746), True, 'from matplotlib import pyplot as plt\n'), ((751, 777), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch number"""'], {}), "('epoch number')\n", (761, 777), True, 'from matplotlib import pyplot as plt\n'), ((782, 804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (792, 804), True, 'from matplotlib import pyplot as plt\n'), ((809, 841), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'train_acc', '"""b"""'], {}), "(epochs, train_acc, 'b')\n", (817, 841), True, 'from matplotlib import pyplot as plt\n'), ((846, 878), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'valid_acc', '"""r"""'], {}), "(epochs, valid_acc, 'r')\n", (854, 878), True, 'from matplotlib import pyplot as plt\n'), ((900, 926), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""doc/dnn.png"""'], {}), "('doc/dnn.png')\n", (911, 926), True, 'from matplotlib import pyplot as plt\n'), ((974, 1013), 'pandas.read_csv', 'pd.read_csv', (['"""../../data_hw3/train.csv"""'], {}), "('../../data_hw3/train.csv')\n", (985, 1013), True, 'import pandas as pd\n'), ((1030, 1071), 'pandas.read_csv', 'pd.read_csv', (['"""predictions/pred_train.csv"""'], {}), "('predictions/pred_train.csv')\n", (1041, 1071), True, 'import pandas as pd\n'), ((1091, 1120), 'numpy.array', 'np.array', (["train_file['label']"], {}), "(train_file['label'])\n", (1099, 1120), True, 'import numpy as np\n'), ((1138, 1166), 'numpy.array', 'np.array', (["pred_file['label']"], {}), "(pred_file['label'])\n", (1146, 1166), True, 'import numpy as np\n'), ((1307, 1339), 'numpy.zeros', 'np.zeros', (['(7, 7)'], {'dtype': 'np.float'}), '((7, 7), 
dtype=np.float)\n', (1315, 1339), True, 'import numpy as np\n'), ((1547, 1561), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1559, 1561), True, 'from matplotlib import pyplot as plt\n'), ((1613, 1638), 'numpy.ndenumerate', 'np.ndenumerate', (['confusion'], {}), '(confusion)\n', (1627, 1638), True, 'import numpy as np\n'), ((1736, 1768), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""doc/confusion.png"""'], {}), "('doc/confusion.png')\n", (1747, 1768), True, 'from matplotlib import pyplot as plt\n'), ((1504, 1521), 'numpy.sum', 'np.sum', (['(pred == i)'], {}), '(pred == i)\n', (1510, 1521), True, 'import numpy as np\n')] |
import os
from numpy import pi, seterr, linspace
from skyfield.api import load
from skyfield.constants import GM_SUN_Pitjeva_2005_km3_s2 as GM_SUN
from skyfield.data import mpc
from skyfield.elementslib import OsculatingElements
from skyfield.keplerlib import _KeplerOrbit as KeplerOrbit, propagate
from skyfield.tests.test_elementslib import compare, ele_to_vec
from skyfield.units import Angle, Distance, Velocity
# ``BytesIO`` lives in ``io`` on Python 3; fall back to the Python 2
# StringIO module when that import is unavailable.  Catch only
# ImportError rather than a bare ``except:`` so unrelated failures
# (KeyboardInterrupt, SystemExit, ...) are not swallowed.
try:
    from io import BytesIO
except ImportError:
    from StringIO import StringIO as BytesIO

# Promote all numpy floating-point warnings to exceptions so the tests
# fail loudly on overflow/invalid operations.
seterr(all='raise')
# Test against HORIZONS.
def test_against_horizons():
    """Propagate Ceres with _KeplerOrbit and check against JPL HORIZONS.

    See the following files in the Skyfield repository:

        horizons/ceres-orbital-elements
        horizons/ceres-position
    """
    ts = load.timescale(builtin=True)
    epoch = ts.tdb_jd(2458886.500000000)
    a = 2.768873850275102E+00  # A
    e = 7.705857791518426E-02  # EC
    semilatus_rectum_au = a * (1 - e * e)  # Wikipedia
    orbit = KeplerOrbit._from_mean_anomaly(
        semilatus_rectum_au=semilatus_rectum_au,
        eccentricity=e,
        inclination_degrees=2.718528770987308E+01,
        longitude_of_ascending_node_degrees=2.336112629072238E+01,
        argument_of_perihelion_degrees=1.328964361683606E+02,
        mean_anomaly_degrees=1.382501360489816E+02,
        epoch=epoch,
        gm_km3_s2=GM_SUN,
        center=None,
        target=None,
    )
    position, _velocity = orbit._at(epoch)[:2]
    sun_au = [
        -0.004105894975783999, 0.006739680703224941, 0.002956344702049446,
    ]
    horizons_au = [
        1.334875927366032E+00, -2.239607658161781E+00, -1.328895183461897E+00,
    ]
    tolerance = Distance(m=0.001).au
    assert abs(position + sun_au - horizons_au).max() < tolerance
def test_minor_planet():
    """Load Ceres from an MPCORB line and check its computed RA/dec."""
    text = (b'00001    3.4   0.15 K205V 162.68631   73.73161   80.28698'
            b'   10.58862  0.0775571  0.21406009   2.7676569  0 MPO492748'
            b'  6751 115 1801-2019 0.60 M-v 30h Williams   0000      '
            b'(1) Ceres              20190915\n')
    ts = load.timescale(builtin=True)
    t = ts.utc(2020, 6, 17)
    eph = load('de421.bsp')
    df = mpc.load_mpcorb_dataframe(BytesIO(text))
    row = df.iloc[0]
    assert row.designation_packed == '00001'
    assert row.designation == '(1) Ceres'
    ceres = mpc.mpcorb_orbit(row, ts, GM_SUN)
    assert ceres.target == '(1) Ceres'
    ra, dec, distance = eph['earth'].at(t).observe(eph['sun'] + ceres).radec()
    assert abs(ra.hours - 23.1437) < 0.00005
    assert abs(dec.degrees - -17.323) < 0.0005
def test_comet():
    """Load Hale-Bopp from an MPC comet line via both dataframe loaders
    and compare the computed RA/dec/distance with the recorded values."""
    text = (b'    CJ95O010  1997 03 29.6333  0.916241  0.994928  130.6448'
            b'  283.3593   88.9908  20200224  -2.0  4.0  C/1995 O1 (Hale-Bopp)'
            b'                                    MPC106342\n')
    ts = load.timescale(builtin=True)
    t = ts.utc(2020, 5, 31)
    eph = load('de421.bsp')
    earth = eph['earth'].at(t)
    # The file authorities/mpc-hale-bopp in the repository is the
    # source of these angles.  TODO: can we tighten this bound and
    # drive it to fractions of an arcsecond?
    ra_want = Angle(hours=(23, 59, 16.6))
    dec_want = Angle(degrees=(-84, 46, 58))
    for loader in mpc.load_comets_dataframe, mpc.load_comets_dataframe_slow:
        row = loader(BytesIO(text)).iloc[0]
        comet = mpc.comet_orbit(row, ts, GM_SUN)
        ra, dec, distance = earth.observe(eph['sun'] + comet).radec()
        assert abs(ra_want.arcseconds() - ra.arcseconds()) < 2.0
        assert abs(dec_want.arcseconds() - dec.arcseconds()) < 0.2
        assert abs(distance.au - 43.266) < 0.0005
        assert comet.target == 'C/1995 O1 (Hale-Bopp)'
# Test various round-trips through the kepler orbit object.
def _data_path(filename):
    """Return the path to *filename* inside this test module's data dir."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', filename)
def check_orbit(p, e, i, Om, w, v,
                p_eps=None, e_eps=None, i_eps=None, Om_eps=None, w_eps=None, v_eps=None):
    """Round-trip orbital elements through state vectors and back.

    Builds position/velocity vectors from the elements ``(p, e, i, Om,
    w, v)``, propagates them across the module-level ``times`` grid,
    recovers osculating elements from the propagated states, and
    compares each element whose tolerance argument was supplied.

    Tolerances are checked with ``is not None`` instead of truthiness,
    so an explicit zero tolerance is no longer silently skipped.
    """
    pos0, vel0 = ele_to_vec(p, e, i, Om, w, v, mu)
    pos1, vel1 = propagate(pos0, vel0, 0, times, mu)
    ele = OsculatingElements(Distance(km=pos1), Velocity(km_per_s=vel1), dummy_time, mu)
    if p_eps is not None:
        compare(p, ele.semi_latus_rectum.km, p_eps)
    if e_eps is not None:
        compare(e, ele.eccentricity, e_eps)
    if i_eps is not None:
        compare(i, ele.inclination.radians, i_eps, mod=True)
    if Om_eps is not None:
        compare(Om, ele.longitude_of_ascending_node.radians, Om_eps, mod=True)
    if w_eps is not None:
        compare(w, ele.argument_of_periapsis.radians, w_eps, mod=True)
    if v_eps is not None:
        compare(v, ele.true_anomaly.radians, v_eps, mod=True)
times = linspace(-1e11, 1e11, 1001)  # -3170 years to +3170 years, including 0
mu = 403503.2355022598  # gravitational parameter (km^3/s^2); presumably an arbitrary test value -- TODO confirm
dummy_time = load.timescale().utc(2018)  # epoch handed to OsculatingElements; the date itself looks arbitrary for these round-trips
# Round-trip checks: each test feeds one combination of conic type
# (circular / elliptical / parabolic / hyperbolic) and orientation
# (generic / equatorial / polar) through check_orbit, with per-element
# tolerances chosen for that regime.  Tolerances omitted below are
# elements that are undefined or numerically unstable for that conic
# (e.g. argument of periapsis for circular orbits).
def test_circular():
    check_orbit(p=300000, e=0, i=.5, Om=1, w=0, v=1,
                p_eps=1e-2, e_eps=1e-8, i_eps=1e-15, Om_eps=1e-15)
def test_circular_equatorial():
    check_orbit(p=300000, e=0, i=0, Om=0, w=0, v=1,
                p_eps=1e-2, e_eps=1e-8, i_eps=1e-15)
def test_circular_polar():
    check_orbit(p=300000, e=0, i=pi/2, Om=1, w=0, v=1,
                p_eps=1e-2, e_eps=1e-8, i_eps=1e-15, Om_eps=1e-15)
def test_elliptical():
    check_orbit(p=300000, e=.3, i=1, Om=0, w=4, v=5,
                p_eps=1e-2, e_eps=1e-8, i_eps=1e-15, Om_eps=1e-15, w_eps=1e-7)
def test_elliptical_equatorial():
    check_orbit(p=300000, e=.3, i=0, Om=0, w=1, v=5,
                p_eps=1e-2, e_eps=1e-8, i_eps=1e-15, Om_eps=1e-15, w_eps=1e-7)
def test_elliptical_polar():
    check_orbit(p=300000, e=.2, i=pi/2, Om=1, w=2, v=3,
                p_eps=1e-2, e_eps=1e-8, i_eps=1e-15, Om_eps=1e-15, w_eps=1e-8)
def test_parabolic():
    check_orbit(p=300000, e=1, i=1, Om=0, w=4, v=3,
                p_eps=1e-5, e_eps=1e-14, i_eps=1e-13, Om_eps=1e-13, w_eps=1e-13)
def test_parabolic_equatorial():
    check_orbit(p=300000, e=1, i=0, Om=0, w=1, v=2,
                p_eps=1e-5, e_eps=1e-14, i_eps=1e-15, Om_eps=1e-15, w_eps=1e-13)
def test_parabolic_polar():
    check_orbit(p=300000, e=1, i=pi/2, Om=1, w=2, v=3,
                p_eps=1e-5, e_eps=1e-14, i_eps=1e-14, Om_eps=1e-13, w_eps=1e-13)
def test_hyperbolic():
    check_orbit(p=300000, e=1.3, i=1, Om=0, w=4, v=.5,
                p_eps=1e0, e_eps=1e-6, i_eps=1e-10, Om_eps=1e-10, w_eps=1e-6)
def test_hyperbolic_equatorial():
    check_orbit(p=300000, e=1.3, i=0, Om=0, w=1, v=.5,
                p_eps=1e0, e_eps=1e-6, i_eps=1e-15, Om_eps=1e-15, w_eps=1e-6)
def test_hyperbolic_polar():
    check_orbit(p=300000, e=1.3, i=pi/2, Om=1, w=2, v=.5,
                p_eps=1e0, e_eps=1e-6, i_eps=1e-10, Om_eps=1e-10, w_eps=1e-6)
| [
"StringIO.StringIO",
"skyfield.data.mpc.mpcorb_orbit",
"skyfield.tests.test_elementslib.ele_to_vec",
"skyfield.api.load.timescale",
"numpy.linspace",
"skyfield.keplerlib.propagate",
"skyfield.units.Angle",
"os.path.dirname",
"skyfield.tests.test_elementslib.compare",
"skyfield.data.mpc.comet_orbit... | [((504, 523), 'numpy.seterr', 'seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (510, 523), False, 'from numpy import pi, seterr, linspace\n'), ((4476, 4523), 'numpy.linspace', 'linspace', (['(-100000000000.0)', '(100000000000.0)', '(1001)'], {}), '(-100000000000.0, 100000000000.0, 1001)\n', (4484, 4523), False, 'from numpy import pi, seterr, linspace\n'), ((722, 750), 'skyfield.api.load.timescale', 'load.timescale', ([], {'builtin': '(True)'}), '(builtin=True)\n', (736, 750), False, 'from skyfield.api import load\n'), ((906, 1232), 'skyfield.keplerlib._KeplerOrbit._from_mean_anomaly', 'KeplerOrbit._from_mean_anomaly', ([], {'semilatus_rectum_au': 'p_au', 'eccentricity': 'e', 'inclination_degrees': '(27.18528770987308)', 'longitude_of_ascending_node_degrees': '(23.36112629072238)', 'argument_of_perihelion_degrees': '(132.8964361683606)', 'mean_anomaly_degrees': '(138.2501360489816)', 'epoch': 't', 'gm_km3_s2': 'GM_SUN', 'center': 'None', 'target': 'None'}), '(semilatus_rectum_au=p_au, eccentricity=e,\n inclination_degrees=27.18528770987308,\n longitude_of_ascending_node_degrees=23.36112629072238,\n argument_of_perihelion_degrees=132.8964361683606, mean_anomaly_degrees=\n 138.2501360489816, epoch=t, gm_km3_s2=GM_SUN, center=None, target=None)\n', (936, 1232), True, 'from skyfield.keplerlib import _KeplerOrbit as KeplerOrbit, propagate\n'), ((1941, 1969), 'skyfield.api.load.timescale', 'load.timescale', ([], {'builtin': '(True)'}), '(builtin=True)\n', (1955, 1969), False, 'from skyfield.api import load\n'), ((2008, 2025), 'skyfield.api.load', 'load', (['"""de421.bsp"""'], {}), "('de421.bsp')\n", (2012, 2025), False, 'from skyfield.api import load\n'), ((2198, 2231), 'skyfield.data.mpc.mpcorb_orbit', 'mpc.mpcorb_orbit', (['row', 'ts', 'GM_SUN'], {}), '(row, ts, GM_SUN)\n', (2214, 2231), False, 'from skyfield.data import mpc\n'), ((2691, 2719), 'skyfield.api.load.timescale', 'load.timescale', ([], {'builtin': '(True)'}), 
'(builtin=True)\n', (2705, 2719), False, 'from skyfield.api import load\n'), ((2758, 2775), 'skyfield.api.load', 'load', (['"""de421.bsp"""'], {}), "('de421.bsp')\n", (2762, 2775), False, 'from skyfield.api import load\n'), ((3882, 3915), 'skyfield.tests.test_elementslib.ele_to_vec', 'ele_to_vec', (['p', 'e', 'i', 'Om', 'w', 'v', 'mu'], {}), '(p, e, i, Om, w, v, mu)\n', (3892, 3915), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((3934, 3969), 'skyfield.keplerlib.propagate', 'propagate', (['pos0', 'vel0', '(0)', 'times', 'mu'], {}), '(pos0, vel0, 0, times, mu)\n', (3943, 3969), False, 'from skyfield.keplerlib import _KeplerOrbit as KeplerOrbit, propagate\n'), ((1558, 1575), 'skyfield.units.Distance', 'Distance', ([], {'m': '(0.001)'}), '(m=0.001)\n', (1566, 1575), False, 'from skyfield.units import Angle, Distance, Velocity\n'), ((2061, 2074), 'StringIO.StringIO', 'BytesIO', (['text'], {}), '(text)\n', (2068, 2074), True, 'from StringIO import StringIO as BytesIO\n'), ((2953, 2985), 'skyfield.data.mpc.comet_orbit', 'mpc.comet_orbit', (['row', 'ts', 'GM_SUN'], {}), '(row, ts, GM_SUN)\n', (2968, 2985), False, 'from skyfield.data import mpc\n'), ((3272, 3299), 'skyfield.units.Angle', 'Angle', ([], {'hours': '(23, 59, 16.6)'}), '(hours=(23, 59, 16.6))\n', (3277, 3299), False, 'from skyfield.units import Angle, Distance, Velocity\n'), ((3319, 3347), 'skyfield.units.Angle', 'Angle', ([], {'degrees': '(-84, 46, 58)'}), '(degrees=(-84, 46, 58))\n', (3324, 3347), False, 'from skyfield.units import Angle, Distance, Velocity\n'), ((3694, 3719), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3709, 3719), False, 'import os\n'), ((3999, 4016), 'skyfield.units.Distance', 'Distance', ([], {'km': 'pos1'}), '(km=pos1)\n', (4007, 4016), False, 'from skyfield.units import Angle, Distance, Velocity\n'), ((4018, 4041), 'skyfield.units.Velocity', 'Velocity', ([], {'km_per_s': 'vel1'}), '(km_per_s=vel1)\n', (4026, 4041), False, 
'from skyfield.units import Angle, Distance, Velocity\n'), ((4074, 4117), 'skyfield.tests.test_elementslib.compare', 'compare', (['p', 'ele.semi_latus_rectum.km', 'p_eps'], {}), '(p, ele.semi_latus_rectum.km, p_eps)\n', (4081, 4117), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((4132, 4167), 'skyfield.tests.test_elementslib.compare', 'compare', (['e', 'ele.eccentricity', 'e_eps'], {}), '(e, ele.eccentricity, e_eps)\n', (4139, 4167), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((4182, 4234), 'skyfield.tests.test_elementslib.compare', 'compare', (['i', 'ele.inclination.radians', 'i_eps'], {'mod': '(True)'}), '(i, ele.inclination.radians, i_eps, mod=True)\n', (4189, 4234), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((4250, 4320), 'skyfield.tests.test_elementslib.compare', 'compare', (['Om', 'ele.longitude_of_ascending_node.radians', 'Om_eps'], {'mod': '(True)'}), '(Om, ele.longitude_of_ascending_node.radians, Om_eps, mod=True)\n', (4257, 4320), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((4335, 4397), 'skyfield.tests.test_elementslib.compare', 'compare', (['w', 'ele.argument_of_periapsis.radians', 'w_eps'], {'mod': '(True)'}), '(w, ele.argument_of_periapsis.radians, w_eps, mod=True)\n', (4342, 4397), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((4412, 4465), 'skyfield.tests.test_elementslib.compare', 'compare', (['v', 'ele.true_anomaly.radians', 'v_eps'], {'mod': '(True)'}), '(v, ele.true_anomaly.radians, v_eps, mod=True)\n', (4419, 4465), False, 'from skyfield.tests.test_elementslib import compare, ele_to_vec\n'), ((4582, 4598), 'skyfield.api.load.timescale', 'load.timescale', ([], {}), '()\n', (4596, 4598), False, 'from skyfield.api import load\n'), ((2901, 2914), 'StringIO.StringIO', 'BytesIO', (['text'], {}), '(text)\n', (2908, 2914), True, 'from StringIO import StringIO as BytesIO\n')] |
import numpy as np
from scipy.linalg import eigh
from scipy.special import binom
from scipy.integrate import quad
import matplotlib.pyplot as plt
import seaborn as sns
import sciunit
from networkunit.scores import to_precision
import matplotlib.mlab as mlab
from scipy.integrate import quad
import scipy.interpolate as interpolate
class eigenangle(sciunit.Score):
    """
    The eigenangle score evaluates whether two correlation matrices have
    similar non-random elements by calculating the significance of the angles
    between the corresponding eigenvectors.
    Either the binsize or the number of bins must be provided to perform the
    significance test.
    """
    score = np.nan

    @staticmethod
    def _resolve_bin_num(bin_num, binsize, t_start, t_stop):
        """Return bin_num, deriving it from binsize and the time window.

        Raises ValueError when neither bin_num nor the (binsize, t_start,
        t_stop) triple is given.
        """
        if bin_num is None:
            if binsize is not None \
                    and (t_start is not None and t_stop is not None):
                bin_num = float((t_stop - t_start) / binsize)
            else:
                raise ValueError('Too few parameters to compute bin_num!')
        return bin_num

    @classmethod
    def _weighted_smallness(cls, matrix_1, matrix_2):
        """Return the eigenvalue-weighted 'smallness' of the eigenangles.

        Both matrices are eigendecomposed; each eigenvector is sign-fixed
        (largest-magnitude component made positive) and normalized.  The
        angle between corresponding eigenvectors is mapped to a smallness
        1 - angle/(pi/2) and weighted by the quadratic mean of the two
        eigenvalues, with the weights normalized to sum to N.
        """
        N = len(matrix_1)
        EWs1, EVs1 = eigh(matrix_1)  # returns EWs in ascending order
        EWs2, EVs2 = eigh(matrix_2)
        # Reorder to descending eigenvalues, with eigenvectors as rows.
        EWs1 = EWs1[::-1]
        EWs2 = EWs2[::-1]
        EVs1 = EVs1.T[::-1]
        EVs2 = EVs2.T[::-1]
        for count, (ev1, ev2) in enumerate(zip(EVs1, EVs2)):
            EVs1[count] = ev1 * np.sign(ev1[np.argmax(np.absolute(ev1))])
            EVs2[count] = ev2 * np.sign(ev2[np.argmax(np.absolute(ev2))])
            EVs1[count] /= np.linalg.norm(ev1)
            EVs2[count] /= np.linalg.norm(ev2)
        M = np.dot(EVs1, EVs2.T)
        # Clip rounding noise above 1 element-wise so arccos stays defined.
        # (The previous ``M[np.argwhere(M > 1)] = 1.`` fancy-indexed whole
        # rows of M instead of the offending elements.)
        M[M > 1] = 1.
        if len(M) == 1:
            angles = np.arccos(M[0])
        else:
            angles = np.arccos(np.diag(M))
        weights = np.sqrt((EWs1 ** 2 + EWs2 ** 2) / 2.)
        smallness = 1 - angles / (np.pi / 2.)
        weights = weights / sum(weights) * N
        return smallness * weights

    @classmethod
    def compute(self, matrix_1, matrix_2, bin_num=None,
                binsize=None, t_start=None, t_stop=None, **kwargs):
        """Compute the eigenangle similarity score and its p-value.

        Returns an ``eigenangle`` instance carrying the score, the data
        size (N, N), and the p-value obtained by integrating the analytic
        null distribution from the observed score to infinity.
        """
        bin_num = self._resolve_bin_num(bin_num, binsize, t_start, t_stop)
        N = len(matrix_1)
        weighted_smallness = self._weighted_smallness(matrix_1, matrix_2)
        similarity_score = np.mean(weighted_smallness)
        pvalue = quad(self.null_distribution,
                      similarity_score, np.inf,
                      args=(N, bin_num))[0]
        self.score = eigenangle(similarity_score)
        self.score.data_size = (N, N)
        self.score.pvalue = pvalue
        return self.score

    @classmethod
    def null_distribution(self, eta, N, B, return_plotting_dist=False):
        """Analytic null distribution of the similarity score.

        ``N`` is the matrix dimension and ``B`` the number of time bins;
        the null model assumes random correlation matrices, so the
        eigenvalue weights follow a Marchenko-Pastur law with ratio
        alpha = B/N (requires B >= N).

        When ``return_plotting_dist`` is True, the density of a single
        weighted smallness value is returned instead (for plotting).
        """
        import math
        from scipy import integrate, stats

        # for weights ~ EW
        q = B / float(N)
        assert q >= 1

        def marchenko_pastur(x, alpha):
            # Eigenvalue density of a random (Wishart-like) correlation
            # matrix; zero outside the support [x_min, x_max].
            assert alpha >= 1
            x_min = (1 - np.sqrt(1. / alpha)) ** 2
            x_max = (1 + np.sqrt(1. / alpha)) ** 2
            if x <= x_min or x >= x_max:
                return 0.
            return alpha / (2 * np.pi * x) * np.sqrt((x_max - x) * (x - x_min))

        def weight_dist(x, alpha):
            # ToDo: add alternative distributions for e.g. asymmetric matrices
            # (was called with two arguments but defined with one, and the
            # Marchenko-Pastur name was misspelled 'merchenko_pastur').
            return marchenko_pastur(x, alpha)

        def angle_smallness_dist(D, N):
            # Density of 1 - angle/(pi/2) between random unit vectors in R^N.
            if -1 <= D <= 1:
                return math.gamma(N / 2.) / (np.sqrt(np.pi)
                        * math.gamma((N - 1) / 2)) \
                        * np.pi / 2 * np.cos(D * np.pi / 2) ** (N - 2)
            else:
                return 0

        def weighted_smallness_dist(D, N, alpha):
            # Density of (weight * smallness): marginalize over the weight.
            x_min = (1 - np.sqrt(1. / alpha)) ** 2
            x_max = (1 + np.sqrt(1. / alpha)) ** 2
            integrand = lambda x, _D, _N, _alpha: \
                angle_smallness_dist(_D / float(x), _N) \
                * weight_dist(x, _alpha) * 1. / x
            return integrate.quad(integrand, x_min, x_max,
                                  args=(D, N, alpha))[0]

        def similarity_score_distribution(eta, N, alpha):
            # CLT: the mean of N weighted smallness values is approximately
            # normal with variance var/N around zero.
            integrand = lambda x, N_, alpha_: \
                x ** 2 * weighted_smallness_dist(x, N_, alpha_)
            var = integrate.quad(integrand,
                                 -np.inf, np.inf,
                                 args=(N, alpha))[0]
            sigma = np.sqrt(var / N)
            return stats.norm.pdf(eta, 0, sigma)

        if return_plotting_dist:
            return weighted_smallness_dist
        else:
            return similarity_score_distribution(eta, N, q)

    @classmethod
    def plot(self, matrix_1, matrix_2, ax=None, bin_num=None, palette=None,
             binsize=None, t_start=None, t_stop=None, log=False, **kwargs):
        """Plot the histogram of weighted smallness values against the
        analytic null prediction; returns the matplotlib axis."""
        bin_num = self._resolve_bin_num(bin_num, binsize, t_start, t_stop)
        N = len(matrix_1)
        weighted_smallness = self._weighted_smallness(matrix_1, matrix_2)
        if palette is None:
            # Previously indexing ``palette[0]``/``palette[1]`` crashed when
            # no palette was passed; fall back to the seaborn default.
            palette = sns.color_palette()
        if ax is None:
            fig, ax = plt.subplots()
        # Stray '$' removed: the original raw string was a broken LaTeX
        # fragment and rendered literally.
        ax.set_xlabel('Weighted Angle-Smallness')
        edges = np.linspace(0, 1, 120)
        hist, _ = np.histogram(weighted_smallness, bins=edges, density=True)
        ax.bar(edges[:-1], hist, np.diff(edges)[0] * .99,
               color=palette[1], edgecolor='w')
        weighted_smallness_dist = self.null_distribution(
            eta=0, N=N, B=bin_num, return_plotting_dist=True)
        y = [weighted_smallness_dist(x, N=N, alpha=bin_num / N) for x in edges]
        norm = np.sum(y) * (edges[1] - edges[0])
        # The original plotted an undefined name ``x``; the abscissa is
        # the ``edges`` grid the densities were evaluated on.
        ax.plot(edges, np.array(y) / norm, color=palette[0],
                label='Prediction')
        ax.axvline(np.mean(weighted_smallness), color='k', ls='--',
                   label='Samples')
        ax.set_yticks([])
        plt.legend()
        sns.despine(left=True)
        if log:
            ax.set_yscale('log')
        return ax

    @property
    def sort_key(self):
        # Value sciunit uses to order scores.
        return self.score

    def __str__(self):
        return "\n\n\033[4mEigenangle Score\033[0m" \
               + "\n\tdatasize: {} x {}" \
                 .format(self.data_size[0], self.data_size[1]) \
               + "\n\tscore = {:.3f} \t pvalue = {}\n\n" \
                 .format(self.score, to_precision(self.pvalue, 3))
| [
"numpy.sqrt",
"numpy.arccos",
"numpy.array",
"numpy.linalg.norm",
"numpy.mean",
"numpy.histogram",
"seaborn.despine",
"numpy.diff",
"numpy.dot",
"numpy.linspace",
"networkunit.scores.to_precision",
"scipy.linalg.eigh",
"scipy.integrate.quad",
"numpy.isnan",
"numpy.cos",
"matplotlib.pyp... | [((1169, 1183), 'scipy.linalg.eigh', 'eigh', (['matrix_1'], {}), '(matrix_1)\n', (1173, 1183), False, 'from scipy.linalg import eigh\n'), ((1238, 1252), 'scipy.linalg.eigh', 'eigh', (['matrix_2'], {}), '(matrix_2)\n', (1242, 1252), False, 'from scipy.linalg import eigh\n'), ((1677, 1697), 'numpy.dot', 'np.dot', (['EVs1', 'EVs2.T'], {}), '(EVs1, EVs2.T)\n', (1683, 1697), True, 'import numpy as np\n'), ((1871, 1909), 'numpy.sqrt', 'np.sqrt', (['((EWs1 ** 2 + EWs2 ** 2) / 2.0)'], {}), '((EWs1 ** 2 + EWs2 ** 2) / 2.0)\n', (1878, 1909), True, 'import numpy as np\n'), ((2074, 2101), 'numpy.mean', 'np.mean', (['weighted_smallness'], {}), '(weighted_smallness)\n', (2081, 2101), True, 'import numpy as np\n'), ((4883, 4897), 'scipy.linalg.eigh', 'eigh', (['matrix_1'], {}), '(matrix_1)\n', (4887, 4897), False, 'from scipy.linalg import eigh\n'), ((4952, 4966), 'scipy.linalg.eigh', 'eigh', (['matrix_2'], {}), '(matrix_2)\n', (4956, 4966), False, 'from scipy.linalg import eigh\n'), ((5391, 5411), 'numpy.dot', 'np.dot', (['EVs1', 'EVs2.T'], {}), '(EVs1, EVs2.T)\n', (5397, 5411), True, 'import numpy as np\n'), ((5585, 5623), 'numpy.sqrt', 'np.sqrt', (['((EWs1 ** 2 + EWs2 ** 2) / 2.0)'], {}), '((EWs1 ** 2 + EWs2 ** 2) / 2.0)\n', (5592, 5623), True, 'import numpy as np\n'), ((5788, 5815), 'numpy.mean', 'np.mean', (['weighted_smallness'], {}), '(weighted_smallness)\n', (5795, 5815), True, 'import numpy as np\n'), ((5954, 5976), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(120)'], {}), '(0, 1, 120)\n', (5965, 5976), True, 'import numpy as np\n'), ((5995, 6053), 'numpy.histogram', 'np.histogram', (['weighted_smallness'], {'bins': 'edges', 'density': '(True)'}), '(weighted_smallness, bins=edges, density=True)\n', (6007, 6053), True, 'import numpy as np\n'), ((6681, 6693), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6691, 6693), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6724), 'seaborn.despine', 'sns.despine', ([], {'left': 
'(True)'}), '(left=True)\n', (6713, 6724), True, 'import seaborn as sns\n'), ((1597, 1616), 'numpy.linalg.norm', 'np.linalg.norm', (['ev1'], {}), '(ev1)\n', (1611, 1616), True, 'import numpy as np\n'), ((1644, 1663), 'numpy.linalg.norm', 'np.linalg.norm', (['ev2'], {}), '(ev2)\n', (1658, 1663), True, 'import numpy as np\n'), ((1708, 1726), 'numpy.argwhere', 'np.argwhere', (['(M > 1)'], {}), '(M > 1)\n', (1719, 1726), True, 'import numpy as np\n'), ((1779, 1794), 'numpy.arccos', 'np.arccos', (['M[0]'], {}), '(M[0])\n', (1788, 1794), True, 'import numpy as np\n'), ((2120, 2193), 'scipy.integrate.quad', 'quad', (['self.null_distribution', 'similarity_score', 'np.inf'], {'args': '(N, bin_num)'}), '(self.null_distribution, similarity_score, np.inf, args=(N, bin_num))\n', (2124, 2193), False, 'from scipy.integrate import quad\n'), ((2821, 2832), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (2829, 2832), True, 'import numpy as np\n'), ((4161, 4177), 'numpy.sqrt', 'np.sqrt', (['(var / N)'], {}), '(var / N)\n', (4168, 4177), True, 'import numpy as np\n'), ((5311, 5330), 'numpy.linalg.norm', 'np.linalg.norm', (['ev1'], {}), '(ev1)\n', (5325, 5330), True, 'import numpy as np\n'), ((5358, 5377), 'numpy.linalg.norm', 'np.linalg.norm', (['ev2'], {}), '(ev2)\n', (5372, 5377), True, 'import numpy as np\n'), ((5422, 5440), 'numpy.argwhere', 'np.argwhere', (['(M > 1)'], {}), '(M > 1)\n', (5433, 5440), True, 'import numpy as np\n'), ((5493, 5508), 'numpy.arccos', 'np.arccos', (['M[0]'], {}), '(M[0])\n', (5502, 5508), True, 'import numpy as np\n'), ((5870, 5884), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5882, 5884), True, 'import matplotlib.pyplot as plt\n'), ((6415, 6424), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (6421, 6424), True, 'import numpy as np\n'), ((6561, 6588), 'numpy.mean', 'np.mean', (['weighted_smallness'], {}), '(weighted_smallness)\n', (6568, 6588), True, 'import numpy as np\n'), ((1840, 1850), 'numpy.diag', 'np.diag', (['M'], {}), 
'(M)\n', (1847, 1850), True, 'import numpy as np\n'), ((2771, 2805), 'numpy.sqrt', 'np.sqrt', (['((x_max - x) * (x - x_min))'], {}), '((x_max - x) * (x - x_min))\n', (2778, 2805), True, 'import numpy as np\n'), ((5554, 5564), 'numpy.diag', 'np.diag', (['M'], {}), '(M)\n', (5561, 5564), True, 'import numpy as np\n'), ((6468, 6479), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (6476, 6479), True, 'import numpy as np\n'), ((7131, 7159), 'networkunit.scores.to_precision', 'to_precision', (['self.pvalue', '(3)'], {}), '(self.pvalue, 3)\n', (7143, 7159), False, 'from networkunit.scores import to_precision\n'), ((2652, 2672), 'numpy.sqrt', 'np.sqrt', (['(1.0 / alpha)'], {}), '(1.0 / alpha)\n', (2659, 2672), True, 'import numpy as np\n'), ((2703, 2723), 'numpy.sqrt', 'np.sqrt', (['(1.0 / alpha)'], {}), '(1.0 / alpha)\n', (2710, 2723), True, 'import numpy as np\n'), ((3408, 3428), 'numpy.sqrt', 'np.sqrt', (['(1.0 / alpha)'], {}), '(1.0 / alpha)\n', (3415, 3428), True, 'import numpy as np\n'), ((3459, 3479), 'numpy.sqrt', 'np.sqrt', (['(1.0 / alpha)'], {}), '(1.0 / alpha)\n', (3466, 3479), True, 'import numpy as np\n'), ((6088, 6102), 'numpy.diff', 'np.diff', (['edges'], {}), '(edges)\n', (6095, 6102), True, 'import numpy as np\n'), ((3264, 3285), 'numpy.cos', 'np.cos', (['(D * np.pi / 2)'], {}), '(D * np.pi / 2)\n', (3270, 3285), True, 'import numpy as np\n'), ((1476, 1492), 'numpy.absolute', 'np.absolute', (['ev1'], {}), '(ev1)\n', (1487, 1492), True, 'import numpy as np\n'), ((1550, 1566), 'numpy.absolute', 'np.absolute', (['ev2'], {}), '(ev2)\n', (1561, 1566), True, 'import numpy as np\n'), ((5190, 5206), 'numpy.absolute', 'np.absolute', (['ev1'], {}), '(ev1)\n', (5201, 5206), True, 'import numpy as np\n'), ((5264, 5280), 'numpy.absolute', 'np.absolute', (['ev2'], {}), '(ev2)\n', (5275, 5280), True, 'import numpy as np\n'), ((3168, 3182), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (3175, 3182), True, 'import numpy as np\n')] |
import itertools
import dask.dataframe as dd
import dask.dataframe.groupby as ddgb
import numpy as np
import pandas
import toolz
from pandas import isnull
import ibis
import ibis.expr.operations as ops
from ibis.backends.pandas.core import integer_types, scalar_types
from ibis.backends.pandas.execution.strings import (
execute_series_join_scalar_sep,
execute_series_regex_extract,
execute_series_regex_replace,
execute_series_regex_search,
execute_series_right,
execute_series_translate_scalar_scalar,
execute_series_translate_scalar_series,
execute_series_translate_series_scalar,
execute_series_translate_series_series,
execute_string_capitalize,
execute_string_contains,
execute_string_length_series,
execute_string_like_series_string,
execute_string_lower,
execute_string_lpad,
execute_string_lstrip,
execute_string_repeat,
execute_string_reverse,
execute_string_rpad,
execute_string_rstrip,
execute_string_strip,
execute_string_upper,
execute_substring_int_int,
haystack_to_series_of_lists,
)
from ..dispatch import execute_node
from .util import (
TypeRegistrationDict,
make_selected_obj,
register_types_to_dispatcher,
)
# Maps ibis string operations to the pandas-backend execution functions,
# keyed by the argument-type signatures they dispatch on.  Each value is
# a list of ((arg_type, ...), function) pairs; ``register_types_to_dispatcher``
# below registers every pair on the ``execute_node`` multiple dispatcher,
# reusing the pandas implementations for dask Series.
DASK_DISPATCH_TYPES: TypeRegistrationDict = {
    ops.StringLength: [((dd.Series,), execute_string_length_series)],
    ops.Substring: [
        (
            (
                dd.Series,
                integer_types,
                integer_types,
            ),
            execute_substring_int_int,
        ),
    ],
    ops.Strip: [((dd.Series,), execute_string_strip)],
    ops.LStrip: [((dd.Series,), execute_string_lstrip)],
    ops.RStrip: [((dd.Series,), execute_string_rstrip)],
    ops.LPad: [
        (
            (
                dd.Series,
                (dd.Series,) + integer_types,
                (dd.Series, str),
            ),
            execute_string_lpad,
        ),
    ],
    ops.RPad: [
        (
            (
                dd.Series,
                (dd.Series,) + integer_types,
                (dd.Series, str),
            ),
            execute_string_rpad,
        ),
    ],
    ops.Reverse: [((dd.Series,), execute_string_reverse)],
    ops.Lowercase: [((dd.Series,), execute_string_lower)],
    ops.Uppercase: [((dd.Series,), execute_string_upper)],
    ops.Capitalize: [((dd.Series,), execute_string_capitalize)],
    ops.Repeat: [
        ((dd.Series, (dd.Series,) + integer_types), execute_string_repeat),
    ],
    ops.StringFind: [
        (
            (
                dd.Series,
                (dd.Series, str),
                (dd.Series, type(None)) + integer_types,
                (dd.Series, type(None)) + integer_types,
            ),
            execute_string_contains,
        )
    ],
    ops.StringSQLLike: [
        (
            (
                dd.Series,
                str,
                (str, type(None)),
            ),
            execute_string_like_series_string,
        ),
    ],
    ops.RegexSearch: [
        (
            (
                dd.Series,
                str,
            ),
            execute_series_regex_search,
        )
    ],
    ops.RegexExtract: [
        (
            (dd.Series, (dd.Series, str), integer_types),
            execute_series_regex_extract,
        ),
    ],
    ops.RegexReplace: [
        (
            (
                dd.Series,
                str,
                str,
            ),
            execute_series_regex_replace,
        ),
    ],
    ops.Translate: [
        (
            (dd.Series, dd.Series, dd.Series),
            execute_series_translate_series_series,
        ),
        ((dd.Series, dd.Series, str), execute_series_translate_series_scalar),
        ((dd.Series, str, dd.Series), execute_series_translate_scalar_series),
        ((dd.Series, str, str), execute_series_translate_scalar_scalar),
    ],
    ops.StrRight: [((dd.Series, integer_types), execute_series_right)],
    ops.StringJoin: [
        (((dd.Series, str), list), execute_series_join_scalar_sep),
    ],
}
register_types_to_dispatcher(execute_node, DASK_DISPATCH_TYPES)
@execute_node.register(ops.Substring, dd.Series, dd.Series, integer_types)
def execute_substring_series_int(op, data, start, length, **kwargs):
    """Substring where ``start`` is a series and ``length`` a scalar.

    Broadcasts the scalar length to a dask series so the series/series
    implementation can be reused unchanged.
    """
    broadcast_length = dd.from_array(np.repeat(length, len(start)))
    return execute_substring_series_series(
        op, data, start, broadcast_length, **kwargs
    )
@execute_node.register(ops.Substring, dd.Series, integer_types, dd.Series)
def execute_string_substring_int_series(op, data, start, length, **kwargs):
    """Substring where ``start`` is a scalar and ``length`` a series.

    Broadcasts the scalar start to a dask series so the series/series
    implementation can be reused unchanged.
    """
    broadcast_start = dd.from_array(np.repeat(start, len(length)))
    return execute_substring_series_series(
        op, data, broadcast_start, length, **kwargs
    )
# TODO - substring - #2553
@execute_node.register(ops.Substring, dd.Series, dd.Series, dd.Series)
def execute_substring_series_series(op, data, start, length, **kwargs):
    """Compute ``value[start:start + length]`` row-wise over ``data``.

    NOTE(review): ``iterate`` keeps one-shot iterators over ``start`` and
    ``end`` in its default arguments, so correctness depends on ``map``
    visiting rows exactly once and in order — hence the "broken" TODO below.
    Left byte-identical because a safe rewrite changes evaluation semantics.
    """
    end = start + length
    # TODO - this is broken
    def iterate(
        value,
        # shared iterator state lives in the defaults; advanced once per call
        start_iter=start.iteritems(),
        end_iter=end.iteritems(),
    ):
        _, begin = next(start_iter)
        _, end = next(end_iter)
        # a null start or end bound yields a null result for this row
        if (begin is not None and isnull(begin)) or (
            end is not None and isnull(end)
        ):
            return None
        return value[begin:end]
    return data.map(iterate)
@execute_node.register(ops.StringSQLLike, ddgb.SeriesGroupBy, str, str)
def execute_string_like_series_groupby_string(
    op, data, pattern, escape, **kwargs
):
    """Evaluate a SQL LIKE pattern on a grouped series, regrouping the result."""
    ungrouped = make_selected_obj(data)
    like_result = execute_string_like_series_string(
        op, ungrouped, pattern, escape, **kwargs
    )
    return like_result.groupby(data.grouper.groupings)
# TODO - aggregations - #2553
@execute_node.register(
    ops.GroupConcat, dd.Series, str, (dd.Series, type(None))
)
def execute_group_concat_series_mask(
    op, data, sep, mask, aggcontext=None, **kwargs
):
    """Concatenate the (optionally masked) string values of ``data`` with ``sep``."""
    source = data if mask is None else data[mask]

    def join_values(series, sep=sep):
        return sep.join(series.values)

    return aggcontext.agg(source, join_values)
@execute_node.register(ops.GroupConcat, ddgb.SeriesGroupBy, str, type(None))
def execute_group_concat_series_gb(
    op, data, sep, _, aggcontext=None, **kwargs
):
    """Group-wise string concatenation implemented as a custom dask aggregation."""
    def collect(chunk):
        # chunk step: gather each partition's group values into lists
        return chunk.apply(list)

    def merge(collected, sep=sep):
        # agg step: flatten the per-partition lists and join with the separator
        return collected.apply(
            lambda pieces: sep.join(
                str(value) for value in itertools.chain.from_iterable(pieces)
            )
        )

    concat_agg = dd.Aggregation(
        name='custom_group_concat', chunk=collect, agg=merge
    )
    return data.agg(concat_agg)
# TODO - aggregations - #2553
@execute_node.register(
    ops.GroupConcat, ddgb.SeriesGroupBy, str, ddgb.SeriesGroupBy
)
def execute_group_concat_series_gb_mask(
    op, data, sep, mask, aggcontext=None, **kwargs
):
    """Group-wise string concatenation with a boolean mask applied per group."""
    def join_as_str(series, sep=sep):
        return sep.join(series.values.astype(str))

    def masked_join(grouped, mask=mask.obj, join=join_as_str):
        # restrict each group to the rows the mask selects, then join
        return join(grouped[mask[grouped.index]])

    return aggcontext.agg(data, masked_join)
@execute_node.register(ops.StringAscii, dd.Series)
def execute_string_ascii(op, data, **kwargs):
    """Map each single-character string to its ordinal code point (int32)."""
    meta = pandas.Series([], dtype=np.dtype('int32'), name=data.name)
    return data.map(ord, meta=meta)
@execute_node.register(ops.StringAscii, ddgb.SeriesGroupBy)
def execute_string_ascii_group_by(op, data, **kwargs):
    """StringAscii over a grouped series: compute ungrouped, then regroup."""
    ascii_series = execute_string_ascii(op, make_selected_obj(data), **kwargs)
    return ascii_series.groupby(data.index)
@execute_node.register(ops.RegexSearch, ddgb.SeriesGroupBy, str)
def execute_series_regex_search_gb(op, data, pattern, **kwargs):
    """RegexSearch over a grouped series: search ungrouped, then regroup."""
    raw_pattern = getattr(pattern, 'obj', pattern)
    searched = execute_series_regex_search(
        op, make_selected_obj(data), raw_pattern, **kwargs
    )
    return searched.groupby(data.index)
@execute_node.register(
    ops.RegexExtract, ddgb.SeriesGroupBy, str, integer_types
)
def execute_series_regex_extract_gb(op, data, pattern, index, **kwargs):
    """RegexExtract over a grouped series: extract ungrouped, then regroup."""
    extracted = execute_series_regex_extract(
        op, make_selected_obj(data), pattern, index, **kwargs
    )
    return extracted.groupby(data.index)
@execute_node.register(ops.RegexReplace, ddgb.SeriesGroupBy, str, str)
def execute_series_regex_replace_gb(op, data, pattern, replacement, **kwargs):
    """RegexReplace over a grouped series: replace ungrouped, then regroup.

    Fix: ``op`` was previously not forwarded to
    ``execute_series_regex_replace``, shifting every argument one position to
    the left (the series landed in the ``op`` slot).  All sibling ``*_gb``
    wrappers in this module pass ``op`` first.
    """
    return execute_series_regex_replace(
        op, make_selected_obj(data), pattern, replacement, **kwargs
    ).groupby(data.index)
@execute_node.register(ops.StrRight, ddgb.SeriesGroupBy, integer_types)
def execute_series_right_gb(op, data, nchars, **kwargs):
    """StrRight over a grouped series: slice ungrouped, then regroup."""
    right_result = execute_series_right(op, make_selected_obj(data), nchars)
    return right_result.groupby(data.index)
def haystack_to_dask_series_of_lists(haystack, index=None):
    """Build a single-partition dask series of lists from ``haystack``."""
    pandas_pieces = haystack_to_series_of_lists(haystack, index)
    return dd.from_pandas(pandas_pieces, npartitions=1)
@execute_node.register(ops.FindInSet, dd.Series, list)
def execute_series_find_in_set(op, needle, haystack, **kwargs):
    """Return, per element of ``needle``, its position within ``haystack``."""
    def position_in_haystack(index, elements):
        return ibis.util.safe_index(elements, index)

    return needle.apply(position_in_haystack, args=(haystack,))
@execute_node.register(ops.FindInSet, ddgb.SeriesGroupBy, list)
def execute_series_group_by_find_in_set(op, needle, haystack, **kwargs):
    """FindInSet over a grouped needle: unwrap group-bys, compute, regroup."""
    unwrapped = [getattr(piece, 'obj', piece) for piece in haystack]
    found = execute_series_find_in_set(
        op, make_selected_obj(needle), unwrapped, **kwargs
    )
    return found.groupby(needle.index)
# TODO we need this version not pandas
@execute_node.register(ops.FindInSet, scalar_types, list)
def execute_string_group_by_find_in_set(op, needle, haystack, **kwargs):
    """Find the position of scalar ``needle`` inside ``haystack``.

    ``haystack`` may hold scalars, dask Series, or SeriesGroupBys — but never
    a mix of the latter two.  When group-bys are present, the result is
    regrouped using the first grouper found.
    """
    # `list` could contain series, series groupbys, or scalars
    # mixing series and series groupbys is not allowed
    series_in_haystack = [
        type(piece)
        for piece in haystack
        if isinstance(piece, (dd.Series, ddgb.SeriesGroupBy))
    ]
    if not series_in_haystack:
        # pure-scalar haystack: plain positional lookup
        return ibis.util.safe_index(haystack, needle)
    try:
        # unpacking fails (ValueError) iff both collection types are present
        (collection_type,) = frozenset(map(type, series_in_haystack))
    except ValueError:
        raise ValueError('Mixing Series and ddgb.SeriesGroupBy is not allowed')
    pieces = haystack_to_dask_series_of_lists(
        [getattr(piece, 'obj', piece) for piece in haystack]
    )
    # per row: index of `needle` in that row's candidate list
    result = pieces.map(toolz.flip(ibis.util.safe_index)(needle))
    if issubclass(collection_type, dd.Series):
        return result
    assert issubclass(collection_type, ddgb.SeriesGroupBy)
    return result.groupby(
        toolz.first(
            piece.grouper.groupings
            for piece in haystack
            if hasattr(piece, 'grouper')
        )
    )
| [
"pandas.isnull",
"toolz.flip",
"dask.dataframe.from_pandas",
"ibis.backends.pandas.execution.strings.haystack_to_series_of_lists",
"itertools.chain.from_iterable",
"numpy.dtype",
"ibis.util.safe_index"
] | [((8561, 8605), 'ibis.backends.pandas.execution.strings.haystack_to_series_of_lists', 'haystack_to_series_of_lists', (['haystack', 'index'], {}), '(haystack, index)\n', (8588, 8605), False, 'from ibis.backends.pandas.execution.strings import execute_series_join_scalar_sep, execute_series_regex_extract, execute_series_regex_replace, execute_series_regex_search, execute_series_right, execute_series_translate_scalar_scalar, execute_series_translate_scalar_series, execute_series_translate_series_scalar, execute_series_translate_series_series, execute_string_capitalize, execute_string_contains, execute_string_length_series, execute_string_like_series_string, execute_string_lower, execute_string_lpad, execute_string_lstrip, execute_string_repeat, execute_string_reverse, execute_string_rpad, execute_string_rstrip, execute_string_strip, execute_string_upper, execute_substring_int_int, haystack_to_series_of_lists\n'), ((8617, 8654), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['pieces'], {'npartitions': '(1)'}), '(pieces, npartitions=1)\n', (8631, 8654), True, 'import dask.dataframe as dd\n'), ((8829, 8866), 'ibis.util.safe_index', 'ibis.util.safe_index', (['elements', 'index'], {}), '(elements, index)\n', (8849, 8866), False, 'import ibis\n'), ((9733, 9771), 'ibis.util.safe_index', 'ibis.util.safe_index', (['haystack', 'needle'], {}), '(haystack, needle)\n', (9753, 9771), False, 'import ibis\n'), ((7075, 7092), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (7083, 7092), True, 'import numpy as np\n'), ((10095, 10127), 'toolz.flip', 'toolz.flip', (['ibis.util.safe_index'], {}), '(ibis.util.safe_index)\n', (10105, 10127), False, 'import toolz\n'), ((5161, 5174), 'pandas.isnull', 'isnull', (['begin'], {}), '(begin)\n', (5167, 5174), False, 'from pandas import isnull\n'), ((5213, 5224), 'pandas.isnull', 'isnull', (['end'], {}), '(end)\n', (5219, 5224), False, 'from pandas import isnull\n'), ((6369, 6406), 'itertools.chain.from_iterable', 
'itertools.chain.from_iterable', (['chunks'], {}), '(chunks)\n', (6398, 6406), False, 'import itertools\n')] |
import numpy as np
import pandas as pd
def normalize(x):
    """Standardize each column of ``x`` to zero mean and unit variance.

    Args:
        x: 2-D array of shape (n_samples, n_features).

    Returns:
        numpy.ndarray where each column is centered by its mean and scaled by
        its (population) standard deviation.
    """
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    # Vectorized broadcasting replaces the original per-row
    # np.apply_along_axis(lambda row: (row - mean) / std, 1, x)
    # call — identical result, one pass in C.
    return (x - mean) / std
def category_to_discretevalues(Y):
    """Encode categorical labels as consecutive integers in first-seen order.

    Returns a tuple of (numpy array of integer labels,
    dict mapping each category to its integer label).
    """
    classes = {}
    encoded = []
    for item in Y:
        if item not in classes:
            # next unused label == number of categories seen so far
            classes[item] = len(classes)
        encoded.append(classes[item])
    return np.array(encoded), classes
def read_files(x_file, y_file, sep=','):
    """Load feature/label CSVs from ``dataset/``, normalize X and encode Y.

    Args:
        x_file: feature CSV filename (relative to ``dataset/``).
        y_file: label CSV filename (relative to ``dataset/``).
        sep: column separator.

    Returns:
        (X, Y, xlim, ylim) for numeric labels, or
        (X, Y, xlim, ylim, cls_labels) when labels are strings; X has a bias
        column of ones prepended, and xlim/ylim pad the data range by one std.
    """
    X = pd.read_csv("dataset/" + x_file, header=None, sep=sep)
    # .values instead of .as_matrix(), which was removed in pandas 1.0
    X = normalize(X.values)
    flat = X.flatten()
    std = np.std(flat)
    # plot limits padded by one standard deviation (computed before the bias
    # column is added, matching the original behavior)
    xlim = (flat.min() - std, flat.max() + std)
    X = np.insert(X, 0, 1.0, axis=1)
    Y = pd.read_csv("dataset/" + y_file, header=None, sep=sep).values.flatten()
    cls_labels = None
    if isinstance(Y[0], str):
        # string labels -> integer encoding plus the label mapping
        Y, cls_labels = category_to_discretevalues(Y)
    std = np.std(Y)
    ylim = (Y.min() - std, Y.max() + std)
    if cls_labels is not None:
        return X, Y, xlim, ylim, cls_labels
    return X, Y, xlim, ylim
| [
"numpy.insert",
"numpy.mean",
"pandas.read_csv",
"numpy.argmax",
"numpy.array",
"numpy.apply_along_axis",
"numpy.std",
"numpy.argmin"
] | [((70, 88), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (77, 88), True, 'import numpy as np\n'), ((99, 116), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (105, 116), True, 'import numpy as np\n'), ((128, 181), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: (x - mean) / std)', '(1)', 'x'], {}), '(lambda x: (x - mean) / std, 1, x)\n', (147, 181), True, 'import numpy as np\n'), ((485, 539), 'pandas.read_csv', 'pd.read_csv', (["('dataset/' + x_file)"], {'header': 'None', 'sep': 'sep'}), "('dataset/' + x_file, header=None, sep=sep)\n", (496, 539), True, 'import pandas as pd\n'), ((616, 628), 'numpy.std', 'np.std', (['temp'], {}), '(temp)\n', (622, 628), True, 'import numpy as np\n'), ((707, 735), 'numpy.insert', 'np.insert', (['X', '(0)', '(1.0)'], {'axis': '(1)'}), '(X, 0, 1.0, axis=1)\n', (716, 735), True, 'import numpy as np\n'), ((745, 799), 'pandas.read_csv', 'pd.read_csv', (["('dataset/' + y_file)"], {'header': 'None', 'sep': 'sep'}), "('dataset/' + y_file, header=None, sep=sep)\n", (756, 799), True, 'import pandas as pd\n'), ((1060, 1069), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (1066, 1069), True, 'import numpy as np\n'), ((413, 424), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (421, 424), True, 'import numpy as np\n'), ((933, 942), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (939, 942), True, 'import numpy as np\n'), ((646, 661), 'numpy.argmin', 'np.argmin', (['temp'], {}), '(temp)\n', (655, 661), True, 'import numpy as np\n'), ((675, 690), 'numpy.argmax', 'np.argmax', (['temp'], {}), '(temp)\n', (684, 690), True, 'import numpy as np\n'), ((1085, 1097), 'numpy.argmin', 'np.argmin', (['Y'], {}), '(Y)\n', (1094, 1097), True, 'import numpy as np\n'), ((1108, 1120), 'numpy.argmax', 'np.argmax', (['Y'], {}), '(Y)\n', (1117, 1120), True, 'import numpy as np\n'), ((961, 973), 'numpy.argmin', 'np.argmin', (['Y'], {}), '(Y)\n', (970, 973), True, 'import numpy as np\n'), ((984, 996), 
'numpy.argmax', 'np.argmax', (['Y'], {}), '(Y)\n', (993, 996), True, 'import numpy as np\n')] |
import torch
import numpy as np
from tqdm import tqdm
import time
from PIL import Image
import os
from im2mesh.common import (
arange_pixels, transform_to_camera_space)
class Renderer(object):
    ''' Render class for DVR.
    It provides functions to render the representation.
    Args:
        model (nn.Module): trained DVR model
        threshold (float): threshold value
        device (device): pytorch device
        colors (string): which type of color to use (default: rgb)
        resolution (tuple): output resolution
        n_views (int): number of views to generate
        extension (string): output image extension
        background (string): which background color to use
        ray_sampling_accuracy (tuple): how many evaluations should be
            performed on the ray
        n_start_view (int): at which item in the batch the rendering
            process should be started
        points_batch_size (int): number of vertices evaluated per chunk in
            estimate_colors
    '''

    def __init__(self, model, threshold=0.5, device=None, colors='rgb',
                 resolution=(128, 128), n_views=3, extension='png',
                 background='white', ray_sampling_accuracy=[1024, 1025],
                 n_start_view=0, points_batch_size=100000):
        self.model = model.to(device)
        self.threshold = threshold
        self.device = device
        self.colors = colors
        self.n_views = n_views
        self.extension = extension
        self.resolution = resolution
        self.sampling_accuracy = ray_sampling_accuracy
        self.n_start_view = n_start_view
        # Fix: estimate_colors referenced self.points_batch_size, but it was
        # never assigned anywhere, raising AttributeError on first use.
        self.points_batch_size = points_batch_size
        if background == 'white':
            self.background = 1.
        elif background == 'black':
            self.background = 0.
        else:
            # unknown background string: fall back to black
            self.background = 0.

    def render_and_export(self, data, img_out_path, modelname='model0',
                          return_stats=True):
        ''' Renders and exports for provided camera information in data.
        Args:
            data (tensor): data tensor
            img_out_path (string): output path
            modelname (string): name of the model
            return_stats (bool): whether stats should be returned
        '''
        self.model.eval()
        device = self.device
        stats_dict = {}
        inputs = data.get('inputs', torch.empty(1, 0)).to(device)
        with torch.no_grad():
            c = self.model.encode_inputs(inputs)
        if not os.path.exists(img_out_path):
            os.makedirs(img_out_path)
        out_imgs = []
        for i in tqdm(range(self.n_start_view,
                           self.n_start_view + self.n_views)):
            datai = data.get('img.img%d' % i, None)
            if datai is None:
                print('No image %d found.' % i)
                break
            img = datai[None]
            batch_size, _, h, w = img.shape
            assert(batch_size == 1)
            world_mat = datai.get('world_mat').to(device)
            camera_mat = datai.get('camera_mat').to(device)
            scale_mat = datai.get('scale_mat').to(device)
            t0 = time.time()
            with torch.no_grad():
                img_pred = self.render_img(
                    camera_mat, world_mat, inputs, scale_mat, c, stats_dict,
                    resolution=self.resolution)
            stats_dict['time_render'] = time.time() - t0
            img_pred.save(os.path.join(
                img_out_path, '%s_%03d.%s' % (modelname, i, self.extension)))
            out_imgs.append(img_pred)
        return inputs.cpu(), out_imgs, stats_dict

    def render_img(self, camera_mat, world_mat, inputs, scale_mat=None,
                   c=None, stats_dict=None, resolution=(128, 128)):
        ''' Renders an image for provided camera information.
        Args:
            camera_mat (tensor): camera matrix
            world_mat (tensor): world matrix
            scale_mat (tensor): scale matrix
            c (tensor): latent conditioned code c
            stats_dict (dict): statistics dictionary (created if None)
            resolution (tuple): output image resolution
        '''
        # Fix: the default used to be the mutable literal {}, which is shared
        # across calls; use a None sentinel instead.
        if stats_dict is None:
            stats_dict = {}
        device = self.device
        h, w = resolution
        t0 = time.time()
        p_loc, pixels = arange_pixels(resolution=(h, w))
        pixels = pixels.to(device)
        stats_dict['time_prepare_points'] = time.time() - t0
        if self.colors in ('rgb', 'depth'):
            # Get predicted world points
            with torch.no_grad():
                t0 = time.time()
                p_world_hat, mask_pred, mask_zero_occupied = \
                    self.model.pixels_to_world(
                        pixels, camera_mat, world_mat, scale_mat, c,
                        sampling_accuracy=self.sampling_accuracy)
                stats_dict['time_eval_depth'] = time.time() - t0
        t0 = time.time()
        # keep only pixel locations whose ray hit the surface
        p_loc = p_loc[mask_pred]
        with torch.no_grad():
            if self.colors == 'rgb':
                img_out = (255 * np.ones((h, w, 3))).astype(np.uint8)
                t0 = time.time()
                if mask_pred.sum() > 0:
                    rgb_hat = self.model.decode_color(p_world_hat, c=c)
                    rgb_hat = rgb_hat[mask_pred].cpu().numpy()
                    rgb_hat = (rgb_hat * 255).astype(np.uint8)
                    img_out[p_loc[:, 1], p_loc[:, 0]] = rgb_hat
                img_out = Image.fromarray(img_out).convert('RGB')
            elif self.colors == 'depth':
                img_out = (255 * np.ones((h, w))).astype(np.uint8)
                if mask_pred.sum() > 0:
                    p_world_hat = p_world_hat[mask_pred].unsqueeze(0)
                    d_values = transform_to_camera_space(
                        p_world_hat, camera_mat, world_mat,
                        scale_mat).squeeze(0)[:, -1].cpu().numpy()
                    # normalize finite depths into [0.5, 0.95] for display
                    m = d_values[d_values != np.inf].min()
                    M = d_values[d_values != np.inf].max()
                    d_values = 0.5 + 0.45 * (d_values - m) / (M - m)
                    d_image_values = d_values * 255
                    img_out[p_loc[:, 1], p_loc[:, 0]] = \
                        d_image_values.astype(np.uint8)
                img_out = Image.fromarray(img_out).convert("L")
        stats_dict['time_eval_color'] = time.time() - t0
        return img_out

    def export(self, img_list, img_out_path, modelname='model0'):
        ''' Exports the image list.
        Args:
            img_list (list): list of images
            img_out_path (string): output path
            modelname (string): model name
        '''
        model_path = os.path.join(img_out_path, modelname)
        if not os.path.exists(model_path):
            os.makedirs(model_path)
        for i in range(self.n_views):
            out_file = os.path.join(model_path, '%06d.png' % i)
            img_list[i].save(out_file)
        return 0

    def estimate_colors(self, vertices, c=None):
        ''' Estimates the colors for provided vertices.
        Args:
            vertices (Numpy array): mesh vertices
            c (tensor): latent conditioned code c
        '''
        device = self.device
        vertices = torch.FloatTensor(vertices)
        # evaluate the color decoder in chunks to bound memory usage
        vertices_split = torch.split(vertices, self.points_batch_size)
        colors = []
        for vi in vertices_split:
            vi = vi.to(device)
            with torch.no_grad():
                ci = self.model.decode_color(vi, c).squeeze(0).cpu()
            colors.append(ci)
        colors = np.concatenate(colors, axis=0)
        colors = np.clip(colors, 0, 1)
        colors = (colors * 255).astype(np.uint8)
        # append a fully-opaque alpha channel
        colors = np.concatenate([
            colors,
            np.full((colors.shape[0], 1), 255, dtype=np.uint8)], axis=1)
        return colors
| [
"numpy.clip",
"os.path.exists",
"torch.split",
"PIL.Image.fromarray",
"im2mesh.common.arange_pixels",
"numpy.full",
"os.makedirs",
"numpy.ones",
"im2mesh.common.transform_to_camera_space",
"os.path.join",
"numpy.concatenate",
"torch.empty",
"torch.no_grad",
"time.time",
"torch.FloatTenso... | [((4068, 4079), 'time.time', 'time.time', ([], {}), '()\n', (4077, 4079), False, 'import time\n'), ((4105, 4137), 'im2mesh.common.arange_pixels', 'arange_pixels', ([], {'resolution': '(h, w)'}), '(resolution=(h, w))\n', (4118, 4137), False, 'from im2mesh.common import arange_pixels, transform_to_camera_space\n'), ((6585, 6622), 'os.path.join', 'os.path.join', (['img_out_path', 'modelname'], {}), '(img_out_path, modelname)\n', (6597, 6622), False, 'import os\n'), ((7143, 7170), 'torch.FloatTensor', 'torch.FloatTensor', (['vertices'], {}), '(vertices)\n', (7160, 7170), False, 'import torch\n'), ((7196, 7241), 'torch.split', 'torch.split', (['vertices', 'self.points_batch_size'], {}), '(vertices, self.points_batch_size)\n', (7207, 7241), False, 'import torch\n'), ((7479, 7509), 'numpy.concatenate', 'np.concatenate', (['colors'], {'axis': '(0)'}), '(colors, axis=0)\n', (7493, 7509), True, 'import numpy as np\n'), ((7527, 7548), 'numpy.clip', 'np.clip', (['colors', '(0)', '(1)'], {}), '(colors, 0, 1)\n', (7534, 7548), True, 'import numpy as np\n'), ((2260, 2275), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2273, 2275), False, 'import torch\n'), ((2341, 2369), 'os.path.exists', 'os.path.exists', (['img_out_path'], {}), '(img_out_path)\n', (2355, 2369), False, 'import os\n'), ((2383, 2408), 'os.makedirs', 'os.makedirs', (['img_out_path'], {}), '(img_out_path)\n', (2394, 2408), False, 'import os\n'), ((2997, 3008), 'time.time', 'time.time', ([], {}), '()\n', (3006, 3008), False, 'import time\n'), ((4217, 4228), 'time.time', 'time.time', ([], {}), '()\n', (4226, 4228), False, 'import time\n'), ((4716, 4727), 'time.time', 'time.time', ([], {}), '()\n', (4725, 4727), False, 'import time\n'), ((6260, 6271), 'time.time', 'time.time', ([], {}), '()\n', (6269, 6271), False, 'import time\n'), ((6639, 6665), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (6653, 6665), False, 'import os\n'), ((6679, 6702), 'os.makedirs', 
'os.makedirs', (['model_path'], {}), '(model_path)\n', (6690, 6702), False, 'import os\n'), ((6765, 6805), 'os.path.join', 'os.path.join', (['model_path', "('%06d.png' % i)"], {}), "(model_path, '%06d.png' % i)\n", (6777, 6805), False, 'import os\n'), ((3026, 3041), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3039, 3041), False, 'import torch\n'), ((3252, 3263), 'time.time', 'time.time', ([], {}), '()\n', (3261, 3263), False, 'import time\n'), ((3295, 3368), 'os.path.join', 'os.path.join', (['img_out_path', "('%s_%03d.%s' % (modelname, i, self.extension))"], {}), "(img_out_path, '%s_%03d.%s' % (modelname, i, self.extension))\n", (3307, 3368), False, 'import os\n'), ((4337, 4352), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4350, 4352), False, 'import torch\n'), ((4375, 4386), 'time.time', 'time.time', ([], {}), '()\n', (4384, 4386), False, 'import time\n'), ((4782, 4797), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4795, 4797), False, 'import torch\n'), ((7345, 7360), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7358, 7360), False, 'import torch\n'), ((7664, 7714), 'numpy.full', 'np.full', (['(colors.shape[0], 1)', '(255)'], {'dtype': 'np.uint8'}), '((colors.shape[0], 1), 255, dtype=np.uint8)\n', (7671, 7714), True, 'import numpy as np\n'), ((2217, 2234), 'torch.empty', 'torch.empty', (['(1)', '(0)'], {}), '(1, 0)\n', (2228, 2234), False, 'import torch\n'), ((4681, 4692), 'time.time', 'time.time', ([], {}), '()\n', (4690, 4692), False, 'import time\n'), ((4939, 4950), 'time.time', 'time.time', ([], {}), '()\n', (4948, 4950), False, 'import time\n'), ((5303, 5327), 'PIL.Image.fromarray', 'Image.fromarray', (['img_out'], {}), '(img_out)\n', (5318, 5327), False, 'from PIL import Image\n'), ((4877, 4895), 'numpy.ones', 'np.ones', (['(h, w, 3)'], {}), '((h, w, 3))\n', (4884, 4895), True, 'import numpy as np\n'), ((6181, 6205), 'PIL.Image.fromarray', 'Image.fromarray', (['img_out'], {}), '(img_out)\n', (6196, 6205), False, 'from PIL 
import Image\n'), ((5425, 5440), 'numpy.ones', 'np.ones', (['(h, w)'], {}), '((h, w))\n', (5432, 5440), True, 'import numpy as np\n'), ((5612, 5684), 'im2mesh.common.transform_to_camera_space', 'transform_to_camera_space', (['p_world_hat', 'camera_mat', 'world_mat', 'scale_mat'], {}), '(p_world_hat, camera_mat, world_mat, scale_mat)\n', (5637, 5684), False, 'from im2mesh.common import arange_pixels, transform_to_camera_space\n')] |
"""
Contains ABC for Maximum Entropy graph null model.
"""
import warnings
import time
from abc import abstractmethod, ABC
from collections import namedtuple
import numpy as np
import scipy.optimize
from jax import jit, jacfwd, jacrev, grad
from .util import wrap_with_array, print_percentiles, jax_class_jit, hvp
# Result record returned by MaxentGraph.solve: solved parameter vector,
# negative log-likelihood, L2 norm of the constraint residuals, per-element
# relative error, and wall-clock solve time in seconds.
Solution = namedtuple(
    "Solution", ["x", "nll", "residual_error_norm", "relative_error", "total_time"]
)
class MaxentGraph(ABC):
    """
    ABC for Maximum Entropy graph null model.
    """
    @abstractmethod
    def bounds(self):
        """
        Returns the bounds on the parameters vector.
        """
    def clip(self, v):
        """
        Clips the parameters vector according to bounds.
        """
        (lower, upper), _bounds_object = self.bounds()
        return np.clip(v, lower, upper)
    @abstractmethod
    def transform_parameters(self, v):
        """
        Transforms parameters to bounded form.
        """
    @abstractmethod
    def transform_parameters_inv(self, v):
        """
        Transforms parameters to all real numbers for optimization convenience.
        """
    @abstractmethod
    def order_node_sequence(self):
        """
        Concatenates node constraint sequence in a canonical order.
        """
    @abstractmethod
    def get_initial_guess(self, option):
        """
        Gets initial guess.
        """
    @abstractmethod
    def expected_node_sequence(self, v):
        """
        Computes the expected node constraint using matrices.
        """
    @abstractmethod
    def expected_node_sequence_loops(self, v):
        """
        Computes the expected node constraint using loops.
        """
    @jax_class_jit
    def node_sequence_residuals(self, v):
        """
        Computes the residuals of the expected node constraint sequence minus the actual sequence.
        """
        return self.expected_node_sequence(v) - self.order_node_sequence()
    @abstractmethod
    def neg_log_likelihood_loops(self, v):
        """
        Computes the negative log-likelihood using loops.
        """
    @abstractmethod
    def neg_log_likelihood(self, v):
        """
        Computes the negative log-likelihood using matrix operations.
        """
    def compute_relative_error(self, expected):
        """
        Computes relative error for solution for every element of the sequence.
        """
        actual = self.order_node_sequence()
        # okay not actually relative error but close enough
        # (the +1 in the denominator avoids division by zero for zero-valued
        # constraints)
        return np.abs(expected - actual) / (1 + np.abs(actual))
    def solve(self, x0, method="trust-krylov", verbose=False):
        """
        Solves for the parameters of the null model using either bounded minimization of the
        negative log-likelihood or bounded least-squares minimization of the equation residuals.

        Raises ValueError for an unknown method, and RuntimeError when the
        optimizer fails with a large max relative error.
        """
        args = {}
        # for some reason scipy prefers hess over hessp if the former is passed
        # but since the latter is more efficient, only pass hess when necessary
        if method in ["trust-exact", "dogleg"]:
            hess = jit(jacfwd(jacrev(self.neg_log_likelihood)))
            args["hess"] = hess
        elif method in ["Newton-CG", "trust-ncg", "trust-krylov", "trust-constr"]:
            hessp = jit(hvp(self.neg_log_likelihood))
            args["hessp"] = hessp
        # dispatch: least-squares solvers minimize the residual vector,
        # everything else minimizes the scalar negative log-likelihood
        if method in ["trf", "dogbox", "lm"]:
            f = self.node_sequence_residuals
            jac = jit(jacrev(self.expected_node_sequence))
            args["jac"] = jac
            solver = scipy.optimize.least_squares
        elif method in [
            "Nelder-Mead",
            "Powell",
            "CG",
            "BFGS",
            "Newton-CG",
            "L-BFGS-B",
            "TNC",
            "COBYLA",
            "SLSQP",
            "trust-constr",
            "dogleg",
            "trust-ncg",
            "trust-exact",
            "trust-krylov",
        ]:
            f = self.neg_log_likelihood
            jac = jit(grad(self.neg_log_likelihood))
            # lbfgsb is fussy. wont accept jax's devicearray
            # there may be others, though
            if method in ["L-BFGS-B"]:
                jac = wrap_with_array(jac)
            # only gradient-based methods accept a jac argument
            if method in [
                "CG",
                "BFGS",
                "Newton-CG",
                "L-BFGS-B",
                "TNC",
                "SLSQP",
                "dogleg",
                "trust-ncg",
                "trust-krylov",
                "trust-exact",
                "trust-constr",
            ]:
                args["jac"] = jac
            solver = scipy.optimize.minimize
        else:
            raise ValueError("Invalid optimization method")
        start = time.time()
        sol = solver(f, x0=x0, method=method, **args)
        end = time.time()
        total_time = end - start
        # evaluate diagnostics at the solution point
        eq_r = self.node_sequence_residuals(sol.x)
        expected = self.expected_node_sequence(sol.x)
        residual_error_norm = np.linalg.norm(eq_r, ord=2)
        relative_error = self.compute_relative_error(expected)
        nll = self.neg_log_likelihood(sol.x)
        if not sol.success:
            # tolerate "failures" that still achieved a good fit
            if np.max(relative_error) < 0.5:
                warnings.warn(
                    "Didn't succeed according to algorithm, but max relative error is low.",
                    RuntimeWarning,
                )
            else:
                raise RuntimeError(
                    f"Didn't succeed in minimization. Message: {sol.message}"
                )
        if verbose:
            print(f"Took {total_time} seconds")
            print("Relative error for expected degree/strength sequence: ")
            print()
            print_percentiles(relative_error)
            print(f"\nResidual error: {residual_error_norm}")
        return Solution(
            x=sol.x,
            nll=float(nll),
            residual_error_norm=residual_error_norm,
            relative_error=relative_error,
            total_time=total_time,
        )
| [
"numpy.clip",
"numpy.abs",
"jax.jacrev",
"collections.namedtuple",
"numpy.max",
"jax.grad",
"numpy.linalg.norm",
"warnings.warn",
"time.time"
] | [((330, 425), 'collections.namedtuple', 'namedtuple', (['"""Solution"""', "['x', 'nll', 'residual_error_norm', 'relative_error', 'total_time']"], {}), "('Solution', ['x', 'nll', 'residual_error_norm', 'relative_error',\n 'total_time'])\n", (340, 425), False, 'from collections import namedtuple\n'), ((811, 835), 'numpy.clip', 'np.clip', (['v', 'lower', 'upper'], {}), '(v, lower, upper)\n', (818, 835), True, 'import numpy as np\n'), ((4738, 4749), 'time.time', 'time.time', ([], {}), '()\n', (4747, 4749), False, 'import time\n'), ((4818, 4829), 'time.time', 'time.time', ([], {}), '()\n', (4827, 4829), False, 'import time\n'), ((5000, 5027), 'numpy.linalg.norm', 'np.linalg.norm', (['eq_r'], {'ord': '(2)'}), '(eq_r, ord=2)\n', (5014, 5027), True, 'import numpy as np\n'), ((2527, 2552), 'numpy.abs', 'np.abs', (['(expected - actual)'], {}), '(expected - actual)\n', (2533, 2552), True, 'import numpy as np\n'), ((2560, 2574), 'numpy.abs', 'np.abs', (['actual'], {}), '(actual)\n', (2566, 2574), True, 'import numpy as np\n'), ((3463, 3498), 'jax.jacrev', 'jacrev', (['self.expected_node_sequence'], {}), '(self.expected_node_sequence)\n', (3469, 3498), False, 'from jax import jit, jacfwd, jacrev, grad\n'), ((5180, 5202), 'numpy.max', 'np.max', (['relative_error'], {}), '(relative_error)\n', (5186, 5202), True, 'import numpy as np\n'), ((5226, 5337), 'warnings.warn', 'warnings.warn', (['"""Didn\'t succeed according to algorithm, but max relative error is low."""', 'RuntimeWarning'], {}), '(\n "Didn\'t succeed according to algorithm, but max relative error is low.",\n RuntimeWarning)\n', (5239, 5337), False, 'import warnings\n'), ((3112, 3143), 'jax.jacrev', 'jacrev', (['self.neg_log_likelihood'], {}), '(self.neg_log_likelihood)\n', (3118, 3143), False, 'from jax import jit, jacfwd, jacrev, grad\n'), ((4006, 4035), 'jax.grad', 'grad', (['self.neg_log_likelihood'], {}), '(self.neg_log_likelihood)\n', (4010, 4035), False, 'from jax import jit, jacfwd, jacrev, grad\n')] |
# encoding:UTF-8
import numpy as np
import bsplines
# Knot Sequence Update Strategies need two method:
# 1) generateKnotList: which generates a new knotlist given the current spline and reprojection errors
# Returns a boolean flag if the updated knot sequence needs another step of optimization
# 2) getUpdatedSpline: given a knot list, spline order and spline generates a new spline initialized with the
# values of the given spline and the given knot sequence.
class ReprojectionErrorKnotSequenceUpdateStrategy(object):
    # Knot sequence produced by the previous iteration (None before first run)
    __previousKnotSequence = None
    # Per-segment errors from the previous iteration
    __previousErrorTerms = None
    __framerate = None
    __maxKnotsPerSecond = None
    # Time segments excluded from further knot splitting
    __disabledTimeSegments = []

    def __init__(self, framerate):
        """Initialize the strategy for data captured at ``framerate`` (Hz)."""
        self.__framerate = framerate
        # NOTE(review): despite the name this is a minimum knot spacing in
        # seconds (half a frame period) — confirm against callers.
        self.__maxKnotsPerSecond = 1. / (2. * self.__framerate)
        # Fix: __disabledTimeSegments used to exist only as a class-level
        # mutable list; generateKnotList appends to it (via
        # __removeSegmentsWithoutObservations), so state leaked across
        # instances.  Make it per-instance here.
        self.__disabledTimeSegments = []
    def generateKnotList(self, reprojection_errors, poseSpline):
        """Propose an updated knot sequence by splitting high-error segments.

        Returns [updatedKnots, requiresUpdate]; requiresUpdate is True when
        another optimization iteration looks worthwhile (first call, or >1%
        more knots than the previous iteration).
        """
        [times, errors] = self.__getErrorAndTimestamp(reprojection_errors)
        # take a copy of the old knots:
        knots = poseSpline.knots()
        # NOTE(review): errorTermsPerSegment from this first pass is unused;
        # only the filtered pass below feeds the split decision.
        [errorTermsPerSegment, errorPerSegment] = self.__getErrorPerSegment(times, errors, knots)
        disabledTimeSegments = self.__removeSegmentsWithoutImprovement(times, errors, self.__disabledTimeSegments)
        # drop observation-free segments, then recompute errors on the result
        [filteredKnots,
         disabledTimeSegments] = self.__removeSegmentsWithoutObservations(knots, errorPerSegment, disabledTimeSegments)
        [errorTermsPerSegmentFiltered,
         errorPerSegmentFiltered] = self.__getErrorPerSegment(times, errors, filteredKnots)
        updatedKnots = self.__generateKnotSequence(errorPerSegmentFiltered, errorTermsPerSegmentFiltered, knots,
                                                   disabledTimeSegments)
        if self.__previousKnotSequence is None:
            requiresUpdate = True
        else:
            # require at least a 1% increase in knots for a next iteration being worth the effort
            requiresUpdate = (len(updatedKnots) > 1.01 * len(self.__previousKnotSequence))
        # keep a copy of the knot sequence
        self.__previousKnotSequence = np.copy(updatedKnots)
        self.__previousErrorTerms = errorPerSegmentFiltered
        self.__disabledTimeSegments = disabledTimeSegments
        return [updatedKnots, requiresUpdate]
    def getUpdatedSpline(self, poseSpline, knots, splineOrder):
        """Get a spline with the new knot sequence build upon the poses of the old spline"""
        # linearly sample the old spline
        times = np.linspace(poseSpline.t_min(), poseSpline.t_max(), len(knots))
        # 6 rows: one pose sample (assumed 6-dof vector) per sampled time
        splinePoses = np.zeros((6, len(knots)))
        for i, time in enumerate(times):
            splinePoses[:, i] = poseSpline.eval(time)
        # guarantee that beginning and end times of the spline remain unchanged
        oldKnots = poseSpline.knots()
        # prepend the old leading knots that fall before the new first knot
        i = 0
        while oldKnots[i] < knots[0]:
            i += 1
        knots = np.insert(knots, 0, oldKnots[0:i])
        # append the old trailing knots that fall after the new last knot
        i = -1
        while oldKnots[i] > knots[-1]:
            i -= 1
        knots = np.append(knots, oldKnots[i:])
        # build a fresh spline of the same rotation parametrization and fit it
        # sparsely to the sampled poses on the merged knot sequence
        newPoseSpline = bsplines.BSplinePose(splineOrder, poseSpline.rotation())
        newPoseSpline.initPoseSplineSparseKnots(times, splinePoses, np.array(knots), 1e-6)
        return newPoseSpline
def __getErrorAndTimestamp(self, reprojection_errors):
"""Extract the timestamps and reprojection error values"""
errors = []
times = []
for reprojection_error in reprojection_errors:
times.append(reprojection_error.observationTime())
errors.append(reprojection_error.evaluateError())
# it is not guaranteed that the errors are sorted in time
newIdx = sorted(range(len(times)), key=times.__getitem__)
times = [times[i] for i in newIdx]
errors = [errors[i] for i in newIdx]
return [times, errors]
    def __getErrorPerSegment(self, times, errors, knots):
        """Get the total error per segment and number of error terms per segment"""
        errorPerSegment = np.zeros(len(knots))
        errorTermsPerSegment = np.zeros(len(knots))
        # (-1, -1) means "no segment located yet"
        segment = (-1, -1)
        # analyse each section of the knot sequence:
        for i, t in enumerate(times):
            # times are assumed sorted, so the search resumes at the last hit
            segment = self.__time2KnotSection(t, knots, segment)
            # NOTE(review): when no interval matches, segment[0] is -1 and the
            # error is accumulated into the *last* bin — confirm intended.
            errorPerSegment[segment[0]] += errors[i]
            errorTermsPerSegment[segment[0]] += 1
        return [errorTermsPerSegment, errorPerSegment]
def __removeSegmentsWithoutObservations(self, knots, errorPerSegment, disabledTimeSegments=[]):
filteredKnots = []
# remove segments with consecutive 0-valued errors
p_error = 0
for i, error in enumerate(errorPerSegment):
# this should depend on the splineOrder!
if p_error == 0 and error == 0 and 6 < i < len(errorPerSegment) - 6:
# add the segment between the previous and the next knot the the "blacklist"
disabledTimeSegments.append((knots[i - 1], knots[i + 1]))
else:
filteredKnots.append(knots[i])
p_error = error
return [filteredKnots, disabledTimeSegments]
def __generateKnotSequence(self, errorPerSegmentFiltered, errorTermsPerSegmentFiltered, knots,
disabledTimeSegments):
newKnots = []
numberOfKnots = len(knots)
for i, error in enumerate(errorPerSegmentFiltered):
expectedNormalError = errorTermsPerSegmentFiltered[i]
if error > expectedNormalError and i < numberOfKnots - 1:
newKnot = (knots[i] + knots[i + 1]) / 2.0
deltaT = newKnot - knots[i]
# max number of knots per second hit: do not split
if deltaT <= self.__maxKnotsPerSecond:
newKnots.append(knots[i])
# segment is disabled: do not split
elif disabledTimeSegments is not None and self.__isSegmentDisabled(disabledTimeSegments, newKnot):
newKnots.append(knots[i])
else:
# split:
newKnots.append(knots[i])
newKnots.append(newKnot)
else:
newKnots.append(knots[i])
return newKnots
def __time2KnotSection(self, t, knots, segment):
i = segment[0] # we assume that the times are ordered thus we do not need to check before the previous segment
if i == -1:
i = 0
while i < len(knots) - 1:
if knots[i] < t < knots[i + 1]:
return (i, i + 1)
i += 1
return (-1, -1)
# true: disabled
# false: not disabled
def __isSegmentDisabled(self, disabledTimeSegments, t):
for seg in disabledTimeSegments:
if seg[1] > t >= seg[0]:
return True
return False
    def __removeSegmentsWithoutImprovement(self, times, errors, disabledTimeSegments):
        """Compare the current reprojection errors against the errors recorded
        for the previous knot sequence and disable every segment whose error
        did not drop below 80% of its previous value.

        :param times: observation timestamps (assumed ordered ascending)
        :param errors: reprojection error values aligned with ``times``
        :param disabledTimeSegments: list of (start, end) tuples, extended in place
        :return: the (possibly extended) list of disabled time segments
        """
        # first compare the reprojection error in the "previous" segments;
        # if we do not observe a significant drop, stop adding errors there
        if self.__previousKnotSequence is not None and self.__previousErrorTerms is not None:
            timeSegments = []
            errorPerOldSegment = np.zeros(len(self.__previousKnotSequence))
            segment = (-1, -1)
            # analyse each section of the knot sequence:
            # this is kind of inefficient as it adds the same time segment multiple times...
            for i, t in enumerate(times):
                segment = self.__time2KnotSection(t, self.__previousKnotSequence, segment)
                errorPerOldSegment[segment[0]] += errors[i]
                timeSegments.append((self.__previousKnotSequence[segment[0]], self.__previousKnotSequence[segment[1]]))
            # NOTE(review): timeSegments holds one entry per *observation* while
            # __previousErrorTerms is indexed per *segment*; the shared index i
            # below looks mismatched — verify __previousErrorTerms' layout.
            for i, pe in enumerate(self.__previousErrorTerms):
                if pe * 0.8 < errorPerOldSegment[i]:
                    disabledTimeSegments.append(timeSegments[i])
        return disabledTimeSegments
| [
"numpy.insert",
"numpy.copy",
"numpy.array",
"numpy.append"
] | [((2088, 2109), 'numpy.copy', 'np.copy', (['updatedKnots'], {}), '(updatedKnots)\n', (2095, 2109), True, 'import numpy as np\n'), ((2907, 2941), 'numpy.insert', 'np.insert', (['knots', '(0)', 'oldKnots[0:i]'], {}), '(knots, 0, oldKnots[0:i])\n', (2916, 2941), True, 'import numpy as np\n'), ((3032, 3062), 'numpy.append', 'np.append', (['knots', 'oldKnots[i:]'], {}), '(knots, oldKnots[i:])\n', (3041, 3062), True, 'import numpy as np\n'), ((3213, 3228), 'numpy.array', 'np.array', (['knots'], {}), '(knots)\n', (3221, 3228), True, 'import numpy as np\n')] |
############################ README ############################################
# This file is used to apply receptive field to the image to imitate how
# retinal ganglion cells perceive in real world scenario. Here 'w' is the filter
# that need to be convoluted with the image. Sophisticated python libraries for
# convolution can be used for optimization.
################################################################################
import numpy as np
def rf(inp):
w = [[-0.5,-0.125, 0.25, -0.125, -0.5 ],
[-0.125 , 0.25 , 0.625 , 0.25 , -0.125],
[ 0.25 ,0.625 , 1. , 0.625 , 0.25 ],
[-0.125 , 0.25 , 0.625 , 0.25, -0.125],
[-0.5 , -0.125 , 0.25 , -0.125 ,-0.5 ]]
pot = np.zeros([28,28])
ran = [-2,-1,0,1,2]
ox = 2
oy = 2
#Convolution
for i in range(28):
for j in range(28):
summ = 0
for m in ran:
for n in ran:
if (i+m)>=0 and (i+m)<=15 and (j+n)>=0 and (j+n)<=15:
summ = summ + w[ox+m][oy+n]*inp[i+m][j+n]/255
pot[i][j] = summ
return pot
# if __name__ == '__main__':
# maxx = -1000
# minn = 1000
# for j in range(1,1500):
# img = cv2.imread("images/" + str(j) + ".png", 0)
# pot = rf(img)
# for c in pot:
# if max(c)>maxx:
# maxx= max(c)
# if min(c)<minn:
# minn = min(c)
# print maxx, minn
| [
"numpy.zeros"
] | [((702, 720), 'numpy.zeros', 'np.zeros', (['[28, 28]'], {}), '([28, 28])\n', (710, 720), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tqdm import tqdm
from abc import ABCMeta, abstractmethod
import paddle
import paddle.nn as nn
from paddle.io import DataLoader
from paddlemm.models import CMML, NIC, SCAN, SGRAF, AoANet, EarlyFusion, LateFusion, LMFFusion, TMCFusion, VSEPP, IMRAM
from paddlemm.datasets import BasicDataset, SemiDataset, PretrainDataset, SampleDataset
# Maps the config's `data_mode` string to the dataset class to instantiate.
DatasetMap = {
    'basic': BasicDataset,
    'semi': SemiDataset,
    'sample': SampleDataset,
    'pretrain': PretrainDataset
}
# Maps the (lower-cased) `model_name` string to the model class to instantiate.
ModelMap = {
    'cmml': CMML,
    'nic': NIC,
    'scan': SCAN,
    'vsepp': VSEPP,
    'imram': IMRAM,
    'sgraf': SGRAF,
    'aoanet': AoANet,
    'earlyfusion': EarlyFusion,
    'latefusion': LateFusion,
    'lmffusion': LMFFusion,
    'tmcfusion': TMCFusion
}
class BaseTrainer(metaclass=ABCMeta):
    """Common training loop for PaddleMM models.

    Owns dataset/model construction, the Adam optimizer (with optional
    gradient clipping and step-decay scheduling), the epoch loop, periodic
    validation, and best-model checkpointing driven by ``select_metric``.
    Subclasses implement :meth:`evaluate` and :meth:`test`.
    """

    def __init__(self, opt):
        self.model_name = opt.model_name.lower()
        self.out_root = opt.out_root
        self.logger = opt.logger
        self.num_epochs = opt.num_epochs
        self.batch_size = opt.batch_size
        self.learning_rate = opt.learning_rate
        self.task = opt.task
        self.weight_decay = opt.get('weight_decay', 0.)
        self.pretrain_epochs = opt.get('pretrain_epochs', 0)
        self.num_workers = opt.get('num_workers', 0)
        self.val_epoch = opt.get('val_epoch', 1)
        # choose metric for select best model during training
        self.select_metric = opt.get('select_metric', 'loss')
        # FIX: train() reads self.best_loss / self.best_score, but they were
        # never initialized, raising AttributeError on the first validation
        # unless a subclass happened to set them. Start from the worst values.
        self.best_loss = float('inf')
        self.best_score = float('-inf')
        self.dataset = DatasetMap[opt.data_mode](**opt)
        opt.vocab_size = self.dataset.vocab_size
        opt.vocab = str(self.dataset.word2idx)
        self.model = ModelMap[opt.model_name.lower()](**opt)
        self.grad_clip = opt.get('grad_clip', 0)
        if self.grad_clip:
            self.grad_clip = nn.clip.ClipGradByValue(opt.grad_clip)
        else:
            self.grad_clip = None
        self.step_size = opt.get('step_size', 0)
        self.gamma = opt.get('gamma', 0.1)
        if self.step_size:
            # decay the learning rate by `gamma` every `step_size` epochs
            self.scheduler = paddle.optimizer.lr.StepDecay(learning_rate=self.learning_rate, step_size=self.step_size,
                                                           gamma=self.gamma)
            self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),
                                                   learning_rate=self.scheduler,
                                                   weight_decay=self.weight_decay,
                                                   grad_clip=self.grad_clip)
        else:
            self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),
                                                   learning_rate=self.learning_rate,
                                                   weight_decay=self.weight_decay,
                                                   grad_clip=self.grad_clip)

    def train(self):
        """Run optional pretraining, then the epoch loop with periodic
        validation and best-model checkpointing."""
        if self.pretrain_epochs > 0:
            self.pretrain()
        for epoch in range(1, self.num_epochs + 1):
            all_loss = []
            self.model.train()
            train_loader = DataLoader(self.dataset.train_(),
                                      batch_size=self.batch_size,
                                      shuffle=True,
                                      num_workers=self.num_workers)
            train_tqdm = tqdm(train_loader(), ncols=80)
            for idx, batch in enumerate(train_tqdm):
                batch['epoch'] = epoch
                loss = self.model(batch)
                loss.backward()
                self.optimizer.step()
                self.optimizer.clear_grad()
                all_loss.append(loss.item())
                train_tqdm.set_description("Epoch: {} | Loss: {:.3f}".format(epoch, loss.item()))
            train_tqdm.close()
            if self.step_size:
                self.scheduler.step()
            # always keep the latest weights around
            paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'temp.pdparams'))
            if epoch % self.val_epoch == 0:
                val_res = self.evaluate()
                if self.select_metric == 'loss':
                    # lower is better
                    if val_res['loss'] < self.best_loss:
                        self.best_loss = val_res['loss']
                        paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))
                    self.logger.info("Epoch: {}, valid loss: {:.3f}, Best: {:.3f}".format(epoch, val_res['loss'], self.best_loss))
                else:
                    # higher is better
                    if val_res[self.select_metric] > self.best_score:
                        self.best_score = val_res[self.select_metric]
                        paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))
                    self.logger.info("Epoch: {}, valid score: {:.3f}, Best: {:.3f}".format(epoch, val_res[self.select_metric],
                                                                                          self.best_score))

    def pretrain(self):
        """Supervised pretraining pass (used by CMML) over `pretrain_epochs`
        epochs with an 8x larger batch size."""
        self.model.train()
        for epoch in range(1, self.pretrain_epochs + 1):
            all_loss = []
            train_loader = DataLoader(self.dataset.train_(),
                                      batch_size=self.batch_size * 8,  # mul 8 to train total supervised data
                                      shuffle=True,
                                      num_workers=self.num_workers)
            train_tqdm = tqdm(train_loader(), ncols=80)
            for idx, batch in enumerate(train_tqdm):
                self.optimizer.clear_grad()
                loss = self.model.pretrain(batch)
                loss.backward()
                self.optimizer.step()
                all_loss.append(loss.item())
                train_tqdm.set_description("Pretrain epoch: {} | Loss: {:.3f}".format(epoch, np.mean(all_loss)))

    @abstractmethod
    def evaluate(self):
        """Validate the current model; must return a dict containing 'loss'
        and/or the configured select_metric."""
        pass

    @abstractmethod
    def test(self):
        """Evaluate the best model on the test split."""
        pass
| [
"os.path.join",
"numpy.mean",
"paddle.nn.clip.ClipGradByValue",
"paddle.optimizer.lr.StepDecay"
] | [((1963, 2001), 'paddle.nn.clip.ClipGradByValue', 'nn.clip.ClipGradByValue', (['opt.grad_clip'], {}), '(opt.grad_clip)\n', (1986, 2001), True, 'import paddle.nn as nn\n'), ((2206, 2318), 'paddle.optimizer.lr.StepDecay', 'paddle.optimizer.lr.StepDecay', ([], {'learning_rate': 'self.learning_rate', 'step_size': 'self.step_size', 'gamma': 'self.gamma'}), '(learning_rate=self.learning_rate, step_size=\n self.step_size, gamma=self.gamma)\n', (2235, 2318), False, 'import paddle\n'), ((4151, 4195), 'os.path.join', 'os.path.join', (['self.out_root', '"""temp.pdparams"""'], {}), "(self.out_root, 'temp.pdparams')\n", (4163, 4195), False, 'import os\n'), ((6097, 6114), 'numpy.mean', 'np.mean', (['all_loss'], {}), '(all_loss)\n', (6104, 6114), True, 'import numpy as np\n'), ((4513, 4563), 'os.path.join', 'os.path.join', (['self.out_root', '"""best_model.pdparams"""'], {}), "(self.out_root, 'best_model.pdparams')\n", (4525, 4563), False, 'import os\n'), ((4924, 4974), 'os.path.join', 'os.path.join', (['self.out_root', '"""best_model.pdparams"""'], {}), "(self.out_root, 'best_model.pdparams')\n", (4936, 4974), False, 'import os\n')] |
import time
from itertools import product
from loggers import CSVLogger, PrintLogger, FileLogger, multi_logger
import torch
from torch.optim import Adam
import os
import networkx as nx
import pickle
import numpy as np
import logging
from model_runner import ModelRunner,execute_runners
import cProfile
# ---- experiment configuration (read as module globals by the code below) ----
Dataset_name = 'Tmall'  # one of: 'Tmall', 'DBLP', 'IMDB'
Time_inds = 9  # number of graph snapshots to load
Hid_size = [10]  # grid values: hidden layer size
Epochs = 3
Dropout = [0.3]  # grid values: dropout rate
LR = [0.01]  # grid values: learning rate
Regularization = [0.002]  # grid values: weight decay
Temporal_pen = [0.002]  # grid values: temporal penalty weight
Optimizer = Adam
Iterations = 1  # repetitions (fresh splits) per grid point
Number_Of_Classes = 2  # 2 for 'Tmall', 15 for 'DBLP', 11 for 'IMDB'
Is_NNI = False
Train_test_split = 'bipartite'  # 'bipartite', 'all_labeled', 'partialy_labeled'
Bipartite_products = 200  # 200 or None; leading product nodes excluded from the person split
loss_weights = 'sqrt(N/Nj)'  # None, '1/Njs' or 'sqrt(N/Nj)'
class GCNTemporalCommunities(object):
    """Loads the temporal graph snapshots for ``Dataset_name`` and runs the
    configured train/test trials through ModelRunner. Reads the module-level
    configuration globals (Dataset_name, Time_inds, Train_test_split, ...)."""
    def __init__(self, nni=False):
        # nni: whether the run is driven by an NNI hyper-parameter search
        self._nni = nni
        self._device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self._adjacency_matrices = None
        self._feature_matrices = None
        self._labels = None
    def load_data(self):
        """Read graph_i / labels_i / mx_i pickles for each time index and keep
        the COO adjacency matrices, feature matrices and label lists."""
        graphs = []
        labels = []
        mx_s = []
        for i in range(Time_inds):
            with open(os.path.join('dataset',Dataset_name,'input', 'graph_' + str(i) + '.pkl'), 'rb') as f:
                g = pickle.load(f)
            with open(os.path.join('dataset',Dataset_name,'input', 'labels_' + str(i) + '.pkl'), 'rb') as f:
                l = pickle.load(f)
            with open(os.path.join('dataset',Dataset_name,'input', 'mx_' + str(i) + '.pkl'), 'rb') as f:
                mx = pickle.load(f)
            graphs.append(g)
            labels.append(l)
            mx_s.append(mx)
        self._adjacency_matrices = [nx.adjacency_matrix(g).tocoo() for g in graphs]
        self._feature_matrices = mx_s
        self._labels = labels
    def fix_logger(self, dumping_name):
        """Create a combined print+file logger under a timestamped logs folder."""
        # os.getcwd() returns current working directory of a process
        products_path = os.path.join(os.getcwd(), 'dataset', Dataset_name, "logs", dumping_name,
                                     time.strftime("%Y%m%d_%H%M%S"))
        if not os.path.exists(products_path):
            os.makedirs(products_path)
        logger = multi_logger([PrintLogger("MyLogger", level=logging.DEBUG),
                               FileLogger("results_%s" % dumping_name,
                                          path=products_path, level=logging.INFO)], name=None)
        return logger
    def prep_trial(self, input_params, grid_logger, grid_logger_avg):
        """Build one ModelRunner per iteration (each with a fresh random
        train/test split) and execute them all."""
        runners = []
        for it in range(input_params['iterations']):
            # train and test split
            if Train_test_split == 'bipartite':
                # exclude the first Bipartite_products (product) nodes from the split
                person_data = np.delete(np.arange(len(self._labels[0])), np.arange(Bipartite_products))
                # NOTE(review): 90% of nodes go to the *test* set and only 10%
                # to train — confirm this ratio is intentional.
                rand_test_indices = np.random.choice(person_data, round(len(person_data) * 0.9), replace=False)
                rand_train_indices = np.delete(np.arange(len(self._labels[0])), rand_test_indices)
            else:
                rand_test_indices = np.random.choice(len(self._labels[0]), round(len(self._labels[0]) * 0.9), replace=False)
                rand_train_indices = np.delete(np.arange(len(self._labels[0])), rand_test_indices)
            # drop nodes labeled -1 (unlabeled) per time step
            train = [[k for k in rand_train_indices if self._labels[j][k] != -1] for j in range(len(self._labels))]
            test = [[k for k in rand_test_indices if self._labels[j][k] != -1] for j in range(len(self._labels))]
            test_labels = [torch.tensor([self._labels[j][k] for k in rand_test_indices if self._labels[j][k] != -1],
                                        dtype=torch.double).to(self._device) for j in range(input_params['time_inds'])]
            train_labels = [torch.tensor([self._labels[j][k] for k in rand_train_indices if self._labels[j][k] != -1],
                                         dtype=torch.double).to(self._device) for j in range(input_params['time_inds'])]
            input_params['it_num'] = it
            input_params['activation'] = torch.nn.functional.relu
            input_params['train_person'] = rand_train_indices
            input_params['test_person'] = rand_test_indices
            input_params['training_inds'] = train
            input_params['test_inds'] = test
            input_params['training_labels'] = train_labels
            input_params['test_labels'] = test_labels
            input_params['adj_matrices'] = self._adjacency_matrices
            input_params['feature_matrices'] = self._feature_matrices
            dumping_name = ""
            logger = self.fix_logger(dumping_name)
            runner = ModelRunner(input_params, logger=logger)
            runners.append(runner)
        execute_runners(runners, grid_logger, grid_logger_avg, is_nni=self._nni)
def main(params, grid_logger, grid_logger_avg):
    """Run one full experiment: build the trial driver, load the graph
    snapshots and launch the configured trials."""
    experiment = GCNTemporalCommunities(nni=params['is_nni'])
    experiment.load_data()
    experiment.prep_trial(params, grid_logger, grid_logger_avg)
if __name__ == "__main__":
pr = cProfile.Profile()
pr.enable()
grid_outputs_folder = time.strftime("%Y%m%d_%H%M%S")
res_path = os.path.join(os.getcwd(), "dataset", Dataset_name, "grid", grid_outputs_folder)
if not os.path.exists(res_path):
os.makedirs(res_path)
grid_logger = CSVLogger("results_%s" % 'grid' + time.strftime("%Y%m%d_%H%M%S"), path=res_path)
grid_logger_avg = CSVLogger("results_%s" % 'grid_it_avg' + time.strftime("%Y%m%d_%H%M%S"), path=res_path)
grid_logger.info("iteration", "total_it", "lr", "do", "hid_size", "wd", "temp_pen", "epochs",
"train_reg_loss", "train_temp_loss", "total_train_loss", "train_acc_f1_macro", "train_f1_micro",
"test_reg_loss", "test_temp_loss", "total_test_loss", "test_f1_macro", "test_f1_micro")
grid_logger_avg.info("iterations", "lr", "do", "hid_size", "wd", "temp_pen", "epochs",
"train_reg_loss", "train_temp_loss", "total_train_loss", "train_f1_macro", "train_f1_micro",
"test_reg_loss", "test_temp_loss", "total_test_loss", "test_f1_macro", "test_f1_micro")
num_of_grids = len(LR) * len(Hid_size) * len(Regularization) * len(Temporal_pen) * len(Dropout)
grid_counter = 0
configurations = list(product(*[LR, Hid_size, Regularization, Temporal_pen, Dropout]))
for LR, Hid_size, Regularization, Temporal_pen, Dropout in configurations:
grid_counter += 1
print("\ngrid {} out of {}:".format(grid_counter, num_of_grids))
params = {"hid_size": Hid_size,
"epochs": Epochs,
"dropout": Dropout,
"lr": LR,
"weight_decay": Regularization,
"temporal_pen": Temporal_pen,
"optimizer": Optimizer,
"iterations": Iterations,
"time_inds": Time_inds,
"optim_name": 'Adam',
"dataset_name": Dataset_name,
"number_of_classes": Number_Of_Classes,
"is_nni": False,
"name": "lr_" + str(LR) + "_do_" + str(Dropout) + "_wd_" + str(Regularization) + "_Tempen_" + str(
Temporal_pen) + "_hid_size_" + str(Hid_size),
"grid_output_folder": grid_outputs_folder,
"loss_weights_type": loss_weights}
main(params, grid_logger, grid_logger_avg)
pr.disable()
pr.print_stats(sort="time")
| [
"os.path.exists",
"os.makedirs",
"torch.device",
"model_runner.ModelRunner",
"networkx.adjacency_matrix",
"time.strftime",
"itertools.product",
"pickle.load",
"os.getcwd",
"torch.tensor",
"torch.cuda.is_available",
"loggers.FileLogger",
"cProfile.Profile",
"loggers.PrintLogger",
"numpy.a... | [((5212, 5230), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (5228, 5230), False, 'import cProfile\n'), ((5275, 5305), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (5288, 5305), False, 'import time\n'), ((4909, 4981), 'model_runner.execute_runners', 'execute_runners', (['runners', 'grid_logger', 'grid_logger_avg'], {'is_nni': 'self._nni'}), '(runners, grid_logger, grid_logger_avg, is_nni=self._nni)\n', (4924, 4981), False, 'from model_runner import ModelRunner, execute_runners\n'), ((5337, 5348), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5346, 5348), False, 'import os\n'), ((5416, 5440), 'os.path.exists', 'os.path.exists', (['res_path'], {}), '(res_path)\n', (5430, 5440), False, 'import os\n'), ((5451, 5472), 'os.makedirs', 'os.makedirs', (['res_path'], {}), '(res_path)\n', (5462, 5472), False, 'import os\n'), ((6495, 6558), 'itertools.product', 'product', (['*[LR, Hid_size, Regularization, Temporal_pen, Dropout]'], {}), '(*[LR, Hid_size, Regularization, Temporal_pen, Dropout])\n', (6502, 6558), False, 'from itertools import product\n'), ((1007, 1032), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1030, 1032), False, 'import torch\n'), ((983, 1003), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (995, 1003), False, 'import torch\n'), ((1038, 1057), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1050, 1057), False, 'import torch\n'), ((2123, 2134), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2132, 2134), False, 'import os\n'), ((2221, 2251), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (2234, 2251), False, 'import time\n'), ((2269, 2298), 'os.path.exists', 'os.path.exists', (['products_path'], {}), '(products_path)\n', (2283, 2298), False, 'import os\n'), ((2313, 2339), 'os.makedirs', 'os.makedirs', (['products_path'], {}), '(products_path)\n', (2324, 2339), False, 'import os\n'), 
((4819, 4859), 'model_runner.ModelRunner', 'ModelRunner', (['input_params'], {'logger': 'logger'}), '(input_params, logger=logger)\n', (4830, 4859), False, 'from model_runner import ModelRunner, execute_runners\n'), ((5528, 5558), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (5541, 5558), False, 'import time\n'), ((5639, 5669), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (5652, 5669), False, 'import time\n'), ((1422, 1436), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1433, 1436), False, 'import pickle\n'), ((1568, 1582), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1579, 1582), False, 'import pickle\n'), ((1711, 1725), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1722, 1725), False, 'import pickle\n'), ((2374, 2418), 'loggers.PrintLogger', 'PrintLogger', (['"""MyLogger"""'], {'level': 'logging.DEBUG'}), "('MyLogger', level=logging.DEBUG)\n", (2385, 2418), False, 'from loggers import CSVLogger, PrintLogger, FileLogger, multi_logger\n'), ((2452, 2531), 'loggers.FileLogger', 'FileLogger', (["('results_%s' % dumping_name)"], {'path': 'products_path', 'level': 'logging.INFO'}), "('results_%s' % dumping_name, path=products_path, level=logging.INFO)\n", (2462, 2531), False, 'from loggers import CSVLogger, PrintLogger, FileLogger, multi_logger\n'), ((1854, 1876), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['g'], {}), '(g)\n', (1873, 1876), True, 'import networkx as nx\n'), ((2923, 2952), 'numpy.arange', 'np.arange', (['Bipartite_products'], {}), '(Bipartite_products)\n', (2932, 2952), True, 'import numpy as np\n'), ((3674, 3788), 'torch.tensor', 'torch.tensor', (['[self._labels[j][k] for k in rand_test_indices if self._labels[j][k] != -1]'], {'dtype': 'torch.double'}), '([self._labels[j][k] for k in rand_test_indices if self._labels\n [j][k] != -1], dtype=torch.double)\n', (3686, 3788), False, 'import torch\n'), ((3914, 4029), 'torch.tensor', 
'torch.tensor', (['[self._labels[j][k] for k in rand_train_indices if self._labels[j][k] != -1]'], {'dtype': 'torch.double'}), '([self._labels[j][k] for k in rand_train_indices if self.\n _labels[j][k] != -1], dtype=torch.double)\n', (3926, 4029), False, 'import torch\n')] |
import numpy
import random
import math
import qConstants as qc
import qUtilities as qu
import qBitStrings as qb
import qAlgorithms as qa
def power(stateOrGate, m):
    '''Assumes n >= 1. Given an n-qbit gate or state and m >= 1, returns the
    mth tensor power, which is an (n * m)-qbit gate or state. For the sake of
    time and memory, m should be small.'''
    # FIX: the docstring must be the function's first statement to be picked up
    # by help()/__doc__; it previously sat after the assert as a no-op string.
    assert m > 0, "m must be positive"
    result = stateOrGate
    # fold m - 1 further tensor factors onto the running product
    for _ in range(1, m):
        result = tensor(result, stateOrGate)
    return result
def function(n, m, f):
    '''Assumes n, m >= 1. Given a Python function f : {0, 1}^n -> {0, 1}^m.
    That is, f takes as input an n-bit string and produces as output an m-bit
    string, as defined in qBitStrings.py. Returns the corresponding
    (n + m)-qbit gate F.'''
    # Build F column by column: the column for input |alpha>|beta> is
    # |alpha>|beta + f(alpha)>. Collect all columns in a list and concatenate
    # once at the end -- numpy.concatenate accepts the full list, so the old
    # "is F still None?" special case for the first column is unnecessary.
    columns = []
    for a in range(2 ** n):
        for b in range(2 ** m):
            alpha = qb.string(n, a)
            beta = qb.string(m, b)
            first_ket = qu.quantumFromClassic(alpha)
            second_ket = qu.quantumFromClassic(qb.addition(beta, f(alpha)))
            result = tensor(first_ket, second_ket)
            columns.append(numpy.array([result]).T)
    return numpy.concatenate(columns, axis=1)
def application(u, ketPsi):
    '''Applies the n-qbit gate U to the n-qbit state |psi>, returning the
    n-qbit state U |psi>.'''
    return numpy.matmul(u, ketPsi)
def tensor(a, b):
    '''Assumes that n, m >= 1. Assumes that a is an n-qbit state and b is an
    m-qbit state, or that a is an n-qbit gate and b is an m-qbit gate. Returns
    the tensor product of a and b, which is an (n + m)-qbit gate or state.

    Implemented with numpy.kron, which produces exactly the block layout the
    previous hand-rolled loop built, at C speed. Each operand is lifted to a
    column vector independently; the old code converted BOTH operands with
    numpy.array([x]).T whenever EITHER was 1-D, which produced a malformed
    3-D array for a state/gate mix.'''
    if len(a.shape) == 1:
        a = a[:, None]  # state -> column vector
    if len(b.shape) == 1:
        b = b[:, None]
    result = numpy.kron(a, b)
    # a single-column result is a state: hand it back as a flat vector again
    if result.shape[1] == 1:
        return result.T[0]
    return result
def fourierRecursive(n):
    '''Assumes n >= 1. Returns the n-qbit quantum Fourier transform gate T.
    Computes T recursively as the product Q (R S) rather than from the
    definition.'''
    rs = numpy.matmul(fourierR(n), fourierS(n))
    return numpy.matmul(fourierQ(n), rs)
def fourierR(n):
    '''Helper to fourierRecursive: identity on the first wire tensored with
    the (n - 1)-qbit Fourier transform; a plain Hadamard in the base case.'''
    return qc.h if n == 1 else tensor(qc.i, fourierRecursive(n - 1))
def fourierS(n):
    '''Helper to fourierRecursive: an n-qbit wire permutation assembled from a
    ladder of adjacent SWAPs (identity for n = 1, a single SWAP for n = 2).'''
    if n == 1:
        return qc.i
    if n == 2:
        return qc.swap
    # start with a SWAP on the bottom two wires ...
    chunk = tensor(power(qc.i, n - 2), qc.swap)
    # ... then apply further adjacent SWAPs, one wire position higher each step
    for i in range(1, n - 2):
        chunk = numpy.matmul(tensor(tensor(power(qc.i, n - i - 2), qc.swap), power(qc.i, i)), chunk)
    # final SWAP on the top two wires
    chunk = numpy.matmul(tensor(qc.swap, power(qc.i, n - 2)), chunk)
    return chunk
def fourierQ(n):
    '''Helper to fourierRecursive: the n-qbit block matrix
    [[I, D], [I, -D]] / sqrt(2), built from the (n - 1)-qbit identity and
    the diagonal phase gate D; identity in the base case.'''
    if n == 1:
        return qc.i
    eye = power(qc.i, n - 1)
    d = fourierD(n - 1)
    leftBlock = numpy.concatenate((eye, eye))
    rightBlock = numpy.concatenate((d, -1 * d))
    return (1 / math.sqrt(2)) * numpy.concatenate((leftBlock, rightBlock), axis=1)
def fourierD(n):
    '''Helper to fourierRecursive: diagonal phase gate; the tensor product of
    single-qbit gates diag(1, w_{k+1}) with w_{k+1} = exp(2 pi i / 2^(k+1)).'''
    phaseGate = numpy.array([[1, 0], [0, numpy.exp(2j * numpy.pi / 2 ** (n + 1))]])
    if n == 1:
        return phaseGate
    return tensor(fourierD(n - 1), phaseGate)
def distant(gate):
    '''Given an (n + 1)-qbit gate U (such as a controlled-V gate, where V is
    n-qbit), performs swaps to insert one extra wire between the first qbit
    and the other n qbits. Returns an (n + 2)-qbit gate.'''
    wires = int(math.log2(gate.shape[1]) - 1)
    # swap the top two qbits, apply the gate on the lower n + 1, swap back
    outerSwap = tensor(qc.swap, power(qc.i, wires))
    lifted = tensor(qc.i, gate)
    return numpy.matmul(outerSwap, numpy.matmul(lifted, outerSwap))
def ccNot():
    '''Returns the three-qbit ccNOT (i.e., Toffoli) gate. The gate is
    implemented using five specific two-qbit gates and some SWAPs.'''
    cv = distant(qc.cV)
    cz = tensor(qc.i, qc.cZ)
    cu = tensor(qc.cU, qc.i)
    return numpy.matmul(cu, numpy.matmul(cz, numpy.matmul(cv, numpy.matmul(cz, cv))))
def groverR3():
    '''Assumes that n = 3. Returns -R, where R is Grover's n-qbit gate for
    reflection across |rho>. Builds the gate from one- and two-qbit gates,
    rather than manually constructing the matrix.'''
    hadamardOnLast = tensor(power(qc.i, 2), qc.h)
    return numpy.matmul(hadamardOnLast, numpy.matmul(ccNot(), hadamardOnLast))
### DEFINING SOME TESTS ###
def applicationTest():
    '''Smoke test for application(): checks H|-> = |1> and that applying SWAP
    twice is the identity. Detects type errors but not much else.'''
    result = application(qc.h, qc.ketMinus)
    firstOk = qu.equal(result, qc.ket1, 0.000001)
    if firstOk:
        print("passed applicationTest first part")
    else:
        print("FAILED applicationTest first part")
        print("    H |-> = " + str(result))
    state = qu.uniform(2)
    result = application(qc.swap, application(qc.swap, state))
    secondOk = qu.equal(result, state, 0.000001)
    if secondOk:
        print("passed applicationTest second part")
    else:
        print("FAILED applicationTest second part")
        print("    |psi> = " + str(state))
        print("    answer = " + str(result))
def tensorTest():
    '''Checks that (U tensor V)(|chi> tensor |omega>) equals the tensor of the
    individually transformed states, for U = X, V = H and random states.'''
    gateU = qc.x
    gateV = qc.h
    ketChi = qu.uniform(1)
    ketOmega = qu.uniform(1)
    # compute (U tensor V) (|chi> tensor |omega>) in two ways and compare
    a = tensor(application(gateU, ketChi), application(gateV, ketOmega))
    b = application(tensor(gateU, gateV), tensor(ketChi, ketOmega))
    if qu.equal(a, b, 0.000001):
        print("passed tensorTest")
    else:
        print("FAILED tensorTest")
        print("    a = " + str(a))
        print("    b = " + str(b))
def powerTest():
    '''Unofficial test: just prints the third tensor power of the identity.'''
    cube = power(qc.i, 3)
    print(cube)
def functionTest(n, m):
    '''Randomized test of function(): builds a random f : {0, 1}^n -> {0, 1}^m
    from a look-up table, constructs the gate F, and checks
    F (|alpha> tensor |beta>) = |alpha> tensor |beta + f(alpha)>
    for 2^n - 1 somewhat random input pairs.'''
    # 2^n times, randomly pick an m-bit string.
    values = [qb.string(m, random.randrange(0, 2**m)) for k in range(2**n)]
    # Define f by using those values as a look-up table.
    def f(alpha):
        a = qb.integer(alpha)
        return values[a]
    # Build the corresponding gate F.
    ff = function(n, m, f)
    # Helper functions --- necessary because of poor planning.
    def g(gamma):
        if gamma == 0:
            return qc.ket0
        else:
            return qc.ket1
    def ketFromBitString(alpha):
        ket = g(alpha[0])
        for gamma in alpha[1:]:
            ket = tensor(ket, g(gamma))
        return ket
    # Check 2^n - 1 values somewhat randomly.
    alphaStart = qb.string(n, random.randrange(0, 2**n))
    alpha = qb.next(alphaStart)
    while alpha != alphaStart:
        # Pick a single random beta to test against this alpha.
        beta = qb.string(m, random.randrange(0, 2**m))
        # Compute |alpha> tensor |beta + f(alpha)>.
        ketCorrect = ketFromBitString(alpha + qb.addition(beta, f(alpha)))
        # Compute F * (|alpha> tensor |beta>).
        ketAlpha = ketFromBitString(alpha)
        ketBeta = ketFromBitString(beta)
        ketAlleged = application(ff, tensor(ketAlpha, ketBeta))
        # Compare.
        if not qu.equal(ketCorrect, ketAlleged, 0.000001):
            print("failed functionTest")
            print("    alpha = " + str(alpha))
            print("    beta = " + str(beta))
            print("    ketCorrect = " + str(ketCorrect))
            print("    ketAlleged = " + str(ketAlleged))
            # FIX: the apostrophe was mojibake ("here’s") in the output
            print("    and here's F...")
            print(ff)
            return
        else:
            alpha = qb.next(alpha)
    print("passed functionTest")
def fourierRecursiveTest(n):
    '''Checks that fourierRecursive(n) and the direct gate qa.fourier(n) act
    identically on the uniform n-qbit state.'''
    ket = qu.uniform(n)
    fGate = qa.fourier(n)
    recursiveFGate = fourierRecursive(n)
    viaDefinition = application(fGate, ket)
    viaRecursion = application(recursiveFGate, ket)
    if qu.equal(viaDefinition, viaRecursion, 0.0001):
        print(f"Passed fourierRecursiveTest for {n = }")
    else:
        print(f"Failed fourierRecursiveTest \n {fGate = } \n {recursiveFGate = }")
### RUNNING THE TESTS ###
def main():
    """Entry point: prints -R for Grover's 3-qbit reflection gate."""
    # the f-string's `=` specifier echoes the expression text with its value
    print(f'{groverR3() = }')
if __name__ == "__main__":
    main()
| [
"qBitStrings.string",
"random.randrange",
"qBitStrings.integer",
"qUtilities.quantumFromClassic",
"math.sqrt",
"math.log2",
"numpy.array",
"numpy.dot",
"numpy.matmul",
"numpy.concatenate",
"qAlgorithms.fourier",
"qUtilities.uniform",
"qUtilities.equal",
"qBitStrings.next"
] | [((1667, 1687), 'numpy.dot', 'numpy.dot', (['u', 'ketPsi'], {}), '(u, ketPsi)\n', (1676, 1687), False, 'import numpy\n'), ((3924, 3959), 'numpy.array', 'numpy.array', (['[[1, 0], [0, wkplus1]]'], {}), '([[1, 0], [0, wkplus1]])\n', (3935, 3959), False, 'import numpy\n'), ((5404, 5436), 'qUtilities.equal', 'qu.equal', (['answer', 'qc.ket1', '(1e-06)'], {}), '(answer, qc.ket1, 1e-06)\n', (5412, 5436), True, 'import qUtilities as qu\n'), ((5610, 5623), 'qUtilities.uniform', 'qu.uniform', (['(2)'], {}), '(2)\n', (5620, 5623), True, 'import qUtilities as qu\n'), ((5695, 5726), 'qUtilities.equal', 'qu.equal', (['answer', 'ketPsi', '(1e-06)'], {}), '(answer, ketPsi, 1e-06)\n', (5703, 5726), True, 'import qUtilities as qu\n'), ((6029, 6042), 'qUtilities.uniform', 'qu.uniform', (['(1)'], {}), '(1)\n', (6039, 6042), True, 'import qUtilities as qu\n'), ((6058, 6071), 'qUtilities.uniform', 'qu.uniform', (['(1)'], {}), '(1)\n', (6068, 6071), True, 'import qUtilities as qu\n'), ((6282, 6303), 'qUtilities.equal', 'qu.equal', (['a', 'b', '(1e-06)'], {}), '(a, b, 1e-06)\n', (6290, 6303), True, 'import qUtilities as qu\n'), ((7332, 7351), 'qBitStrings.next', 'qb.next', (['alphaStart'], {}), '(alphaStart)\n', (7339, 7351), True, 'import qBitStrings as qb\n'), ((8340, 8353), 'qUtilities.uniform', 'qu.uniform', (['n'], {}), '(n)\n', (8350, 8353), True, 'import qUtilities as qu\n'), ((8367, 8380), 'qAlgorithms.fourier', 'qa.fourier', (['n'], {}), '(n)\n', (8377, 8380), True, 'import qAlgorithms as qa\n'), ((8547, 8608), 'qUtilities.equal', 'qu.equal', (['standardFourierState', 'fourierRecursiveState', '(0.0001)'], {}), '(standardFourierState, fourierRecursiveState, 0.0001)\n', (8555, 8608), True, 'import qUtilities as qu\n'), ((3724, 3773), 'numpy.concatenate', 'numpy.concatenate', (['(i_columns, d_columns)'], {'axis': '(1)'}), '((i_columns, d_columns), axis=1)\n', (3741, 3773), False, 'import numpy\n'), ((3855, 3906), 'numpy.array', 'numpy.array', (['(0 + 2 * numpy.pi / 2 ** (n + 1) 
* 1.0j)'], {}), '(0 + 2 * numpy.pi / 2 ** (n + 1) * 1.0j)\n', (3866, 3906), False, 'import numpy\n'), ((4448, 4484), 'numpy.matmul', 'numpy.matmul', (['gate_chunk', 'swap_chunk'], {}), '(gate_chunk, swap_chunk)\n', (4460, 4484), False, 'import numpy\n'), ((6787, 6804), 'qBitStrings.integer', 'qb.integer', (['alpha'], {}), '(alpha)\n', (6797, 6804), True, 'import qBitStrings as qb\n'), ((7293, 7320), 'random.randrange', 'random.randrange', (['(0)', '(2 ** n)'], {}), '(0, 2 ** n)\n', (7309, 7320), False, 'import random\n'), ((983, 998), 'qBitStrings.string', 'qb.string', (['n', 'a'], {}), '(n, a)\n', (992, 998), True, 'import qBitStrings as qb\n'), ((1018, 1033), 'qBitStrings.string', 'qb.string', (['m', 'b'], {}), '(m, b)\n', (1027, 1033), True, 'import qBitStrings as qb\n'), ((1059, 1087), 'qUtilities.quantumFromClassic', 'qu.quantumFromClassic', (['alpha'], {}), '(alpha)\n', (1080, 1087), True, 'import qUtilities as qu\n'), ((2075, 2091), 'numpy.array', 'numpy.array', (['[a]'], {}), '([a])\n', (2086, 2091), False, 'import numpy\n'), ((2106, 2122), 'numpy.array', 'numpy.array', (['[b]'], {}), '([b])\n', (2117, 2122), False, 'import numpy\n'), ((2308, 2359), 'numpy.concatenate', 'numpy.concatenate', (['(row_chunk, a[r, c] * b)'], {'axis': '(1)'}), '((row_chunk, a[r, c] * b), axis=1)\n', (2325, 2359), False, 'import numpy\n'), ((2414, 2452), 'numpy.concatenate', 'numpy.concatenate', (['(result, row_chunk)'], {}), '((result, row_chunk))\n', (2431, 2452), False, 'import numpy\n'), ((3708, 3720), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (3717, 3720), False, 'import math\n'), ((4293, 4317), 'math.log2', 'math.log2', (['gate.shape[1]'], {}), '(gate.shape[1])\n', (4302, 4317), False, 'import math\n'), ((6651, 6678), 'random.randrange', 'random.randrange', (['(0)', '(2 ** m)'], {}), '(0, 2 ** m)\n', (6667, 6678), False, 'import random\n'), ((7475, 7502), 'random.randrange', 'random.randrange', (['(0)', '(2 ** m)'], {}), '(0, 2 ** m)\n', (7491, 7502), False, 'import 
random\n'), ((7858, 7897), 'qUtilities.equal', 'qu.equal', (['ketCorrect', 'ketAlleged', '(1e-06)'], {}), '(ketCorrect, ketAlleged, 1e-06)\n', (7866, 7897), True, 'import qUtilities as qu\n'), ((8250, 8264), 'qBitStrings.next', 'qb.next', (['alpha'], {}), '(alpha)\n', (8257, 8264), True, 'import qBitStrings as qb\n'), ((1238, 1259), 'numpy.array', 'numpy.array', (['[result]'], {}), '([result])\n', (1249, 1259), False, 'import numpy\n'), ((1460, 1498), 'numpy.concatenate', 'numpy.concatenate', (['(F, column)'], {'axis': '(1)'}), '((F, column), axis=1)\n', (1477, 1498), False, 'import numpy\n'), ((4835, 4865), 'numpy.matmul', 'numpy.matmul', (['z_chunk', 'v_chunk'], {}), '(z_chunk, v_chunk)\n', (4847, 4865), False, 'import numpy\n')] |
import numpy as np
#this is not an efficient implementation. just for testing!
def dual_contouring_47_test(int_grid, float_grid):
    """Extract a triangle mesh from a dual-contouring grid (reference implementation).

    Args:
        int_grid: integer occupancy grid; squeezed to shape (dimx, dimy, dimz).
            Values are compared against 0 to decide triangle orientation.
        float_grid: per-cell vertex offsets; ``float_grid[i,j,k]`` is added to
            the integer cell coordinates to position the cell's mesh vertex
            (assumes last axis has length 3 -- TODO confirm against caller).

    Returns:
        (all_vertices, all_triangles): float32 array of vertex positions and
        int32 array of vertex-index triples.
    """
    all_vertices = []
    all_triangles = []
    int_grid = np.squeeze(int_grid)
    dimx,dimy,dimz = int_grid.shape
    # vertices_grid[i,j,k] is the index of the mesh vertex created for cell
    # (i,j,k), or -1 if that cell produced no vertex
    vertices_grid = np.full([dimx,dimy,dimz], -1, np.int32)
    #all vertices
    for i in range(0,dimx-1):
        for j in range(0,dimy-1):
            for k in range(0,dimz-1):
                # the 8 occupancy samples at the corners of this cell
                v0 = int_grid[i,j,k]
                v1 = int_grid[i+1,j,k]
                v2 = int_grid[i+1,j+1,k]
                v3 = int_grid[i,j+1,k]
                v4 = int_grid[i,j,k+1]
                v5 = int_grid[i+1,j,k+1]
                v6 = int_grid[i+1,j+1,k+1]
                v7 = int_grid[i,j+1,k+1]
                # any disagreement among the corners means the surface passes
                # through this cell, so it contributes one mesh vertex
                if v1!=v0 or v2!=v0 or v3!=v0 or v4!=v0 or v5!=v0 or v6!=v0 or v7!=v0:
                    #add a vertex
                    vertices_grid[i,j,k] = len(all_vertices)
                    pos = float_grid[i,j,k]+np.array([i,j,k], np.float32)
                    all_vertices.append(pos)
    all_vertices = np.array(all_vertices, np.float32)
    #all triangles
    # For each grid edge whose two endpoint samples differ, emit a quad (two
    # triangles) connecting the vertices of the 4 cells sharing that edge.
    # The two winding orders below keep the triangles consistently oriented
    # depending on which side of the edge is "inside" (v0==0 vs not).
    #i-direction
    for i in range(0,dimx-1):
        for j in range(1,dimy-1):
            for k in range(1,dimz-1):
                v0 = int_grid[i,j,k]
                v1 = int_grid[i+1,j,k]
                if v0!=v1:
                    if v0==0:
                        all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])
                        all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])
                    else:
                        all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])
                        all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])
    #j-direction
    for i in range(1,dimx-1):
        for j in range(0,dimy-1):
            for k in range(1,dimz-1):
                v0 = int_grid[i,j,k]
                v1 = int_grid[i,j+1,k]
                if v0!=v1:
                    if v0==0:
                        all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])
                        all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])
                    else:
                        all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])
                        all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])
    #k-direction
    for i in range(1,dimx-1):
        for j in range(1,dimy-1):
            for k in range(0,dimz-1):
                v0 = int_grid[i,j,k]
                v1 = int_grid[i,j,k+1]
                if v0!=v1:
                    if v0==0:
                        all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])
                        all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])
                    else:
                        all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])
                        all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])
    all_triangles = np.array(all_triangles, np.int32)
    return all_vertices, all_triangles
def write_obj_triangle(name, vertices, triangles):
    """Write a triangle mesh to a Wavefront OBJ file.

    Args:
        name: output file path.
        vertices: (N, 3) array-like of vertex positions.
        triangles: (M, 3) array-like of 0-based vertex indices.
    """
    # Context manager guarantees the file is closed even if a write raises
    with open(name, 'w') as fout:
        for ii in range(len(vertices)):
            fout.write("v "+str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
        for ii in range(len(triangles)):
            # OBJ face indices are 1-based
            fout.write("f "+str(int(triangles[ii,0]+1))+" "+str(int(triangles[ii,1]+1))+" "+str(int(triangles[ii,2]+1))+"\n")
def write_ply_triangle(name, vertices, triangles):
    """Write a triangle mesh to an ASCII PLY file.

    Args:
        name: output file path.
        vertices: (N, 3) array-like of vertex positions.
        triangles: (M, 3) array-like of 0-based vertex indices.
    """
    # Context manager guarantees the file is closed even if a write raises
    with open(name, 'w') as fout:
        fout.write("ply\n")
        fout.write("format ascii 1.0\n")
        fout.write("element vertex "+str(len(vertices))+"\n")
        fout.write("property float x\n")
        fout.write("property float y\n")
        fout.write("property float z\n")
        fout.write("element face "+str(len(triangles))+"\n")
        fout.write("property list uchar int vertex_index\n")
        fout.write("end_header\n")
        for ii in range(len(vertices)):
            fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
        for ii in range(len(triangles)):
            fout.write("3 "+str(triangles[ii,0])+" "+str(triangles[ii,1])+" "+str(triangles[ii,2])+"\n")
def write_ply_point(name, vertices):
    """Write a point cloud to an ASCII PLY file.

    Args:
        name: output file path.
        vertices: (N, 3) array-like of point positions.
    """
    # Context manager guarantees the file is closed even if a write raises
    with open(name, 'w') as fout:
        fout.write("ply\n")
        fout.write("format ascii 1.0\n")
        fout.write("element vertex "+str(len(vertices))+"\n")
        fout.write("property float x\n")
        fout.write("property float y\n")
        fout.write("property float z\n")
        fout.write("end_header\n")
        for ii in range(len(vertices)):
            fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
def write_ply_point_normal(name, vertices, normals=None):
    """Write an oriented point cloud (positions + normals) to an ASCII PLY file.

    Args:
        name: output file path.
        vertices: if ``normals`` is None, an (N, 6) array-like whose columns
            are x, y, z, nx, ny, nz; otherwise an (N, 3) array-like of
            positions.
        normals (None): optional (N, 3) array-like of per-point normals.
    """
    # Context manager guarantees the file is closed even if a write raises
    with open(name, 'w') as fout:
        fout.write("ply\n")
        fout.write("format ascii 1.0\n")
        fout.write("element vertex "+str(len(vertices))+"\n")
        fout.write("property float x\n")
        fout.write("property float y\n")
        fout.write("property float z\n")
        fout.write("property float nx\n")
        fout.write("property float ny\n")
        fout.write("property float nz\n")
        fout.write("end_header\n")
        if normals is None:
            # normals are packed into columns 3:6 of `vertices`
            for ii in range(len(vertices)):
                fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(vertices[ii,3])+" "+str(vertices[ii,4])+" "+str(vertices[ii,5])+"\n")
        else:
            for ii in range(len(vertices)):
                fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(normals[ii,0])+" "+str(normals[ii,1])+" "+str(normals[ii,2])+"\n")
def read_intersectionpn_file_as_2d_array(name):
    """Read an ``#intersectionpn`` file into a list of (num_i, 6) float32 arrays.

    File layout: a ``#intersectionpn`` marker line, a dims line, a line of
    per-group point counts, one separator line, then a raw float32 payload of
    shape ``(sum(point_nums), 6)`` (presumably point + normal per row --
    TODO confirm against the writer).

    Args:
        name: path to the input file.

    Returns:
        a list with one contiguous ``(point_nums[i], 6)`` float32 array per
        group.

    Raises:
        IOError: if the file does not start with the ``#intersectionpn`` marker.
    """
    # Context manager guarantees the file is closed even on parse errors
    with open(name, 'rb') as fp:
        line = fp.readline().strip()
        if not line.startswith(b'#intersectionpn'):
            raise IOError('Not an intersectionpn file')
        # dims line is parsed (so malformed files fail loudly) but unused
        dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
        point_nums = np.array(
            list(map(int, fp.readline().strip().split(b' '))), np.int32
        )
        fp.readline()  # skip the separator line before the raw payload
        data = np.frombuffer(fp.read(), dtype=np.float32)
    data = data.reshape([np.sum(point_nums), 6])
    # Split the flat payload into per-group chunks; ascontiguousarray copies
    # each slice so every chunk owns its own (writable) memory
    separated = []
    count = 0
    for num in point_nums:
        separated.append(np.ascontiguousarray(data[count:count + num]))
        count += num
    return separated
def read_sdf_file_as_3d_array(name):
    """Read a binary ``.sdf`` file into a 3D float32 numpy array.

    File layout: a ``#sdf`` marker line, a dims line (``<tag> dx dy dz``),
    one separator line, then the raw float32 payload.

    Args:
        name: path to the input file.

    Returns:
        a float32 numpy array reshaped to the dims declared in the header.

    Raises:
        IOError: if the file does not start with the ``#sdf`` marker.
    """
    # Context manager guarantees the file is closed even on parse errors
    with open(name, 'rb') as fp:
        line = fp.readline().strip()
        if not line.startswith(b'#sdf'):
            raise IOError('Not a sdf file')
        # first token of the dims line is a tag; the rest are the dimensions
        dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
        fp.readline()  # skip the separator line before the raw payload
        data = np.frombuffer(fp.read(), dtype=np.float32)
    return data.reshape(dims)
| [
"numpy.ascontiguousarray",
"numpy.squeeze",
"numpy.array",
"numpy.sum",
"numpy.full"
] | [((193, 213), 'numpy.squeeze', 'np.squeeze', (['int_grid'], {}), '(int_grid)\n', (203, 213), True, 'import numpy as np\n'), ((270, 311), 'numpy.full', 'np.full', (['[dimx, dimy, dimz]', '(-1)', 'np.int32'], {}), '([dimx, dimy, dimz], -1, np.int32)\n', (277, 311), True, 'import numpy as np\n'), ((1102, 1136), 'numpy.array', 'np.array', (['all_vertices', 'np.float32'], {}), '(all_vertices, np.float32)\n', (1110, 1136), True, 'import numpy as np\n'), ((3420, 3453), 'numpy.array', 'np.array', (['all_triangles', 'np.int32'], {}), '(all_triangles, np.int32)\n', (3428, 3453), True, 'import numpy as np\n'), ((6487, 6505), 'numpy.sum', 'np.sum', (['point_nums'], {}), '(point_nums)\n', (6493, 6505), True, 'import numpy as np\n'), ((6620, 6675), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[count:count + point_nums[i]]'], {}), '(data[count:count + point_nums[i]])\n', (6640, 6675), True, 'import numpy as np\n'), ((1007, 1038), 'numpy.array', 'np.array', (['[i, j, k]', 'np.float32'], {}), '([i, j, k], np.float32)\n', (1015, 1038), True, 'import numpy as np\n')] |
"""
Aggregations.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import numpy as np
import eta.core.utils as etau
from fiftyone.core.expressions import ViewField as F
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
class Aggregation(object):
    """Abstract base class for all aggregations.

    An :class:`Aggregation` describes a reduction over the contents of a
    :class:`fiftyone.core.collections.SampleCollection` instance.

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def __init__(self, field_name, expr=None):
        self._field_name = field_name
        self._expr = expr

    @property
    def field_name(self):
        """The name of the field being aggregated."""
        return self._field_name

    @property
    def expr(self):
        """The optional expression that is applied to the field before
        aggregating, or ``None``.
        """
        return self._expr

    def to_mongo(self, sample_collection):
        """Returns the MongoDB aggregation pipeline for this aggregation.

        Args:
            sample_collection: the
                :class:`fiftyone.core.collections.SampleCollection` to which
                the aggregation is being applied

        Returns:
            a MongoDB aggregation pipeline (list of dicts)
        """
        raise NotImplementedError("subclasses must implement to_mongo()")

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the aggregation result
        """
        raise NotImplementedError("subclasses must implement parse_result()")

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            the aggregation result
        """
        raise NotImplementedError("subclasses must implement default_result()")

    def _parse_field_and_expr(
        self, sample_collection, auto_unwind=True, allow_missing=False
    ):
        # Delegates to the module-level helper, which resolves the field path
        # and builds any prefix pipeline stages implied by `expr`
        return _parse_field_and_expr(
            sample_collection,
            self._field_name,
            self._expr,
            auto_unwind,
            allow_missing,
        )
class AggregationError(Exception):
    """Exception raised when an :class:`Aggregation` fails to execute."""

    pass
class Bounds(Aggregation):
    """Computes the ``(min, max)`` bounds of a numeric field of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Bounds("numeric_field")
        min_val, max_val = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``(None, None)``
        """
        return None, None

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the ``(min, max)`` bounds
        """
        return d["min"], d["max"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        field = "$" + path
        # A single $group over the entire collection computes both extrema
        pipeline.append(
            {
                "$group": {
                    "_id": None,
                    "min": {"$min": field},
                    "max": {"$max": field},
                }
            }
        )
        return pipeline
class Count(Aggregation):
    """Counts the number of field values in a collection.

    ``None``-valued fields are ignored.

    If no field is provided, the samples themselves are counted.

    Example::

        aggregation = fo.Count("predictions.detections")
        count = dataset.aggregate(aggregation)

    Args:
        field_name (None): the name of the field to operate on. If none is
            provided, the samples themselves are counted
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def __init__(self, field_name=None, expr=None):
        super().__init__(field_name, expr=expr)

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the count
        """
        return d["count"]

    def to_mongo(self, sample_collection):
        # No field: count the documents themselves
        if self._field_name is None:
            return [{"$count": "count"}]

        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Exclude null/missing values, except when counting the frames of a
        # video collection
        counting_frames = (
            sample_collection.media_type == fom.VIDEO and path == "frames"
        )
        if not counting_frames:
            pipeline.append({"$match": {"$expr": {"$gt": ["$" + path, None]}}})

        pipeline.append({"$count": "count"})

        return pipeline
class CountValues(Aggregation):
    """Counts the occurrences of field values in a collection.

    This aggregation is typically applied to *countable* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.BooleanField`
    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.StringField`

    Example::

        aggregation = fo.CountValues("predictions.detections.label")
        counts = dataset.aggregate(aggregation)  # dict mapping value -> count

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``{}``
        """
        return {}

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a dict mapping values to counts
        """
        return {item["k"]: item["count"] for item in d["result"]}

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        # First $group tallies each distinct value; second $group collects the
        # (value, count) pairs into a single result document
        pipeline.append(
            {"$group": {"_id": "$" + path, "count": {"$sum": 1}}}
        )
        pipeline.append(
            {
                "$group": {
                    "_id": None,
                    "result": {"$push": {"k": "$_id", "count": "$count"}},
                }
            }
        )
        return pipeline
class Distinct(Aggregation):
    """Computes the distinct values of a field in a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *countable* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.BooleanField`
    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.StringField`

    Example::

        aggregation = fo.Distinct("tags")
        values = dataset.aggregate(aggregation)  # sorted list of values

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``[]``
        """
        return []

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a sorted list of distinct values
        """
        return sorted(d["values"])

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        field = "$" + path
        # Drop null/missing values, then accumulate the set of distinct values
        pipeline.append({"$match": {"$expr": {"$gt": [field, None]}}})
        pipeline.append(
            {"$group": {"_id": None, "values": {"$addToSet": field}}}
        )
        return pipeline
class HistogramValues(Aggregation):
    """Computes a histogram of the field values in a collection.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.HistogramValues("numeric_field", bins=50)
        counts, edges, other = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
        bins (None): can be either an integer number of bins to generate or a
            monotonically increasing sequence specifying the bin edges to use.
            By default, 10 bins are created. If ``bins`` is an integer and no
            ``range`` is specified, bin edges are automatically computed from
            the bounds of the field
        range (None): a ``(lower, upper)`` tuple specifying a range in which to
            generate equal-width bins. Only applicable when ``bins`` is an
            integer or ``None``
        auto (False): whether to automatically choose bin edges in an attempt
            to evenly distribute the counts in each bin. If this option is
            chosen, ``bins`` will only be used if it is an integer, and the
            ``range`` parameter is ignored
    """

    def __init__(
        self, field_name, expr=None, bins=None, range=None, auto=False
    ):
        super().__init__(field_name, expr=expr)
        self._bins = bins
        self._range = range
        self._auto = auto

        self._num_bins = None
        self._edges = None
        self._edges_last_used = None
        self._parse_args()

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            a ``(counts, edges, other)`` tuple of ``([], [], 0)``
        """
        return [], [], 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a ``(counts, edges, other)`` tuple, where ``counts`` are the
            per-bin counts, ``edges`` is an increasing list of bin edges of
            length ``len(counts) + 1`` (each bin is ``[lower, upper)``,
            including the rightmost bin), and ``other`` is the number of
            items that fell outside the bins
        """
        if self._auto:
            return self._parse_result_auto(d)

        return self._parse_result_edges(d)

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        field = "$" + path

        if self._auto:
            # Let MongoDB pick bucket boundaries that balance the counts
            stage = {
                "$bucketAuto": {
                    "groupBy": field,
                    "buckets": self._num_bins,
                    "output": {"count": {"$sum": 1}},
                }
            }
        else:
            if self._edges is not None:
                edges = self._edges
            else:
                edges = self._compute_bin_edges(sample_collection)

            # Remember the edges used so parse_result() can map buckets back
            self._edges_last_used = edges
            stage = {
                "$bucket": {
                    "groupBy": field,
                    "boundaries": edges,
                    "default": "other",  # counts documents outside of bins
                    "output": {"count": {"$sum": 1}},
                }
            }

        pipeline.append(stage)
        pipeline.append(
            {"$group": {"_id": None, "bins": {"$push": "$$ROOT"}}}
        )
        return pipeline

    def _parse_args(self):
        # Resolve `bins`/`range`/`auto` into either explicit edges or a count
        bins = 10 if self._bins is None else self._bins

        if self._auto:
            # Auto mode only honors an integer bin count
            self._num_bins = bins if etau.is_numeric(bins) else 10
            return

        if not etau.is_numeric(bins):
            # Caller supplied the bin edges directly
            self._edges = list(bins)
            return

        if self._range is not None:
            # Equal-width bins spanning the requested range
            self._edges = list(
                np.linspace(self._range[0], self._range[1], bins + 1)
            )
        else:
            # Edges will be computed lazily from the field's bounds
            self._num_bins = bins

    def _compute_bin_edges(self, sample_collection):
        bounds = sample_collection.bounds(self._field_name, expr=self._expr)
        if any(b is None for b in bounds):
            bounds = (-1, -1)

        # Widen the upper edge slightly so the max value lands in the last bin
        return list(
            np.linspace(bounds[0], bounds[1] + 1e-6, self._num_bins + 1)
        )

    def _parse_result_edges(self, d):
        edges_array = np.array(self._edges_last_used)
        edges = list(edges_array)
        counts = [0] * (len(edges) - 1)
        other = 0
        for bucket in d["bins"]:
            lower = bucket["_id"]
            if lower == "other":
                other = bucket["count"]
            else:
                # Match the bucket's lower boundary to the nearest known edge
                idx = np.abs(edges_array - lower).argmin()
                counts[idx] = bucket["count"]

        return counts, edges, other

    def _parse_result_auto(self, d):
        counts = []
        edges = []
        for bucket in d["bins"]:
            counts.append(bucket["count"])
            edges.append(bucket["_id"]["min"])

        # Close the histogram with the upper bound of the final bucket
        edges.append(bucket["_id"]["max"])

        return counts, edges, 0
class Mean(Aggregation):
    """Computes the arithmetic mean of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Mean("numeric_field")
        mean = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the mean
        """
        return d["mean"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        # $avg skips null/missing values automatically
        pipeline.append(
            {"$group": {"_id": None, "mean": {"$avg": "$" + path}}}
        )
        return pipeline
class Std(Aggregation):
    """Computes the standard deviation of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Std("numeric_field")
        std = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
        sample (False): whether to compute the sample standard deviation rather
            than the population standard deviation
    """

    def __init__(self, field_name, expr=None, sample=False):
        super().__init__(field_name, expr=expr)
        self._sample = sample

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the standard deviation
        """
        return d["std"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        # Sample vs population standard deviation is a MongoDB operator choice
        if self._sample:
            op = "$stdDevSamp"
        else:
            op = "$stdDevPop"

        pipeline.append({"$group": {"_id": None, "std": {op: "$" + path}}})
        return pipeline
class Sum(Aggregation):
    """Computes the sum of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Sum("numeric_field")
        total = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
            aggregation expression to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the sum
        """
        return d["sum"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)
        # $sum skips null/missing values automatically
        pipeline.append(
            {"$group": {"_id": None, "sum": {"$sum": "$" + path}}}
        )
        return pipeline
class Values(Aggregation):
"""Extracts the values of the field from all samples in a collection.
.. note::
Unlike other aggregations, :class:`Values` does not automatically
unwind list fields, which ensures that the returned values match the
potentially-nested structure of the documents.
You can opt-in to unwinding specific list fields using the ``[]``
syntax, or you can pass the optional ``unwind=True`` parameter to
unwind all supported list fields. See :ref:`aggregations-list-fields`
for more information.
Examples::
import fiftyone as fo
from fiftyone import ViewField as F
dataset = fo.Dataset()
dataset.add_samples(
[
fo.Sample(
filepath="/path/to/image1.png",
numeric_field=1.0,
numeric_list_field=[1, 2, 3],
),
fo.Sample(
filepath="/path/to/image2.png",
numeric_field=4.0,
numeric_list_field=[1, 2],
),
fo.Sample(
filepath="/path/to/image3.png",
numeric_field=None,
numeric_list_field=None,
),
]
)
#
# Get all values of a field
#
aggregation = fo.Values("numeric_field")
values = dataset.aggregate(aggregation)
print(values) # [1.0, 4.0, None]
#
# Get all values of a list field
#
aggregation = fo.Values("numeric_list_field")
values = dataset.aggregate(aggregation)
print(values) # [[1, 2, 3], [1, 2], None]
#
# Get all values of transformed field
#
aggregation = fo.Values("numeric_field", expr=2 * (F() + 1))
values = dataset.aggregate(aggregation)
print(values) # [4.0, 10.0, None]
Args:
field_name: the name of the field to operate on
expr (None): an optional
:class:`fiftyone.core.expressions.ViewExpression` or
`MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
to apply to the field before aggregating
missing_value (None): a value to insert for missing or ``None``-valued
fields
unwind (False): whether to automatically unwind all recognized list
fields
"""
def __init__(
self,
field_name,
expr=None,
missing_value=None,
unwind=False,
_allow_missing=False,
):
field_name, found_id_field = _handle_id_fields(field_name)
super().__init__(field_name, expr=expr)
self._missing_value = missing_value
self._unwind = unwind
self._allow_missing = _allow_missing
self._found_id_field = found_id_field
self._found_array_field = None
self._num_list_fields = None
def default_result(self):
"""Returns the default result for this aggregation.
Returns:
``[]``
"""
return []
def parse_result(self, d):
"""Parses the output of :meth:`to_mongo`.
Args:
d: the result dict
Returns:
the list of field values
"""
values = d["values"]
if self._found_id_field:
level = 1 + self._num_list_fields
return _transform_values(values, str, level=level)
if self._found_array_field:
fcn = fou.deserialize_numpy_array
level = 1 + self._num_list_fields
return _transform_values(values, fcn, level=level)
return values
    def to_mongo(self, sample_collection):
        """Returns the MongoDB aggregation pipeline for this aggregation.

        Args:
            sample_collection: the sample collection on which the aggregation
                will be run

        Returns:
            the list of pipeline stages
        """
        path, pipeline, other_list_fields = self._parse_field_and_expr(
            sample_collection,
            auto_unwind=self._unwind,
            allow_missing=self._allow_missing,
        )
        # Record field metadata that parse_result() needs to post-process the
        # raw aggregation output
        self._found_array_field = sample_collection._is_array_field(path)
        self._num_list_fields = len(other_list_fields)
        pipeline += _make_extract_values_pipeline(
            path, other_list_fields, self._missing_value
        )
        return pipeline
def _handle_id_fields(field_name):
if field_name == "id":
field_name = "_id"
found_id_field = True
elif field_name.endswith(".id"):
field_name = field_name[: -len(".id")] + "._id"
found_id_field = True
else:
found_id_field = False
return field_name, found_id_field
def _transform_values(values, fcn, level=1):
if values is None:
return None
if level < 1:
return fcn(values)
return [_transform_values(v, fcn, level=level - 1) for v in values]
def _make_extract_values_pipeline(path, list_fields, missing_value):
    """Builds the pipeline stages that gather the values of ``path`` into a
    single ``values`` array, mapping over any intermediate (un-unwound) list
    fields and substituting ``missing_value`` for missing/None entries.
    """
    if not list_fields:
        root = path
    else:
        root = list_fields[0]
    # Replace missing/None leaf values with `missing_value`
    expr = (F() != None).if_else(F(), missing_value)
    if list_fields:
        # Map the leaf expression over the innermost list field
        subfield = path[len(list_fields[-1]) + 1 :]
        expr = _extract_list_values(subfield, expr)
    if len(list_fields) > 1:
        # Wrap the expression once per enclosing list field, innermost-first,
        # so each map operates relative to its parent list
        for list_field1, list_field2 in zip(
            reversed(list_fields[:-1]), reversed(list_fields[1:])
        ):
            inner_list_field = list_field2[len(list_field1) + 1 :]
            expr = _extract_list_values(inner_list_field, expr)
    return [
        {"$set": {root: expr.to_mongo(prefix="$" + root)}},
        {"$group": {"_id": None, "values": {"$push": "$" + root}}},
    ]
def _extract_list_values(subfield, expr):
    """Returns an expression that maps ``expr`` over each element of a list,
    optionally applying it to the ``subfield`` of each element first.
    """
    map_expr = F(subfield).apply(expr) if subfield else expr
    return F().map(map_expr)
def _parse_field_and_expr(
    sample_collection, field_name, expr, auto_unwind, allow_missing
):
    """Resolves ``field_name`` (optionally transformed by ``expr``) into a
    database path plus the pipeline stages required to reach it.

    Returns a ``(path, pipeline, other_list_fields)`` tuple, where
    ``other_list_fields`` are list fields that were deliberately *not*
    unwound.
    """
    if expr is not None:
        # Apply the transformation in-database before aggregating
        pipeline, _ = sample_collection._make_set_field_pipeline(
            field_name, expr
        )
    else:
        pipeline = []
    (
        path,
        is_frame_field,
        unwind_list_fields,
        other_list_fields,
    ) = sample_collection._parse_field_name(
        field_name, auto_unwind=auto_unwind, allow_missing=allow_missing
    )
    if is_frame_field and auto_unwind:
        # Promote per-frame documents to top-level so frame fields resolve
        pipeline.extend(
            [{"$unwind": "$frames"}, {"$replaceRoot": {"newRoot": "$frames"}}]
        )
    for list_field in unwind_list_fields:
        pipeline.append({"$unwind": "$" + list_field})
    if other_list_fields:
        # Don't unroll terminal lists unless explicitly requested
        other_list_fields = [
            lf for lf in other_list_fields if lf != field_name
        ]
    if other_list_fields:
        root = other_list_fields[0]
        leaf = path[len(root) + 1 :]
    else:
        root = path
        leaf = None
    # NOTE(review): `leaf` is computed but not used here -- presumably kept
    # for parity with a sibling implementation; confirm before removing
    # Restrict documents to the field of interest
    pipeline.append({"$project": {root: True}})
    return path, pipeline, other_list_fields
| [
"numpy.abs",
"eta.core.utils.is_numeric",
"fiftyone.core.expressions.ViewField",
"numpy.array",
"numpy.linspace"
] | [((22311, 22342), 'numpy.array', 'np.array', (['self._edges_last_used'], {}), '(self._edges_last_used)\n', (22319, 22342), True, 'import numpy as np\n'), ((36211, 36214), 'fiftyone.core.expressions.ViewField', 'F', ([], {}), '()\n', (36212, 36214), True, 'from fiftyone.core.expressions import ViewField as F\n'), ((21377, 21398), 'eta.core.utils.is_numeric', 'etau.is_numeric', (['bins'], {}), '(bins)\n', (21392, 21398), True, 'import eta.core.utils as etau\n'), ((21528, 21549), 'eta.core.utils.is_numeric', 'etau.is_numeric', (['bins'], {}), '(bins)\n', (21543, 21549), True, 'import eta.core.utils as etau\n'), ((22178, 22239), 'numpy.linspace', 'np.linspace', (['bounds[0]', '(bounds[1] + 1e-06)', '(self._num_bins + 1)'], {}), '(bounds[0], bounds[1] + 1e-06, self._num_bins + 1)\n', (22189, 22239), True, 'import numpy as np\n'), ((36937, 36940), 'fiftyone.core.expressions.ViewField', 'F', ([], {}), '()\n', (36938, 36940), True, 'from fiftyone.core.expressions import ViewField as F\n'), ((21780, 21833), 'numpy.linspace', 'np.linspace', (['self._range[0]', 'self._range[1]', '(bins + 1)'], {}), '(self._range[0], self._range[1], bins + 1)\n', (21791, 21833), True, 'import numpy as np\n'), ((36190, 36193), 'fiftyone.core.expressions.ViewField', 'F', ([], {}), '()\n', (36191, 36193), True, 'from fiftyone.core.expressions import ViewField as F\n'), ((36867, 36878), 'fiftyone.core.expressions.ViewField', 'F', (['subfield'], {}), '(subfield)\n', (36868, 36878), True, 'from fiftyone.core.expressions import ViewField as F\n'), ((22602, 22629), 'numpy.abs', 'np.abs', (['(_edges_array - left)'], {}), '(_edges_array - left)\n', (22608, 22629), True, 'import numpy as np\n')] |
import copy
import numpy as _np
import inspect
import warnings
from pyemma._ext import six
from pyemma._ext.sklearn.base import _pprint
from pyemma.util.statistics import confidence_interval
from pyemma.util.reflection import call_member
__author__ = 'noe'
class Model(object):
    """ Base class for pyEMMA models

    This class is inspired by sklearn's BaseEstimator class. However, we define parameter names not by the
    current class' __init__ but have to announce them. This allows us to also remember the parameters of model
    superclasses. This class can be mixed with pyEMMA and sklearn Estimators.

    """

    def _get_model_param_names(self):
        """Get parameter names for the estimator"""
        # fetch model parameters
        if hasattr(self, 'set_model_params'):
            set_model_param_method = getattr(self, 'set_model_params')
            # introspect the set_model_params arguments to find the model
            # parameters to represent.
            # COMPAT: inspect.getargspec was removed in Python 3.11; prefer
            # getfullargspec when available (both results expose the same
            # .args / .varargs attributes used below).
            argspec_fn = getattr(inspect, 'getfullargspec', inspect.getargspec)
            spec = argspec_fn(set_model_param_method)
            args, varargs = spec.args, spec.varargs
            if varargs is not None:
                raise RuntimeError("pyEMMA models should always specify their parameters in the signature"
                                   " of their set_model_params (no varargs). %s doesn't follow this convention."
                                   % (self, ))
            # Remove 'self'
            # XXX: This is going to fail if the init is a staticmethod, but
            # who would do this?
            args.pop(0)
            args.sort()
            return args
        else:
            # No parameters known
            return []

    def update_model_params(self, **params):
        """Update given model parameter if they are set to specific values"""
        # BUGFIX: the original iterated params.iteritems(), which only exists
        # on Python 2 dicts and raises AttributeError on Python 3; .items()
        # behaves identically on both versions.
        for key, value in params.items():
            if not hasattr(self, key):
                setattr(self, key, value)  # set parameter for the first time.
            elif getattr(self, key) is None:
                setattr(self, key, value)  # update because this parameter is still None.
            elif value is not None:
                setattr(self, key, value)  # only overwrite if set to a specific value (None does not overwrite).

    def get_model_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_model_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                warnings.filters.pop(0)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    # def set_model_params(self, **params):
    #     """Set the parameters of this estimator.
    #     The method works on simple estimators as well as on nested objects
    #     (such as pipelines). The former have parameters of the form
    #     ``<component>__<parameter>`` so that it's possible to update each
    #     component of a nested object.
    #     Returns
    #     -------
    #     self
    #     """
    #     if not params:
    #         # Simple optimisation to gain speed (inspect is slow)
    #         return self
    #     valid_params = self.get_model_params(deep=True)
    #     for key, value in six.iteritems(params):
    #         split = key.split('__', 1)
    #         if len(split) > 1:
    #             # nested objects case
    #             name, sub_name = split
    #             if name not in valid_params:
    #                 raise ValueError('Invalid parameter %s for estimator %s' %
    #                                  (name, self))
    #             sub_object = valid_params[name]
    #             sub_object.set_params(**{sub_name: value})
    #         else:
    #             # simple objects case
    #             if key not in valid_params:
    #                 raise ValueError('Invalid parameter %s ' 'for estimator %s'
    #                                  % (key, self.__class__.__name__))
    #             setattr(self, key, value)
    #     return self

    # FIXME: __repr__ is incompatible with Estimator __repr__. Need a general fix for a nice representation
    # def __repr__(self):
    #     class_name = self.__class__.__name__
    #     return '%s(%s)' % (class_name, _pprint(self.get_model_params(deep=False),
    #                                            offset=len(class_name),),)
class SampledModel(Model):
    """Model backed by an ensemble of sampled models, exposing ensemble
    statistics (mean, standard deviation, confidence intervals) of arbitrary
    member functions evaluated over the samples.
    """

    def __init__(self, samples, conf=0.95):
        self.set_model_params(samples=samples, conf=conf)

    # TODO: maybe rename to parametrize in order to avoid confusion with set_params that has a different behavior?
    def set_model_params(self, samples=None, conf=0.95):
        self.update_model_params(samples=samples, conf=conf)
        if samples is not None:
            self.nsamples = len(samples)

    def _check_samples_available(self):
        """Raises if no model samples have been attached yet."""
        if self.samples is None:
            raise AttributeError('Model samples not available in ' + str(self) +
                                 '. Call set_model_params with samples.')

    def sample_f(self, f, *args, **kw):
        """Evaluates member ``f`` on every sampled model and collects the results."""
        self._check_samples_available()
        results = []
        for model in self.samples:
            results.append(call_member(model, f, *args, **kw))
        return results

    def sample_mean(self, f, *args, **kw):
        """Ensemble mean of member ``f`` over the sampled models."""
        return _np.mean(self.sample_f(f, *args, **kw), axis=0)

    def sample_std(self, f, *args, **kw):
        """Ensemble standard deviation of member ``f`` over the sampled models."""
        return _np.std(self.sample_f(f, *args, **kw), axis=0)

    def sample_conf(self, f, *args, **kw):
        """Confidence interval of member ``f`` over the sampled models."""
        return confidence_interval(self.sample_f(f, *args, **kw), conf=self.conf)
| [
"numpy.mean",
"pyemma.util.statistics.confidence_interval",
"warnings.catch_warnings",
"inspect.getargspec",
"numpy.std",
"warnings.simplefilter",
"warnings.filters.pop",
"pyemma.util.reflection.call_member"
] | [((6505, 6527), 'numpy.mean', '_np.mean', (['vals'], {'axis': '(0)'}), '(vals, axis=0)\n', (6513, 6527), True, 'import numpy as _np\n'), ((6631, 6652), 'numpy.std', '_np.std', (['vals'], {'axis': '(0)'}), '(vals, axis=0)\n', (6638, 6652), True, 'import numpy as _np\n'), ((6757, 6798), 'pyemma.util.statistics.confidence_interval', 'confidence_interval', (['vals'], {'conf': 'self.conf'}), '(vals, conf=self.conf)\n', (6776, 6798), False, 'from pyemma.util.statistics import confidence_interval\n'), ((1014, 1056), 'inspect.getargspec', 'inspect.getargspec', (['set_model_param_method'], {}), '(set_model_param_method)\n', (1032, 1056), False, 'import inspect\n'), ((2955, 3006), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'DeprecationWarning'], {}), "('always', DeprecationWarning)\n", (2976, 3006), False, 'import warnings\n'), ((6347, 6377), 'pyemma.util.reflection.call_member', 'call_member', (['M', 'f', '*args'], {}), '(M, f, *args, **kw)\n', (6358, 6377), False, 'from pyemma.util.reflection import call_member\n'), ((3342, 3365), 'warnings.filters.pop', 'warnings.filters.pop', (['(0)'], {}), '(0)\n', (3362, 3365), False, 'import warnings\n'), ((3045, 3081), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3068, 3081), False, 'import warnings\n')] |
import pytest
import numpy as np
from maxent_graph import BICM, DECM, BWCM, ECM, BIECM, RCM
from maxent_graph.util import nx_get_A, nx_get_B
# Test fixtures: one instance of every supported maximum-entropy null-model
# class, each built from a small bundled graph dataset. BICM/BWCM/BIECM take
# bipartite graphs (nx_get_B); DECM/ECM/RCM take ordinary graphs (nx_get_A).
models = [
    BICM(nx_get_B("data/my_senate_116_bipartite.graphml")),
    BICM(nx_get_B("data/opsahl-southernwomen_bipartite.graphml")),
    DECM(nx_get_A("data/residence_hall.graphml", weight_key="weight")),
    DECM(nx_get_A("data/macaques.graphml", weight_key="weight")),
    BWCM(
        nx_get_B(
            "data/plant_pol_kato.graphml",
            weight_key="count",
            bipartite_key="pollinator",
        )
    ),
    BWCM(
        nx_get_B(
            "data/plant_pol_vazquez_All_sites_pooled.graphml",
            weight_key="count",
            bipartite_key="pollinator",
        )
    ),
    BIECM(
        nx_get_B(
            "data/plant_pol_kato.graphml",
            weight_key="count",
            bipartite_key="pollinator",
        )
    ),
    BIECM(
        nx_get_B(
            "data/plant_pol_vazquez_All_sites_pooled.graphml",
            weight_key="count",
            bipartite_key="pollinator",
        )
    ),
    ECM(nx_get_A("data/kangaroo.graphml", weight_key="weight")),
    ECM(nx_get_A("data/train_terrorists.graphml", weight_key="weight")),
    RCM(nx_get_A("data/dutch_school_net_1.graphml")),
    RCM(nx_get_A("data/macaques.graphml")),
]
@pytest.mark.parametrize("model", models)
def test_model(model):
    """End-to-end smoke test of a maxent model: the loop-based and vectorized
    implementations must agree, and the solver must converge to a small
    relative error."""
    guess = model.get_initial_guess()

    # negative log-likelihood: loop implementation vs vectorized one
    assert np.allclose(
        model.neg_log_likelihood_loops(guess), model.neg_log_likelihood(guess)
    )

    # expected node sequence: loop implementation vs vectorized one
    assert np.allclose(
        model.expected_node_sequence_loops(guess),
        model.expected_node_sequence(guess),
    )

    solution = model.solve(guess)
    assert solution is not None
    assert max(solution.relative_error) < 0.001
"maxent_graph.util.nx_get_A",
"pytest.mark.parametrize",
"maxent_graph.util.nx_get_B",
"numpy.allclose"
] | [((1342, 1382), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', 'models'], {}), "('model', models)\n", (1365, 1382), False, 'import pytest\n'), ((1578, 1605), 'numpy.allclose', 'np.allclose', (['nll_loops', 'nll'], {}), '(nll_loops, nll)\n', (1589, 1605), True, 'import numpy as np\n'), ((1740, 1767), 'numpy.allclose', 'np.allclose', (['ens_loops', 'ens'], {}), '(ens_loops, ens)\n', (1751, 1767), True, 'import numpy as np\n'), ((163, 211), 'maxent_graph.util.nx_get_B', 'nx_get_B', (['"""data/my_senate_116_bipartite.graphml"""'], {}), "('data/my_senate_116_bipartite.graphml')\n", (171, 211), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((223, 278), 'maxent_graph.util.nx_get_B', 'nx_get_B', (['"""data/opsahl-southernwomen_bipartite.graphml"""'], {}), "('data/opsahl-southernwomen_bipartite.graphml')\n", (231, 278), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((290, 350), 'maxent_graph.util.nx_get_A', 'nx_get_A', (['"""data/residence_hall.graphml"""'], {'weight_key': '"""weight"""'}), "('data/residence_hall.graphml', weight_key='weight')\n", (298, 350), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((362, 416), 'maxent_graph.util.nx_get_A', 'nx_get_A', (['"""data/macaques.graphml"""'], {'weight_key': '"""weight"""'}), "('data/macaques.graphml', weight_key='weight')\n", (370, 416), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((437, 529), 'maxent_graph.util.nx_get_B', 'nx_get_B', (['"""data/plant_pol_kato.graphml"""'], {'weight_key': '"""count"""', 'bipartite_key': '"""pollinator"""'}), "('data/plant_pol_kato.graphml', weight_key='count', bipartite_key=\n 'pollinator')\n", (445, 529), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((597, 709), 'maxent_graph.util.nx_get_B', 'nx_get_B', (['"""data/plant_pol_vazquez_All_sites_pooled.graphml"""'], {'weight_key': '"""count"""', 'bipartite_key': '"""pollinator"""'}), "('data/plant_pol_vazquez_All_sites_pooled.graphml', 
weight_key=\n 'count', bipartite_key='pollinator')\n", (605, 709), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((778, 870), 'maxent_graph.util.nx_get_B', 'nx_get_B', (['"""data/plant_pol_kato.graphml"""'], {'weight_key': '"""count"""', 'bipartite_key': '"""pollinator"""'}), "('data/plant_pol_kato.graphml', weight_key='count', bipartite_key=\n 'pollinator')\n", (786, 870), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((939, 1051), 'maxent_graph.util.nx_get_B', 'nx_get_B', (['"""data/plant_pol_vazquez_All_sites_pooled.graphml"""'], {'weight_key': '"""count"""', 'bipartite_key': '"""pollinator"""'}), "('data/plant_pol_vazquez_All_sites_pooled.graphml', weight_key=\n 'count', bipartite_key='pollinator')\n", (947, 1051), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((1109, 1163), 'maxent_graph.util.nx_get_A', 'nx_get_A', (['"""data/kangaroo.graphml"""'], {'weight_key': '"""weight"""'}), "('data/kangaroo.graphml', weight_key='weight')\n", (1117, 1163), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((1174, 1236), 'maxent_graph.util.nx_get_A', 'nx_get_A', (['"""data/train_terrorists.graphml"""'], {'weight_key': '"""weight"""'}), "('data/train_terrorists.graphml', weight_key='weight')\n", (1182, 1236), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((1247, 1290), 'maxent_graph.util.nx_get_A', 'nx_get_A', (['"""data/dutch_school_net_1.graphml"""'], {}), "('data/dutch_school_net_1.graphml')\n", (1255, 1290), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n'), ((1301, 1334), 'maxent_graph.util.nx_get_A', 'nx_get_A', (['"""data/macaques.graphml"""'], {}), "('data/macaques.graphml')\n", (1309, 1334), False, 'from maxent_graph.util import nx_get_A, nx_get_B\n')] |
"""
Chemical composition is just the description of the amount of atoms of each specie. In the case of clusters or
molecules, ie a finite structure, it represents the complete set of atoms. For periodic structures it represents
the species present on a cell.
"""
import re
from numpy import array, argsort
from math import gcd as _gcd
from math import pi
from pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius
from pychemia.utils.computing import deep_unicode
from functools import reduce
from collections.abc import Mapping
class Composition(Mapping):
    """
    A Composition is basically a mapping between a number of species and a integer indicating how many atoms of that
    specie are present in the structure.
    A composition object do not contain geometrical information or bonding.
    The main purpose of this class is to be able to parse formulas into compositions and return string formulas sorted
    in various ways.
    """

    def __init__(self, value=None):
        """
        Creates a new composition, currently only absolute formulas are supported.

        :param value: (str, dict) The input argument could be a string with a chemical formula or the actual dictionary
        of species and values. The order of species is not guaranteed to be preserved. A iterable of atomic symbols
        is also accepted to build a composition object.

        :rtype: Composition

        >>> cp = Composition({'Ba': 2, 'Cu': 3, 'O': 7, 'Y': 1})
        >>> cp.formula
        'Ba2Cu3O7Y'
        >>> cp = Composition('Ba2Cu3O7Y')
        >>> cp2 = Composition(cp)
        >>> len(cp2)
        4
        >>> cp.nspecies
        4
        >>> cp = Composition(['O', 'H', 'O'])
        >>> len(cp)
        2
        >>> cp['O']
        2
        """
        # The internal dictionary where atom species and numbers of atoms of each specie are stored.
        self._composition = {}
        # Convert strings and dictionaries into unicode
        if value is not None:
            value = deep_unicode(value)
        # Case 1: The input is a formula
        if isinstance(value, str):
            self._set_composition(self.formula_parser(value))
        # Case 2: The input is a dictionary
        elif isinstance(value, dict):
            self._set_composition(value)
        # Case 3: The input is another composition object
        elif isinstance(value, Composition):
            self._set_composition(value.composition)
        # Case 4: The input is an iterable of atomic symbols
        elif hasattr(value, "__len__"):
            dvalue = {}
            for i in value:
                # Count repeated symbols to build the species -> count mapping
                if i in dvalue:
                    dvalue[i] += 1
                else:
                    dvalue[i] = 1
            self._set_composition(dvalue)
        else:
            # No (usable) input: empty composition
            self._composition = {}

    def __len__(self):
        # Number of distinct species, not number of atoms (see natom)
        return len(self._composition)

    def __getitem__(self, specie):
        """
        Returns the number of atoms of a given specie

        :param specie: Atomic Symbol for which the value will be returned

        :return: number of atoms of the given specie
        :rtype: int

        >>> comp = Composition('H2')
        >>> comp['H']
        2
        >>> comp['He']
        0
        """
        # Absent species report 0 instead of raising KeyError
        if specie in self._composition:
            return self._composition[specie]
        else:
            return 0

    def __repr__(self):
        """
        Evaluable representation of Composition object

        :return: Text representation that can be evaluated
        :rtype: str

        >>> cp1 = Composition('H2O')
        >>> cp2 = eval(repr(cp1))
        >>> cp2 == cp1
        True
        """
        return 'Composition(' + str(self.composition) + ')'

    def __str__(self):
        """
        :return: String representation of the composition

        >>> cp = Composition('YBa2Cu3O7')
        >>> 'Cu' in str(cp)
        True
        """
        ret = ''
        for i in self.species:
            ret += " %3s: %4d " % (i, self.composition[i])
        return ret

    def __iter__(self):
        return iter(self.composition)

    def __contains__(self, specie):
        """True if 'specie' is present in composition

        :return: True if specie is present
        :param specie: atomic specie
        :rtype: bool

        >>> cp = Composition('H2O')
        >>> 'He' in cp
        False
        """
        return specie in self._composition

    def _set_composition(self, value):
        """
        Checks the values of a dictionary before setting the actual composition

        :param value: (dict)
        :rtype: None
        """
        # Validate symbols and counts before storing a private copy
        for i in value:
            assert (i in atomic_symbols)
            assert (isinstance(value[i], int))
        self._composition = value.copy()

    @property
    def composition(self):
        """Dictionary with composition

        :return: The composition dictionary
        :rtype: dict

        >>> import pprint
        >>> cp = Composition('H2O')
        >>> pprint.pprint(cp.composition)
        {'H': 2, 'O': 1}
        """
        return self._composition

    def covalent_volume(self, packing='cubes'):
        """
        :param packing: The kind of packing could be 'cubes' or 'spheres'
        :type packing: str

        :return: The volume occupied by a given formula assuming a 'cubes' packing or 'spheres' packing
        :rtype: (float)

        >>> cp = Composition('C5H10')
        >>> cp.covalent_volume()
        19.942320000000002
        >>> cp.covalent_volume(packing='spheres')
        10.441774334589468
        """
        if packing == 'cubes':
            factor = 8
        elif packing == 'spheres':
            factor = 4 * pi / 3.0
        else:
            raise ValueError('Non-valid packing: "%s"' % packing)
        # find volume of unit cell by adding cubes
        volume = 0.0
        for specie in self:
            number_atoms_specie = self.composition[specie]
            # Pack each atom in a cube (2*r)^3
            volume += factor * number_atoms_specie * covalent_radius(specie) ** 3
        return volume

    @property
    def formula(self):
        """Chemical formula

        :return: The chemical formula with atoms sorted alphabetically
        :rtype: str

        >>> cp = Composition('NaCl')
        >>> cp.formula
        'ClNa'
        """
        return self.sorted_formula(sortby='alpha', reduced=True)

    @staticmethod
    def formula_parser(value):
        """Return a dictionary from a chemical formula

        :return: Convert an string representing a chemical formula into a dictionary with the species as keys
                 and values as the number of atoms of that specie

        :param value: (str) Chemical formula

        :rtype: dict

        >>> import pprint
        >>> Composition.formula_parser('Au20')
        {'Au': 20}
        >>> ret = Composition.formula_parser('UutUupUusUuo')
        >>> pprint.pprint(ret)
        {'Uuo': 1, 'Uup': 1, 'Uus': 1, 'Uut': 1}
        """
        ret = {}
        # `jump` counts how many following characters belong to the current
        # atomic symbol and should be skipped by the scanner
        jump = False
        for i in range(len(value)):
            if jump > 0:  # This char belongs to the current atom, move on
                jump -= 1
            elif value[i].isupper():  # Atom Name starts with Uppercase
                if i + 1 < len(value) and value[i + 1].islower():  # Atom name has more than 1 char
                    if i + 2 < len(value) and value[i + 2].islower():  # Atom name has more than 2 chars
                        specie = value[i:i + 3]
                        jump = 2
                    else:
                        specie = value[i:i + 2]
                        jump = 1
                else:
                    specie = value[i]
                    jump = 0
                # Collect the (optional) digits following the symbol
                j = 1
                number = ''
                while True:
                    if i + jump + j < len(value) and value[i + jump + j].isdigit():
                        number += value[i + jump + j]
                        j += 1
                    else:
                        break
                if number == '':
                    ret[specie] = 1
                else:
                    ret[specie] = int(number)
        return ret

    @staticmethod
    def formula_to_list(formula, nunits=1):
        """
        Reads a formula and returns a list of atomic symbols consistent with the formula and the number of
        formulas given by nunits

        :param formula: (str) Chemical formula as string
        :param nunits: (int) Number of formulas to apply

        :return: list of atomic symbols
        :rtype: list

        >>> Composition.formula_to_list('NaCl')
        ['Na', 'Cl']
        >>> flist = Composition.formula_to_list(u'Uut2Uup3Uus4Uuo5')
        >>> len(flist)
        14
        >>> flist = Composition.formula_to_list('Uut2Uup3Uus4Uuo5', nunits=2)
        >>> len(flist)
        28
        """
        # decompose composition
        a = re.findall(r"[A-Z][a-z0-9]*", formula)
        composition = []
        for i in a:
            # Split each token into (symbol, count); missing count means 1
            m = re.match(r"([A-Za-z]+)([0-9]*)", i)
            if m.group(2) == "":
                n = int(1)
            else:
                n = int(m.group(2))
            for j in range(n * nunits):
                composition.append(m.group(1))
        return composition

    @property
    def gcd(self):
        """ Number of minimal formulas on a given composition.

        :return: The number of formulas that can be extracted from a composition ie, the greatest common denominator
                 for the composition.

        :rtype: int

        >>> cp = Composition('NaCl')
        >>> cp.gcd
        1
        >>> cp = Composition('Na2Cl2')
        >>> cp.gcd
        2
        >>> cp = Composition()
        >>> cp.gcd is None
        True
        """
        if self.natom > 0:
            return reduce(_gcd, self.values)
        else:
            return None

    @staticmethod
    def get_species_from_hex(arg):
        """List of species encoded for hex string produced by species_hex

        :return: Return a set of species (as atomic numbers) from the encoded species created by the output of
                 "species_hex" method.

        :param arg: str String with hexadecimal representation of list of species.

        >>> Composition.get_species_from_hex('0x38271d08')
        [8, 29, 39, 56]
        """
        # Decode base-256 digits: each byte is one atomic number,
        # least-significant first
        num = int(arg, 16)
        ret = []
        while num > 0:
            ret.append(num % 256)
            num = (num-ret[-1])//256
        return ret

    @property
    def natom(self):
        """
        :return: The number of atoms in the composition
        :rtype: int

        >>> cp = Composition('H2O')
        >>> cp.natom
        3
        """
        return sum(self.values)

    @property
    def nspecies(self):
        """
        :return: Number of species in the composition
        :rtype: int

        >>> cp = Composition('H2O')
        >>> cp.nspecies
        2
        """
        return len(self.species)

    @property
    def symbols(self):
        """List of species on the composition

        :return: A list of atomic symbols, one entry per atom
        :rtype: list

        >>> cp = Composition('H2O')
        >>> cp.symbols
        ['H', 'H', 'O']
        """
        ret = []
        for specie in self:
            number_atoms_specie = self.composition[specie]
            for i in range(number_atoms_specie):
                ret.append(specie)
        return sorted(deep_unicode(ret))

    @property
    def species(self):
        """List of species on the composition

        :return: The list of species, no particular order but atoms of the same specie are contiguous.
        :rtype: list

        >>> cp = Composition('H2O')
        >>> sorted(cp.species)
        ['H', 'O']
        """
        return [deep_unicode(x) for x in self._composition]

    def sorted_formula(self, sortby='alpha', reduced=True):
        """
        :return: The chemical formula. It could be sorted alphabetically using sortby='alpha', by electronegativity
                 using sortby='electronegativity' or using Hill System with sortby='Hill'
                 Just the first 3 letters are unambiguous and case is not taken in account so you can use 'alp', 'hil'
                 or 'ele'

        :param sortby: (str) 'alpha' : Alphabetically
                             'electronegativity' : Electronegativity
                             'hill' : Hill System
        :param reduced: (bool) If the formula should be normalized

        :rtype: str

        .. notes: Hill exceptions have not being implemented yet

        >>> cp = Composition('YBa2Cu3O7')
        >>> cp.sorted_formula()
        'Ba2Cu3O7Y'
        >>> cp.sorted_formula(sortby='hill')
        'Ba2Cu3O7Y'
        >>> cp.sorted_formula(sortby='electroneg')
        'Ba2YCu3O7'
        >>> cp = Composition('H10C5')
        >>> cp.sorted_formula(sortby='hill', reduced=True)
        'CH2'
        >>> cp = Composition('IBr')
        >>> cp.sorted_formula(sortby='hill', reduced=False)
        'BrI'
        >>> cp = Composition('Cl4C')
        >>> cp.sorted_formula(sortby='hill', reduced=False)
        'CCl4'
        >>> cp = Composition('IH3C')
        >>> cp.sorted_formula(sortby='hill', reduced=False)
        'CH3I'
        >>> cp = Composition('BrH5C2')
        >>> cp.sorted_formula(sortby='hill', reduced=False)
        'C2H5Br'
        >>> cp = Composition('S04H2')
        >>> cp.sorted_formula(sortby='hill', reduced=False)
        'H2S4'
        >>> cp = Composition('SO4H2')
        >>> cp.sorted_formula(sortby='hill', reduced=False)
        'H2O4S'
        """
        # Work on a reduced copy when the counts share a common divisor
        if reduced and self.gcd > 1:
            comp = Composition(self.composition)
            for i in comp.composition:
                comp._composition[i] //= self.gcd
        else:
            comp = self
        if sortby.lower()[:3] == 'ele':
            electroneg = list(electronegativity(comp.species))
            # Not longer needed as electronegativy will return 0 for 'None' values
            # for i in range(len(electroneg)):
            #     if electroneg[i] is None:
            #         electroneg[i] = -1
            sortedspecies = array(comp.species)[argsort(electroneg)]
        elif sortby.lower()[:3] == "hil":  # FIXME: Hill system exceptions not implemented
            # Hill system: carbon first, hydrogen second, rest alphabetical
            sortedspecies = []
            presortedspecies = sorted(comp.species)
            if 'C' in presortedspecies:
                sortedspecies.append('C')
                presortedspecies.pop(presortedspecies.index('C'))
            if 'H' in presortedspecies:
                sortedspecies.append('H')
                presortedspecies.pop(presortedspecies.index('H'))
            sortedspecies += presortedspecies
        else:
            sortedspecies = sorted(comp.species)
        ret = u''
        for specie in sortedspecies:
            ret += '%s' % specie
            # Counts of 1 are implicit in chemical formulas
            if comp.composition[specie] > 1:
                ret += "%d" % comp.composition[specie]
        return deep_unicode(ret)

    def species_encoded(self, base):
        """Encode the list of species with a number

        :return: Encodes the species as a number.

        :param base: Integer used as base for encoding.

        :rtype: int

        >>> cp = Composition('H2O')
        >>> cp.species_encoded(100)
        801
        """
        ret = 0
        i = 0
        # Sort by atomic number so a given set of species always produces the
        # same encoding
        for atom_number in sorted(atomic_number(self.species)):
            ret += atom_number * (base ** i)
            i += 1
        return ret

    def species_hex(self):
        """Encoding in hexadecimal with 2 bytes per specie (base 256)

        :return: Encodes the species into a hexadecimal representation where each specie is stored on a 2-Byte slot
                 ordered by atomic number.
                 The output produces a unique encoding where each 2 character from the hexadecimal will encode a single
                 species and the species are ordered by atomic number making the codification unique.

        :rtype: str

        >>> cp = Composition('YBa2Cu3O7')
        >>> cp.species_hex()
        '0x38271d08'
        """
        enc = self.species_encoded(256)
        return hex(enc)

    @property
    def values(self):
        """
        :return: The number of atoms of each specie
        :rtype: list

        >>> cp = Composition('YBa2Cu3O7')
        >>> sorted(cp.values)
        [1, 2, 3, 7]
        """
        return [self._composition[x] for x in self._composition]
| [
"pychemia.utils.computing.deep_unicode",
"functools.reduce",
"pychemia.utils.periodic.electronegativity",
"re.match",
"pychemia.utils.periodic.atomic_number",
"numpy.array",
"numpy.argsort",
"pychemia.utils.periodic.covalent_radius",
"re.findall"
] | [((8952, 8989), 're.findall', 're.findall', (['"""[A-Z][a-z0-9]*"""', 'formula'], {}), "('[A-Z][a-z0-9]*', formula)\n", (8962, 8989), False, 'import re\n'), ((14977, 14994), 'pychemia.utils.computing.deep_unicode', 'deep_unicode', (['ret'], {}), '(ret)\n', (14989, 14994), False, 'from pychemia.utils.computing import deep_unicode\n'), ((2056, 2075), 'pychemia.utils.computing.deep_unicode', 'deep_unicode', (['value'], {}), '(value)\n', (2068, 2075), False, 'from pychemia.utils.computing import deep_unicode\n'), ((9052, 9086), 're.match', 're.match', (['"""([A-Za-z]+)([0-9]*)"""', 'i'], {}), "('([A-Za-z]+)([0-9]*)', i)\n", (9060, 9086), False, 'import re\n'), ((9855, 9880), 'functools.reduce', 'reduce', (['_gcd', 'self.values'], {}), '(_gcd, self.values)\n', (9861, 9880), False, 'from functools import reduce\n'), ((11428, 11445), 'pychemia.utils.computing.deep_unicode', 'deep_unicode', (['ret'], {}), '(ret)\n', (11440, 11445), False, 'from pychemia.utils.computing import deep_unicode\n'), ((11771, 11786), 'pychemia.utils.computing.deep_unicode', 'deep_unicode', (['x'], {}), '(x)\n', (11783, 11786), False, 'from pychemia.utils.computing import deep_unicode\n'), ((15373, 15400), 'pychemia.utils.periodic.atomic_number', 'atomic_number', (['self.species'], {}), '(self.species)\n', (15386, 15400), False, 'from pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius\n'), ((13880, 13911), 'pychemia.utils.periodic.electronegativity', 'electronegativity', (['comp.species'], {}), '(comp.species)\n', (13897, 13911), False, 'from pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius\n'), ((14154, 14173), 'numpy.array', 'array', (['comp.species'], {}), '(comp.species)\n', (14159, 14173), False, 'from numpy import array, argsort\n'), ((14174, 14193), 'numpy.argsort', 'argsort', (['electroneg'], {}), '(electroneg)\n', (14181, 14193), False, 'from numpy import array, argsort\n'), ((6075, 6098), 
'pychemia.utils.periodic.covalent_radius', 'covalent_radius', (['specie'], {}), '(specie)\n', (6090, 6098), False, 'from pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius\n')] |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 ZhicongYan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import os
import sys
sys.path.append('.')
sys.path.append("../")
import tensorflow as tf
import tensorflow.contrib.layers as tcl
import numpy as np
from netutils.learning_rate import get_learning_rate
from netutils.learning_rate import get_global_step
from netutils.optimizer import get_optimizer
from netutils.optimizer import get_optimizer_by_config
from netutils.loss import get_loss
from .base_model import BaseModel
class BEGAN(BaseModel):
	"""Implementation of "BEGAN: Boundary Equilibrium Generative Adversarial Networks".

	Reference: Berthelot, Schumm, Metz, CoRR abs/1703.10717 (2017),
	http://arxiv.org/abs/1703.10717

	NOTE(review): ``__init__`` raises ``NotImplementedError`` immediately after
	calling the base constructor, so this model is currently disabled and every
	statement after the raise (and hence the rest of the class) is unreachable.
	The code is kept as a reference skeleton; confirm before enabling.
	"""
	def __init__(self, config):
		# config: dict with 'input shape', 'z_dim', sub-network and optimizer configs.
		super(BEGAN, self).__init__(config)
		# Model is intentionally not finished: nothing below this line executes.
		raise NotImplementedError
		self.input_shape = config['input shape']
		self.z_dim = config['z_dim']
		self.config = config
		# Defaults 40 / 5 are only used when the keys are absent from config.
		self.discriminator_warm_up_steps = int(config.get('discriminator warm up steps', 40))
		self.discriminator_training_steps = int(config.get('discriminator training steps', 5))
		self.build_model()
		self.build_summary()
	def build_model(self):
		"""Build the TF1 graph: networks, placeholders, losses, optimizers, saver."""
		# network config: name the sub-networks so their variable scopes differ
		self.config['discriminator params']['name'] = 'Discriminator'
		self.config['generator params']['name'] = 'Generator'
		# _build_discriminator / _build_generator are provided by BaseModel
		# (not visible in this file) -- confirm their contract there.
		self.discriminator = self._build_discriminator('discriminator')
		self.generator = self._build_generator('generator')
		# placeholders and forward passes
		self.x_real = tf.placeholder(tf.float32, shape=[None, ] + list(self.input_shape), name='x_input')
		self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim], name='z')
		self.x_fake = self.generator(self.z)
		self.dis_real = self.discriminator(self.x_real)
		self.dis_fake = self.discriminator(self.x_fake)
		# adversarial cross-entropy losses ('down' trains D, 'up' trains G)
		self.d_loss = get_loss('adversarial down', 'cross entropy', {'dis_real' : self.dis_real, 'dis_fake' : self.dis_fake})
		self.g_loss = get_loss('adversarial up', 'cross entropy', {'dis_fake' : self.dis_fake})
		# single shared global step; its update op is attached to the generator only
		self.global_step, self.global_step_update = get_global_step()
		# optimizer of discriminator configured without global step update
		# so we can keep the learning rate of discriminator the same as generator
		(self.d_train_op,
			self.d_learning_rate,
			self.d_global_step) = get_optimizer_by_config(self.config['discriminator optimizer'],
											self.config['discriminator optimizer params'],
											self.d_loss, self.discriminator.vars,
											self.global_step)
		(self.g_train_op,
			self.g_learning_rate,
			self.g_global_step) = get_optimizer_by_config(self.config['generator optimizer'],
											self.config['generator optimizer params'],
											self.g_loss, self.generator.vars,
											self.global_step, self.global_step_update)
		# model saver: checkpoints both networks plus the global step
		self.saver = tf.train.Saver(self.discriminator.store_vars
						+ self.generator.store_vars
						+ [self.global_step])
	def build_summary(self):
		# self.has_summary presumably comes from BaseModel / config -- confirm there.
		if self.has_summary:
			# summary scalars are logged per step
			sum_list = []
			sum_list.append(tf.summary.scalar('discriminator/loss', self.d_loss))
			sum_list.append(tf.summary.scalar('discriminator/lr', self.d_learning_rate))
			self.d_sum_scalar = tf.summary.merge(sum_list)
			sum_list = []
			sum_list.append(tf.summary.scalar('generator/loss', self.g_loss))
			sum_list.append(tf.summary.scalar('generator/lr', self.g_learning_rate))
			self.g_sum_scalar = tf.summary.merge(sum_list)
			# summary hists are logged by calling self.summary()
			sum_list = []
			sum_list += [tf.summary.histogram('discriminator/'+var.name, var) for var in self.discriminator.vars]
			sum_list += [tf.summary.histogram('generator/'+var.name, var) for var in self.generator.vars]
			self.histogram_summary = tf.summary.merge(sum_list)
		else:
			# summaries disabled: downstream code must tolerate None here
			self.d_sum_scalar = None
			self.g_sum_scalar = None
			self.histogram_summary = None
	@property
	def vars(self):
		# All trainable variables of both sub-networks.
		return self.discriminator.vars + self.generator.vars
	'''
	train operations
	'''
	def train_on_batch_supervised(self, sess, x_batch, y_batch):
		# BEGAN is unsupervised; labelled training is deliberately unsupported.
		raise NotImplementedError
	def train_on_batch_unsupervised(self, sess, x_batch):
		# Train D for `discriminator_training_steps` steps, then G once.
		# NOTE(review): self.is_training and self.train(...) are not defined in
		# this class -- presumably inherited from BaseModel; verify there.
		dis_train_step = self.discriminator_training_steps
		summary_list = []
		for i in range(dis_train_step):
			feed_dict = {
				self.x_real : x_batch,
				self.z : np.random.randn(x_batch.shape[0], self.z_dim),
				self.is_training : True
			}
			step_d, lr_d, loss_d, summary_d = self.train(sess, feed_dict, update_op=self.d_train_op,
										step=self.d_global_step,
										learning_rate=self.d_learning_rate,
										loss=self.d_loss,
										summary=self.d_sum_scalar)
			summary_list.append((step_d, summary_d))
		feed_dict = {
			self.z : np.random.randn(x_batch.shape[0], self.z_dim),
			self.is_training : True
		}
		step_g, lr_g, loss_g, summary_g = self.train(sess, feed_dict, update_op=self.g_train_op,
										step=self.g_global_step,
										learning_rate=self.g_learning_rate,
										loss=self.g_loss,
										summary=self.g_sum_scalar)
		summary_list.append((step_g, summary_g))
		# Returns: last generator step, per-network learning rates and losses,
		# and all (step, summary) pairs collected this call.
		return step_g, {'d':lr_d, 'g':lr_g}, {'d':loss_d,'g':loss_g}, summary_list,
	'''
	test operation
	'''
	def generate(self, sess, z_batch):
		# Map latent codes z_batch to images (inference mode).
		feed_dict = {
			self.z : z_batch,
			self.is_training : False
		}
		x_batch = sess.run([self.x_fake], feed_dict = feed_dict)[0]
		return x_batch
	def discriminate(self, sess, x_batch):
		# Discriminator scores for a batch of real images (first output column).
		feed_dict = {
			self.x_real : x_batch,
			self.is_training : False
		}
		dis_x = sess.run([self.dis_real], feed_dict = feed_dict)[0][:, 0]
		return dis_x
	'''
	summary operation
	'''
	def summary(self, sess):
		# Evaluate the (expensive) histogram summaries on demand.
		if self.has_summary:
			summ = sess.run(self.histogram_summary)
			return summ
		else:
			return None
| [
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.summary.merge",
"netutils.learning_rate.get_global_step",
"numpy.random.randn",
"tensorflow.summary.histogram",
"netutils.optimizer.get_optimizer_by_config",
"netutils.loss.get_loss",
"tensorflow.summary.scalar",
"sys.path.append"
] | [((1232, 1252), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (1247, 1252), False, 'import sys\n'), ((1253, 1275), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (1268, 1275), False, 'import sys\n'), ((3323, 3385), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.z_dim]', 'name': '"""z"""'}), "(tf.float32, shape=[None, self.z_dim], name='z')\n", (3337, 3385), True, 'import tensorflow as tf\n'), ((3559, 3664), 'netutils.loss.get_loss', 'get_loss', (['"""adversarial down"""', '"""cross entropy"""', "{'dis_real': self.dis_real, 'dis_fake': self.dis_fake}"], {}), "('adversarial down', 'cross entropy', {'dis_real': self.dis_real,\n 'dis_fake': self.dis_fake})\n", (3567, 3664), False, 'from netutils.loss import get_loss\n'), ((3679, 3751), 'netutils.loss.get_loss', 'get_loss', (['"""adversarial up"""', '"""cross entropy"""', "{'dis_fake': self.dis_fake}"], {}), "('adversarial up', 'cross entropy', {'dis_fake': self.dis_fake})\n", (3687, 3751), False, 'from netutils.loss import get_loss\n'), ((3821, 3838), 'netutils.learning_rate.get_global_step', 'get_global_step', ([], {}), '()\n', (3836, 3838), False, 'from netutils.learning_rate import get_global_step\n'), ((4058, 4234), 'netutils.optimizer.get_optimizer_by_config', 'get_optimizer_by_config', (["self.config['discriminator optimizer']", "self.config['discriminator optimizer params']", 'self.d_loss', 'self.discriminator.vars', 'self.global_step'], {}), "(self.config['discriminator optimizer'], self.config\n ['discriminator optimizer params'], self.d_loss, self.discriminator.\n vars, self.global_step)\n", (4081, 4234), False, 'from netutils.optimizer import get_optimizer_by_config\n'), ((4346, 4535), 'netutils.optimizer.get_optimizer_by_config', 'get_optimizer_by_config', (["self.config['generator optimizer']", "self.config['generator optimizer params']", 'self.g_loss', 'self.generator.vars', 'self.global_step', 
'self.global_step_update'], {}), "(self.config['generator optimizer'], self.config[\n 'generator optimizer params'], self.g_loss, self.generator.vars, self.\n global_step, self.global_step_update)\n", (4369, 4535), False, 'from netutils.optimizer import get_optimizer_by_config\n'), ((4606, 4704), 'tensorflow.train.Saver', 'tf.train.Saver', (['(self.discriminator.store_vars + self.generator.store_vars + [self.global_step]\n )'], {}), '(self.discriminator.store_vars + self.generator.store_vars +\n [self.global_step])\n', (4620, 4704), True, 'import tensorflow as tf\n'), ((5005, 5031), 'tensorflow.summary.merge', 'tf.summary.merge', (['sum_list'], {}), '(sum_list)\n', (5021, 5031), True, 'import tensorflow as tf\n'), ((5218, 5244), 'tensorflow.summary.merge', 'tf.summary.merge', (['sum_list'], {}), '(sum_list)\n', (5234, 5244), True, 'import tensorflow as tf\n'), ((5549, 5575), 'tensorflow.summary.merge', 'tf.summary.merge', (['sum_list'], {}), '(sum_list)\n', (5565, 5575), True, 'import tensorflow as tf\n'), ((6508, 6553), 'numpy.random.randn', 'np.random.randn', (['x_batch.shape[0]', 'self.z_dim'], {}), '(x_batch.shape[0], self.z_dim)\n', (6523, 6553), True, 'import numpy as np\n'), ((4848, 4900), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""discriminator/loss"""', 'self.d_loss'], {}), "('discriminator/loss', self.d_loss)\n", (4865, 4900), True, 'import tensorflow as tf\n'), ((4921, 4980), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""discriminator/lr"""', 'self.d_learning_rate'], {}), "('discriminator/lr', self.d_learning_rate)\n", (4938, 4980), True, 'import tensorflow as tf\n'), ((5069, 5117), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator/loss"""', 'self.g_loss'], {}), "('generator/loss', self.g_loss)\n", (5086, 5117), True, 'import tensorflow as tf\n'), ((5138, 5193), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator/lr"""', 'self.g_learning_rate'], {}), "('generator/lr', self.g_learning_rate)\n", (5155, 
5193), True, 'import tensorflow as tf\n'), ((5335, 5389), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('discriminator/' + var.name)", 'var'], {}), "('discriminator/' + var.name, var)\n", (5355, 5389), True, 'import tensorflow as tf\n'), ((5440, 5490), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('generator/' + var.name)", 'var'], {}), "('generator/' + var.name, var)\n", (5460, 5490), True, 'import tensorflow as tf\n'), ((6098, 6143), 'numpy.random.randn', 'np.random.randn', (['x_batch.shape[0]', 'self.z_dim'], {}), '(x_batch.shape[0], self.z_dim)\n', (6113, 6143), True, 'import numpy as np\n')] |
import sys
from pathlib import Path
import h5py
import minydra
import numpy as np
from tqdm import tqdm
sys.path.append(str(Path(__file__).resolve().parent.parent))
from aiphysim.utils import dat_to_array, new_unique_path, resolve # noqa: E402
def label_file(file_path, delay_fs=0.2):
    """Derive metadata labels for a DYNAMICSdat file from its path components.

    Expected layout:
    <dataset>/<spectrum_Intensity_X>/<spectrum_Intensity_X_t2_Y>/<delay idx>/DYNAMICSdat

    Args:
        file_path (Path): path to a DYNAMICSdat file.
        delay_fs (float): time step (fs) between consecutive delay indices.

    Returns:
        dict: {"delay": float, "t3": float, "i": float, "set": str}
    """
    parents = file_path.parents
    delay_dir, t3_dir, intensity_dir, dataset_dir = (
        parents[0],
        parents[1],
        parents[2],
        parents[3],
    )
    return {
        # delay indices are 1-based directory names -> physical time in fs
        "delay": (int(delay_dir.name) - 1) * delay_fs,
        "t3": float(t3_dir.name.split("_t2_")[-1]),
        "i": float(intensity_dir.name.split("spectrum_Intensity_")[-1]),
        "set": dataset_dir.name,
    }
def sample_files(path, datasets, i1, t3, ignore_delays):
    """
    Create a list of all paths to DYNAMICSdat files as per the
    training sets (datasets), intensity (i1) and t3 delays found in path.
    within those folders (path/dataset/intensity/t3) there are 500 files
    named 1 to 500. `ignore_delays` is going to select every nth files
    like:
    keep_indices = np.arange(1, 501, ignore_delays)
    eg file path:
    /network/tmp1/schmidtv/perovai/training_set1/spectrum_Intensity_.02104/spectrum_Intensity_.02104_t2_20.0/481/DYNAMICSdat noqa: E501
    <------------ path ----------><-- dataset -><---- Intensity 02104 ---><------------- t3 20 -----------><idx><--file-->
    Args:
        path (Path or str): base path for the datasets
        datasets (list(str)): The datasets to explore
        i1 (list(str)): Intensity values to select
        t3 (list(str)): The t3 delays to select
        ignore_delays (int): the step of the range to select files
    Returns:
        list(Path): resolved paths to the selected DYNAMICSdat files
        (paths are NOT checked for existence here).
    """
    print("\n" + "-" * 50 + "\n")
    # 1) resolve the dataset directories
    if datasets == "all":
        datasets = [d for d in path.iterdir() if d.is_dir()]
    else:
        if isinstance(datasets, str):
            datasets = [datasets]
        datasets = [resolve(path / d) for d in datasets]
        # warn about (but keep) dataset dirs that do not exist on disk
        ignoring = [d for d in datasets if not d.exists()]
        if ignoring:
            print(
                "Warning! Those datasets do not exist:\n"
                + "\n".join(list(map(str, ignoring)))
            )
    print("datasets: ", sorted(set([d.name for d in datasets])))
    # 2) resolve the intensity directories inside each dataset
    if i1 == "all":
        i1s = [resolve(i) for d in datasets for i in d.glob("spectrum_Intensity_*")]
    else:
        if not isinstance(i1, list):
            i1 = [i1]
        i1s = [
            resolve(t)
            for d in datasets
            for i in i1
            for t in d.glob(f"spectrum_Intensity_*{i}*")
        ]
    print("intensities: ", sorted(set([i.name for i in i1s])))
    # 3) resolve the t3 directories inside each intensity directory
    if t3 == "all":
        # NOTE(review): this branch globs "spectrum_Intensity_*", the same
        # pattern as the intensity level; the explicit branch below uses
        # "*_t2_*" -- confirm this is intended and not a missing _t2_ filter.
        t3s = [resolve(i) for i1 in i1s for i in i1.glob("spectrum_Intensity_*")]
    else:
        if not isinstance(t3, list):
            t3 = [t3]
        t3s = [
            resolve(t)
            for i1 in i1s
            for i in t3
            for t in i1.glob(f"spectrum_Intensity_*_t2_{i}.0")
        ]
    print(
        "t3s: ", sorted(set([t.name.replace("spectrum_Intensity_", "") for t in t3s]))
    )
    # 4) subsample the 500 delay folders with the requested stride
    keep_indices = np.arange(1, 501, ignore_delays)
    print("delays: ", list(keep_indices))
    # NOTE: the comprehension variable `t3` shadows the `t3` parameter here
    # (harmless, since the parameter is no longer needed at this point).
    files = [
        resolve(t3 / str(delay) / "DYNAMICSdat") for t3 in t3s for delay in keep_indices
    ]
    print(">>> Found", len(files), "files.")
    return files
if __name__ == "__main__":
    # Build a subsampled HDF5 dataset of DYNAMICSdat trajectories.
    # run subsample_density_dataset.py path=/network/tmp1/schmidtv/perovai datasets=training_set1 i1___str="02104" ignore_delays=10
    parser = minydra.Parser()
    args = parser.args.resolve()
    # -----------------------------
    # ----- PARSE ARGUMENTS -----
    # -----------------------------
    # `path` is the only mandatory argument; everything else has a default.
    if "path" in args:
        path = resolve(args.path)
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # check with a raised error would be more robust.
        assert path.exists()
    else:
        raise ValueError("Provide a base path: `path=X`")
    if "datasets" in args:
        datasets = args.datasets
    else:
        datasets = "all"
    if "i1" in args:
        i1 = args.i1
    else:
        i1 = "all"
    if "t3" in args:
        t3 = args.t3
    else:
        t3 = "all"
    if "ignore_delays" in args:
        ignore_delays = args.ignore_delays
    else:
        # interactive confirmation before defaulting to all 500 delays
        if "y" not in input(
            "No `ignore_delays` was provided. All 500 delays will be used. "
            + "Ok? [y/n] "
        ):
            print("Aborting")
            sys.exit()
        ignore_delays = 1
    print("\n" + "-" * 50 + "\n")
    out = None
    if "out" not in args:
        # fall back to the SLURM job's local scratch space
        if "y" not in input(
            ">> WARNING `out=X` is not provided. Using $SLURM_TMPDIR. Ok? [y/n]"
        ):
            print("Aborting")
            sys.exit()
        slurm_tmpdir = resolve("/Tmp/slurm.$SLURM_JOB_ID.0")
        assert slurm_tmpdir.exists()
        out = new_unique_path(slurm_tmpdir / "mini-dataset.h5")
    else:
        out = resolve(args.out)
        if out.is_dir():
            out = new_unique_path(out / "mini-dataset.h5")
            print(f"`out` is a directory, using {str(out)}")
        else:
            # never clobber an existing file: pick a fresh unique name
            if out.exists():
                out = new_unique_path(out)
                print(f"Warning: outfile {out.name} exists, using {str(out)}")
            else:
                print(f"Creating dataset: {str(out)}")
    # ----------------------------
    # ----- CREATE DATASET -----
    # ----------------------------
    files = sample_files(path, datasets, i1, t3, ignore_delays)
    # deterministic ordering so trajectory indices are reproducible
    files = sorted(files, key=lambda x: str(x))
    print(f"Writing {len(files)} to {str(out)}")
    # "w-" mode fails if the file already exists (guarded above)
    with h5py.File(str(out), "w-") as f5:
        # store the CLI arguments as file-level attributes for provenance
        f5.attrs.update(dict(args))
        for i, f in tqdm(enumerate(files)):
            labels = label_file(f)
            data = dat_to_array(f, shape=3)
            d = f5.create_dataset(
                f"trajectory_{i}",
                data=data,
                dtype="f",
                compression="gzip",
                compression_opts=2,
            )
            # per-trajectory metadata (delay, t3, intensity, dataset name)
            d.attrs.update(labels)
    print(f"\nDone! Data is in {str(out)}")
| [
"pathlib.Path",
"aiphysim.utils.dat_to_array",
"sys.exit",
"aiphysim.utils.resolve",
"aiphysim.utils.new_unique_path",
"numpy.arange",
"minydra.Parser"
] | [((3164, 3196), 'numpy.arange', 'np.arange', (['(1)', '(501)', 'ignore_delays'], {}), '(1, 501, ignore_delays)\n', (3173, 3196), True, 'import numpy as np\n'), ((3590, 3606), 'minydra.Parser', 'minydra.Parser', ([], {}), '()\n', (3604, 3606), False, 'import minydra\n'), ((3788, 3806), 'aiphysim.utils.resolve', 'resolve', (['args.path'], {}), '(args.path)\n', (3795, 3806), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((4727, 4764), 'aiphysim.utils.resolve', 'resolve', (['"""/Tmp/slurm.$SLURM_JOB_ID.0"""'], {}), "('/Tmp/slurm.$SLURM_JOB_ID.0')\n", (4734, 4764), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((4816, 4865), 'aiphysim.utils.new_unique_path', 'new_unique_path', (["(slurm_tmpdir / 'mini-dataset.h5')"], {}), "(slurm_tmpdir / 'mini-dataset.h5')\n", (4831, 4865), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((4890, 4907), 'aiphysim.utils.resolve', 'resolve', (['args.out'], {}), '(args.out)\n', (4897, 4907), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((1980, 1997), 'aiphysim.utils.resolve', 'resolve', (['(path / d)'], {}), '(path / d)\n', (1987, 1997), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((2343, 2353), 'aiphysim.utils.resolve', 'resolve', (['i'], {}), '(i)\n', (2350, 2353), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((2510, 2520), 'aiphysim.utils.resolve', 'resolve', (['t'], {}), '(t)\n', (2517, 2520), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((2741, 2751), 'aiphysim.utils.resolve', 'resolve', (['i'], {}), '(i)\n', (2748, 2751), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((2905, 2915), 'aiphysim.utils.resolve', 'resolve', (['t'], {}), '(t)\n', (2912, 2915), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((4416, 4426), 'sys.exit', 
'sys.exit', ([], {}), '()\n', (4424, 4426), False, 'import sys\n'), ((4693, 4703), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4701, 4703), False, 'import sys\n'), ((4951, 4991), 'aiphysim.utils.new_unique_path', 'new_unique_path', (["(out / 'mini-dataset.h5')"], {}), "(out / 'mini-dataset.h5')\n", (4966, 4991), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((5738, 5762), 'aiphysim.utils.dat_to_array', 'dat_to_array', (['f'], {'shape': '(3)'}), '(f, shape=3)\n', (5750, 5762), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((5118, 5138), 'aiphysim.utils.new_unique_path', 'new_unique_path', (['out'], {}), '(out)\n', (5133, 5138), False, 'from aiphysim.utils import dat_to_array, new_unique_path, resolve\n'), ((126, 140), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (130, 140), False, 'from pathlib import Path\n')] |
import os
import numpy as np
from array import array
from sklearn.metrics import mean_absolute_error
from skmultiflow.data import RegressionGenerator
from skmultiflow.trees import HoeffdingTreeRegressor
from difflib import SequenceMatcher
def test_hoeffding_tree_regressor():
    """Regression test: mean-leaf Hoeffding tree reproduces known predictions,
    error, and repr on a fixed synthetic stream (random_state=1)."""
    stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)
    learner = HoeffdingTreeRegressor(leaf_prediction='mean')
    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10
    # prequential loop: predict every `wait_samples` samples, then train
    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1
    # golden values captured from a known-good run of this exact setup
    expected_predictions = array('d', [102.38946041769101, 55.6584574987656, 5.746076599168373, 17.11797209372667,
                                 2.566888222752787, 9.188247802192826, 17.87894804676911, 15.940629626883966,
                                 8.981172175448485, 13.152624115190092, 11.106058099429399, 6.473195313058236,
                                 4.723621479590173, 13.825568609556493, 8.698873073880696, 1.6452441811010252,
                                 5.123496188584294, 6.34387187194982, 5.9977733790395105, 6.874251577667707,
                                 4.605348088338317, 8.20112636572672, 9.032631648758098, 4.428189978974459,
                                 4.249801041367518, 9.983272668044492, 12.859518508979734, 11.741395774380285,
                                 11.230028410261868, 9.126921979081521, 9.132146661688296, 7.750655625124709,
                                 6.445145118245414, 5.760928671876355, 4.041291302080659, 3.591837600560529,
                                 0.7640424010500604, 0.1738639840537784, 2.2068337802212286, -81.05302946841077,
                                 96.17757415335177, -77.35894903819677, 95.85568683733698, 99.1981674250886,
                                 99.89327888035015, 101.66673013734784, -79.1904234513751, -80.42952143783687,
                                 100.63954789983896])
    assert np.allclose(y_pred, expected_predictions)
    error = mean_absolute_error(y_true, y_pred)
    expected_error = 143.11351404083086
    assert np.isclose(error, expected_error)
    # the repr must stay stable so saved configs remain reproducible
    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='mean', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=None, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info
    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
def test_hoeffding_tree_regressor_perceptron():
    """Regression test: perceptron-leaf Hoeffding tree reproduces the known
    error and repr on the same fixed synthetic stream (random_state=1)."""
    stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)
    learner = HoeffdingTreeRegressor(leaf_prediction='perceptron', random_state=1)
    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10
    # prequential loop: predict every `wait_samples` samples, then train
    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1
    expected_predictions = array('d', [525.7553636732247, 352.8160300365902, 224.80744320456478,
                                 193.72837054292074, 132.6059603765031, 117.06974933197759,
                                 114.53342429855932, 89.37195405567235, 57.85335051891305,
                                 60.00883955911155, 47.263185779784266, 25.17616431074491,
                                 17.43259526890146, 47.33468996498019, 22.83975208548138,
                                 -7.659282840823236, 8.564101665071064, 14.61585289361161,
                                 11.560941733770441, 13.70120291865976, 1.1938438210799651,
                                 19.01970713481836, 21.23459424444584, -5.667473522309328,
                                 -5.203149619381393, 28.726275200889173, 41.03406433337882,
                                 27.950322712127267, 21.267116786963925, 5.53344652490152,
                                 6.753264259267268, -2.3288137435962213, -10.492766334689875,
                                 -11.19641058176631, -20.134685945295644, -19.36581990084085,
                                 -38.26894947177957, -34.90246284430353, -11.019543212232008,
                                 -22.016714766708127, -18.710456277443544, -20.5568019328217,
                                 -2.636583876625667, 24.787714491718187, 29.325261678088406,
                                 45.31267371823666, -48.271054430207776, -59.7649172085901,
                                 48.22724814037523])
    # NOTE(review): the element-wise prediction check is disabled -- presumably
    # because perceptron-leaf numerics vary across platforms; confirm.
    # assert np.allclose(y_pred, expected_predictions)
    error = mean_absolute_error(y_true, y_pred)
    expected_error = 152.12931270533377
    assert np.isclose(error, expected_error)
    # the repr must stay stable so saved configs remain reproducible
    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='perceptron', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=1, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info
    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
def test_hoeffding_tree_regressor_coverage(test_path):
    """Coverage test: nominal-attribute observers plus graceful handling of an
    invalid leaf_prediction and an invalid split_criterion."""
    data = np.load(os.path.join(test_path, 'regression_data.npz'))
    X, y = data['X'], data['y']
    # 'percptron' is a deliberate typo: the learner must fall back to a
    # valid leaf predictor instead of crashing
    nominal = list(range(3))
    learner = HoeffdingTreeRegressor(
        leaf_prediction='percptron', nominal_attributes=nominal
    )
    print(learner.split_criterion)
    # an invalid split criterion should likewise be tolerated during fitting
    learner.split_criterion = 'VR'
    learner.partial_fit(X, y)
    assert learner._estimator_type == 'regressor'
def test_hoeffding_tree_regressor_model_description():
    """The learned tree's textual description should closely match the known
    reference split for this fixed stream (random_state=1)."""
    stream = RegressionGenerator(
        n_samples=500, n_features=20, n_informative=15, random_state=1
    )
    learner = HoeffdingTreeRegressor(leaf_prediction='mean')
    X, y = stream.next_sample(500)
    learner.partial_fit(X, y)
    expected_description = (
        "if Attribute 6 <= 0.1394515530995348:\n"
        " Leaf = Statistics {0: 276.0000, 1: -21537.4157, 2: 11399392.2187}\n"
        "if Attribute 6 > 0.1394515530995348:\n"
        " Leaf = Statistics {0: 224.0000, 1: 22964.8868, 2: 10433581.2534}\n"
    )
    # fuzzy match: leaf statistics may drift slightly across versions
    similarity = SequenceMatcher(
        None, expected_description, learner.get_model_description()
    ).ratio()
    assert similarity > 0.9
def test_hoeffding_tree_regressor_categorical_features(test_path):
    """Fit on purely categorical inputs and compare the induced tree's
    description against the known reference."""
    stream = np.load(os.path.join(test_path, 'ht_categorical_features_testcase.npy'))
    # drop the class column (index 7), kept only for the classification variant
    stream = stream[:, np.delete(np.arange(8), 7)]
    # the last remaining column is only used in the multi-target regression case
    stream = stream[:, :-1]
    X, y = stream[:, :-1], stream[:, -1]
    # all seven remaining features are nominal
    learner = HoeffdingTreeRegressor(nominal_attributes=list(range(7)))
    learner.partial_fit(X, y)
    expected_description = (
        "if Attribute 4 = 0.0:\n"
        " Leaf = Statistics {0: 606.0000, 1: 1212.0000, 2: 3626.0000}\n"
        "if Attribute 4 = 1.0:\n"
        " Leaf = Statistics {0: 551.0000, 1: 1128.0000, 2: 3400.0000}\n"
        "if Attribute 4 = 2.0:\n"
        " Leaf = Statistics {0: 566.0000, 1: 1139.0000, 2: 3423.0000}\n"
        "if Attribute 4 = 3.0:\n"
        " Leaf = Statistics {0: 577.0000, 1: 1138.0000, 2: 3374.0000}\n"
        "if Attribute 4 = 4.0:\n"
        " Leaf = Statistics {0: 620.0000, 1: 1233.0000, 2: 3725.0000}\n"
        "if Attribute 4 = -3.0:\n"
        " Leaf = Statistics {0: 80.0000, 1: 163.0000, 2: 483.0000}\n"
    )
    # fuzzy match: leaf statistics may drift slightly across versions
    assert SequenceMatcher(
        None, expected_description, learner.get_model_description()
    ).ratio() > 0.9
| [
"numpy.allclose",
"numpy.isclose",
"array.array",
"os.path.join",
"skmultiflow.trees.HoeffdingTreeRegressor",
"sklearn.metrics.mean_absolute_error",
"skmultiflow.data.RegressionGenerator",
"numpy.load",
"numpy.arange"
] | [((291, 378), 'skmultiflow.data.RegressionGenerator', 'RegressionGenerator', ([], {'n_samples': '(500)', 'n_features': '(20)', 'n_informative': '(15)', 'random_state': '(1)'}), '(n_samples=500, n_features=20, n_informative=15,\n random_state=1)\n', (310, 378), False, 'from skmultiflow.data import RegressionGenerator\n'), ((390, 436), 'skmultiflow.trees.HoeffdingTreeRegressor', 'HoeffdingTreeRegressor', ([], {'leaf_prediction': '"""mean"""'}), "(leaf_prediction='mean')\n", (412, 436), False, 'from skmultiflow.trees import HoeffdingTreeRegressor\n'), ((485, 495), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (490, 495), False, 'from array import array\n'), ((509, 519), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (514, 519), False, 'from array import array\n'), ((852, 1889), 'array.array', 'array', (['"""d"""', '[102.38946041769101, 55.6584574987656, 5.746076599168373, 17.11797209372667,\n 2.566888222752787, 9.188247802192826, 17.87894804676911, \n 15.940629626883966, 8.981172175448485, 13.152624115190092, \n 11.106058099429399, 6.473195313058236, 4.723621479590173, \n 13.825568609556493, 8.698873073880696, 1.6452441811010252, \n 5.123496188584294, 6.34387187194982, 5.9977733790395105, \n 6.874251577667707, 4.605348088338317, 8.20112636572672, \n 9.032631648758098, 4.428189978974459, 4.249801041367518, \n 9.983272668044492, 12.859518508979734, 11.741395774380285, \n 11.230028410261868, 9.126921979081521, 9.132146661688296, \n 7.750655625124709, 6.445145118245414, 5.760928671876355, \n 4.041291302080659, 3.591837600560529, 0.7640424010500604, \n 0.1738639840537784, 2.2068337802212286, -81.05302946841077, \n 96.17757415335177, -77.35894903819677, 95.85568683733698, \n 99.1981674250886, 99.89327888035015, 101.66673013734784, -\n 79.1904234513751, -80.42952143783687, 100.63954789983896]'], {}), "('d', [102.38946041769101, 55.6584574987656, 5.746076599168373, \n 17.11797209372667, 2.566888222752787, 9.188247802192826, \n 17.87894804676911, 
15.940629626883966, 8.981172175448485, \n 13.152624115190092, 11.106058099429399, 6.473195313058236, \n 4.723621479590173, 13.825568609556493, 8.698873073880696, \n 1.6452441811010252, 5.123496188584294, 6.34387187194982, \n 5.9977733790395105, 6.874251577667707, 4.605348088338317, \n 8.20112636572672, 9.032631648758098, 4.428189978974459, \n 4.249801041367518, 9.983272668044492, 12.859518508979734, \n 11.741395774380285, 11.230028410261868, 9.126921979081521, \n 9.132146661688296, 7.750655625124709, 6.445145118245414, \n 5.760928671876355, 4.041291302080659, 3.591837600560529, \n 0.7640424010500604, 0.1738639840537784, 2.2068337802212286, -\n 81.05302946841077, 96.17757415335177, -77.35894903819677, \n 95.85568683733698, 99.1981674250886, 99.89327888035015, \n 101.66673013734784, -79.1904234513751, -80.42952143783687, \n 100.63954789983896])\n", (857, 1889), False, 'from array import array\n'), ((2289, 2330), 'numpy.allclose', 'np.allclose', (['y_pred', 'expected_predictions'], {}), '(y_pred, expected_predictions)\n', (2300, 2330), True, 'import numpy as np\n'), ((2344, 2379), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2363, 2379), False, 'from sklearn.metrics import mean_absolute_error\n'), ((2431, 2464), 'numpy.isclose', 'np.isclose', (['error', 'expected_error'], {}), '(error, expected_error)\n', (2441, 2464), True, 'import numpy as np\n'), ((3277, 3364), 'skmultiflow.data.RegressionGenerator', 'RegressionGenerator', ([], {'n_samples': '(500)', 'n_features': '(20)', 'n_informative': '(15)', 'random_state': '(1)'}), '(n_samples=500, n_features=20, n_informative=15,\n random_state=1)\n', (3296, 3364), False, 'from skmultiflow.data import RegressionGenerator\n'), ((3376, 3444), 'skmultiflow.trees.HoeffdingTreeRegressor', 'HoeffdingTreeRegressor', ([], {'leaf_prediction': '"""perceptron"""', 'random_state': '(1)'}), "(leaf_prediction='perceptron', random_state=1)\n", (3398, 3444), False, 'from 
skmultiflow.trees import HoeffdingTreeRegressor\n'), ((3493, 3503), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (3498, 3503), False, 'from array import array\n'), ((3517, 3527), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (3522, 3527), False, 'from array import array\n'), ((3860, 4916), 'array.array', 'array', (['"""d"""', '[525.7553636732247, 352.8160300365902, 224.80744320456478, \n 193.72837054292074, 132.6059603765031, 117.06974933197759, \n 114.53342429855932, 89.37195405567235, 57.85335051891305, \n 60.00883955911155, 47.263185779784266, 25.17616431074491, \n 17.43259526890146, 47.33468996498019, 22.83975208548138, -\n 7.659282840823236, 8.564101665071064, 14.61585289361161, \n 11.560941733770441, 13.70120291865976, 1.1938438210799651, \n 19.01970713481836, 21.23459424444584, -5.667473522309328, -\n 5.203149619381393, 28.726275200889173, 41.03406433337882, \n 27.950322712127267, 21.267116786963925, 5.53344652490152, \n 6.753264259267268, -2.3288137435962213, -10.492766334689875, -\n 11.19641058176631, -20.134685945295644, -19.36581990084085, -\n 38.26894947177957, -34.90246284430353, -11.019543212232008, -\n 22.016714766708127, -18.710456277443544, -20.5568019328217, -\n 2.636583876625667, 24.787714491718187, 29.325261678088406, \n 45.31267371823666, -48.271054430207776, -59.7649172085901, \n 48.22724814037523]'], {}), "('d', [525.7553636732247, 352.8160300365902, 224.80744320456478, \n 193.72837054292074, 132.6059603765031, 117.06974933197759, \n 114.53342429855932, 89.37195405567235, 57.85335051891305, \n 60.00883955911155, 47.263185779784266, 25.17616431074491, \n 17.43259526890146, 47.33468996498019, 22.83975208548138, -\n 7.659282840823236, 8.564101665071064, 14.61585289361161, \n 11.560941733770441, 13.70120291865976, 1.1938438210799651, \n 19.01970713481836, 21.23459424444584, -5.667473522309328, -\n 5.203149619381393, 28.726275200889173, 41.03406433337882, \n 27.950322712127267, 21.267116786963925, 5.53344652490152, \n 
6.753264259267268, -2.3288137435962213, -10.492766334689875, -\n 11.19641058176631, -20.134685945295644, -19.36581990084085, -\n 38.26894947177957, -34.90246284430353, -11.019543212232008, -\n 22.016714766708127, -18.710456277443544, -20.5568019328217, -\n 2.636583876625667, 24.787714491718187, 29.325261678088406, \n 45.31267371823666, -48.271054430207776, -59.7649172085901, \n 48.22724814037523])\n", (3865, 4916), False, 'from array import array\n'), ((5529, 5564), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5548, 5564), False, 'from sklearn.metrics import mean_absolute_error\n'), ((5616, 5649), 'numpy.isclose', 'np.isclose', (['error', 'expected_error'], {}), '(error, expected_error)\n', (5626, 5649), True, 'import numpy as np\n'), ((6514, 6560), 'os.path.join', 'os.path.join', (['test_path', '"""regression_data.npz"""'], {}), "(test_path, 'regression_data.npz')\n", (6526, 6560), False, 'import os\n'), ((6572, 6590), 'numpy.load', 'np.load', (['test_file'], {}), '(test_file)\n', (6579, 6590), True, 'import numpy as np\n'), ((7031, 7118), 'skmultiflow.data.RegressionGenerator', 'RegressionGenerator', ([], {'n_samples': '(500)', 'n_features': '(20)', 'n_informative': '(15)', 'random_state': '(1)'}), '(n_samples=500, n_features=20, n_informative=15,\n random_state=1)\n', (7050, 7118), False, 'from skmultiflow.data import RegressionGenerator\n'), ((7144, 7190), 'skmultiflow.trees.HoeffdingTreeRegressor', 'HoeffdingTreeRegressor', ([], {'leaf_prediction': '"""mean"""'}), "(leaf_prediction='mean')\n", (7166, 7190), False, 'from skmultiflow.trees import HoeffdingTreeRegressor\n'), ((7830, 7893), 'os.path.join', 'os.path.join', (['test_path', '"""ht_categorical_features_testcase.npy"""'], {}), "(test_path, 'ht_categorical_features_testcase.npy')\n", (7842, 7893), False, 'import os\n'), ((7907, 7925), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (7914, 7925), True, 'import numpy as 
np\n'), ((8210, 8269), 'skmultiflow.trees.HoeffdingTreeRegressor', 'HoeffdingTreeRegressor', ([], {'nominal_attributes': 'nominal_attr_idx'}), '(nominal_attributes=nominal_attr_idx)\n', (8232, 8269), False, 'from skmultiflow.trees import HoeffdingTreeRegressor\n'), ((8174, 8186), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (8183, 8186), True, 'import numpy as np\n'), ((7985, 7997), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (7994, 7997), True, 'import numpy as np\n')] |
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils of BERT Modules.
"""
import json
import os
from abc import ABC
from typing import Any, Dict
import torch
from texar.torch.modules.pretrained.pretrained_base import PretrainedMixin
__all__ = [
"PretrainedBERTMixin",
]
_BERT_PATH = "https://storage.googleapis.com/bert_models/"
class PretrainedBERTMixin(PretrainedMixin, ABC):
r"""A mixin class to support loading pre-trained checkpoints for modules
that implement the BERT model.
The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by `Devlin et al.` It is a bidirectional Transformer model pre-trained
on a large corpus.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
"""
_MODEL_NAME = "BERT"
_MODEL2URL = {
'bert-base-uncased':
_BERT_PATH + "2018_10_18/uncased_L-12_H-768_A-12.zip",
'bert-large-uncased':
_BERT_PATH + "2018_10_18/uncased_L-24_H-1024_A-16.zip",
'bert-base-cased':
_BERT_PATH + "2018_10_18/cased_L-12_H-768_A-12.zip",
'bert-large-cased':
_BERT_PATH + "2018_10_18/cased_L-24_H-1024_A-16.zip",
'bert-base-multilingual-uncased':
_BERT_PATH + "2018_11_23/multi_cased_L-12_H-768_A-12.zip",
'bert-base-multilingual-cased':
_BERT_PATH + "2018_11_03/multilingual_L-12_H-768_A-12.zip",
'bert-base-chinese':
_BERT_PATH + "2018_11_03/chinese_L-12_H-768_A-12.zip",
}
@classmethod
def _transform_config(cls, cache_dir: str) -> Dict[str, Any]:
info = list(os.walk(cache_dir))
root, _, files = info[0]
config_path = None
for file in files:
if file.endswith('config.json'):
config_path = os.path.join(root, file)
if config_path is None:
raise ValueError(f"Cannot find the config file in {cache_dir}")
with open(config_path) as f:
config_ckpt = json.loads(f.read())
hidden_dim = config_ckpt['hidden_size']
configs = {
'hidden_size': hidden_dim,
'embed': {
'name': 'word_embeddings',
'dim': hidden_dim
},
'vocab_size': config_ckpt['vocab_size'],
'segment_embed': {
'name': 'token_type_embeddings',
'dim': hidden_dim
},
'type_vocab_size': config_ckpt['type_vocab_size'],
'position_embed': {
'name': 'position_embeddings',
'dim': hidden_dim
},
'position_size': config_ckpt['max_position_embeddings'],
'encoder': {
'name': 'encoder',
'embedding_dropout': config_ckpt['hidden_dropout_prob'],
'num_blocks': config_ckpt['num_hidden_layers'],
'multihead_attention': {
'use_bias': True,
'num_units': hidden_dim,
'num_heads': config_ckpt['num_attention_heads'],
'output_dim': hidden_dim,
'dropout_rate': config_ckpt['attention_probs_dropout_prob'],
'name': 'self'
},
'residual_dropout': config_ckpt['hidden_dropout_prob'],
'dim': hidden_dim,
'use_bert_config': True,
'poswise_feedforward': {
"layers": [{
'type': 'Linear',
'kwargs': {
'in_features': hidden_dim,
'out_features': config_ckpt['intermediate_size'],
'bias': True,
}
}, {
'type': 'Bert' + config_ckpt['hidden_act'].upper()
}, {
'type': 'Linear',
'kwargs': {
'in_features': config_ckpt['intermediate_size'],
'out_features': hidden_dim,
'bias': True,
}
}],
},
}
}
return configs
def _init_from_checkpoint(self, cache_dir: str, **kwargs):
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading TensorFlow models in PyTorch requires installing "
"TensorFlow. Please see https://www.tensorflow.org/install/ "
"for installation instructions.")
raise
global_tensor_map = {
'bert/embeddings/word_embeddings': 'word_embedder._embedding',
'bert/embeddings/token_type_embeddings':
'segment_embedder._embedding',
'bert/embeddings/position_embeddings':
'position_embedder._embedding',
'bert/embeddings/LayerNorm/beta':
'encoder.input_normalizer.bias',
'bert/embeddings/LayerNorm/gamma':
'encoder.input_normalizer.weight',
}
layer_tensor_map = {
"attention/self/key/bias": "self_attns.{}.K_dense.bias",
"attention/self/query/bias": "self_attns.{}.Q_dense.bias",
"attention/self/value/bias": "self_attns.{}.V_dense.bias",
"attention/output/dense/bias": "self_attns.{}.O_dense.bias",
"attention/output/LayerNorm/gamma": "poswise_layer_norm.{}.weight",
"attention/output/LayerNorm/beta": "poswise_layer_norm.{}.bias",
"intermediate/dense/bias": "poswise_networks.{}._layers.0.bias",
"output/dense/bias": "poswise_networks.{}._layers.2.bias",
"output/LayerNorm/gamma": "output_layer_norm.{}.weight",
"output/LayerNorm/beta": "output_layer_norm.{}.bias",
}
layer_transpose_map = {
"attention/self/key/kernel": "self_attns.{}.K_dense.weight",
"attention/self/query/kernel": "self_attns.{}.Q_dense.weight",
"attention/self/value/kernel": "self_attns.{}.V_dense.weight",
"attention/output/dense/kernel": "self_attns.{}.O_dense.weight",
"intermediate/dense/kernel": "poswise_networks.{}._layers.0.weight",
"output/dense/kernel": "poswise_networks.{}._layers.2.weight",
}
pooler_map = {
'bert/pooler/dense/bias': 'pooler.0.bias',
'bert/pooler/dense/kernel': 'pooler.0.weight'
}
tf_path = os.path.abspath(os.path.join(cache_dir, 'bert_model.ckpt'))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tfnames, arrays = [], []
for name, _ in init_vars:
array = tf.train.load_variable(tf_path, name)
tfnames.append(name)
arrays.append(array.squeeze())
py_prefix = "encoder."
idx = 0
for name, array in zip(tfnames, arrays):
if name.startswith('cls'):
# ignore those variables begin with cls
continue
if name in global_tensor_map:
v_name = global_tensor_map[name]
pointer = self._name_to_variable(v_name)
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
idx += 1
elif name in pooler_map:
pointer = self._name_to_variable(pooler_map[name])
if name.endswith('bias'):
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
idx += 1
else:
array_t = np.transpose(array)
assert pointer.shape == array_t.shape
pointer.data = torch.from_numpy(array_t)
idx += 1
else:
# here name is the TensorFlow variable name
name_tmp = name.split("/")
# e.g. layer_
layer_no = name_tmp[2][6:]
name_tmp = "/".join(name_tmp[3:])
if name_tmp in layer_tensor_map:
v_name = layer_tensor_map[name_tmp].format(layer_no)
pointer = self._name_to_variable(py_prefix + v_name)
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
elif name_tmp in layer_transpose_map:
v_name = layer_transpose_map[name_tmp].format(layer_no)
pointer = self._name_to_variable(py_prefix + v_name)
array_t = np.transpose(array)
assert pointer.shape == array_t.shape
pointer.data = torch.from_numpy(array_t)
else:
raise NameError(f"Variable with name '{name}' not found")
idx += 1
| [
"tensorflow.train.load_variable",
"os.path.join",
"torch.from_numpy",
"tensorflow.train.list_variables",
"numpy.transpose",
"os.walk"
] | [((7364, 7396), 'tensorflow.train.list_variables', 'tf.train.list_variables', (['tf_path'], {}), '(tf_path)\n', (7387, 7396), True, 'import tensorflow as tf\n'), ((2282, 2300), 'os.walk', 'os.walk', (['cache_dir'], {}), '(cache_dir)\n', (2289, 2300), False, 'import os\n'), ((7263, 7305), 'os.path.join', 'os.path.join', (['cache_dir', '"""bert_model.ckpt"""'], {}), "(cache_dir, 'bert_model.ckpt')\n", (7275, 7305), False, 'import os\n'), ((7484, 7521), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['tf_path', 'name'], {}), '(tf_path, name)\n', (7506, 7521), True, 'import tensorflow as tf\n'), ((2464, 2488), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2476, 2488), False, 'import os\n'), ((8047, 8070), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (8063, 8070), False, 'import torch\n'), ((8333, 8356), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (8349, 8356), False, 'import torch\n'), ((8438, 8457), 'numpy.transpose', 'np.transpose', (['array'], {}), '(array)\n', (8450, 8457), True, 'import numpy as np\n'), ((8551, 8576), 'torch.from_numpy', 'torch.from_numpy', (['array_t'], {}), '(array_t)\n', (8567, 8576), False, 'import torch\n'), ((9136, 9159), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (9152, 9159), False, 'import torch\n'), ((9393, 9412), 'numpy.transpose', 'np.transpose', (['array'], {}), '(array)\n', (9405, 9412), True, 'import numpy as np\n'), ((9506, 9531), 'torch.from_numpy', 'torch.from_numpy', (['array_t'], {}), '(array_t)\n', (9522, 9531), False, 'import torch\n')] |
import warnings
warnings.filterwarnings("ignore")
import csv
import ast
import numpy as np
import pandas as pd
import statsmodels.api as sm
import math
#gem column codes: 0 = latitude, 1 = longitude, 2 = altitude, 3 = accuracy, 4 = department, 5 = municipality, 6 = household salary, 7 = JG2, 8 = age, 9 = JG6, 10 = gender
#total_towers_within_range.txt is the output of the total_towers_within_range function in network_analysis.py saved to a txt file
#below are what the codes for each column in gem represent for the data being used
#0 = latitude
#1 = longitude
#2 = altitude
#3 = accuracy
#4 = department
#5 = municipality
#6 = salary
#7 = generally, how do you find out what is going in the country (JG2)
#8 = age
#9 = If I had access to a better internet service, what would be the main use I would give it? In what activity would its use increase? (JG6)
#10 = gender
#11 = marital status
#12 = ethnicity
#13 = religious denomination
#14 = does your household have cell phone access
#15 = does your household have a landline
#16 = does your household have internet access
def open_csv(name):
with open(name) as f:
reader = csv.reader(f)
data = list(reader)
data = data[1:]
return data
def open_dict(name):
##open dictionary from a file, untested with not txt files
with open(name) as f:
reader = f.read()
data = ast.literal_eval(reader)
return data
def convert_dict_to_list(data):
modified_data = list()
for key in data.keys():
modified_data.append(data[key])
return modified_data
def initial_cleanup_gem_data(gem):
##cleans up gem data by removing entries with incomplete data
to_be_removed = list()
for element in gem:
try:
#checks if entry is complete by checking if the fields
#contain the correct data type
data_check = [float(element[0]), float(element[1]), float(element[2]), int(element[3])]
except ValueError:
to_be_removed.append(element)
while len(to_be_removed) > 0:
gem.remove(to_be_removed.pop())
return gem
def final_cleanup_data(gem, towers):
##cleans up gem data by removing entries who refused to answer relevant
##questions and also removes their data from the towers file
to_be_removed = list()
for index in range(0, len(gem)):
try:
if int(gem[index][6]) in [-1, -2]:
to_be_removed.append(index)
except ValueError:
pass
while len(to_be_removed) > 0:
index = to_be_removed.pop()
del gem[index]
del towers[index]
return gem, towers
def get_column(gem, column_num):
##from the gem data, get one of the columns
data = list()
for element in gem:
data.append(element[column_num])
return data
def convert_income_codes(income):
##convert coded income to the midpoint of the range of income
##they answered with
data = list()
values = {-2:"ref",-1:"idk",1:250,2:625,3:1175,4:1800,5:2250,6:2750,7:4000,8:7500,9:12500,10:17500,11:20000}
for index in range(0, len(income)):
data.append(values[int(income[index][0])])
return data
def flag_data(data, flagged_numbers):
##flag data in a column to do logistic regression, flagged numbers
##should be a list of numbers to be flagged
flags = {}
for element in data:
if element not in flags.keys():
if int(element) in flagged_numbers:
#the chosen values to flag as True, the values here currently mean that the
##person surveyed primarily uses internet/social media for information
flags[element] = True
else:
flags[element] = False
flagged_data = list()
for element in data:
if element in flags.keys():
flagged_data.append(flags[element])
return flagged_data
def log(data, base):
##apply log function to a list with the inputed base for the log
modified_data = list()
for element in data:
modified_data.append(math.log(float(element),base))
return modified_data
def stats(predictor, response, model):
##will apply the statistical model you enter to the variables inputed, the
##codes for each statistical model are viewable in the chain of if statements
predictor = np.asarray(predictor)
response = np.asarray(response)
if model == 'logit':
model = sm.Logit(predictor, response)
elif model == 'lsr':
model = sm.OLS(predictor, response)
elif model == "probit":
model = sm.Probit(predictor, response)
elif model == "gls":
model = sm.GLS(predictor, response)
elif model == "glsar":
model = sm.GLSAR(predictor, response)
elif model == "quantreg":
model = sm.QuantReg(predictor, response)
else:
pass
model = model.fit()
print(model.summary())
##instead of printing the model summary, should return
##the model with the predict function as printing it here only allows you to view the summary rather than use it for anything
def string_to_int(data):
##convert list of strings to list of integers
modified_data = list()
for element in data:
modified_data.append(int(element))
return modified_data
def combine_lists(data):
##a list of n lists, each of size m will turn into a list of m
##lists, each of size n - i.e. [[1, 2, 3], [1, 2, 3]] -> [[1, 1], [2, 2], [3, 3]]
combined_data = list()
for x in range(0, len(data[0])):
inner_list = list()
for y in range(0, len(data)):
inner_list.append(data[y][x])
combined_data.append(inner_list)
return combined_data
def main():
gem_data = open_csv("data\\gem_data.csv")
tower_data = open_dict("data\\total_towers_within_range.txt")
tower_data = convert_dict_to_list(tower_data)
gem = initial_cleanup_gem_data(gem_data)
gem, towers = final_cleanup_data(gem, tower_data)
income = get_column(gem, 6)
income = convert_income_codes(income)
internet_use = get_column(gem, 9)
internet_use = flag_data(internet_use, [4])
age = get_column(gem, 8)
age = string_to_int(age)
gender = get_column(gem, 10)
gender = string_to_int(gender)
gender = flag_data(gender, [1])
marital_status = get_column(gem, 11)
marital_status = string_to_int(marital_status)
marital_status = flag_data(marital_status, [2, 5])
religion = get_column(gem, 13)
religion = string_to_int(religion)
religion = flag_data(religion, [1, 2, 3])
cell_phone_access = get_column(gem, 14)
cell_phone_access = string_to_int(cell_phone_access)
cell_phone_access = flag_data(cell_phone_access, [6])
landline_access = get_column(gem, 15)
landline_access = string_to_int(landline_access)
landline_access = flag_data(landline_access, [1])
internet_access = get_column(gem, 16)
internet_access = string_to_int(internet_access)
internet_access = flag_data(internet_access, [1])
income_logged = log(income, 10)
towers_logged = log(towers, 10)
demographics = combine_lists([towers, income, age, gender, marital_status, religion])
##combines whatever variables you want into a list of lists so that it can be inputed
##into a model
demographics = sm.add_constant(demographics)
##stats(towers_logged, demographics, 'lsr') #does a least squares regression
##with the first variable as the predictor and the second variable as the response
##stats(towers_logged, demographics, 'gls')
##stats(towers_logged, demographics, 'glsar')
##stats(towers_logged, demographics, 'quantreg')
##stats(internet_access, demographics, 'logit')
##does a logistic regression with the first variable being the binary predictor
##and the second variable as the response
##stats(internet_access, demographics, 'probit')
main()
| [
"statsmodels.api.QuantReg",
"numpy.asarray",
"statsmodels.api.GLSAR",
"statsmodels.api.Probit",
"ast.literal_eval",
"statsmodels.api.OLS",
"statsmodels.api.add_constant",
"statsmodels.api.GLS",
"statsmodels.api.Logit",
"csv.reader",
"warnings.filterwarnings"
] | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((4356, 4377), 'numpy.asarray', 'np.asarray', (['predictor'], {}), '(predictor)\n', (4366, 4377), True, 'import numpy as np\n'), ((4393, 4413), 'numpy.asarray', 'np.asarray', (['response'], {}), '(response)\n', (4403, 4413), True, 'import numpy as np\n'), ((7362, 7391), 'statsmodels.api.add_constant', 'sm.add_constant', (['demographics'], {}), '(demographics)\n', (7377, 7391), True, 'import statsmodels.api as sm\n'), ((1147, 1160), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1157, 1160), False, 'import csv\n'), ((1378, 1402), 'ast.literal_eval', 'ast.literal_eval', (['reader'], {}), '(reader)\n', (1394, 1402), False, 'import ast\n'), ((4455, 4484), 'statsmodels.api.Logit', 'sm.Logit', (['predictor', 'response'], {}), '(predictor, response)\n', (4463, 4484), True, 'import statsmodels.api as sm\n'), ((4526, 4553), 'statsmodels.api.OLS', 'sm.OLS', (['predictor', 'response'], {}), '(predictor, response)\n', (4532, 4553), True, 'import statsmodels.api as sm\n'), ((4598, 4628), 'statsmodels.api.Probit', 'sm.Probit', (['predictor', 'response'], {}), '(predictor, response)\n', (4607, 4628), True, 'import statsmodels.api as sm\n'), ((4670, 4697), 'statsmodels.api.GLS', 'sm.GLS', (['predictor', 'response'], {}), '(predictor, response)\n', (4676, 4697), True, 'import statsmodels.api as sm\n'), ((4741, 4770), 'statsmodels.api.GLSAR', 'sm.GLSAR', (['predictor', 'response'], {}), '(predictor, response)\n', (4749, 4770), True, 'import statsmodels.api as sm\n'), ((4817, 4849), 'statsmodels.api.QuantReg', 'sm.QuantReg', (['predictor', 'response'], {}), '(predictor, response)\n', (4828, 4849), True, 'import statsmodels.api as sm\n')] |
import argparse
import random
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from mario.tokens import REPLACE_TOKENS
from mario.level_image_gen import LevelImageGen
from mario.special_mario_downsampling import special_mario_downsampling
from PCA_Detector import PCA_Detector, unify_shapes, divergence
################################################################################
# PyTorch
# use GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device, end=' ')
if device.type == 'cuda':
print(f'({torch.cuda.get_device_name(0)})')
print('Memory Allocated:',
round(torch.cuda.memory_allocated(0)/1024**3, 1), 'GB')
print('Memory Cached: ',
round(torch.cuda.memory_reserved(0)/1024**3, 1), 'GB')
else:
print()
################################################################################
# TOAD-GAN
# command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--not_cuda", help="disables cuda",
action="store_true", default=0)
parser.add_argument("--seed", help="manual seed", type=int)
parser.add_argument("--input-dir", help="input image dir", default="input")
parser.add_argument("--input-name", help="input image name",
default="lvl_1-1.txt")
parser.add_argument("--patch-width", help="horizontal patch dimension",
default=7)
parser.add_argument("--patch-height", help="vertical patch dimension",
default=7)
parser.add_argument("--kernel-width", help="horizontal kernel dimension",
default=7)
parser.add_argument("--kernel-height", help="vertical kernel dimension",
default=7)
parser.add_argument("--conv-layers", help="number of convolutional layers",
default=1)
opt = parser.parse_args()
def set_seed(seed=0):
""" Set the seed for all possible sources of randomness to allow for reproduceability. """
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(seed)
random.seed(seed)
# configure default state
opt.device = "cpu" if opt.not_cuda else device
if torch.cuda.is_available() and opt.not_cuda:
print("WARNING: CUDA device is present but disabled.")
if opt.seed is None:
opt.seed = random.randint(1, 10000)
print("Random Seed: ", opt.seed)
set_seed(opt.seed)
opt.ImgGen = LevelImageGen('mario/sprites')
def read_level(opt):
""" Wrapper function for read_level_from_file using namespace opt. Updates parameters for opt."""
level, uniques = read_level_from_file(opt.input_dir, opt.input_name)
opt.token_list = uniques
print("Tokens in {}/{}: {}".format(
opt.input_dir,
opt.input_name,
' '.join(opt.token_list)))
return level
def read_level_from_file(input_dir, input_name):
""" Returns a full token level tensor from a .txt file. Also returns the unique tokens found in this level.
Token. """
txt_level = load_level_from_text("%s/%s" % (input_dir, input_name))
uniques = set()
for line in txt_level:
for token in line:
# if token != "\n" and token != "M" and token != "F":
if token != "\n" and token not in REPLACE_TOKENS.items():
uniques.add(token)
uniques = list(uniques)
uniques.sort() # necessary! otherwise we won't know the token order later
oh_level = ascii_to_one_hot_level(txt_level, uniques)
return oh_level.unsqueeze(dim=0), uniques
def load_level_from_text(path_to_level_txt):
""" Loads an ascii level from a text file. """
with open(path_to_level_txt, "r") as f:
ascii_level = []
for line in f:
for token, replacement in REPLACE_TOKENS.items():
line = line.replace(token, replacement)
ascii_level.append(line)
return ascii_level
def ascii_to_one_hot_level(level, tokens):
""" Converts an ascii level to a full token level tensor. """
oh_level = torch.zeros((len(tokens), len(level), len(level[-1])))
for i in range(len(level)):
for j in range(len(level[-1])):
token = level[i][j]
if token in tokens and token != "\n":
oh_level[tokens.index(token), i, j] = 1
return oh_level
def one_hot_to_ascii_level(level, tokens):
""" Converts a full token level tensor to an ascii level. """
ascii_level = []
for i in range(level.shape[2]):
line = ""
for j in range(level.shape[3]):
line += tokens[level[:, :, i, j].argmax()]
if i < level.shape[2] - 1:
line += "\n"
ascii_level.append(line)
return ascii_level
################################################################################
# PCA_Detector
input_names = ['1-1', '1-2', '1-3', '2-1', '3-1', '3-3', '4-1',
'4-2', '5-1', '5-3', '6-1', '6-2', '6-3', '7-1', '8-1']
kernel_size = (2, 2)
# # render the real levels
# for input_name in input_names:
# opt.input_name = f'lvl_{input_name}.txt'
# real = read_level(opt).to(opt.device)
# ascii_real = one_hot_to_ascii_level(real, opt.token_list)
# real_level = opt.ImgGen.render(ascii_real)
# real_level.save(f'output/lvl_{input_name}.png', format='png')
def preprocess(level):
# remove the sky layer
sky_index = opt.token_list.index('-')
before_sky = level[:, :sky_index]
after_sky = level[:, sky_index+1:]
level = torch.cat((before_sky, after_sky), dim=1)
# Undo one-hot encoding
level = level.argmax(dim=1).unsqueeze(1).float()
return level
# Load all levels
reals = {}
for input_name in input_names:
opt.input_name = f'lvl_{input_name}.txt'
real = read_level(opt).to(opt.device)
reals[input_name] = preprocess(real)
# Build a PCA_Detector for each level
detectors = {}
for input_name in input_names:
real = reals[input_name]
detectors[input_name] = PCA_Detector(
opt, input_name, reals[input_name], kernel_size)
# Compute pairwise divergence
N = len(input_names)
data = np.zeros((N, N))
for i, input_name1 in enumerate(input_names):
for j, input_name2 in enumerate(input_names):
a = detectors[input_name1](reals[input_name1])
b = detectors[input_name1](reals[input_name2])
data[j, i] = divergence(a, b)
plt.imshow(data, interpolation='nearest', extent=[0, 2*N, 0, 2*N])
plt.xticks([2*i + 1 for i in range(N)], input_names, rotation=45)
plt.yticks([2*i + 1 for i in range(N)], reversed(input_names))
plt.colorbar(shrink=0.85)
plt.savefig(f'pca-detector-2d-intensities.png',
bbox_inches='tight', pad_inches=0.1)
plt.show()
# # visualize detector outputs
# for input_name1 in input_names:
# for input_name2 in input_names:
# detectors[input_name1].visualize(
# input_name2,
# reals[input_name2],
# rf'PCA_Detector_output\detector_{input_name1}_level_{input_name2}.png')
| [
"torch.cuda.is_available",
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"mario.level_image_gen.LevelImageGen",
"mario.tokens.REPLACE_TOKENS.items",
"torch.cuda.memory_reserved",
"numpy.random.seed",
"random.randint",
"matplotlib.pyplot.savefig",
"PCA_Detector.PCA_Detector",
"PCA_Detect... | [((1061, 1086), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1084, 1086), False, 'import argparse\n'), ((2737, 2767), 'mario.level_image_gen.LevelImageGen', 'LevelImageGen', (['"""mario/sprites"""'], {}), "('mario/sprites')\n", (2750, 2767), False, 'from mario.level_image_gen import LevelImageGen\n'), ((6390, 6406), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (6398, 6406), True, 'import numpy as np\n'), ((6652, 6722), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'interpolation': '"""nearest"""', 'extent': '[0, 2 * N, 0, 2 * N]'}), "(data, interpolation='nearest', extent=[0, 2 * N, 0, 2 * N])\n", (6662, 6722), True, 'import matplotlib.pyplot as plt\n'), ((6849, 6874), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.85)'}), '(shrink=0.85)\n', (6861, 6874), True, 'import matplotlib.pyplot as plt\n'), ((6876, 6964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""pca-detector-2d-intensities.png"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(f'pca-detector-2d-intensities.png', bbox_inches='tight',\n pad_inches=0.1)\n", (6887, 6964), True, 'import matplotlib.pyplot as plt\n'), ((6973, 6983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6981, 6983), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2128), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2122, 2128), False, 'import torch\n'), ((2136, 2161), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2159, 2161), False, 'import torch\n'), ((2387, 2407), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2401, 2407), True, 'import numpy as np\n'), ((2412, 2429), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2423, 2429), False, 'import random\n'), ((2508, 2533), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2531, 2533), False, 'import torch\n'), ((2647, 2671), 'random.randint', 'random.randint', 
(['(1)', '(10000)'], {}), '(1, 10000)\n', (2661, 2671), False, 'import random\n'), ((5788, 5829), 'torch.cat', 'torch.cat', (['(before_sky, after_sky)'], {'dim': '(1)'}), '((before_sky, after_sky), dim=1)\n', (5797, 5829), False, 'import torch\n'), ((6260, 6321), 'PCA_Detector.PCA_Detector', 'PCA_Detector', (['opt', 'input_name', 'reals[input_name]', 'kernel_size'], {}), '(opt, input_name, reals[input_name], kernel_size)\n', (6272, 6321), False, 'from PCA_Detector import PCA_Detector, unify_shapes, divergence\n'), ((569, 594), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (592, 594), False, 'import torch\n'), ((2171, 2199), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2193, 2199), False, 'import torch\n'), ((2208, 2240), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2234, 2240), False, 'import torch\n'), ((6634, 6650), 'PCA_Detector.divergence', 'divergence', (['a', 'b'], {}), '(a, b)\n', (6644, 6650), False, 'from PCA_Detector import PCA_Detector, unify_shapes, divergence\n'), ((4068, 4090), 'mario.tokens.REPLACE_TOKENS.items', 'REPLACE_TOKENS.items', ([], {}), '()\n', (4088, 4090), False, 'from mario.tokens import REPLACE_TOKENS\n'), ((688, 717), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (714, 717), False, 'import torch\n'), ((769, 799), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['(0)'], {}), '(0)\n', (796, 799), False, 'import torch\n'), ((866, 895), 'torch.cuda.memory_reserved', 'torch.cuda.memory_reserved', (['(0)'], {}), '(0)\n', (892, 895), False, 'import torch\n'), ((3570, 3592), 'mario.tokens.REPLACE_TOKENS.items', 'REPLACE_TOKENS.items', ([], {}), '()\n', (3590, 3592), False, 'from mario.tokens import REPLACE_TOKENS\n')] |
# Copyright 2020 GreenWaves Technologies, SAS
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from abc import ABC, abstractmethod, abstractclassmethod
import numpy as np
import importlib
class JsonSerializable(ABC):
@abstractmethod
def _encapsulate(self):
raise NotImplementedError("JsonSerializable must implement _encapsulate()")
@abstractclassmethod
def _dencapsulate(cls, val):
raise NotImplementedError("JsonSerializable must implement _dencapsulate()")
@classmethod
def dencapsulate(cls, val):
return cls._dencapsulate(val)
def to_dict(self):
return {
'__type': 'JsonSerializable',
'__module_name': self.__class__.__module__,
'__class_name': self.__class__.__name__,
'__contents': self._encapsulate()
}
@classmethod
def from_dict(cls: 'JsonSerializable', val):
assert val['__type'] == 'JsonSerializable', 'no a JsonSerializable'
json_module = importlib.import_module(val['__module_name'])
json_class = getattr(json_module, val['__class_name'])
return json_class.dencapsulate(val['__contents'])
class JsonSerializableStateEncoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pylint: disable=no-self-use, method-hidden
def default(self, o):
if isinstance(o, JsonSerializable):
return o.to_dict()
if isinstance(o, np.integer):
return int(o)
if isinstance(o, np.floating):
return float(o)
if isinstance(o, np.ndarray):
return {
'__type': 'numpy.ndarray',
'__contents': o.tolist(),
'__dtype': o.dtype.name
}
# Let the base class default method raise the
try:
return json.JSONEncoder.default(self, o)
except TypeError as err:
raise err
def prepare(self, obj):
if isinstance(obj, dict):
# if we have non string keys then encode as a list
if any(not isinstance(k, str) for k in obj.keys()):
return {
'__type': 'dict',
'__module_name': obj.__class__.__module__,
'__class_name': obj.__class__.__name__,
'__contents': [(self.prepare(k), self.prepare(v)) for k, v in obj.items()]
}
else:
return {k: self.prepare(v) for k, v in obj.items()}
if isinstance(obj, (list, tuple)):
return [self.prepare(v) for v in obj]
if isinstance(obj, (str, bool, float, int)) or obj is None:
return obj
return self.default(obj)
def iterencode(self, obj, **kwargs):
return super(JsonSerializableStateEncoder, self).iterencode(self.prepare(obj), **kwargs)
class JsonSerializableStateDecoder(json.JSONDecoder):
def __init__(self, *args, object_hook=None, **kwargs):
if object_hook is None:
super(JsonSerializableStateDecoder, self).__init__(object_hook=self.object_hook, *args, **kwargs)
else:
super(JsonSerializableStateDecoder, self).__init__(object_hook=object_hook, *args, **kwargs)
# pylint: disable=no-self-use, method-hidden
def object_hook(self, obj):
if '__type' in obj:
if obj['__type'] == 'dict':
json_module = importlib.import_module(obj['__module_name'])
json_class = getattr(json_module, obj['__class_name'])
return json_class(tuple(elem) for elem in obj['__contents'])
if obj['__type'] == 'numpy.ndarray':
return np.array(obj['__contents'], dtype=np.dtype(obj['__dtype']))
if obj['__type'] == 'JsonSerializable':
return JsonSerializable.from_dict(obj)
return obj
| [
"numpy.dtype",
"importlib.import_module",
"json.JSONEncoder.default"
] | [((1501, 1546), 'importlib.import_module', 'importlib.import_module', (["val['__module_name']"], {}), "(val['__module_name'])\n", (1524, 1546), False, 'import importlib\n'), ((2371, 2404), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (2395, 2404), False, 'import json\n'), ((3942, 3987), 'importlib.import_module', 'importlib.import_module', (["obj['__module_name']"], {}), "(obj['__module_name'])\n", (3965, 3987), False, 'import importlib\n'), ((4242, 4266), 'numpy.dtype', 'np.dtype', (["obj['__dtype']"], {}), "(obj['__dtype'])\n", (4250, 4266), True, 'import numpy as np\n')] |
import os
import argparse
import numpy as np

# Collect all image file paths under --path and write them, one per line,
# to the --output flist file (format used by edge-connect style loaders).
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='../../../data/mask', help='path to the dataset')
parser.add_argument('--output', type=str, default='../../../data/edge_connect_flist/mask.flist', help='path to the file list')
args = parser.parse_args()

# Recognized image extensions, upper-case and including the leading dot.
# BUGFIX: the original set contained 'TIFF' without a dot, so `.tiff` files
# were never matched because os.path.splitext always keeps the dot.
ext = {'.JPG', '.JPEG', '.PNG', '.TIF', '.TIFF'}

images = []
for root, dirs, files in os.walk(args.path):
    # ------------------------------------------------------------
    # root  - the directory currently being visited
    # dirs  - sub-directories directly inside root (os.walk recurses itself)
    # files - plain files directly inside root
    # ------------------------------------------------------------
    print('loading ' + root)
    for file in files:
        # os.path.splitext splits "name.ext" into ("name", ".ext");
        # compare the upper-cased extension against the allowed set.
        if os.path.splitext(file)[1].upper() in ext:
            images.append(os.path.join(root, file))

images = sorted(images)
# Write one absolute/relative path per line.
np.savetxt(args.output, images, fmt='%s')
"argparse.ArgumentParser",
"os.path.splitext",
"os.path.join",
"numpy.savetxt",
"os.walk"
] | [((55, 80), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (78, 80), False, 'import argparse\n'), ((420, 438), 'os.walk', 'os.walk', (['args.path'], {}), '(args.path)\n', (427, 438), False, 'import os\n'), ((1088, 1129), 'numpy.savetxt', 'np.savetxt', (['args.output', 'images'], {'fmt': '"""%s"""'}), "(args.output, images, fmt='%s')\n", (1098, 1129), True, 'import numpy as np\n'), ((1037, 1061), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1049, 1061), False, 'import os\n'), ((762, 784), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (778, 784), False, 'import os\n')] |
from director import visualization as vis
from director import consoleapp
from director import vtkAll as vtk
import numpy as np

# Minimal console application with a single 3D view.
app = consoleapp.ConsoleApp()
view = app.createView()
view.show()

# Coordinate frame with tracing enabled so its motion leaves a visible path.
t = vtk.vtkTransform()
obj = vis.showFrame(t, 'frame')
obj.setProperty('Trace', True)

# Sweep the frame along a spiral; each step translates it from its current
# position to the next point on the curve.
for theta in np.linspace(0, 30, 1000):
    current = np.array(t.GetPosition())
    target = np.array([theta * 0.03, np.sin(theta) * 0.1, np.cos(theta) * 0.1])
    t.Translate(target - current)
    t.Modified()

view.resetCamera()
app.start()
| [
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"director.vtkAll.vtkTransform",
"director.consoleapp.ConsoleApp",
"director.visualization.showFrame"
] | [((135, 158), 'director.consoleapp.ConsoleApp', 'consoleapp.ConsoleApp', ([], {}), '()\n', (156, 158), False, 'from director import consoleapp\n'), ((200, 218), 'director.vtkAll.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (216, 218), True, 'from director import vtkAll as vtk\n'), ((225, 250), 'director.visualization.showFrame', 'vis.showFrame', (['t', '"""frame"""'], {}), "(t, 'frame')\n", (238, 250), True, 'from director import visualization as vis\n'), ((352, 376), 'numpy.linspace', 'np.linspace', (['(0)', '(30)', '(1000)'], {}), '(0, 30, 1000)\n', (363, 376), True, 'import numpy as np\n'), ((444, 457), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (450, 457), True, 'import numpy as np\n'), ((463, 476), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (469, 476), True, 'import numpy as np\n')] |
import json
import os
import os.path
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
import napari.utils.theme
import numpy as np
from napari.resources import get_stylesheet
from napari.utils.theme import template as napari_template
from qtpy.QtCore import QObject, Signal
from PartSeg.common_backend.partially_const_dict import PartiallyConstDict
from PartSegCore.color_image import ColorMap, default_colormap_dict, default_label_dict
from PartSegCore.color_image.base_colors import starting_colors
from PartSegCore.io_utils import HistoryElement, load_metadata_base
from PartSegCore.json_hooks import ProfileDict, ProfileEncoder, check_loaded_dict
from PartSegCore.project_info import ProjectInfoBase
from PartSegCore.segmentation.algorithm_base import AdditionalLayerDescription
from PartSegCore.segmentation_info import SegmentationInfo
from PartSegImage import Image
class ImageSettings(QObject):
    """
    Base class for all PartSeg settings. Keeps information about current Image.
    """

    image_changed = Signal([Image], [int], [str])
    """:py:class:`Signal` ``([Image], [int], [str])`` emitted when image has changed"""
    image_spacing_changed = Signal()
    """:py:class:`Signal` emitted when the image spacing has changed"""
    segmentation_changed = Signal(SegmentationInfo)
    """
    :py:class:`.Signal`
    emitted when segmentation has changed
    """
    segmentation_clean = Signal()
    additional_layers_changed = Signal()

    def __init__(self):
        super().__init__()
        self._image: Optional[Image] = None
        self._image_path = ""
        self._image_spacing = 210, 70, 70
        self._segmentation_info = SegmentationInfo(None)
        self._additional_layers = {}

    @property
    def full_segmentation(self):
        raise AttributeError("full_segmentation not supported")

    @full_segmentation.setter
    def full_segmentation(self, val):  # pylint: disable=R0201
        raise AttributeError("full_segmentation not supported")

    @property
    def noise_remove_image_part(self):
        raise AttributeError("full_segmentation not supported")

    @noise_remove_image_part.setter
    def noise_remove_image_part(self, val):  # pylint: disable=R0201
        raise AttributeError("full_segmentation not supported")

    @property
    def additional_layers(self) -> Dict[str, AdditionalLayerDescription]:
        """Extra layers produced by the last segmentation run, keyed by name."""
        return self._additional_layers

    @additional_layers.setter
    def additional_layers(self, val: Dict[str, AdditionalLayerDescription]):
        self._additional_layers = val
        self.additional_layers_changed.emit()

    @property
    def image_spacing(self):
        """:py:meth:`Image.spacing` proxy"""
        if self._image is not None:
            return self._image.spacing
        return ()

    def is_image_2d(self):
        """:py:meth:`Image.is_2d` proxy"""
        return self._image is None or self._image.is_2d

    @image_spacing.setter
    def image_spacing(self, value):
        if len(value) not in [2, 3]:
            raise ValueError(f"value parameter should have length 2 or 3. Current length is {len(value)}.")
        if len(value) == 2:
            # Keep the current first (z) spacing component when only two values are given.
            self._image.set_spacing(tuple([self._image.spacing[0]] + list(value)))
        else:
            self._image.set_spacing(value)
        self.image_spacing_changed.emit()

    @property
    def segmentation(self) -> np.ndarray:
        """current segmentation"""
        return self._segmentation_info.segmentation

    @property
    def segmentation_info(self) -> SegmentationInfo:
        return self._segmentation_info

    @segmentation.setter
    def segmentation(self, val: np.ndarray):
        if val is not None:
            try:
                val = self.image.fit_array_to_image(val)
            except ValueError:
                raise ValueError("Segmentation do not fit to image")
        self._segmentation_info = SegmentationInfo(val)
        # Emit the dedicated "clean" signal when the segmentation is removed.
        if val is not None:
            self.segmentation_changed.emit(self._segmentation_info)
        else:
            self.segmentation_clean.emit()

    @property
    def sizes(self):
        return self._segmentation_info.sizes

    @property
    def image(self):
        return self._image

    @image.setter
    def image(self, value: Image):
        if value is None:
            return
        self._image = value
        if value.file_path is not None:
            self.image_changed[str].emit(value.file_path)
        self._image_changed()
        # A new image invalidates the previous segmentation.
        self._segmentation_info = SegmentationInfo(None)
        self.image_changed.emit(self._image)
        self.image_changed[int].emit(self._image.channels)

    @property
    def has_channels(self):
        return self._image.channels > 1

    def _image_changed(self):
        """Hook for subclasses; called before ``image_changed`` is emitted."""

    @property
    def image_path(self):
        if self.image is not None:
            return self._image.file_path
        return ""

    @property
    def image_shape(self):
        if self.image is not None:
            return self._image.shape
        return ()

    @image_path.setter
    def image_path(self, value):
        self._image_path = value
        # BUGFIX: was ``emmit`` (typo), which raised AttributeError whenever
        # this setter was used.
        self.image_changed[str].emit(self._image_path)

    @property
    def channels(self):
        if self._image is None:
            return 0
        return self._image.channels

    def get_information(self, *pos):
        return self._image[pos]

    def components_mask(self):
        # One flag per component id: index 0 (background) off, 1..max on.
        return np.array([0] + [1] * np.max(self.segmentation), dtype=np.uint8)
class ColormapDict(PartiallyConstDict[ColorMap]):
    """Dictionary mixing user-defined colormaps with the predefined ones."""

    const_item_dict = default_colormap_dict
    """Non removable items for this dict. Current value is :py:data:`default_colormap_dict`"""

    @property
    def colormap_added(self):
        """Signal emitted when a colormap is added to this dict."""
        return self.item_added

    @property
    def colormap_removed(self):
        """Signal emitted when a colormap is removed from this dict."""
        return self.item_removed
class LabelColorDict(PartiallyConstDict[list]):
    """Dictionary mixing user-defined label color schemes with predefined ones."""

    const_item_dict = default_label_dict
    """Non removable items for this dict. Current value is :py:data:`default_label_dict`"""

    def get_array(self, key: str) -> np.ndarray:
        """Return the color scheme stored under ``key`` as a ``uint8`` numpy array."""
        colors = self[key][0]
        return np.array(colors, dtype=np.uint8)
class ViewSettings(ImageSettings):
    """Settings controlling visualization: theme, colormaps and label colors."""

    colormap_changes = Signal()
    labels_changed = Signal()
    theme_changed = Signal()

    def __init__(self):
        super().__init__()
        self.color_map = []
        self.border_val = []
        self.current_profile_dict = "default"
        self.view_settings_dict = ProfileDict()
        self.colormap_dict = ColormapDict(self.get_from_profile("custom_colormap", {}))
        self.label_color_dict = LabelColorDict(self.get_from_profile("custom_label_colors", {}))
        # Cache of (scheme name, converted array) used by ``label_colors``.
        self.cached_labels: Optional[Tuple[str, np.ndarray]] = None

    @property
    def theme_name(self) -> str:
        return self.get_from_profile("theme", "light")

    @property
    def style_sheet(self):
        """Qt style sheet rendered from the current napari theme palette."""
        # BUGFIX: copy the palette so we do not mutate napari's global theme
        # definition when overriding the canvas color.
        palette = dict(napari.utils.theme.palettes[self.theme_name])
        palette["canvas"] = "black"
        return napari_template(get_stylesheet(), **palette)

    @theme_name.setter
    def theme_name(self, value: str):
        if value not in napari.utils.theme.palettes:
            raise ValueError(f"Unsupported theme {value}. Supported one: {self.theme_list()}")
        if value == self.theme_name:
            return
        self.set_in_profile("theme", value)
        self.theme_changed.emit()

    @staticmethod
    def theme_list():
        return list(napari.utils.theme.palettes.keys())

    @property
    def chosen_colormap(self):
        """Names of the colormaps selected by the user (unknown names dropped)."""
        data = self.get_from_profile("colormaps", starting_colors[:])
        res = [x for x in data if x in self.colormap_dict]
        # BUGFIX: original compared ``len(res) != data`` (int vs list), which is
        # always True, so the profile was rewritten on every read.
        if len(res) != len(data):
            if len(res) == 0:
                res = starting_colors[:]
            self.set_in_profile("colormaps", res)
        return res

    @chosen_colormap.setter
    def chosen_colormap(self, val):
        self.set_in_profile("colormaps", val)
        self.colormap_changes.emit()

    @property
    def current_labels(self):
        return self.get_from_profile("labels_used", "default")

    @current_labels.setter
    def current_labels(self, val):
        if val not in self.label_color_dict:
            raise ValueError(f"Unknown label scheme name '{val}'")
        self.set_in_profile("labels_used", val)
        self.labels_changed.emit()

    @property
    def label_colors(self):
        key = self.current_labels
        if key not in self.label_color_dict:
            key = "default"
        # Rebuild the cached array only when the scheme name changed.
        if not (self.cached_labels and key == self.cached_labels[0]):
            self.cached_labels = key, self.label_color_dict.get_array(key)
        return self.cached_labels[1]

    def chosen_colormap_change(self, name, visibility):
        colormaps = set(self.chosen_colormap)
        if visibility:
            colormaps.add(name)
        else:
            try:
                colormaps.remove(name)
            except KeyError:
                pass
        # TODO update sorting rule
        self.chosen_colormap = list(sorted(colormaps, key=self.colormap_dict.get_position))

    def get_channel_info(self, view: str, num: int, default: Optional[str] = None) -> str:
        """Return the colormap name stored for channel ``num`` of ``view``.

        Falls back to the chosen-colormap cycle when the stored name is unknown.
        (Annotation fixed: a single name is returned, not ``List[str]``.)
        """
        cm = self.chosen_colormap
        if default is None:
            default = cm[num % len(cm)]
        resp = self.get_from_profile(f"{view}.cmap{num}", default)
        if resp not in self.colormap_dict:
            resp = cm[num % len(cm)]
        self.set_in_profile(f"{view}.cmap{num}", resp)
        return resp

    def set_channel_info(self, view: str, num, value: str):
        self.set_in_profile(f"{view}.cmap{num}", value)

    @property
    def available_colormaps(self):
        return list(self.colormap_dict.keys())

    def _image_changed(self):
        self.border_val = self.image.get_ranges()
        super()._image_changed()

    def change_profile(self, name):
        self.current_profile_dict = name
        if self.current_profile_dict not in self.view_settings_dict:
            self.view_settings_dict = {self.current_profile_dict: ProfileDict()}

    def set_in_profile(self, key_path, value):
        """
        Function for saving information used in visualization. This is accessor to
        :py:meth:`~.ProfileDict.set` of inner variable.

        :param key_path: dot separated path
        :param value: value to store. The value need to be json serializable.
        """
        self.view_settings_dict.set(f"{self.current_profile_dict}.{key_path}", value)

    def get_from_profile(self, key_path, default=None):
        """
        Function for getting information used in visualization. This is accessor to
        :py:meth:`~.ProfileDict.get` of inner variable.

        :param key_path: dot separated path
        :param default: default value if key is missed
        """
        return self.view_settings_dict.get(f"{self.current_profile_dict}.{key_path}", default)

    def dump_view_profiles(self):
        # return json.dumps(self.profile_dict, cls=ProfileEncoder)
        return self.view_settings_dict
class SaveSettingsDescription(NamedTuple):
    """Describe one settings file persisted to disk.

    ``file_name`` is the target file name; ``values`` is the data to serialize
    (a plain ``dict`` or a :py:class:`ProfileDict`).
    """

    file_name: str
    values: Union[dict, ProfileDict]
class BaseSettings(ViewSettings):
    """
    Base class holding whole-application state (history, mask, persisted profiles).

    :ivar json_folder_path: default location for saving/loading settings data
    :ivar last_executed_algorithm: name of last executed algorithm.
    :cvar save_locations_keys: list of names of distinct save location.
        location are stored in "io"
    """

    mask_changed = Signal()
    mask_representation_changed = Signal()
    """:py:class:`~.Signal` mask changed signal"""
    json_encoder_class = ProfileEncoder
    load_metadata = staticmethod(load_metadata_base)
    algorithm_changed = Signal()
    """:py:class:`~.Signal` emitted when current algorithm should be changed"""
    save_locations_keys = []

    def __init__(self, json_path):
        super().__init__()
        self.current_segmentation_dict = "default"
        self.segmentation_dict = ProfileDict()
        self.json_folder_path = json_path
        self.last_executed_algorithm = ""
        # Undo/redo history: ``history_index`` points at the current element,
        # -1 means the history is empty.
        self.history: List[HistoryElement] = []
        self.history_index = -1

    def mask_representation_changed_emit(self):
        """Re-emit :py:attr:`mask_representation_changed`."""
        self.mask_representation_changed.emit()

    def add_history_element(self, elem: HistoryElement) -> None:
        """Append a history element, replacing an equal current element or
        truncating any redo tail (``cmp_history_element`` decides equality)."""
        self.history_index += 1
        if self.history_index < len(self.history) and self.cmp_history_element(elem, self.history[self.history_index]):
            self.history[self.history_index] = elem
        else:
            self.history = self.history[: self.history_index]
            self.history.append(elem)

    def history_size(self) -> int:
        # Number of elements up to and including the current one.
        return self.history_index + 1

    def history_redo_size(self) -> int:
        # Number of elements that can still be redone.
        if self.history_index + 1 == len(self.history):
            return 0
        return len(self.history[self.history_index + 1 :])

    def history_redo_clean(self) -> None:
        # Drop the redo tail, keeping only elements up to the current one.
        self.history = self.history[: self.history_size()]

    def history_current_element(self) -> HistoryElement:
        return self.history[self.history_index]

    def history_next_element(self) -> HistoryElement:
        return self.history[self.history_index + 1]

    def history_pop(self) -> Optional[HistoryElement]:
        """Undo one step; return the element stepped over, or None if empty."""
        if self.history_index != -1:
            self.history_index -= 1
            return self.history[self.history_index + 1]
        return None

    def set_history(self, history: List[HistoryElement]):
        self.history = history
        self.history_index = len(self.history) - 1

    def get_history(self) -> List[HistoryElement]:
        # Only the undo part; any redo tail is excluded.
        return self.history[: self.history_index + 1]

    @staticmethod
    def cmp_history_element(el1, el2):
        # Default: never treat two elements as equal; subclasses may override.
        return False

    @property
    def mask(self):
        return self._image.mask

    @mask.setter
    def mask(self, value):
        # NOTE(review): the re-raise drops the original exception context;
        # consider ``raise ... from e``.
        try:
            self._image.set_mask(value)
            self.mask_changed.emit()
        except ValueError:
            raise ValueError("mask do not fit to image")

    def get_save_list(self) -> List[SaveSettingsDescription]:
        """List of files in which program save the state."""
        return [
            SaveSettingsDescription("segmentation_settings.json", self.segmentation_dict),
            SaveSettingsDescription("view_settings.json", self.view_settings_dict),
        ]

    def get_path_history(self) -> List[str]:
        """
        return list containing last 10 elements added with :py:meth:`.add_path_history` and
        last opened in each category form :py:attr:`save_location_keys`
        """
        res = self.get("io.history", [])[:]
        for name in self.save_locations_keys:
            # Default to the user's home directory for unvisited categories.
            val = self.get("io." + name, str(Path.home()))
            if val not in res:
                res = res + [val]
        return res

    def add_path_history(self, dir_path: str):
        """Save path in history of visited directories. Store only 10 last"""
        history: List[str] = self.get("io.history", [])
        try:
            # De-duplicate: move an existing entry to the front.
            history.remove(dir_path)
        except ValueError:
            history = history[:9]
        self.set("io.history", [dir_path] + history[-9:])

    def set(self, key_path: str, value):
        """
        function for saving general state (not visualization). This is accessor to
        :py:meth:`~.ProfileDict.set` of inner variable.

        :param key_path: dot separated path
        :param value: value to store. The value need to be json serializable.
        """
        self.segmentation_dict.set(f"{self.current_segmentation_dict}.{key_path}", value)

    def get(self, key_path: str, default=None):
        """
        Function for getting general state (not visualization). This is accessor to
        :py:meth:`~.ProfileDict.get` of inner variable.

        :param key_path: dot separated path
        :param default: default value if key is missed
        """
        return self.segmentation_dict.get(f"{self.current_segmentation_dict}.{key_path}", default)

    def dump_part(self, file_path, path_in_dict, names=None):
        """Serialize the subtree at ``path_in_dict`` (optionally only ``names``
        entries) to ``file_path`` as JSON."""
        data = self.get(path_in_dict)
        if names is not None:
            data = {name: data[name] for name in names}
        with open(file_path, "w") as ff:
            json.dump(data, ff, cls=self.json_encoder_class, indent=2)

    def load_part(self, file_path):
        """Load metadata from ``file_path``; return ``(data, bad_key)`` where
        ``bad_key`` lists entries that failed validation and were removed."""
        data = self.load_metadata(file_path)
        bad_key = []
        if isinstance(data, dict):
            if not check_loaded_dict(data):
                for k, v in data.items():
                    if not check_loaded_dict(v):
                        bad_key.append(k)
                for el in bad_key:
                    del data[el]
        elif isinstance(data, ProfileDict):
            if not data.verify_data():
                bad_key = data.filter_data()
        return data, bad_key

    def dump(self, folder_path: Optional[str] = None):
        """
        Save current application settings to disc.

        :param folder_path: path to directory in which data should be saved.
            If is None then use :py:attr:`.json_folder_path`
        """
        if folder_path is None:
            folder_path = self.json_folder_path
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        # Collect (exception, path) pairs instead of aborting on first failure.
        errors_list = []
        for el in self.get_save_list():
            try:
                # Serialize to a string first so a failed encode never
                # truncates an existing file.
                dump_string = json.dumps(el.values, cls=self.json_encoder_class, indent=2)
                with open(os.path.join(folder_path, el.file_name), "w") as ff:
                    ff.write(dump_string)
            except Exception as e:
                errors_list.append((e, os.path.join(folder_path, el.file_name)))
        if errors_list:
            print(errors_list, file=sys.stderr)
        return errors_list

    def load(self, folder_path: Optional[str] = None):
        """
        Load settings state from given directory

        :param folder_path: path to directory in which data should be saved.
            If is None then use :py:attr:`.json_folder_path`
        """
        if folder_path is None:
            folder_path = self.json_folder_path
        errors_list = []
        for el in self.get_save_list():
            file_path = os.path.join(folder_path, el.file_name)
            if not os.path.exists(file_path):
                continue
            error = False
            try:
                data: ProfileDict = self.load_metadata(file_path)
                if not data.verify_data():
                    errors_list.append((file_path, data.filter_data()))
                    error = True
                el.values.update(data)
            except Exception as e:
                error = True
                errors_list.append((file_path, e))
            finally:
                if error:
                    # Rename the corrupt file aside (timestamp suffix) so the
                    # next dump starts from a clean slate while preserving it.
                    timestamp = datetime.today().strftime("%Y-%m-%d_%H_%M_%S")
                    base_path, ext = os.path.splitext(file_path)
                    os.rename(file_path, base_path + "_" + timestamp + ext)
        if errors_list:
            print(errors_list, file=sys.stderr)
        return errors_list

    def get_project_info(self) -> ProjectInfoBase:
        """Get all information needed to save project"""
        raise NotImplementedError

    def set_project_info(self, data: ProjectInfoBase):
        """Set project info"""
        raise NotImplementedError

    @staticmethod
    def verify_image(image: Image, silent=True) -> Union[Image, bool]:
        # For time-series images: either swap time/stack axes silently or
        # raise so the caller can ask the user; stack+time together is fatal.
        if image.is_time:
            if image.is_stack:
                raise TimeAndStackException()
            if silent:
                return image.swap_time_and_stack()
            else:
                raise SwapTimeStackException()
        return True
class SwapTimeStackException(Exception):
    """Raised when an image shape is unsupported as-is, but would be
    supported after swapping its time and stack axes."""
class TimeAndStackException(Exception):
    """Raised when an image contains both time and stack data,
    a combination that is not supported."""
| [
"os.path.exists",
"PartSegCore.segmentation_info.SegmentationInfo",
"os.makedirs",
"qtpy.QtCore.Signal",
"napari.resources.get_stylesheet",
"PartSegCore.json_hooks.check_loaded_dict",
"os.path.join",
"json.dumps",
"pathlib.Path.home",
"PartSegCore.json_hooks.ProfileDict",
"numpy.max",
"numpy.a... | [((1107, 1136), 'qtpy.QtCore.Signal', 'Signal', (['[Image]', '[int]', '[str]'], {}), '([Image], [int], [str])\n', (1113, 1136), False, 'from qtpy.QtCore import QObject, Signal\n'), ((1165, 1173), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (1171, 1173), False, 'from qtpy.QtCore import QObject, Signal\n'), ((1289, 1313), 'qtpy.QtCore.Signal', 'Signal', (['SegmentationInfo'], {}), '(SegmentationInfo)\n', (1295, 1313), False, 'from qtpy.QtCore import QObject, Signal\n'), ((1421, 1429), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (1427, 1429), False, 'from qtpy.QtCore import QObject, Signal\n'), ((1462, 1470), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (1468, 1470), False, 'from qtpy.QtCore import QObject, Signal\n'), ((6522, 6530), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (6528, 6530), False, 'from qtpy.QtCore import QObject, Signal\n'), ((6552, 6560), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (6558, 6560), False, 'from qtpy.QtCore import QObject, Signal\n'), ((6581, 6589), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (6587, 6589), False, 'from qtpy.QtCore import QObject, Signal\n'), ((11767, 11775), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (11773, 11775), False, 'from qtpy.QtCore import QObject, Signal\n'), ((11810, 11818), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (11816, 11818), False, 'from qtpy.QtCore import QObject, Signal\n'), ((11987, 11995), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (11993, 11995), False, 'from qtpy.QtCore import QObject, Signal\n'), ((1673, 1695), 'PartSegCore.segmentation_info.SegmentationInfo', 'SegmentationInfo', (['None'], {}), '(None)\n', (1689, 1695), False, 'from PartSegCore.segmentation_info import SegmentationInfo\n'), ((3896, 3917), 'PartSegCore.segmentation_info.SegmentationInfo', 'SegmentationInfo', (['val'], {}), '(val)\n', (3912, 3917), False, 'from PartSegCore.segmentation_info import SegmentationInfo\n'), ((4504, 4526), 
'PartSegCore.segmentation_info.SegmentationInfo', 'SegmentationInfo', (['None'], {}), '(None)\n', (4520, 4526), False, 'from PartSegCore.segmentation_info import SegmentationInfo\n'), ((6423, 6461), 'numpy.array', 'np.array', (['self[key][0]'], {'dtype': 'np.uint8'}), '(self[key][0], dtype=np.uint8)\n', (6431, 6461), True, 'import numpy as np\n'), ((6779, 6792), 'PartSegCore.json_hooks.ProfileDict', 'ProfileDict', ([], {}), '()\n', (6790, 6792), False, 'from PartSegCore.json_hooks import ProfileDict, ProfileEncoder, check_loaded_dict\n'), ((12252, 12265), 'PartSegCore.json_hooks.ProfileDict', 'ProfileDict', ([], {}), '()\n', (12263, 12265), False, 'from PartSegCore.json_hooks import ProfileDict, ProfileEncoder, check_loaded_dict\n'), ((7321, 7337), 'napari.resources.get_stylesheet', 'get_stylesheet', ([], {}), '()\n', (7335, 7337), False, 'from napari.resources import get_stylesheet\n'), ((16510, 16568), 'json.dump', 'json.dump', (['data', 'ff'], {'cls': 'self.json_encoder_class', 'indent': '(2)'}), '(data, ff, cls=self.json_encoder_class, indent=2)\n', (16519, 16568), False, 'import json\n'), ((17474, 17501), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (17488, 17501), False, 'import os\n'), ((17515, 17539), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (17526, 17539), False, 'import os\n'), ((18486, 18525), 'os.path.join', 'os.path.join', (['folder_path', 'el.file_name'], {}), '(folder_path, el.file_name)\n', (18498, 18525), False, 'import os\n'), ((10355, 10368), 'PartSegCore.json_hooks.ProfileDict', 'ProfileDict', ([], {}), '()\n', (10366, 10368), False, 'from PartSegCore.json_hooks import ProfileDict, ProfileEncoder, check_loaded_dict\n'), ((16726, 16749), 'PartSegCore.json_hooks.check_loaded_dict', 'check_loaded_dict', (['data'], {}), '(data)\n', (16743, 16749), False, 'from PartSegCore.json_hooks import ProfileDict, ProfileEncoder, check_loaded_dict\n'), ((17652, 17712), 'json.dumps', 'json.dumps', 
(['el.values'], {'cls': 'self.json_encoder_class', 'indent': '(2)'}), '(el.values, cls=self.json_encoder_class, indent=2)\n', (17662, 17712), False, 'import json\n'), ((18545, 18570), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (18559, 18570), False, 'import os\n'), ((5438, 5463), 'numpy.max', 'np.max', (['self.segmentation'], {}), '(self.segmentation)\n', (5444, 5463), True, 'import numpy as np\n'), ((14989, 15000), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (14998, 15000), False, 'from pathlib import Path\n'), ((19171, 19198), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (19187, 19198), False, 'import os\n'), ((19219, 19274), 'os.rename', 'os.rename', (['file_path', "(base_path + '_' + timestamp + ext)"], {}), "(file_path, base_path + '_' + timestamp + ext)\n", (19228, 19274), False, 'import os\n'), ((16820, 16840), 'PartSegCore.json_hooks.check_loaded_dict', 'check_loaded_dict', (['v'], {}), '(v)\n', (16837, 16840), False, 'from PartSegCore.json_hooks import ProfileDict, ProfileEncoder, check_loaded_dict\n'), ((17739, 17778), 'os.path.join', 'os.path.join', (['folder_path', 'el.file_name'], {}), '(folder_path, el.file_name)\n', (17751, 17778), False, 'import os\n'), ((17908, 17947), 'os.path.join', 'os.path.join', (['folder_path', 'el.file_name'], {}), '(folder_path, el.file_name)\n', (17920, 17947), False, 'import os\n'), ((19087, 19103), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (19101, 19103), False, 'from datetime import datetime\n')] |
import os
import numpy as np
import random
from sklearn.utils import shuffle
import scipy.io
import matlab.engine
import time
import glob
import argparse
from utils_noisescope import *
import logging
import joblib
random.seed(6666)  # fixed seed so sampling/shuffling is reproducible across runs
eng = matlab.engine.start_matlab()  # shared MATLAB engine used by the PCE-matrix helpers below
def extract_fingerpint_via_clustering(all_res_paths, ground_truth_label,
                                      thre_pce,
                                      cluster_list_with_image_idx,
                                      iter_round, img_dim, outlier_model_path, result_dir,
                                      reduce_matrix=None, merged_cluster=None):
    '''
    Fingerprint Step 2 + 3.
    :param all_res_paths: noise residuals of the test set.
    :param ground_truth_label: gound truth labels for the test set.
    :param thre_pce: T merge calibrated using function 'correlation_between_real_fps' in pipeline.py
    :param cluster_list_with_image_idx: A list of residual clusters. Each cluster is a tuple, which includes residual indexes.
    :param iter_round: clustering/merging iteration round
    :param img_dim: image/residual dimension
    :param outlier_model_path: fingerprint outlier detector
    :param result_dir: save log, middle products like .mat files
    :param logfile: log file
    :param reduce_matrix: previous pair-wise correlation reused for this round of merging iteration
    :param merged_cluster: Newly merged clusters from the last merging step
    :return: ret_fake_cluster_list: A list of fake (model) clusters flagged; ret_cluster_list_with_image_idx: residual indexs in the flagged clusters
    '''
    logging.info("++++++++++PERFORM THE NEXT MERGING ITERATION++++++++++++\n")
    logging.info('Currently, there are {} clusters\n'.format(len(
        cluster_list_with_image_idx)))  # cluster_list_with_image_idx show the latest cluster distribution and clusters
    # Log purity statistics for every reasonably-sized cluster (debug aid).
    for cluster_with_img_idx in cluster_list_with_image_idx:
        if len(cluster_with_img_idx) > 10:
            fake_purity = compute_cluster_fake_purity(cluster_with_img_idx, ground_truth_label)
            logging.info(
                'This cluster has {} images with a fake purity: {} \n'.format(len(cluster_with_img_idx), fake_purity))
    num_cluster = len(cluster_list_with_image_idx)
    ### calculate PCE matrix ###
    # The MATLAB engine (``eng``) writes the pairwise PCE matrix to a .mat
    # file in ``result_dir`` which is then read back with scipy.io.loadmat.
    if iter_round > 0:
        # Reuse the reduced matrix from the previous round; only rows for the
        # newly merged clusters need to be recomputed.
        pce_matrix = np.full((num_cluster, num_cluster), 0, dtype=float)
        pce_matrix[0:num_cluster - len(merged_cluster), 0: num_cluster - len(merged_cluster)] = reduce_matrix # 98, 98
        eng.get_pce_matrix_iterate(all_res_paths, cluster_list_with_image_idx, len(merged_cluster), img_dim,
                                   result_dir,
                                   iter_round)
        new_pce_matrix = scipy.io.loadmat(result_dir + '{}_partial.mat'.format(iter_round))
        pce_matrix[num_cluster - len(merged_cluster):, :] = np.array(new_pce_matrix['matrix'])
    else:
        # First round: compute the full matrix from scratch.
        t1 = time.time()
        eng.get_pce_matrix_noise_average(all_res_paths, cluster_list_with_image_idx, result_dir, iter_round,
                                         img_dim)
        t2 = time.time()
        logging.info('The first iteration takes {} seconds. \n'.format(t2 - t1))
        pce_matrix = scipy.io.loadmat(result_dir + '{}.mat'.format(iter_round))
        pce_matrix = np.array(pce_matrix['matrix'])
    # Positions of all cluster pairs whose correlation exceeds the merge threshold.
    large_pce_pos_array = np.where(pce_matrix > thre_pce)
    x_axis_idx = large_pce_pos_array[0]
    y_axis_idx = large_pce_pos_array[1]
    logging.info("{} pairs in the matrix is larger than the threshold. \n".format(len(list(x_axis_idx))))
    # return cases for early stopping
    sorted_cluster_list_with_image_idx = sorted(cluster_list_with_image_idx, key=len, reverse=True)
    # if len(sorted_cluster_list_with_image_idx[0]) > 200: # if we have a big cluster >200, we test it
    if len(sorted_cluster_list_with_image_idx[
               0]) > 150:  # if we have a big cluster > 150, we start the early stopping strategy
        feed_list = []
        # Clusters are sorted by size (descending), so we can stop at the
        # first cluster that falls below the size cutoff.
        for idx_tuple in sorted_cluster_list_with_image_idx:
            if len(idx_tuple) > 50:  # pick cluster size [50, X)
                feed_list.append(idx_tuple)
            else:
                break
        # return feed_list, tuple_tree_dict, cluster_list_with_image_idx # for skipping
        fake_cluster_list, fake_flagged = fingerprint_classifier(feed_list, all_res_paths,
                                                                outlier_model_path, img_dim)
        if fake_flagged:
            logging.info(
                "We detected suspicious fake clusters, NoiseScope will perform fingerprint classifier next.")
            return fake_cluster_list, cluster_list_with_image_idx
        else:
            logging.info(
                "Available candidate clusters are not recognized outliers, NoiseScope continues to do clustering.")
    # another return case, when there is no more high correlated pairs
    if len(list(x_axis_idx)) == 0:
        fake_cluster_list, fake_flagged = fingerprint_classifier(sorted_cluster_list_with_image_idx, all_res_paths,
                                                                outlier_model_path, img_dim)
        if fake_flagged:
            return fake_cluster_list, cluster_list_with_image_idx
        else:
            logging.info("No fake clusters are flagged, NoiseScope will stop the detection.")
            return fake_cluster_list, cluster_list_with_image_idx
    # confirm how many pairs can be merged
    idx_pairs = list(zip(x_axis_idx, y_axis_idx))  # idx_pairs includes all pair positions
    idx_pairs_with_pce = list(map(lambda x: x + (pce_matrix[x[0], x[1]],), idx_pairs))
    sorted_idx_pairs_with_pce = sorted(idx_pairs_with_pce, key=lambda x: x[2], reverse=True)
    idx_pair_for_merge = []
    delete_idxs = []
    # Greedy matching: repeatedly take the highest-PCE pair and drop every
    # other pair touching either of its clusters.
    while len(sorted_idx_pairs_with_pce) > 0:  # which means still having pairs to merge
        x_idx_max_pce = sorted_idx_pairs_with_pce[0][0]
        y_idx_max_pce = sorted_idx_pairs_with_pce[0][1]
        assert pce_matrix[x_idx_max_pce][y_idx_max_pce] == sorted_idx_pairs_with_pce[0][2]
        idx_pair_for_merge.append((x_idx_max_pce, y_idx_max_pce))
        logging.info(
            'Maximum pce value from current idx pairs is: {}\n'.format(pce_matrix[x_idx_max_pce][y_idx_max_pce]))
        delete_idxs.append(x_idx_max_pce)
        delete_idxs.append(y_idx_max_pce)
        sorted_idx_pairs_with_pce[:] = [idx_pair for idx_pair in sorted_idx_pairs_with_pce if
                                        (x_idx_max_pce not in idx_pair) and (y_idx_max_pce not in idx_pair)]
    ### merging rules ###
    merge_clusters_set = set([])  # contain merged tuples that should be added
    delete_clusters_set = set([])  # contain tuples that need to be deleted
    for idx_pair in idx_pair_for_merge:
        # record all the clusters need to be deleted from cluster_list_with_image_idx
        delete_clusters_set.add(cluster_list_with_image_idx[idx_pair[0]])
        delete_clusters_set.add(cluster_list_with_image_idx[idx_pair[1]])
        # record all the merged cluster need to be added into cluster_list_with_image_idx
        merge_tuple = cluster_list_with_image_idx[idx_pair[0]] + cluster_list_with_image_idx[idx_pair[1]]
        merge_clusters_set.add(merge_tuple)
    # here we remove clusters in delete_clusters_set
    for delete_tuple in delete_clusters_set:
        cluster_list_with_image_idx.remove(delete_tuple)
    # here we add merged clusters in all_merge_set
    for merge_tuple in merge_clusters_set:
        cluster_list_with_image_idx.append(merge_tuple)
    # Reduce the PCE matrix by dropping rows/columns of merged clusters so the
    # next round only recomputes entries for the newly merged clusters.
    pce_values_for_next_iter = []
    for i in range(0, num_cluster):
        if i in delete_idxs:
            continue
        for j in range(0, num_cluster):
            if j in delete_idxs:
                continue
            pce_values_for_next_iter.append(pce_matrix[i, j])
    pce_matrix = np.reshape(pce_values_for_next_iter, (num_cluster - len(delete_idxs), num_cluster - len(delete_idxs)))
    # Recurse for the next merging round with the reduced matrix.
    ret_fake_cluster_list, ret_cluster_list_with_image_idx = extract_fingerpint_via_clustering(all_res_paths,
                                                                                              ground_truth_label,
                                                                                              thre_pce,
                                                                                              cluster_list_with_image_idx,
                                                                                              iter_round + 1,
                                                                                              img_dim,
                                                                                              outlier_model_path,
                                                                                              result_dir,
                                                                                              pce_matrix,
                                                                                              merge_clusters_set)
    return ret_fake_cluster_list, ret_cluster_list_with_image_idx
def fake_image_detector(fake_cluster_list, test_res_paths, ground_truth, img_dim, refer_dir):
    '''
    NoiseScope step 4: fake image detector.

    Scores every test residual against each flagged model fingerprint and labels
    as fake the images whose best PCE exceeds a threshold calibrated on a
    reference (known-real) set.

    :param fake_cluster_list: A list of fake clusters. Each cluster includes all the residual indexes.
    :param test_res_paths: noise residual paths for test set.
    :param ground_truth: Ground truth label for the test residuals (0 = real, 1 = fake).
    :param img_dim: image/residual size
    :param refer_dir: directory with reference residuals used for threshold calibration
    :return: detection F1 score, or None when no fingerprint was found
    '''
    if not fake_cluster_list:
        logging.info('No model fingerprint found! The detection will stop here! \n')
        return

    def _log_metrics(conf, scores):
        # Shared report format for overall and per-cluster performance.
        logging.info("TN, FP, FN, TP: {} \n".format(conf))
        logging.info("+++++++++++++++++++++++++++++++ \n")
        logging.info("Accuracy: {0:.2f}% \n".format(scores["accuracy"] * 100))
        logging.info("Precision: {0:.2f}% \n".format(scores["precision"] * 100))
        logging.info("Recall: {0:.2f}% \n".format(scores["recall"] * 100))
        logging.info("F1 score: {0:.2f}% \n".format(scores["f1_score"] * 100))

    refer_res_paths = glob.glob(refer_dir + '*.mat')
    test_max_pce = []
    refer_max_pce = []
    all_test_pce = []
    all_refer_pce = []
    cluster_stat = []
    single_cluster_f1_scores = []
    for cluster_no, fake_cluster in enumerate(fake_cluster_list):
        logging.info('This fake cluster includes residual id: {}. \n'.format(fake_cluster))
        # adjust the index, because in matlab, index starts from 1.
        zero_based_idx = [idx - 1 for idx in fake_cluster]
        fake_pos = np.where(np.array(ground_truth) == 1)
        fake_purity = len(set(fake_pos[0]).intersection(set(zero_based_idx))) / len(fake_cluster)
        cluster_stat.append((len(fake_cluster), fake_purity))
        logging.info('This cluster has a fake purity of {}. \n'.format(fake_purity))
        logging.info('This cluster has image samples{} \n'.format(len(fake_cluster)))
        model_fingerprint = compute_fp_from_cluster(fake_cluster, test_res_paths, img_dim)
        logging.info('The shape of fake fingerprint: {}. \n'.format(np.shape(model_fingerprint)))
        test_pce_corr = compute_pce_with_fingerprint(test_res_paths, model_fingerprint)
        refer_pce_corr = compute_pce_with_fingerprint(refer_res_paths, model_fingerprint)
        all_test_pce.append(test_pce_corr[0])
        all_refer_pce.append(refer_pce_corr[0])
        if cluster_no == 0:
            test_max_pce = test_pce_corr[0]
            refer_max_pce = refer_pce_corr[0]
        else:
            # Keep, per residual, the best PCE seen over all fingerprints so far.
            test_max_pce = [max(a, b) for a, b in zip(test_max_pce, test_pce_corr[0])]
            refer_max_pce = [max(a, b) for a, b in zip(refer_max_pce, refer_pce_corr[0])]
    # Calibrate the decision threshold at the 99.5th percentile of the reference set.
    calibrate_thres = np.percentile(refer_max_pce, 99.5)
    logging.info('Calibrated PCE threshold for fake image detector, {} \n'.format(calibrate_thres))
    label = [1 if pce > calibrate_thres else 0 for pce in test_max_pce]
    conf_matrix, metric_scores = compute_confusion_matrix(ground_truth, label)
    logging.info("Clustered with PCE threshold: {}. \n".format(calibrate_thres))
    _log_metrics(conf_matrix, metric_scores)
    final_f1 = metric_scores["f1_score"]
    # Also report how each fingerprint performs on its own.
    for test_pce in all_test_pce:
        label = [1 if pce > calibrate_thres else 0 for pce in test_pce]
        conf_matrix, metric_scores = compute_confusion_matrix(ground_truth, label)
        logging.info("========Single cluster performance=========\n")
        _log_metrics(conf_matrix, metric_scores)
        single_cluster_f1_scores.append(metric_scores["f1_score"])
    return final_f1
def fingerprint_classifier(cluster_list_with_image_idx, res_list, outlier_model_path, img_dim):
    '''
    NoiseScope Step 3: fingerprint classifier.

    Walks the cluster list and, for every cluster larger than 50 residuals,
    builds its fingerprint and asks a pre-trained outlier detector whether it
    looks like a model (fake) fingerprint. Stops at the first cluster at or
    below 50 residuals.

    :param cluster_list_with_image_idx: A list of residual clusters. Each cluster is a tuple, which includes residual indexes.
    :param res_list: Noise residuals of test set.
    :param outlier_model_path: Fingerprint outlier detector, which will flag model fingerprints as outliers
    :param img_dim: image/residual size
    :return: (list of fake (model) clusters, whether any cluster was flagged)
    '''
    fake_cluster_list = []
    detection_model = joblib.load(outlier_model_path)
    # cluster_list_with_image_idx = sorted(cluster_list_with_image_idx, key=len, reverse=True)
    for cluster in cluster_list_with_image_idx:
        if len(cluster) <= 50:
            # Only clusters with more than 50 residuals are considered; the
            # first smaller one terminates the scan (original behavior).
            break
        sampled_idx = random.sample(cluster, 50)  # sample cluster_list_with_image_idx
        cluster_fp = compute_fp_from_cluster(sampled_idx, res_list, img_dim)
        clipped_fp = clip_fp(cluster_fp)
        haralick_feat = extract_haralick_features(clipped_fp)
        pred_label = detection_model.predict(np.array(haralick_feat).reshape(1, -1))
        if pred_label == -1:  # -1 == outlier == model fingerprint
            fake_cluster_list.append(cluster)
            logging.info("One fake cluster is flagged, with {} images.\n".format(len(cluster)))
    logging.info("{} fake clusters have been flagged.".format(len(fake_cluster_list)))
    fake_flagged = len(fake_cluster_list) > 0
    return fake_cluster_list, fake_flagged
def detection_NoiseScope(args):
    """Run the full NoiseScope pipeline on a sampled real/fake test set.

    Samples residuals, shuffles them with their labels, merges clusters and
    flags model fingerprints (steps 1-3), then runs the fake image detector
    (step 4).

    :param args: parsed command-line namespace (see ``__main__``)
    :return: detection F1 score from :func:`fake_image_detector`
    """
    # Normalise the result directory path and make sure it exists.
    if args.result_dir[-1] != '/':
        args.result_dir = args.result_dir + '/'
    if not os.path.exists(args.result_dir):
        os.mkdir(args.result_dir)
    logging.basicConfig(
        filename='{}detection.log'.format(args.result_dir),
        filemode='w',
        level=logging.DEBUG,
        format='%(levelname)s:%(message)s',
    )
    # Sample the test set: real residuals first, then fake ones.
    real_residuals = random.sample(glob.glob(args.real_res_dir + '/*.mat'), args.num_real)
    fake_residuals = random.sample(glob.glob(args.fake_res_dir + '/*.mat'), args.num_fake)
    all_res_paths = real_residuals + fake_residuals
    ground_truth_label = [0] * len(real_residuals) + [1] * len(fake_residuals)
    # Shuffle paths and labels together so they stay aligned.
    paired = shuffle(list(zip(ground_truth_label, all_res_paths)))
    shuffled_labels, shuffled_paths = zip(*paired)
    # logfile = open("{}logfile.txt".format(args.result_dir), "w")
    all_res_paths = list(shuffled_paths)
    ground_truth_label = shuffled_labels
    # Start from one singleton cluster per residual (1-based, matlab style).
    cluster_list_with_image_idx = [(i,) for i in range(1, len(all_res_paths) + 1)]
    ############ find fake indexs and compute the fake fingerprint ################
    logging.info('Merging threshold: {}\n'.format(args.pce_thre))
    fake_cluster_list, cluster_list_with_image_idx = extract_fingerpint_via_clustering(
        all_res_paths,
        ground_truth_label,
        args.pce_thre,
        cluster_list_with_image_idx,
        0,
        args.img_dim,
        args.outlier_model_path,
        args.result_dir,
    )
    f1_score = fake_image_detector(
        fake_cluster_list, all_res_paths, ground_truth_label, args.img_dim, args.refer_res_dir
    )
    return f1_score
if __name__ == '__main__':
    # NOTE: the string below is a plain no-op string statement used as an
    # informal comment, not a docstring (if-blocks have no docstrings).
    '''
    We grab 'num_real' samples from 'real_res_dir' and 'num_fake' samples from 'fake_res_dir'
    specify the 'outlier_model_path' trained from prep_steps.py
    specify 'pce_thre' calibrated from prep_steps.py
    '''
    # Command-line interface for the NoiseScope detection pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument('--real_res_dir', default='', help='the path to REAL noise residual dir')
    parser.add_argument('--fake_res_dir', default='', help='the path to FAKE noise residual dir')
    parser.add_argument('--refer_res_dir', default='', help='the path to REFERENCE noise residual dir')
    parser.add_argument('--num_real', type=int, help='The number of real images in the test set', default=500)
    parser.add_argument('--num_fake', type=int, help='The number of fake images in the test set', default=500)
    parser.add_argument('--img_dim', type=int, default=256, help='images should be in square shape.')
    parser.add_argument('--outlier_model_path', default='', help='the path to pre-trained fingerprint outlier detector')
    parser.add_argument('--result_dir', default='',
                        help='Specify the folder which saves log file and some matrix files produced in the middle')
    parser.add_argument('--pce_thre', type=float, help='T merging threshold estimated')
    args = parser.parse_args()
    detection_NoiseScope(args)
| [
"os.path.exists",
"random.sample",
"numpy.full",
"argparse.ArgumentParser",
"numpy.where",
"time.time",
"random.seed",
"numpy.array",
"os.mkdir",
"joblib.load",
"numpy.percentile",
"numpy.shape",
"logging.info",
"glob.glob"
] | [((215, 232), 'random.seed', 'random.seed', (['(6666)'], {}), '(6666)\n', (226, 232), False, 'import random\n'), ((1615, 1689), 'logging.info', 'logging.info', (['"""++++++++++PERFORM THE NEXT MERGING ITERATION++++++++++++\n"""'], {}), "('++++++++++PERFORM THE NEXT MERGING ITERATION++++++++++++\\n')\n", (1627, 1689), False, 'import logging\n'), ((3372, 3403), 'numpy.where', 'np.where', (['(pce_matrix > thre_pce)'], {}), '(pce_matrix > thre_pce)\n', (3380, 3403), True, 'import numpy as np\n'), ((9848, 9878), 'glob.glob', 'glob.glob', (["(refer_dir + '*.mat')"], {}), "(refer_dir + '*.mat')\n", (9857, 9878), False, 'import glob\n'), ((11508, 11542), 'numpy.percentile', 'np.percentile', (['refer_max_pce', '(99.5)'], {}), '(refer_max_pce, 99.5)\n', (11521, 11542), True, 'import numpy as np\n'), ((11949, 11999), 'logging.info', 'logging.info', (['"""+++++++++++++++++++++++++++++++ \n"""'], {}), "('+++++++++++++++++++++++++++++++ \\n')\n", (11961, 11999), False, 'import logging\n'), ((13823, 13854), 'joblib.load', 'joblib.load', (['outlier_model_path'], {}), '(outlier_model_path)\n', (13834, 13854), False, 'import joblib\n'), ((17364, 17389), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17387, 17389), False, 'import argparse\n'), ((2350, 2401), 'numpy.full', 'np.full', (['(num_cluster, num_cluster)', '(0)'], {'dtype': 'float'}), '((num_cluster, num_cluster), 0, dtype=float)\n', (2357, 2401), True, 'import numpy as np\n'), ((2877, 2911), 'numpy.array', 'np.array', (["new_pce_matrix['matrix']"], {}), "(new_pce_matrix['matrix'])\n", (2885, 2911), True, 'import numpy as np\n'), ((2936, 2947), 'time.time', 'time.time', ([], {}), '()\n', (2945, 2947), False, 'import time\n'), ((3120, 3131), 'time.time', 'time.time', ([], {}), '()\n', (3129, 3131), False, 'import time\n'), ((3314, 3344), 'numpy.array', 'np.array', (["pce_matrix['matrix']"], {}), "(pce_matrix['matrix'])\n", (3322, 3344), True, 'import numpy as np\n'), ((9734, 9810), 'logging.info', 
'logging.info', (['"""No model fingerprint found! The detection will stop here! \n"""'], {}), "('No model fingerprint found! The detection will stop here! \\n')\n", (9746, 9810), False, 'import logging\n'), ((12573, 12634), 'logging.info', 'logging.info', (['"""========Single cluster performance=========\n"""'], {}), "('========Single cluster performance=========\\n')\n", (12585, 12634), False, 'import logging\n'), ((12709, 12759), 'logging.info', 'logging.info', (['"""+++++++++++++++++++++++++++++++ \n"""'], {}), "('+++++++++++++++++++++++++++++++ \\n')\n", (12721, 12759), False, 'import logging\n'), ((15037, 15068), 'os.path.exists', 'os.path.exists', (['args.result_dir'], {}), '(args.result_dir)\n', (15051, 15068), False, 'import os\n'), ((15070, 15095), 'os.mkdir', 'os.mkdir', (['args.result_dir'], {}), '(args.result_dir)\n', (15078, 15095), False, 'import os\n'), ((15277, 15316), 'glob.glob', 'glob.glob', (["(args.real_res_dir + '/*.mat')"], {}), "(args.real_res_dir + '/*.mat')\n", (15286, 15316), False, 'import glob\n'), ((15367, 15406), 'glob.glob', 'glob.glob', (["(args.fake_res_dir + '/*.mat')"], {}), "(args.fake_res_dir + '/*.mat')\n", (15376, 15406), False, 'import glob\n'), ((4523, 4639), 'logging.info', 'logging.info', (['"""We detected suspicious fake clusters, NoiseScope will perform fingerprint classifier next."""'], {}), "(\n 'We detected suspicious fake clusters, NoiseScope will perform fingerprint classifier next.'\n )\n", (4535, 4639), False, 'import logging\n'), ((4739, 4861), 'logging.info', 'logging.info', (['"""Available candidate clusters are not recognized outliers, NoiseScope continues to do clustering."""'], {}), "(\n 'Available candidate clusters are not recognized outliers, NoiseScope continues to do clustering.'\n )\n", (4751, 4861), False, 'import logging\n'), ((5303, 5389), 'logging.info', 'logging.info', (['"""No fake clusters are flagged, NoiseScope will stop the detection."""'], {}), "(\n 'No fake clusters are flagged, NoiseScope 
will stop the detection.')\n", (5315, 5389), False, 'import logging\n'), ((14130, 14169), 'random.sample', 'random.sample', (['cluster_with_img_idx', '(50)'], {}), '(cluster_with_img_idx, 50)\n', (14143, 14169), False, 'import random\n'), ((10344, 10366), 'numpy.array', 'np.array', (['ground_truth'], {}), '(ground_truth)\n', (10352, 10366), True, 'import numpy as np\n'), ((10871, 10898), 'numpy.shape', 'np.shape', (['model_fingerprint'], {}), '(model_fingerprint)\n', (10879, 10898), True, 'import numpy as np\n'), ((14451, 14474), 'numpy.array', 'np.array', (['haralick_feat'], {}), '(haralick_feat)\n', (14459, 14474), True, 'import numpy as np\n')] |
import numpy as np
from sim.sim2d import sim_run
# Simulator options.
options = {}
options['FIG_SIZE'] = [8,8]  # matplotlib figure size passed to the simulator
options['OBSTACLES'] = False  # this scenario runs without obstacles
class ModelPredictiveControl:
    """Kinematic-bicycle MPC controller for the 2D simulator.

    The simulator calls ``plant_model`` to roll the model forward and
    ``cost_function`` to score a candidate control sequence ``u`` of
    alternating (pedal, steering) pairs over the prediction horizon.
    """

    def __init__(self):
        # Prediction horizon (steps) and integration time step (seconds).
        self.horizon = 20
        self.dt = 0.2
        # self.beta_t = 0
        # Reference or set point the controller will achieve.
        self.reference1 = [10, 10, 0] # first goal point
        self.reference2 = [10, 2, 3 * 3.14/2] # second goal point

    def plant_model(self, prev_state, dt, pedal, steering):
        """Advance the bicycle model one step of length ``dt``.

        :param prev_state: [x, y, heading psi, speed v]
        :param dt: integration step (seconds)
        :param pedal: acceleration command
        :param steering: steering angle (radians)
        :return: next state [x, y, psi, v]
        """
        x, y, psi, v = prev_state[0], prev_state[1], prev_state[2], prev_state[3]
        # steering_velocity = steering
        beta = steering
        # self.beta_t += self.beta_t + steering_velocity * dt
        accel = pedal
        wheel_base = 2.5 # meter
        # Euler-integrate the kinematic bicycle equations.
        next_x = x + v * np.cos(psi) * dt
        next_y = y + v * np.sin(psi) * dt
        next_psi = psi + v * (np.tan(beta) / wheel_base) * dt
        next_v = v + accel * dt - v / 25 # last term is drag
        return [next_x, next_y, next_psi, next_v]

    def cost_function(self, u, *args):
        """Accumulate tracking and smoothness cost over the horizon.

        :param u: flat control vector; u[2k] is pedal, u[2k+1] is steering
        :param args: (initial state, reference [x, y, psi])
        :return: scalar cost
        """
        state, ref = args[0], args[1]
        total = 0.0
        last_steer = 0
        for k in range(self.horizon):
            prev_v = state[3]  # speed before this step
            pedal, steer = u[2 * k], u[2 * k + 1]
            state = self.plant_model(state, self.dt, pedal, steer)
            x, y, psi, v = state[0], state[1], state[2], state[3]
            # position cost (L1 distance to the reference point)
            total += abs(x - ref[0]) + abs(y - ref[1])
            # heading cost
            total += (psi - ref[2]) ** 2
            # acceleration cost (penalize speed changes)
            total += (v - prev_v) ** 2
            # steering-rate cost; zero on the first step by construction
            if k == 0:
                last_steer = steer
            total += (steer - last_steer) ** 2
            last_steer = steer
        return total
# Launch the 2D simulator with this controller (blocks until the run ends).
sim_run(options, ModelPredictiveControl)
| [
"sim.sim2d.sim_run",
"numpy.sin",
"numpy.cos",
"numpy.tan"
] | [((2070, 2110), 'sim.sim2d.sim_run', 'sim_run', (['options', 'ModelPredictiveControl'], {}), '(options, ModelPredictiveControl)\n', (2077, 2110), False, 'from sim.sim2d import sim_run\n'), ((862, 875), 'numpy.cos', 'np.cos', (['psi_t'], {}), '(psi_t)\n', (868, 875), True, 'import numpy as np\n'), ((905, 918), 'numpy.sin', 'np.sin', (['psi_t'], {}), '(psi_t)\n', (911, 918), True, 'import numpy as np\n'), ((953, 967), 'numpy.tan', 'np.tan', (['beta_t'], {}), '(beta_t)\n', (959, 967), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
r"""
Implementation of the Merged Growing Neural Gas for temporal data.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, All rights reserved."
# __credits__ = []
__license__ = "Confidential"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "alpha"
__date__ = "2020-01-27"
__all__ = ["MergeGNG"]
import logging
from typing import List, Tuple
import numpy as np
from attr import attrib, attributes
from numpy.linalg import norm
from mgng.helpers import get_dymmy_2D_data
from mgng.validators import is_greater_zero, is_weight_factor, repr_ndarray
logger = logging.getLogger(__name__)
@attributes
class MergeGNG:
    r"""
    Class that represents a Merge Growing Neural Gas.
    Differences to default implementation
    * Neurons all kept in memory to allow numpy operations
    * Introduce half life and threshold for connections (planned). For now, only decrease.
    * Adaptation rate should depend on connection strength
    * Introduce method (half-life?, decay on all synapses) to remove very old movements (I am sure that the original implementation allows for orphans)
    * Compare with an approach of a regular neural gas with a refactory time
    * Add threshold for an activity to trigger a new neuron (hey, make a fifo). I really want to enforce this. If a neuron gets activated 3 times in a row it's tiem for a new neuron!
    * REALLY CONSIDER REMOVING THE DIOGONAL ELEMENTS! Implement neighbor learn rate .. maybe weighted by synapse strength
    * Activity is never 0 unless it is a never used neuron or one removed because it had no connections
    * Todo: did we remove neurons without connections?
    Parameters
    ----------
    n_neurons: int
        Max. number of neurons
    n_dim: int
        Output dimension (of the feature space).
    connection_decay: float
        Hyper parameter for influencing the decay of neuron connections. NOT USED RIGHT NOW
    temporal_influence: float
        The influence of the temporal memory on finding the winning neuron
    memory_weight: float
        Determines the influence of past samples in the sequence. (Kinda "how long" it looks back into the past).
    life_span: int
        How many iterations until a synapse is deleted.
        Note .. synapses of the winning neuron only are decayed (it forgets "wrong" neighbors)
    max_activity:
        Maximal activity allowed for a neuron (cf. refractory period). If a neuron is more active than this threshold,
        a new neuron is inserted between it and the second most active neuron. Note that each time the neuron is the
        winning neuron, it's activity level is increased by 1.0 and then continuously decreases continuously in
        each iteration (c.f. `decrease_activity`)
        This is different to the reference paper where the network gros in regular intervals.
        Our approach reflects a more "on demand" approach and prevents the network from growing unnecessarily.
    decrease_activity: float
        Less important .. the activity decreases exponentially ... only interesting if there are only few iterations
        between reccuring sequences
    learn_rate: float
        lorem ipsum
    learn_rate_neighbors: float
        lorem ipsum
    delta: float
        Need a better name, right? It's a parameter that decides the neuron's activity if a new neuron is added.
    allow_removal: float
        lorem ipsum
    Attributes
    ----------
    _weights: np.ndarray, :math:`n_{\text{neurons}} \times n_{\text{dim}}`
        The amount of neurons is constant in this implementation for simplicity
        reasons and speed (block operations).
    """

    # FIXME: Until pylint + attrs work nicely together (or pylint and typehints)
    # pylint: disable=unsubscriptable-object,unsupported-assignment-operation,no-member
    # Note that comment type hints are used to ensure Python 3.5 support _and_ VSCode autocomplete
    # See https://www.attrs.org/en/stable/types.html#mypy
    n_neurons = attrib(default=100)  # type: int
    n_dim = attrib(default=3)  # type: int
    connection_decay = attrib(default=0.1, validator=[is_weight_factor])  # type: float
    temporal_influence = attrib(default=0.5, validator=[is_weight_factor])  # type: float
    memory_weight = attrib(default=0.5, validator=[is_weight_factor])  # type: float
    life_span = attrib(default=10)  # type: int
    learn_rate = attrib(default=0.2, validator=[is_weight_factor])  # type: float
    learn_rate_neighbors = attrib(default=0.2, validator=[is_weight_factor])  # type: float
    decrease_activity = attrib(default=0.8, validator=[is_weight_factor])  # type: float
    # TODO find a goood name for delta (in fact, update all names)
    delta = attrib(default=0.8, validator=[is_weight_factor])  # type: float
    max_activity = attrib(default=2.0, validator=[is_greater_zero])  # type: float
    allow_removal = attrib(default=True)  # type: bool
    # I don't want this parameter truth to be told
    creation_frequency = attrib(default=5)  # type: int
    # Private variables. Default initializers depend on n_neurons and n_dim. The order matters!
    _weights = attrib(init=False, repr=repr_ndarray)  # type: np.ndarray
    _context = attrib(init=False, repr=repr_ndarray)  # type: np.ndarray
    _connections = attrib(init=False, repr=repr_ndarray)  # type: np.ndarray
    _counter = attrib(init=False, repr=False)  # type: np.ndarray
    _global_context = attrib(init=False, repr=repr_ndarray)  # type: np.ndarray
    debug = attrib(default=False)  # type: bool
    past = attrib(init=False, factory=list, repr=False)  # type: List[List[np.ndarray]]

    @_weights.default
    def _default_weights(self):
        # NOTE(review): the bare attribute access below is a no-op — looks like
        # a leftover; safe to remove.
        self.n_neurons
        return np.random.rand(self.n_neurons, self.n_dim)

    @_context.default
    def _default_context(self):
        # Per-neuron temporal context, same shape as the weights.
        return np.random.rand(self.n_neurons, self.n_dim)

    @_global_context.default
    def _default_global_context(self):
        # Network-wide context vector updated on every adapt() step.
        return np.random.rand(self.n_dim)

    @_connections.default
    def _default_connections(self):
        # XXX: we keep all neurons in memory such that we can do block operations
        return np.zeros((self.n_neurons, self.n_neurons))

    @_counter.default
    def _default_counter(self):
        # Activity counter per neuron; 0 means unused/removed.
        return np.zeros(self.n_neurons)

    def _decay(self, first: int):
        """
        Decrease all synapses of a neuron but don't allow negative synampses.
        Parameters
        ----------
        first : int
            Index of the neuron
        """
        def decay_vector(vector: np.ndarray):
            # In-place decrement by 1/life_span, clamped at zero.
            vector -= 1.0 / self.life_span
            vector[vector < 0] = 0
        logger.debug("Decaying connections before:\n%s", self._connections)
        # Decay both the row and the column to keep the matrix symmetric.
        decay_vector(self._connections[first, :])
        decay_vector(self._connections[:, first])
        logger.debug("after \n%s", self._connections)

    def kill_orphans(self):
        # Reset the activity counter of neurons that lost all their connections.
        # argwhere not suitable for indexing
        orphans = np.nonzero(np.sum(self._connections, axis=1) == 0)
        logger.debug("Orphans: %s", orphans)
        logger.debug("counter before: %s", self._counter)
        self._counter[orphans] = 0
        logger.debug("counter after: %s", self._counter)

    def adapt(self, sample: np.ndarray) -> Tuple[int, int]:
        """
        Single adaptation step
        Parameters
        ----------
        sample : np.ndarray, shape: :math:`(n_{\text{dim}},)`
            A single sample.
        Returns
        -------
        Tuple[int,int]
            Optionally returns the first and second winning
            neurons used for Hebbian learning.
        """
        if self.debug:
            # NOTE(review): get_active_weights() returns a tuple, and tuples
            # have no .copy() — this branch raises AttributeError when
            # debug=True; confirm intent.
            self.past.append(
                [
                    self._weights.copy(),
                    self._context.copy(),
                    self._connections.copy(),
                    self._counter.copy(),
                    self.get_active_weights().copy(),
                    sample.copy(),
                ]
            )
        # Distances in feature space and in context space.
        dist_weights = norm(self._weights - sample[np.newaxis, :], axis=-1)
        dist_context = norm(self._context - self._global_context, axis=-1)
        logger.debug("|weights| = %s, |context| = %s", dist_weights, dist_context)
        logger.debug("connections at beginning:\n%s", self._connections)
        # Todo remove this variable
        distance = (
            1 - self.temporal_influence
        ) * dist_weights + self.temporal_influence * dist_context
        winners = np.argsort(
            distance
            # (1 - self.temporal_influence) * dist_weights + self.temporal_influence * dist_context
        )
        logger.debug("winners %s, %s", winners, distance[winners])
        first = winners[0]
        second = winners[1]
        assert distance[first] <= distance[second]
        old_global_context = self._global_context.copy()
        # Blend the winner's weight and context into the new global context.
        # fmt: off
        self._global_context = (
            (1 - self.memory_weight) * self._weights[first, :]
            + self.memory_weight * self._context[first, :]
        )
        # fmt: on
        self._decay(first)  # Let's decay first so that the new new connection has maximal value
        logger.debug("Adding edge to:\n%s", self._connections)
        # Symmetric connection matrix
        self._connections[first, second] = self._connections[second, first] = 1.0
        # Diagonal only needed when the connection values are used in the update rule below.
        # then it should probably not be 1.0
        # self._connections[first, first] = 1.0
        logger.debug("after\n%s", self._connections)
        # Needs to be after new connections are created. otherwise the counter of first might be reset
        self.kill_orphans()
        # Move the winner towards the sample (and its context towards the
        # previous global context), then do the same for its neighbors.
        self._weights[first, :] += self.learn_rate * (sample - self._weights[first, :])
        self._context[first, :] += self.learn_rate * (old_global_context - self._context[first, :])
        neighbors = np.nonzero(self._connections[first, :])  # == non-zeros in the row
        logger.debug("winning neuron's neighbors %s", neighbors)
        # Suggestion: weight adaptation by age of synapse
        # self._weights[neighbors, :] += self.learn_rate * self._connections[first, neighbors] *\
        #     (sample - self._weights[neighbors, :])
        # self._context[neighbors, :] += self.learn_rate * self._connections[first, neighbors] *\
        #     (old_global_context - self._context[neighbors, :])
        self._weights[neighbors, :] += self.learn_rate_neighbors * (
            sample - self._weights[neighbors, :]
        )
        self._context[neighbors, :] += self.learn_rate_neighbors * (
            old_global_context - self._context[neighbors, :]
        )
        self._counter[first] += 1
        logger.debug("New counter: %s, \n%s", self._counter, self._connections)
        return first, second

    def grow(self):
        """
        Entropy maximization by adding neurons in regions of high activity.
        Note: this picks the weakest neuron. TODO this needs to be implemented too!
        """
        # Warning .. error when the max neuron does not have neighbors (pretty much impossible)
        most = np.argmax(self._counter)
        its_neighbors = np.nonzero(self._connections[most, :])  # e.g., (array([0, 2]),)
        logger.debug(its_neighbors)
        most_active_neighbors = np.argsort(
            self._counter[its_neighbors]
        )  # get the activations and sort them
        logger.debug(most_active_neighbors)
        # The last entry is the winning neuron itself (WATCH OUT unless the diagonal is zero!, in that case, use -1)
        neighbor = its_neighbors[0][most_active_neighbors[-1]]
        logger.debug(
            "Most active: %d\nIts neighbors: %s, Its most active neighbor: %s",
            most,
            its_neighbors,
            neighbor,
        )
        # Recycle the weakest neuron as the new one.
        new = self.kill_weakest()
        # NOTE(review): this overwrites the user-configured `delta` attrib on
        # every call — looks unintentional; confirm before relying on a custom
        # `delta` value.
        self.delta = 0.8  # Yet another parameter :-(
        # Place the new neuron halfway between the two most active neurons and
        # redistribute their activity.
        self._weights[new, :] = 0.5 * (self._weights[most, :] + self._weights[neighbor, :])
        self._context[new, :] = 0.5 * (self._context[most, :] + self._context[neighbor, :])
        self._counter[new] = self.delta * (self._counter[most] + self._counter[neighbor])
        self._counter[most] *= 1 - self.delta
        self._counter[neighbor] *= 1 - self.delta
        # Rewire: break the (most, neighbor) edge and connect both to `new`.
        self._connections[most, neighbor] = self._connections[neighbor, most] = 0.0
        self._connections[new, neighbor] = self._connections[neighbor, new] = 1.0
        self._connections[most, new] = self._connections[new, most] = 1.0

    def kill_weakest(self) -> np.signedinteger:
        """
        Finds the weakest neuron (or the first with zero activity in the list)
        and returns its index
        Returns
        -------
        int
            Index of the neuron
        """
        least = np.argmin(self._counter)  # That is a good metric? Probably yes
        logger.info("Least active neuron: %d, value: %f", least, self._counter[least])
        logger.info("Did it have conntections?\n%s", self._connections[least, :])
        if np.sum(self._connections[least, :]) > 0:
            logger.warning(
                "Killing existing neuron. Consider larger pool! Activity: %f", self._counter[least]
            )
        # Remove connections:
        self._connections[least, :] = 0.0
        self._connections[:, least] = 0.0
        return least

    def learn(self, samples: np.ndarray, epochs: int):
        r"""
        Batch learning
        Parameters
        ----------
        samples : np.ndarray
            Row array of points. Shape :math:`n_{\text{samples}} \times n_{\text{dim}}`.
        epochs : int
            Number of repetitions.
        """
        assert samples.shape[1] == self.n_dim
        for e in range(epochs):
            for i, sample in enumerate(samples):
                logger.info("\n\n\n%s\nSample: %d, Epoch: %d", "*" * 24, i, e)
                self.adapt(sample)
                self._counter *= self.decrease_activity
                # NOTE(review): `if True:` makes the else-branch (grow every
                # `creation_frequency` samples) dead code — kept here as the
                # alternative growth strategy.
                if True:
                    if np.max(self._counter) >= self.max_activity:
                        self.grow()
                else:
                    if i % self.creation_frequency == self.creation_frequency - 1:
                        # Make this a factor depending on the activity of neurons
                        self.grow()

    def get_active_weights(self):
        # Returns (weights of active neurons, weights of inactive neurons).
        # Watchout there is an argwhere not an nonzero
        return (
            self._weights[np.nonzero(self._counter > 0), :],
            self._weights[np.nonzero(self._counter <= 0), :],
        )
if __name__ == "__main__":
    # Small smoke-test/demo: build a network and plot the dummy 2D trajectory.
    import matplotlib.pyplot as plt  # type: ignore

    mgng = MergeGNG(connection_decay=0.1)
    print(mgng)
    # NOTE(review): `a` is assigned but never used — likely leftover debugging.
    a = mgng.n_neurons
    X = get_dymmy_2D_data(20)
    print(repr_ndarray(X))
    plt.plot(X[0, :], X[1, :])
    plt.show()
| [
"logging.getLogger",
"numpy.random.rand",
"mgng.validators.repr_ndarray",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.max",
"attr.attrib",
"numpy.argsort",
"numpy.zeros",
"numpy.sum",
"numpy.nonzero",
"numpy.linalg.norm",
"numpy.argmin",
"mgng.helpers.get_dymmy_2D_data",
"matplotlib... | [((626, 653), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'import logging\n'), ((4031, 4050), 'attr.attrib', 'attrib', ([], {'default': '(100)'}), '(default=100)\n', (4037, 4050), False, 'from attr import attrib, attributes\n'), ((4076, 4093), 'attr.attrib', 'attrib', ([], {'default': '(3)'}), '(default=3)\n', (4082, 4093), False, 'from attr import attrib, attributes\n'), ((4130, 4179), 'attr.attrib', 'attrib', ([], {'default': '(0.1)', 'validator': '[is_weight_factor]'}), '(default=0.1, validator=[is_weight_factor])\n', (4136, 4179), False, 'from attr import attrib, attributes\n'), ((4220, 4269), 'attr.attrib', 'attrib', ([], {'default': '(0.5)', 'validator': '[is_weight_factor]'}), '(default=0.5, validator=[is_weight_factor])\n', (4226, 4269), False, 'from attr import attrib, attributes\n'), ((4305, 4354), 'attr.attrib', 'attrib', ([], {'default': '(0.5)', 'validator': '[is_weight_factor]'}), '(default=0.5, validator=[is_weight_factor])\n', (4311, 4354), False, 'from attr import attrib, attributes\n'), ((4386, 4404), 'attr.attrib', 'attrib', ([], {'default': '(10)'}), '(default=10)\n', (4392, 4404), False, 'from attr import attrib, attributes\n'), ((4435, 4484), 'attr.attrib', 'attrib', ([], {'default': '(0.2)', 'validator': '[is_weight_factor]'}), '(default=0.2, validator=[is_weight_factor])\n', (4441, 4484), False, 'from attr import attrib, attributes\n'), ((4527, 4576), 'attr.attrib', 'attrib', ([], {'default': '(0.2)', 'validator': '[is_weight_factor]'}), '(default=0.2, validator=[is_weight_factor])\n', (4533, 4576), False, 'from attr import attrib, attributes\n'), ((4616, 4665), 'attr.attrib', 'attrib', ([], {'default': '(0.8)', 'validator': '[is_weight_factor]'}), '(default=0.8, validator=[is_weight_factor])\n', (4622, 4665), False, 'from attr import attrib, attributes\n'), ((4760, 4809), 'attr.attrib', 'attrib', ([], {'default': '(0.8)', 'validator': '[is_weight_factor]'}), 
'(default=0.8, validator=[is_weight_factor])\n', (4766, 4809), False, 'from attr import attrib, attributes\n'), ((4845, 4893), 'attr.attrib', 'attrib', ([], {'default': '(2.0)', 'validator': '[is_greater_zero]'}), '(default=2.0, validator=[is_greater_zero])\n', (4851, 4893), False, 'from attr import attrib, attributes\n'), ((4929, 4949), 'attr.attrib', 'attrib', ([], {'default': '(True)'}), '(default=True)\n', (4935, 4949), False, 'from attr import attrib, attributes\n'), ((5041, 5058), 'attr.attrib', 'attrib', ([], {'default': '(5)'}), '(default=5)\n', (5047, 5058), False, 'from attr import attrib, attributes\n'), ((5184, 5221), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'repr': 'repr_ndarray'}), '(init=False, repr=repr_ndarray)\n', (5190, 5221), False, 'from attr import attrib, attributes\n'), ((5257, 5294), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'repr': 'repr_ndarray'}), '(init=False, repr=repr_ndarray)\n', (5263, 5294), False, 'from attr import attrib, attributes\n'), ((5334, 5371), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'repr': 'repr_ndarray'}), '(init=False, repr=repr_ndarray)\n', (5340, 5371), False, 'from attr import attrib, attributes\n'), ((5407, 5437), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (5413, 5437), False, 'from attr import attrib, attributes\n'), ((5480, 5517), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'repr': 'repr_ndarray'}), '(init=False, repr=repr_ndarray)\n', (5486, 5517), False, 'from attr import attrib, attributes\n'), ((5551, 5572), 'attr.attrib', 'attrib', ([], {'default': '(False)'}), '(default=False)\n', (5557, 5572), False, 'from attr import attrib, attributes\n'), ((5598, 5642), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'factory': 'list', 'repr': '(False)'}), '(init=False, factory=list, repr=False)\n', (5604, 5642), False, 'from attr import attrib, attributes\n'), ((14817, 14838), 'mgng.helpers.get_dymmy_2D_data', 
'get_dymmy_2D_data', (['(20)'], {}), '(20)\n', (14834, 14838), False, 'from mgng.helpers import get_dymmy_2D_data\n'), ((14872, 14898), 'matplotlib.pyplot.plot', 'plt.plot', (['X[0, :]', 'X[1, :]'], {}), '(X[0, :], X[1, :])\n', (14880, 14898), True, 'import matplotlib.pyplot as plt\n'), ((14903, 14913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14911, 14913), True, 'import matplotlib.pyplot as plt\n'), ((5768, 5810), 'numpy.random.rand', 'np.random.rand', (['self.n_neurons', 'self.n_dim'], {}), '(self.n_neurons, self.n_dim)\n', (5782, 5810), True, 'import numpy as np\n'), ((5881, 5923), 'numpy.random.rand', 'np.random.rand', (['self.n_neurons', 'self.n_dim'], {}), '(self.n_neurons, self.n_dim)\n', (5895, 5923), True, 'import numpy as np\n'), ((6008, 6034), 'numpy.random.rand', 'np.random.rand', (['self.n_dim'], {}), '(self.n_dim)\n', (6022, 6034), True, 'import numpy as np\n'), ((6195, 6237), 'numpy.zeros', 'np.zeros', (['(self.n_neurons, self.n_neurons)'], {}), '((self.n_neurons, self.n_neurons))\n', (6203, 6237), True, 'import numpy as np\n'), ((6308, 6332), 'numpy.zeros', 'np.zeros', (['self.n_neurons'], {}), '(self.n_neurons)\n', (6316, 6332), True, 'import numpy as np\n'), ((8055, 8107), 'numpy.linalg.norm', 'norm', (['(self._weights - sample[np.newaxis, :])'], {'axis': '(-1)'}), '(self._weights - sample[np.newaxis, :], axis=-1)\n', (8059, 8107), False, 'from numpy.linalg import norm\n'), ((8131, 8182), 'numpy.linalg.norm', 'norm', (['(self._context - self._global_context)'], {'axis': '(-1)'}), '(self._context - self._global_context, axis=-1)\n', (8135, 8182), False, 'from numpy.linalg import norm\n'), ((8523, 8543), 'numpy.argsort', 'np.argsort', (['distance'], {}), '(distance)\n', (8533, 8543), True, 'import numpy as np\n'), ((9972, 10011), 'numpy.nonzero', 'np.nonzero', (['self._connections[first, :]'], {}), '(self._connections[first, :])\n', (9982, 10011), True, 'import numpy as np\n'), ((11204, 11228), 'numpy.argmax', 'np.argmax', 
(['self._counter'], {}), '(self._counter)\n', (11213, 11228), True, 'import numpy as np\n'), ((11253, 11291), 'numpy.nonzero', 'np.nonzero', (['self._connections[most, :]'], {}), '(self._connections[most, :])\n', (11263, 11291), True, 'import numpy as np\n'), ((11386, 11426), 'numpy.argsort', 'np.argsort', (['self._counter[its_neighbors]'], {}), '(self._counter[its_neighbors])\n', (11396, 11426), True, 'import numpy as np\n'), ((12868, 12892), 'numpy.argmin', 'np.argmin', (['self._counter'], {}), '(self._counter)\n', (12877, 12892), True, 'import numpy as np\n'), ((14850, 14865), 'mgng.validators.repr_ndarray', 'repr_ndarray', (['X'], {}), '(X)\n', (14862, 14865), False, 'from mgng.validators import is_greater_zero, is_weight_factor, repr_ndarray\n'), ((13114, 13149), 'numpy.sum', 'np.sum', (['self._connections[least, :]'], {}), '(self._connections[least, :])\n', (13120, 13149), True, 'import numpy as np\n'), ((7021, 7054), 'numpy.sum', 'np.sum', (['self._connections'], {'axis': '(1)'}), '(self._connections, axis=1)\n', (7027, 7054), True, 'import numpy as np\n'), ((14535, 14564), 'numpy.nonzero', 'np.nonzero', (['(self._counter > 0)'], {}), '(self._counter > 0)\n', (14545, 14564), True, 'import numpy as np\n'), ((14596, 14626), 'numpy.nonzero', 'np.nonzero', (['(self._counter <= 0)'], {}), '(self._counter <= 0)\n', (14606, 14626), True, 'import numpy as np\n'), ((14098, 14119), 'numpy.max', 'np.max', (['self._counter'], {}), '(self._counter)\n', (14104, 14119), True, 'import numpy as np\n')] |
import metagraph.core.compiler as mg_compiler
from dask.delayed import delayed
from metagraph.tests.util import default_plugin_resolver, IdentityCompiler
from metagraph import translator, abstract_algorithm, concrete_algorithm
import networkx as nx
from metagraph.core.resolver import Resolver
from metagraph.core.dask.resolver import DaskResolver
from metagraph import PluginRegistry
from pytest import fixture
import pytest
import numpy as np
import dask
def test_dask_subgraph():
@delayed
def func1(x): # pragma: no cover
return x + 1
z = func1(func1(1))
subgraph = mg_compiler.DaskSubgraph(tasks=z.dask, input_keys=[], output_key=z.key)
assert len(subgraph.tasks) == 2
assert len(subgraph.input_keys) == 0
assert isinstance(subgraph.output_key, str)
def test_extract_subgraphs_noop():
@delayed
def func1(x): # pragma: no cover
return x + 1
z = func1(func1(1))
subgraphs = mg_compiler.extract_compilable_subgraphs(
z.dask, output_keys=[z.key], compiler="noexist"
)
assert len(subgraphs) == 0
def test_extract_subgraphs_singleton(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
z = scale_func(a, 2.0)
# default behavior is to include compilable single node subgraphs
subgraphs = mg_compiler.extract_compilable_subgraphs(
z.__dask_graph__(), output_keys=[z.key], compiler="identity_comp"
)
assert len(subgraphs) == 1
# disable
subgraphs = mg_compiler.extract_compilable_subgraphs(
z._dsk, compiler="identity_comp", output_keys=[z.key], include_singletons=False
)
assert len(subgraphs) == 0
def test_extract_subgraphs_chain(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
z = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
subgraphs = mg_compiler.extract_compilable_subgraphs(
z.__dask_graph__(), output_keys=[z.key], compiler="identity_comp"
)
assert len(subgraphs) == 1
subgraph = subgraphs[0]
assert len(subgraph.tasks) == 3
# FIXME: This is zero because the input numpy array is not wrapped in its own placeholder object
assert len(subgraph.input_keys) == 0
assert subgraph.output_key == z.key
def test_extract_subgraphs_two_chains(res):
"""Two chains feeding into a combining node"""
a = np.arange(100)
scale_func = res.algos.testing.scale
z1 = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
z2 = scale_func(scale_func(scale_func(a, 2.5), 3.5), 4.5)
merge = res.algos.testing.add(z1, z2)
# The merge node cannot be fused with z1 or z2 without reducing parallelism in the graph
subgraphs = mg_compiler.extract_compilable_subgraphs(
merge.__dask_graph__(),
compiler="identity_comp",
output_keys=[merge.key],
include_singletons=False, # exclude the add node
)
assert len(subgraphs) == 2
for subgraph in subgraphs:
assert len(subgraph.tasks) == 3
# FIXME: This is zero because the input numpy array is not wrapped in its own placeholder object
assert len(subgraph.input_keys) == 0
# we don't know what order the two chains will come out in
assert subgraph.output_key in (z1.key, z2.key)
# now check if we get the add node
subgraphs = mg_compiler.extract_compilable_subgraphs(
merge.__dask_graph__(),
output_keys=[merge.key],
compiler="identity_comp",
include_singletons=True,
)
assert len(subgraphs) == 3
assert merge.key in [s.output_key for s in subgraphs]
def test_extract_subgraphs_three_chains(res):
"""Two chains feeding into a third chain"""
a = np.arange(100)
scale_func = res.algos.testing.scale
z1 = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
z2 = scale_func(scale_func(scale_func(a, 2.5), 3.5), 4.5)
merge = res.algos.testing.add(z1, z2)
ans = scale_func(merge, 2.8)
# The merge node cannot be fused with z1 or z2 without reducing parallelism in the graph,
# but the merge node can start the final chain
subgraphs = mg_compiler.extract_compilable_subgraphs(
ans.__dask_graph__(), output_keys=[ans.key], compiler="identity_comp"
)
assert len(subgraphs) == 3
# separate the final chain from the input chains
final_chain = None
input_chains = []
for subgraph in subgraphs:
# FIXME: key property
if subgraph.output_key == ans.key:
assert (
final_chain is None
), "found more than one subgraph with key of final chain"
final_chain = subgraph
else:
input_chains.append(subgraph)
# final chain tests
assert len(final_chain.tasks) == 2
assert len(final_chain.input_keys) == 2
for input_key in final_chain.input_keys:
# FIXME: key property
assert input_key in (z1.key, z2.key)
assert (
final_chain.output_key == ans.key
) # true by construction, checked here for completeness
# input_chain tests
for subgraph in input_chains:
assert len(subgraph.tasks) == 3
# FIXME: This is zero because the input numpy array is not wrapped in its own placeholder object
assert len(subgraph.input_keys) == 0
# we don't know what order the two chains will come out in
# FIXME: key property
assert subgraph.output_key in (z1.key, z2.key)
def test_extract_subgraphs_diamond(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
top_node = res.algos.testing.offset(a, offset=2.0)
left_node = scale_func(top_node, 3.0)
right_node = scale_func(top_node, 5.0)
bottom_node = res.algos.testing.add(left_node, right_node)
result_node = bottom_node
subgraphs = mg_compiler.extract_compilable_subgraphs(
result_node.__dask_graph__(),
output_keys=[result_node.key],
compiler="identity_comp",
)
assert len(subgraphs) == 4
key_to_node = {
top_node.key: top_node,
left_node.key: left_node,
right_node.key: right_node,
bottom_node.key: bottom_node,
}
node_key_to_input_node_keys = {
top_node.key: set(),
left_node.key: {top_node.key},
right_node.key: {top_node.key},
bottom_node.key: {left_node.key, right_node.key},
}
for subgraph in subgraphs:
expected_input_node_keys = node_key_to_input_node_keys[subgraph.output_key]
assert set(subgraph.input_keys) == expected_input_node_keys
subgraphs = mg_compiler.extract_compilable_subgraphs(
result_node.__dask_graph__(),
compiler="identity_comp",
output_keys=[result_node.key],
include_singletons=False,
)
assert len(subgraphs) == 0
def test_compile_subgraphs_noop(res):
a = res.wrappers.NodeSet.NumpyNodeSet(np.arange(100))
compiler = res.compilers["identity_comp"]
optimized_dsk = mg_compiler.compile_subgraphs(
a.__dask_graph__(), output_keys=[a.key], compiler=compiler
)
assert len(optimized_dsk) == 1
assert a.key in optimized_dsk
def test_compile_subgraphs_three_chains(res):
"""Compile Y-shaped graph"""
a = np.arange(100)
scale_func = res.algos.testing.scale
z1 = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
z2 = scale_func(scale_func(scale_func(a, 2.5), 3.5), 4.5)
merge = res.algos.testing.add(z1, z2)
ans = scale_func(merge, 2.8)
compiler = res.compilers["identity_comp"]
optimized_dsk = mg_compiler.compile_subgraphs(
ans.__dask_graph__(), output_keys=[ans.key], compiler=compiler
)
assert len(optimized_dsk) == 3
assert z1.key in optimized_dsk
assert z2.key in optimized_dsk
assert ans.key in optimized_dsk
optimized_result = dask.core.get(optimized_dsk, ans.key)
unoptimized_result = ans.compute(optimize_graph=False)
numpy_result = 2.8 * ((a * 2 * 3 * 4) + (a * 2.5 * 3.5 * 4.5))
np.testing.assert_array_equal(optimized_result, numpy_result)
np.testing.assert_array_equal(unoptimized_result, numpy_result)
def test_compile_subgraph_kwargs(res):
"""Compile subgraph with task that has kwargs"""
a = np.arange(100)
offset_func = res.algos.testing.offset
z = offset_func(offset_func(a=a, offset=1.0), offset=2.0)
compiler = res.compilers["identity_comp"]
optimized_dsk = mg_compiler.compile_subgraphs(
z.__dask_graph__(), output_keys=[z.key], compiler=compiler
)
assert len(optimized_dsk) == 1
optimized_result = dask.core.get(optimized_dsk, z.key)
unoptimized_result = z.compute(optimize_graph=False)
numpy_result = a + 1 + 2
np.testing.assert_array_equal(optimized_result, numpy_result)
np.testing.assert_array_equal(unoptimized_result, numpy_result)
def test_extract_subgraphs_multiple_outputs(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
x = scale_func(a, 2.0)
y = scale_func(x, 3.0)
z = scale_func(y, 4.0)
subgraphs = mg_compiler.extract_compilable_subgraphs(
z.__dask_graph__(), output_keys=[z.key, y.key], compiler="identity_comp"
)
assert len(subgraphs) == 2
for subgraph in subgraphs:
if subgraph.output_key == z.key:
assert len(subgraph.tasks) == 1
assert subgraph.input_keys == [y.key]
elif subgraph.output_key == y.key:
assert len(subgraph.tasks) == 2
# FIXME: This is zero because the input numpy array is not wrapped in its own placeholder object
assert subgraph.input_keys == []
else:
assert Fail, f"unexpected subgraph with output key {subgraph.output_key}"
def test_compile_subgraphs_multiple_outputs(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
x = scale_func(a, 2.0)
y = scale_func(x, 3.0)
z = scale_func(y, 4.0)
compiler = res.compilers["identity_comp"]
optimized_dsk = mg_compiler.compile_subgraphs(
z.__dask_graph__(), output_keys=[z.key, y.key], compiler=compiler
)
assert len(optimized_dsk) == 2
z_comp, y_comp = dask.core.get(optimized_dsk, [z.key, y.key])
np.testing.assert_array_equal(z_comp, a * 2 * 3 * 4)
np.testing.assert_array_equal(y_comp, a * 2 * 3)
def test_optimize(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
x = scale_func(a, 2.0)
y = scale_func(x, 3.0)
z = scale_func(y, 4.0)
compiler = res.compilers["identity_comp"]
optimized_dsk = mg_compiler.optimize(
z.__dask_graph__(), output_keys=[z.key, y.key], compiler=compiler
)
assert len(optimized_dsk) == 2
def test_optimize_cull(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
z1 = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
z2 = scale_func(scale_func(scale_func(a, 2.5), 3.5), 4.5)
merge = res.algos.testing.add(z1, z2)
ans = scale_func(merge, 2.8)
compiler = res.compilers["identity_comp"]
optimized_dsk = mg_compiler.optimize(
ans.__dask_graph__(), output_keys=[z2.key], compiler=compiler
)
assert len(optimized_dsk) == 1
def test_automatic_optimize(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
x = scale_func(a, 2.0)
y = scale_func(x, 3.0)
z = scale_func(y, 4.0)
compiler = res.compilers["identity_comp"]
# expect 1 compiled chain
compiler.clear_trace()
np.testing.assert_array_equal(z.compute(), a * 2 * 3 * 4)
assert len(compiler.compile_subgraph_calls) == 1
# expect 2 compiled chains
compiler.clear_trace()
result = dask.compute(z, y)
np.testing.assert_array_equal(result[0], a * 2 * 3 * 4)
np.testing.assert_array_equal(result[1], a * 2 * 3)
assert len(compiler.compile_subgraph_calls) == 2
# expect no compiled chains
compiler.clear_trace()
np.testing.assert_array_equal(z.compute(optimize_graph=False), a * 2 * 3 * 4)
assert len(compiler.compile_subgraph_calls) == 0
@fixture
def res():
from metagraph.plugins.core.types import Vector
from metagraph.plugins.numpy.types import NumpyVectorType
@abstract_algorithm("testing.add")
def testing_add(a: Vector, b: Vector) -> Vector: # pragma: no cover
pass
@concrete_algorithm("testing.add", compiler="identity_comp")
def compiled_add(a: NumpyVectorType, b: NumpyVectorType) -> NumpyVectorType:
return a + b
@abstract_algorithm("testing.scale")
def testing_scale(a: Vector, scale: float) -> Vector: # pragma: no cover
pass
@concrete_algorithm("testing.scale", compiler="identity_comp")
def compiled_scale(a: NumpyVectorType, scale: float) -> NumpyVectorType:
return a * scale
@abstract_algorithm("testing.offset")
def testing_offset(a: Vector, *, offset: float) -> Vector: # pragma: no cover
pass
@concrete_algorithm("testing.offset", compiler="identity_comp")
def compiled_offset(a: NumpyVectorType, *, offset: float) -> NumpyVectorType:
return a + offset
registry = PluginRegistry("test_subgraphs_plugin")
registry.register(testing_add)
registry.register(compiled_add)
registry.register(testing_scale)
registry.register(compiled_scale)
registry.register(testing_offset)
registry.register(compiled_offset)
registry.register(IdentityCompiler())
resolver = Resolver()
resolver.load_plugins_from_environment()
resolver.register(registry.plugins)
return DaskResolver(resolver)
| [
"dask.core.get",
"numpy.arange",
"dask.compute",
"metagraph.core.compiler.DaskSubgraph",
"metagraph.core.compiler.extract_compilable_subgraphs",
"metagraph.core.dask.resolver.DaskResolver",
"metagraph.core.resolver.Resolver",
"metagraph.abstract_algorithm",
"metagraph.tests.util.IdentityCompiler",
... | [((598, 669), 'metagraph.core.compiler.DaskSubgraph', 'mg_compiler.DaskSubgraph', ([], {'tasks': 'z.dask', 'input_keys': '[]', 'output_key': 'z.key'}), '(tasks=z.dask, input_keys=[], output_key=z.key)\n', (622, 669), True, 'import metagraph.core.compiler as mg_compiler\n'), ((946, 1039), 'metagraph.core.compiler.extract_compilable_subgraphs', 'mg_compiler.extract_compilable_subgraphs', (['z.dask'], {'output_keys': '[z.key]', 'compiler': '"""noexist"""'}), "(z.dask, output_keys=[z.key],\n compiler='noexist')\n", (986, 1039), True, 'import metagraph.core.compiler as mg_compiler\n'), ((1134, 1148), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1143, 1148), True, 'import numpy as np\n'), ((1488, 1613), 'metagraph.core.compiler.extract_compilable_subgraphs', 'mg_compiler.extract_compilable_subgraphs', (['z._dsk'], {'compiler': '"""identity_comp"""', 'output_keys': '[z.key]', 'include_singletons': '(False)'}), "(z._dsk, compiler='identity_comp',\n output_keys=[z.key], include_singletons=False)\n", (1528, 1613), True, 'import metagraph.core.compiler as mg_compiler\n'), ((1704, 1718), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1713, 1718), True, 'import numpy as np\n'), ((2342, 2356), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2351, 2356), True, 'import numpy as np\n'), ((3683, 3697), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (3692, 3697), True, 'import numpy as np\n'), ((5475, 5489), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (5484, 5489), True, 'import numpy as np\n'), ((7199, 7213), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (7208, 7213), True, 'import numpy as np\n'), ((7795, 7832), 'dask.core.get', 'dask.core.get', (['optimized_dsk', 'ans.key'], {}), '(optimized_dsk, ans.key)\n', (7808, 7832), False, 'import dask\n'), ((7963, 8024), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['optimized_result', 'numpy_result'], {}), '(optimized_result, 
numpy_result)\n', (7992, 8024), True, 'import numpy as np\n'), ((8029, 8092), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['unoptimized_result', 'numpy_result'], {}), '(unoptimized_result, numpy_result)\n', (8058, 8092), True, 'import numpy as np\n'), ((8195, 8209), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (8204, 8209), True, 'import numpy as np\n'), ((8546, 8581), 'dask.core.get', 'dask.core.get', (['optimized_dsk', 'z.key'], {}), '(optimized_dsk, z.key)\n', (8559, 8581), False, 'import dask\n'), ((8672, 8733), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['optimized_result', 'numpy_result'], {}), '(optimized_result, numpy_result)\n', (8701, 8733), True, 'import numpy as np\n'), ((8738, 8801), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['unoptimized_result', 'numpy_result'], {}), '(unoptimized_result, numpy_result)\n', (8767, 8801), True, 'import numpy as np\n'), ((8862, 8876), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (8871, 8876), True, 'import numpy as np\n'), ((9743, 9757), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (9752, 9757), True, 'import numpy as np\n'), ((10114, 10158), 'dask.core.get', 'dask.core.get', (['optimized_dsk', '[z.key, y.key]'], {}), '(optimized_dsk, [z.key, y.key])\n', (10127, 10158), False, 'import dask\n'), ((10163, 10215), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['z_comp', '(a * 2 * 3 * 4)'], {}), '(z_comp, a * 2 * 3 * 4)\n', (10192, 10215), True, 'import numpy as np\n'), ((10220, 10268), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['y_comp', '(a * 2 * 3)'], {}), '(y_comp, a * 2 * 3)\n', (10249, 10268), True, 'import numpy as np\n'), ((10303, 10317), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (10312, 10317), True, 'import numpy as np\n'), ((10683, 10697), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (10692, 10697), True, 'import numpy 
as np\n'), ((11182, 11196), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (11191, 11196), True, 'import numpy as np\n'), ((11611, 11629), 'dask.compute', 'dask.compute', (['z', 'y'], {}), '(z, y)\n', (11623, 11629), False, 'import dask\n'), ((11634, 11689), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result[0]', '(a * 2 * 3 * 4)'], {}), '(result[0], a * 2 * 3 * 4)\n', (11663, 11689), True, 'import numpy as np\n'), ((11694, 11745), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result[1]', '(a * 2 * 3)'], {}), '(result[1], a * 2 * 3)\n', (11723, 11745), True, 'import numpy as np\n'), ((12136, 12169), 'metagraph.abstract_algorithm', 'abstract_algorithm', (['"""testing.add"""'], {}), "('testing.add')\n", (12154, 12169), False, 'from metagraph import translator, abstract_algorithm, concrete_algorithm\n'), ((12262, 12321), 'metagraph.concrete_algorithm', 'concrete_algorithm', (['"""testing.add"""'], {'compiler': '"""identity_comp"""'}), "('testing.add', compiler='identity_comp')\n", (12280, 12321), False, 'from metagraph import translator, abstract_algorithm, concrete_algorithm\n'), ((12430, 12465), 'metagraph.abstract_algorithm', 'abstract_algorithm', (['"""testing.scale"""'], {}), "('testing.scale')\n", (12448, 12465), False, 'from metagraph import translator, abstract_algorithm, concrete_algorithm\n'), ((12563, 12624), 'metagraph.concrete_algorithm', 'concrete_algorithm', (['"""testing.scale"""'], {'compiler': '"""identity_comp"""'}), "('testing.scale', compiler='identity_comp')\n", (12581, 12624), False, 'from metagraph import translator, abstract_algorithm, concrete_algorithm\n'), ((12733, 12769), 'metagraph.abstract_algorithm', 'abstract_algorithm', (['"""testing.offset"""'], {}), "('testing.offset')\n", (12751, 12769), False, 'from metagraph import translator, abstract_algorithm, concrete_algorithm\n'), ((12872, 12934), 'metagraph.concrete_algorithm', 'concrete_algorithm', (['"""testing.offset"""'], 
{'compiler': '"""identity_comp"""'}), "('testing.offset', compiler='identity_comp')\n", (12890, 12934), False, 'from metagraph import translator, abstract_algorithm, concrete_algorithm\n'), ((13059, 13098), 'metagraph.PluginRegistry', 'PluginRegistry', (['"""test_subgraphs_plugin"""'], {}), "('test_subgraphs_plugin')\n", (13073, 13098), False, 'from metagraph import PluginRegistry\n'), ((13380, 13390), 'metagraph.core.resolver.Resolver', 'Resolver', ([], {}), '()\n', (13388, 13390), False, 'from metagraph.core.resolver import Resolver\n'), ((13488, 13510), 'metagraph.core.dask.resolver.DaskResolver', 'DaskResolver', (['resolver'], {}), '(resolver)\n', (13500, 13510), False, 'from metagraph.core.dask.resolver import DaskResolver\n'), ((6853, 6867), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (6862, 6867), True, 'import numpy as np\n'), ((13344, 13362), 'metagraph.tests.util.IdentityCompiler', 'IdentityCompiler', ([], {}), '()\n', (13360, 13362), False, 'from metagraph.tests.util import default_plugin_resolver, IdentityCompiler\n')] |
import numpy as np
from vimms.Evaluation import evaluate_simulated_env, EvaluationData
from vimms_gym.common import METHOD_RANDOM, METHOD_FULLSCAN, METHOD_TOPN, METHOD_PPO, METHOD_DQN
from vimms_gym.env import DDAEnv
from vimms_gym.policy import random_policy, fullscan_policy, topN_policy, get_ppo_action_probs
class Episode():
def __init__(self, initial_obs):
self.env = None
self.rewards = []
self.observations = [initial_obs]
self.actions = []
self.action_probs = []
self.infos = []
self.num_steps = 0
def add_step_data(self, action, action_probs, obs, reward, info):
self.actions.append(action)
self.action_probs.append(action_probs)
self.observations.append(obs)
self.rewards.append(reward)
self.infos.append(info)
self.num_steps += 1
def get_step_data(self, i):
return {
'state': self.observations[i],
'reward': self.rewards[i],
'action': self.actions[i],
'action_prob': self.action_probs[i],
'info': self.infos[i]
}
def evaluate_environment(self, env, intensity_threshold):
vimms_env = env.vimms_env
self.eval_data = EvaluationData(vimms_env)
self.eval_res = evaluate(vimms_env, intensity_threshold)
return self.eval_res
def get_total_rewards(self):
return np.sum(self.rewards)
def evaluate(env, intensity_threshold):
# env can be either a DDAEnv or a ViMMS' Environment object
try:
vimms_env = env.vimms_env
except AttributeError:
vimms_env = env
# call vimms codes to compute various statistics
vimms_env_res = evaluate_simulated_env(vimms_env)
count_fragmented = np.count_nonzero(vimms_env_res['times_fragmented'])
count_ms1 = len(vimms_env.controller.scans[1])
count_ms2 = len(vimms_env.controller.scans[2])
ms1_ms2_ratio = float(count_ms1) / count_ms2
efficiency = float(count_fragmented) / count_ms2
# get all base chemicals used as input to the mass spec
all_chems = set(
chem.get_original_parent() for chem in vimms_env.mass_spec.chemicals
)
# assume all base chemicals are unfragmented
fragmented_intensities = {chem: 0.0 for chem in all_chems}
# loop through ms2 scans, getting frag_events
for ms2_scan in vimms_env.controller.scans[2]:
frag_events = ms2_scan.fragevent
if frag_events is not None: # if a chemical has been fragmented ...
# get the frag event for this scan
# TODO: assume only 1 chemical has been fragmented
# works for DDA but not for DIA
event = frag_events[0]
# get the base chemical that was fragmented
base_chem = event.chem.get_original_parent()
# store the max intensity of fragmentation for this base chem
parent_intensity = event.parents_intensity[0]
fragmented_intensities[base_chem] = max(
parent_intensity, fragmented_intensities[base_chem])
TP = 0 # chemicals hit correctly (above threshold)
FP = 0 # chemicals hit incorrectly (below threshold)
FN = 0 # chemicals not hit
for chem in fragmented_intensities:
frag_int = fragmented_intensities[chem]
if frag_int > 0: # chemical was fragmented ...
if fragmented_intensities[chem] > (intensity_threshold * chem.max_intensity):
TP += 1 # above threshold
else:
FP += 1 # below threshold
else:
FN += 1 # chemical was not fragmented
# compute precision, recall, f1
try:
precision = TP / (TP + FP)
except ZeroDivisionError:
precision = 0.0
try:
recall = TP / (TP + FN)
except ZeroDivisionError:
precision = 0.0
try:
f1 = 2 * (recall * precision) / (recall + precision)
except ZeroDivisionError:
f1 = 0.0
eval_res = {
'coverage_prop': '%.3f' % vimms_env_res['coverage_proportion'][0],
'intensity_prop': '%.3f' % vimms_env_res['intensity_proportion'][0],
'ms1/ms2 ratio': '%.3f' % ms1_ms2_ratio,
'efficiency': '%.3f' % efficiency,
'TP': '%d' % TP,
'FP': '%d' % FP,
'FN': '%d' % FN,
'precision': '%.3f' % precision,
'recall': '%.3f' % recall,
'f1': '%.3f' % f1
}
return eval_res
def run_method(env_name, env_params, max_peaks, chem_list, method, out_dir,
N=10, min_ms1_intensity=5000, model=None,
print_eval=False, print_reward=False, mzml_prefix=None,
intensity_threshold=0.5):
if method in [METHOD_DQN, METHOD_PPO]:
assert model is not None
# to store all results across all loop of chem_list
all_episodic_results = []
for i in range(len(chem_list)):
chems = chem_list[i]
if print_reward:
print(f'\nEpisode {i} ({len(chems)} chemicals)')
env = DDAEnv(max_peaks, env_params)
obs = env.reset(chems=chems)
done = False
# lists to store episodic results
episode = Episode(obs)
while not done: # repeat until episode is done
# select an action depending on the observation and method
action, action_probs = pick_action(
method, obs, model, env.features, N, min_ms1_intensity)
# make one step through the simulation
obs, reward, done, info = env.step(action)
# store new episodic information
if obs is not None:
episode.add_step_data(action, action_probs, obs, reward, info)
if print_reward and episode.num_steps % 500 == 0:
print('steps\t', episode.num_steps, '\ttotal rewards\t', episode.get_total_rewards())
# if episode is finished, break
if done:
break
if print_reward:
print(
f'Finished after {episode.num_steps} timesteps with '
f'total reward {episode.get_total_rewards()}')
# save mzML and other info useful for evaluation of the ViMMS environment
if mzml_prefix is None:
out_file = '%s_%d.mzML' % (method, i)
else:
out_file = '%s_%s_%d.mzML' % (mzml_prefix, method, i)
env.write_mzML(out_dir, out_file)
# environment will be evaluated here
eval_res = episode.evaluate_environment(env, intensity_threshold)
if print_eval:
print(eval_res)
all_episodic_results.append(episode)
return all_episodic_results
def pick_action(method, obs, model, features, N, min_ms1_intensity):
action_probs = []
if method == METHOD_RANDOM:
action = random_policy(obs)
elif method == METHOD_FULLSCAN:
action = fullscan_policy(obs)
elif method == METHOD_TOPN:
action = topN_policy(obs, features, N, min_ms1_intensity)
elif method == METHOD_PPO:
action, _states = model.predict(obs, deterministic=True)
# action = best_ppo_policy(obs, model)
action_probs = get_ppo_action_probs(model, obs)
elif method == METHOD_DQN:
action, _states = model.predict(obs, deterministic=True)
return action, action_probs
| [
"vimms_gym.env.DDAEnv",
"vimms_gym.policy.topN_policy",
"vimms.Evaluation.EvaluationData",
"vimms_gym.policy.fullscan_policy",
"numpy.count_nonzero",
"numpy.sum",
"vimms_gym.policy.get_ppo_action_probs",
"vimms_gym.policy.random_policy",
"vimms.Evaluation.evaluate_simulated_env"
] | [((1707, 1740), 'vimms.Evaluation.evaluate_simulated_env', 'evaluate_simulated_env', (['vimms_env'], {}), '(vimms_env)\n', (1729, 1740), False, 'from vimms.Evaluation import evaluate_simulated_env, EvaluationData\n'), ((1764, 1815), 'numpy.count_nonzero', 'np.count_nonzero', (["vimms_env_res['times_fragmented']"], {}), "(vimms_env_res['times_fragmented'])\n", (1780, 1815), True, 'import numpy as np\n'), ((1243, 1268), 'vimms.Evaluation.EvaluationData', 'EvaluationData', (['vimms_env'], {}), '(vimms_env)\n', (1257, 1268), False, 'from vimms.Evaluation import evaluate_simulated_env, EvaluationData\n'), ((1412, 1432), 'numpy.sum', 'np.sum', (['self.rewards'], {}), '(self.rewards)\n', (1418, 1432), True, 'import numpy as np\n'), ((5018, 5047), 'vimms_gym.env.DDAEnv', 'DDAEnv', (['max_peaks', 'env_params'], {}), '(max_peaks, env_params)\n', (5024, 5047), False, 'from vimms_gym.env import DDAEnv\n'), ((6802, 6820), 'vimms_gym.policy.random_policy', 'random_policy', (['obs'], {}), '(obs)\n', (6815, 6820), False, 'from vimms_gym.policy import random_policy, fullscan_policy, topN_policy, get_ppo_action_probs\n'), ((6874, 6894), 'vimms_gym.policy.fullscan_policy', 'fullscan_policy', (['obs'], {}), '(obs)\n', (6889, 6894), False, 'from vimms_gym.policy import random_policy, fullscan_policy, topN_policy, get_ppo_action_probs\n'), ((6944, 6992), 'vimms_gym.policy.topN_policy', 'topN_policy', (['obs', 'features', 'N', 'min_ms1_intensity'], {}), '(obs, features, N, min_ms1_intensity)\n', (6955, 6992), False, 'from vimms_gym.policy import random_policy, fullscan_policy, topN_policy, get_ppo_action_probs\n'), ((7159, 7191), 'vimms_gym.policy.get_ppo_action_probs', 'get_ppo_action_probs', (['model', 'obs'], {}), '(model, obs)\n', (7179, 7191), False, 'from vimms_gym.policy import random_policy, fullscan_policy, topN_policy, get_ppo_action_probs\n')] |
import numpy as np
from ConfigSpace.hyperparameters import (CategoricalHyperparameter,
OrdinalHyperparameter, Constant,
UniformFloatHyperparameter,
UniformIntegerHyperparameter)
def get_types(config_space, instance_features=None):
"""TODO"""
# Extract types vector for rf from config space and the bounds
types = [0] * len(config_space.get_hyperparameters())
bounds = [(np.nan, np.nan)] * len(types)
for i, param in enumerate(config_space.get_hyperparameters()):
parents = config_space.get_parents_of(param.name)
if len(parents) == 0:
can_be_inactive = False
else:
can_be_inactive = True
if isinstance(param, (CategoricalHyperparameter)):
n_cats = len(param.choices)
if can_be_inactive:
n_cats = len(param.choices) + 1
types[i] = n_cats
bounds[i] = (int(n_cats), np.nan)
elif isinstance(param, (OrdinalHyperparameter)):
n_cats = len(param.sequence)
types[i] = 0
if can_be_inactive:
bounds[i] = (0, int(n_cats))
else:
bounds[i] = (0, int(n_cats) - 1)
elif isinstance(param, Constant):
# for constants we simply set types to 0 which makes it a numerical
# parameter
if can_be_inactive:
bounds[i] = (2, np.nan)
types[i] = 2
else:
bounds[i] = (0, np.nan)
types[i] = 0
# and we leave the bounds to be 0 for now
elif isinstance(param, UniformFloatHyperparameter):
# Are sampled on the unit hypercube thus the bounds
# are always 0.0, 1.0
if can_be_inactive:
bounds[i] = (-1.0, 1.0)
else:
bounds[i] = (0, 1.0)
elif isinstance(param, UniformIntegerHyperparameter):
if can_be_inactive:
bounds[i] = (-1.0, 1.0)
else:
bounds[i] = (0, 1.0)
elif not isinstance(param, (UniformFloatHyperparameter,
UniformIntegerHyperparameter,
OrdinalHyperparameter,
CategoricalHyperparameter)):
raise TypeError("Unknown hyperparameter type %s" % type(param))
if instance_features is not None:
types = types + [0] * instance_features.shape[1]
types = np.array(types, dtype=np.uint)
bounds = np.array(bounds, dtype=object)
return types, bounds | [
"numpy.array"
] | [((2608, 2638), 'numpy.array', 'np.array', (['types'], {'dtype': 'np.uint'}), '(types, dtype=np.uint)\n', (2616, 2638), True, 'import numpy as np\n'), ((2652, 2682), 'numpy.array', 'np.array', (['bounds'], {'dtype': 'object'}), '(bounds, dtype=object)\n', (2660, 2682), True, 'import numpy as np\n')] |
from unittest.mock import MagicMock
import pytest
import numpy as np
import scipy as sp
import scipy.stats
import tensorflow as tf
from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood
from decompose.tests.fixtures import device, dtype
from decompose.distributions.distribution import UpdateType
@pytest.fixture(scope="module",
params=[0, 1])
def f(request):
f = request.param
return(f)
@pytest.fixture(scope="module",
params=[UpdateType.ALL, UpdateType.ONLYLATENTS])
def updateType(request):
updateType = request.param
return(updateType)
def test_residuals(device, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
U = (tf.constant(np.random.normal(size=(K, M[0])).astype(npdtype)),
tf.constant(np.random.normal(size=(K, M[1])).astype(npdtype)))
noise = np.random.normal(size=M).astype(npdtype)
data = tf.matmul(tf.transpose(U[0]), U[1]) + tf.constant(noise)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
r = lh.residuals(U, data)
assert(r.dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
npr = sess.run(r)
assert(np.allclose(noise.flatten(), npr, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_loss(device, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
U = (tf.constant(np.random.normal(size=(K, M[0])).astype(npdtype)),
tf.constant(np.random.normal(size=(K, M[1])).astype(npdtype)))
noise = np.random.normal(size=M).astype(npdtype)
data = tf.matmul(tf.transpose(U[0]), U[1]) + tf.constant(noise)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
loss = lh.loss(U, data)
assert(loss.dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
nploss = sess.run(loss)
assert(np.allclose(np.sum(noise**2), nploss, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_llh(device, dtype):
npdtype = dtype.as_numpy_dtype
M, K, tau = (20, 30), 3, 0.1
U = (tf.constant(np.random.normal(size=(K, M[0])).astype(npdtype)),
tf.constant(np.random.normal(size=(K, M[1])).astype(npdtype)))
noise = np.random.normal(size=M).astype(npdtype)
data = tf.matmul(tf.transpose(U[0]), U[1]) + tf.constant(noise)
lh = Normal2dLikelihood(M=M, K=K, tau=tau, dtype=dtype)
lh.init(data=data)
llh = lh.llh(U, data)
assert(llh.dtype == dtype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
npllh = sess.run(llh)
llhgt = np.sum(sp.stats.norm(loc=0., scale=1./np.sqrt(tau)).logpdf(noise))
assert(np.allclose(llhgt, npllh, atol=1e-5, rtol=1e-5))
tf.reset_default_graph()
def test_prepVars(device, f, dtype):
    """prepVars must return A = data·Uᵀ and B = U·Uᵀ of the *other* factor, and alpha = tau."""
    np_dtype = dtype.as_numpy_dtype
    shape, rank, precision = (20, 30), 3, 0.1
    np_factors = tuple(np.random.normal(size=(rank, dim)).astype(np_dtype)
                       for dim in shape)
    factors = tuple(tf.constant(u) for u in np_factors)
    np_noise = np.random.normal(size=shape).astype(np_dtype)
    np_data = np.dot(np_factors[0].T, np_factors[1]) + np_noise
    data = tf.constant(np_data, dtype=dtype)
    lh = Normal2dLikelihood(M=shape, K=rank, tau=precision, dtype=dtype)
    lh.init(data=data)
    A, B, alpha = lh.prepVars(f, factors, data)
    assert A.dtype == dtype
    assert B.dtype == dtype
    assert alpha.dtype.base_dtype == dtype
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        np_A, np_B, np_alpha = sess.run([A, B, alpha])
        if f in (0, 1):
            # Updating factor f uses the data (transposed for f == 1) and the
            # other factor's Gram matrix.
            other = 1 - f
            target = np_data if f == 0 else np_data.T
            A_gt = np.dot(target, np_factors[other].T)
            B_gt = np.dot(np_factors[other], np_factors[other].T)
            assert np.allclose(A_gt, np_A, atol=1e-5, rtol=1e-5)
            assert np.allclose(B_gt, np_B, atol=1e-5, rtol=1e-5)
            assert np.allclose(precision, np_alpha, atol=1e-5, rtol=1e-5)
    tf.reset_default_graph()
def test_update(device, f, updateType, dtype):
    """update() must refresh the noise model only when updateType requests it."""
    np_dtype = dtype.as_numpy_dtype
    shape, rank, precision = (20, 30), 3, 0.1
    np_factors = tuple(np.random.normal(size=(rank, dim)).astype(np_dtype)
                       for dim in shape)
    factors = tuple(tf.constant(u) for u in np_factors)
    np_noise = np.random.normal(size=shape).astype(np_dtype)
    np_data = np.dot(np_factors[0].T, np_factors[1]) + np_noise
    data = tf.constant(np_data, dtype=dtype)
    lh = Normal2dLikelihood(M=shape, K=rank, tau=precision, updateType=updateType)
    lh.init(data=data)
    # Stub out the collaborators whose invocation we want to observe.
    lh.noiseDistribution.update = MagicMock()
    residuals = tf.ones_like(data)
    lh.residuals = MagicMock(return_value=residuals)
    lh.update(factors, data)
    if updateType == UpdateType.ALL:
        lh.residuals.assert_called_once()
        lh.noiseDistribution.update.assert_called_once()
    else:
        lh.residuals.assert_not_called()
        lh.noiseDistribution.update.assert_not_called()
    tf.reset_default_graph()
| [
"numpy.random.normal",
"numpy.allclose",
"tensorflow.reset_default_graph",
"numpy.sqrt",
"tensorflow.transpose",
"unittest.mock.MagicMock",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"numpy.sum",
"numpy.dot",
"tensorflow.constant",
"tensorflow.ones_like",
"pytest.fixtur... | [((318, 363), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': '[0, 1]'}), "(scope='module', params=[0, 1])\n", (332, 363), False, 'import pytest\n'), ((435, 514), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': '[UpdateType.ALL, UpdateType.ONLYLATENTS]'}), "(scope='module', params=[UpdateType.ALL, UpdateType.ONLYLATENTS])\n", (449, 514), False, 'import pytest\n'), ((991, 1041), 'decompose.likelihoods.normal2dLikelihood.Normal2dLikelihood', 'Normal2dLikelihood', ([], {'M': 'M', 'K': 'K', 'tau': 'tau', 'dtype': 'dtype'}), '(M=M, K=K, tau=tau, dtype=dtype)\n', (1009, 1041), False, 'from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood\n'), ((1309, 1333), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1331, 1333), True, 'import tensorflow as tf\n'), ((1709, 1759), 'decompose.likelihoods.normal2dLikelihood.Normal2dLikelihood', 'Normal2dLikelihood', ([], {'M': 'M', 'K': 'K', 'tau': 'tau', 'dtype': 'dtype'}), '(M=M, K=K, tau=tau, dtype=dtype)\n', (1727, 1759), False, 'from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood\n'), ((2038, 2062), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2060, 2062), True, 'import tensorflow as tf\n'), ((2437, 2487), 'decompose.likelihoods.normal2dLikelihood.Normal2dLikelihood', 'Normal2dLikelihood', ([], {'M': 'M', 'K': 'K', 'tau': 'tau', 'dtype': 'dtype'}), '(M=M, K=K, tau=tau, dtype=dtype)\n', (2455, 2487), False, 'from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood\n'), ((2775, 2824), 'numpy.allclose', 'np.allclose', (['llhgt', 'npllh'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(llhgt, npllh, atol=1e-05, rtol=1e-05)\n', (2786, 2824), True, 'import numpy as np\n'), ((2828, 2852), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2850, 2852), True, 'import tensorflow as tf\n'), ((3247, 3279), 
'tensorflow.constant', 'tf.constant', (['npdata'], {'dtype': 'dtype'}), '(npdata, dtype=dtype)\n', (3258, 3279), True, 'import tensorflow as tf\n'), ((3290, 3340), 'decompose.likelihoods.normal2dLikelihood.Normal2dLikelihood', 'Normal2dLikelihood', ([], {'M': 'M', 'K': 'K', 'tau': 'tau', 'dtype': 'dtype'}), '(M=M, K=K, tau=tau, dtype=dtype)\n', (3308, 3340), False, 'from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood\n'), ((4207, 4231), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4229, 4231), True, 'import tensorflow as tf\n'), ((4636, 4668), 'tensorflow.constant', 'tf.constant', (['npdata'], {'dtype': 'dtype'}), '(npdata, dtype=dtype)\n', (4647, 4668), True, 'import tensorflow as tf\n'), ((4679, 4739), 'decompose.likelihoods.normal2dLikelihood.Normal2dLikelihood', 'Normal2dLikelihood', ([], {'M': 'M', 'K': 'K', 'tau': 'tau', 'updateType': 'updateType'}), '(M=M, K=K, tau=tau, updateType=updateType)\n', (4697, 4739), False, 'from decompose.likelihoods.normal2dLikelihood import Normal2dLikelihood\n'), ((4797, 4808), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4806, 4808), False, 'from unittest.mock import MagicMock\n'), ((4825, 4843), 'tensorflow.ones_like', 'tf.ones_like', (['data'], {}), '(data)\n', (4837, 4843), True, 'import tensorflow as tf\n'), ((4863, 4896), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'residuals'}), '(return_value=residuals)\n', (4872, 4896), False, 'from unittest.mock import MagicMock\n'), ((5169, 5193), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5191, 5193), True, 'import tensorflow as tf\n'), ((962, 980), 'tensorflow.constant', 'tf.constant', (['noise'], {}), '(noise)\n', (973, 980), True, 'import tensorflow as tf\n'), ((1136, 1148), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1146, 1148), True, 'import tensorflow as tf\n'), ((1680, 1698), 'tensorflow.constant', 'tf.constant', (['noise'], {}), '(noise)\n', 
(1691, 1698), True, 'import tensorflow as tf\n'), ((1855, 1867), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1865, 1867), True, 'import tensorflow as tf\n'), ((1985, 2003), 'numpy.sum', 'np.sum', (['(noise ** 2)'], {}), '(noise ** 2)\n', (1991, 2003), True, 'import numpy as np\n'), ((2408, 2426), 'tensorflow.constant', 'tf.constant', (['noise'], {}), '(noise)\n', (2419, 2426), True, 'import tensorflow as tf\n'), ((2580, 2592), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2590, 2592), True, 'import tensorflow as tf\n'), ((3091, 3110), 'tensorflow.constant', 'tf.constant', (['npU[0]'], {}), '(npU[0])\n', (3102, 3110), True, 'import tensorflow as tf\n'), ((3112, 3131), 'tensorflow.constant', 'tf.constant', (['npU[1]'], {}), '(npU[1])\n', (3123, 3131), True, 'import tensorflow as tf\n'), ((3201, 3225), 'numpy.dot', 'np.dot', (['npU[0].T', 'npU[1]'], {}), '(npU[0].T, npU[1])\n', (3207, 3225), True, 'import numpy as np\n'), ((3520, 3532), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3530, 3532), True, 'import tensorflow as tf\n'), ((3676, 3700), 'numpy.dot', 'np.dot', (['npdata', 'npU[1].T'], {}), '(npdata, npU[1].T)\n', (3682, 3700), True, 'import numpy as np\n'), ((3715, 3739), 'numpy.dot', 'np.dot', (['npU[1]', 'npU[1].T'], {}), '(npU[1], npU[1].T)\n', (3721, 3739), True, 'import numpy as np\n'), ((3755, 3800), 'numpy.allclose', 'np.allclose', (['Agt', 'npA'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(Agt, npA, atol=1e-05, rtol=1e-05)\n', (3766, 3800), True, 'import numpy as np\n'), ((3815, 3860), 'numpy.allclose', 'np.allclose', (['Bgt', 'npB'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(Bgt, npB, atol=1e-05, rtol=1e-05)\n', (3826, 3860), True, 'import numpy as np\n'), ((3875, 3924), 'numpy.allclose', 'np.allclose', (['tau', 'npalpha'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(tau, npalpha, atol=1e-05, rtol=1e-05)\n', (3886, 3924), True, 'import numpy as np\n'), ((3953, 3979), 'numpy.dot', 'np.dot', (['npdata.T', 'npU[0].T'], {}), 
'(npdata.T, npU[0].T)\n', (3959, 3979), True, 'import numpy as np\n'), ((3994, 4018), 'numpy.dot', 'np.dot', (['npU[0]', 'npU[0].T'], {}), '(npU[0], npU[0].T)\n', (4000, 4018), True, 'import numpy as np\n'), ((4034, 4079), 'numpy.allclose', 'np.allclose', (['Agt', 'npA'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(Agt, npA, atol=1e-05, rtol=1e-05)\n', (4045, 4079), True, 'import numpy as np\n'), ((4094, 4139), 'numpy.allclose', 'np.allclose', (['Bgt', 'npB'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(Bgt, npB, atol=1e-05, rtol=1e-05)\n', (4105, 4139), True, 'import numpy as np\n'), ((4154, 4203), 'numpy.allclose', 'np.allclose', (['tau', 'npalpha'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(tau, npalpha, atol=1e-05, rtol=1e-05)\n', (4165, 4203), True, 'import numpy as np\n'), ((4480, 4499), 'tensorflow.constant', 'tf.constant', (['npU[0]'], {}), '(npU[0])\n', (4491, 4499), True, 'import tensorflow as tf\n'), ((4501, 4520), 'tensorflow.constant', 'tf.constant', (['npU[1]'], {}), '(npU[1])\n', (4512, 4520), True, 'import tensorflow as tf\n'), ((4590, 4614), 'numpy.dot', 'np.dot', (['npU[0].T', 'npU[1]'], {}), '(npU[0].T, npU[1])\n', (4596, 4614), True, 'import numpy as np\n'), ((872, 896), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'M'}), '(size=M)\n', (888, 896), True, 'import numpy as np\n'), ((934, 952), 'tensorflow.transpose', 'tf.transpose', (['U[0]'], {}), '(U[0])\n', (946, 952), True, 'import tensorflow as tf\n'), ((1175, 1208), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1206, 1208), True, 'import tensorflow as tf\n'), ((1590, 1614), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'M'}), '(size=M)\n', (1606, 1614), True, 'import numpy as np\n'), ((1652, 1670), 'tensorflow.transpose', 'tf.transpose', (['U[0]'], {}), '(U[0])\n', (1664, 1670), True, 'import tensorflow as tf\n'), ((1894, 1927), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), 
'()\n', (1925, 1927), True, 'import tensorflow as tf\n'), ((2318, 2342), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'M'}), '(size=M)\n', (2334, 2342), True, 'import numpy as np\n'), ((2380, 2398), 'tensorflow.transpose', 'tf.transpose', (['U[0]'], {}), '(U[0])\n', (2392, 2398), True, 'import tensorflow as tf\n'), ((2619, 2652), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2650, 2652), True, 'import tensorflow as tf\n'), ((3147, 3171), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'M'}), '(size=M)\n', (3163, 3171), True, 'import numpy as np\n'), ((3559, 3592), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3590, 3592), True, 'import tensorflow as tf\n'), ((4536, 4560), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'M'}), '(size=M)\n', (4552, 4560), True, 'import numpy as np\n'), ((2971, 3003), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[0])'}), '(size=(K, M[0]))\n', (2987, 3003), True, 'import numpy as np\n'), ((3032, 3064), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[1])'}), '(size=(K, M[1]))\n', (3048, 3064), True, 'import numpy as np\n'), ((4360, 4392), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[0])'}), '(size=(K, M[0]))\n', (4376, 4392), True, 'import numpy as np\n'), ((4421, 4453), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[1])'}), '(size=(K, M[1]))\n', (4437, 4453), True, 'import numpy as np\n'), ((737, 769), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[0])'}), '(size=(K, M[0]))\n', (753, 769), True, 'import numpy as np\n'), ((809, 841), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[1])'}), '(size=(K, M[1]))\n', (825, 841), True, 'import numpy as np\n'), ((1455, 1487), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[0])'}), '(size=(K, M[0]))\n', (1471, 1487), True, 'import numpy as np\n'), 
((1527, 1559), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[1])'}), '(size=(K, M[1]))\n', (1543, 1559), True, 'import numpy as np\n'), ((2183, 2215), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[0])'}), '(size=(K, M[0]))\n', (2199, 2215), True, 'import numpy as np\n'), ((2255, 2287), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(K, M[1])'}), '(size=(K, M[1]))\n', (2271, 2287), True, 'import numpy as np\n'), ((2735, 2747), 'numpy.sqrt', 'np.sqrt', (['tau'], {}), '(tau)\n', (2742, 2747), True, 'import numpy as np\n')] |
import datetime
import time
import pandas as pd
import pandas_ta as ta
from datetime import timedelta
import numpy as np
from alpaca_trade_api.rest import REST
# Alpaca credentials -- placeholders; substitute a real key pair before running.
# NOTE(review): credentials should come from environment variables rather than
# source code in anything beyond a toy script.
APCA_API_KEY_ID = 'replace with yours'
APCA_API_SECRET_KEY = 'replace with yours'
APCA_API_BASE_URL = r'https://paper-api.alpaca.markets' # paper trading
class market_scalper():
    '''Scalp-trading bot built on the Alpaca REST API.

    Screens the daily chart for globally trending tickers, refines the list on
    60-minute and 15-minute bars (SMA trend + CCI crossings), and places limit
    buy/sell orders. All state is kept in plain lists/dicts on the instance.
    '''
    def __init__(self):
        # REST client against the configured (paper-trading) endpoint; all
        # market data and order routing goes through this handle.
        self.api = REST(key_id=APCA_API_KEY_ID, secret_key=APCA_API_SECRET_KEY, base_url=APCA_API_BASE_URL)
        self.all_tickers = []        # global watch list (daily-chart uptrends)
        self.trending_tickers = []   # subset trending on the hourly chart + current holdings
        self.buyable_tickers = []    # CCI buy signals not currently owned
        self.sellable_tickers = []   # CCI sell signals among current holdings
        self.price_data = {}         # per-symbol {'buy', 'sell', 'profit'} limit prices
        self.current_holdings = []   # symbols of open positions
    def __enter__(self):
        '''Permit WITH instantiation'''
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        '''Cleanup when object is destroyed'''
        return
    def close(self):
        '''Liquidate all holdings and tear the object down.'''
        self._liquidate_holdings()
        self.__exit__(None, None, None)
        return
    def update(self):
        '''Run one full scan/trade cycle; rebuilds the global list if empty.'''
        if self.all_tickers == []:
            self._get_global_trending_tickers()
            # Still empty means nothing passed the global screen -- skip cycle.
            if self.all_tickers == []:
                return
        self._get_local_trending_stocks()
        self._get_tradeable_stocks()
        self._sell_stocks()
        self._buy_stocks()
        return
    def update_global_ticker_list(self):
        '''Force a rebuild of the global (daily-chart) trending list.'''
        self._get_global_trending_tickers()
        return
    def is_market_open(self):
        '''Use Alpaca Api to determine if market is open for the day'''
        return self.api.get_clock().is_open
    def __chunk_list(self, in_list, size):
        '''Break a stock list into queryable chunks of length size.'''
        return [in_list[i:i + size] for i in range(0, len(in_list), size)]
    def __is_mo_trend_pass(self, df_60_min):
        '''Determine whether hourly data trends up: close > 20 SMA, 20 SMA > 50 SMA,
        and the last close is not lower than the prior close.'''
        sma_20 = ta.sma(df_60_min["close"], length=20)
        sma_50 = ta.sma(df_60_min["close"], length=50)
        last_sma_20 = sma_20.array[-1]
        last_sma_50 = sma_50.array[-1]
        last_close = df_60_min['close'][-1]
        prior_close = df_60_min['close'][-2]
        sma_uptrend = True if (last_sma_50 < last_sma_20) else False
        price_uptrend = True if (last_sma_20 < last_close) else False
        candle_uptrend = True if (prior_close <= last_close) else False
        # All three conditions must hold.
        return True if False not in [sma_uptrend, price_uptrend, candle_uptrend] else False
    def __is_stock_buyable(self, df_15_min):
        '''Buy signal: 15-min CCI(20) crosses up through -100 (or through +100).'''
        cci = df_15_min.ta.cci(length=20)
        cci_lst = list(cci.array)
        if (cci_lst[-2] < -100 and cci_lst[-1] > -100) or (cci_lst[-2] < 100 and cci_lst[-1] > 100):
            return True
        else:
            return False
    def __is_stock_sellable(self, df_15_min):
        '''Sell signal: 15-min CCI(20) crosses down through +100 (or through -100).'''
        cci = df_15_min.ta.cci(length=20)
        cci_lst = list(cci.array)
        if (cci_lst[-2] > 100 and cci_lst[-1] < 100) or (cci_lst[-2] > -100 and cci_lst[-1] < -100):
            return True
        else:
            return False
    def __calc_stop_limit(self, df_15_min):
        '''Target sell price: 0.5% above the last close, rounded UP to the cent.'''
        buy_price = df_15_min['close'][-1]
        stop_limit = buy_price * (1.005)
        return np.ceil(stop_limit * 100) / 100
    def __is_pass_stop_limit(self, df_15_min):
        # Sanity check: the price plus 0.1% slippage must still sit below the
        # stop-limit target (true for almost any close given the 0.5% markup).
        buy_price = df_15_min['close'][-1]
        stop_limit = self.__calc_stop_limit(df_15_min)
        return True if buy_price * 1.001 <= stop_limit else False
    def _get_local_trending_stocks(self):
        '''Filter all_tickers down to those whose hourly chart (resampled from
        15-min bars over ~40 days) passes __is_mo_trend_pass; current holdings
        are always kept so they stay monitored.'''
        current_date = datetime.datetime.now()
        start_time = (current_date - timedelta(days=40)).strftime('%Y-%m-%d')
        end_time = (current_date).strftime('%Y-%m-%d')
        trending_tickers = []
        symbol_set = self.__chunk_list(self.all_tickers, 100)
        print('\nFinding Trending Stocks...')
        for chunk in symbol_set:
            # One batched bar request per 100 symbols to limit API calls.
            all_data = self.api.get_barset(chunk, timeframe='15Min', start=start_time, end=end_time, limit=600).df
            for symbol in chunk:
                print(f'Symbol: {symbol}')
                df_15_min = all_data[symbol].dropna()
                if df_15_min.shape[0] == 0:
                    continue
                # Resample the 15-min bars into hourly OHLCV candles.
                df_calc = df_15_min.resample("1H", level=0).agg([('open', 'first'), ('close', 'last'), ('high', 'max'), ('low', 'min'), ('volume', 'sum')]).dropna()
                df_60_min = pd.DataFrame({'open': df_calc['open']['open'],'high': df_calc['high']['high'], 'low': df_calc['low']['low'], 'close': df_calc['close']['close'], 'volume': df_calc['volume']['volume']}).dropna()
                if self.__is_mo_trend_pass(df_60_min):
                    trending_tickers.append(symbol)
        df = pd.DataFrame({'Symbols': trending_tickers})
        df.to_csv('short_ticker_list.csv')
        self.current_holdings = [position.symbol for position in self.api.list_positions()]
        self.trending_tickers = list(set(trending_tickers + self.current_holdings)) # add owned tickers so they are constantly monitored
        return
    def _get_tradeable_stocks(self):
        '''Split trending_tickers into buyable (CCI buy signal, not owned) and
        sellable (CCI sell signal, owned); record limit prices and log buys.'''
        buyable_tickers = []
        sellable_tickers = []
        buy_price = []
        sell_price = []
        profit = []
        price_data = {}
        current_date = datetime.datetime.now()
        start_time = (current_date - timedelta(days=40)).strftime('%Y-%m-%d')
        end_time = (current_date).strftime('%Y-%m-%d')
        self.current_holdings = [position.symbol for position in self.api.list_positions()]
        symbol_set = self.__chunk_list(self.trending_tickers, 100)
        print('Finding Tradeable Stocks...')
        for chunk in symbol_set:
            all_data = self.api.get_barset(chunk, timeframe='15Min', start=start_time, end=end_time, limit=600).df
            for symbol in chunk:
                print(f'Symbol: {symbol}')
                df_15_min = all_data[symbol].dropna()
                if df_15_min.shape[0] == 0:
                    continue
                if symbol not in self.current_holdings and self.__is_stock_buyable(df_15_min):
                    if self.__is_pass_stop_limit(df_15_min):
                        buyable_tickers.append(symbol)
                        buy_price.append(df_15_min['close'][-1])
                        sell_price.append(self.__calc_stop_limit(df_15_min))
                        profit.append(sell_price[-1] - buy_price[-1])
                        price_data[symbol] = {'buy': buy_price[-1], 'sell': sell_price[-1], 'profit': profit[-1]}
                elif symbol in self.current_holdings and self.__is_stock_sellable(df_15_min):
                    sellable_tickers.append(symbol)
                else:
                    continue
        self.price_data = price_data
        current_time = datetime.datetime.now().isoformat()
        print(f"\nFound {len(buyable_tickers)} buyable tickers and {len(sellable_tickers)} sellable tickers\n")
        # Append this cycle's buy candidates to the running transaction log.
        with open("transactions.csv", "a") as ofile:
            for i in range(len(buyable_tickers)):
                ofile.write(f'{current_time},{buyable_tickers[i]},{buy_price[i]},{sell_price[i]},{profit[i]}\n')
        self.buyable_tickers = buyable_tickers
        self.sellable_tickers = sellable_tickers
        return
    def _buy_stocks(self):
        '''Place limit buy orders for buyable_tickers, splitting buying power
        evenly (capped at $5000 per ticker) and skipping symbols already
        owned or with open orders.'''
        if len(self.buyable_tickers) == 0:
            return
        account = self.api.get_account()
        buying_power = account.buying_power
        open_orders = [order.symbol for order in self.api.list_orders(status='open')]
        current_positions = [position.symbol for position in self.api.list_positions()]
        owned_tickers = list(set(current_positions + open_orders))
        # Iterate in reverse so pop(i) does not shift unvisited indices.
        for i in reversed(range(len(self.buyable_tickers))):
            if self.buyable_tickers[i] in owned_tickers:
                self.buyable_tickers.pop(i)
        if len(self.buyable_tickers) == 0:
            return
        # buying_power arrives as a string from the API; convert before dividing.
        max_buy_per_ticker = min([float(buying_power)/len(self.buyable_tickers), 5000])
        non_buyable = 0
        for ticker in self.buyable_tickers:
            buy_price = float(self.api.get_latest_quote(ticker).ap)
            # Fall back to the cached scan price when the live ask is 0/missing.
            buy_price = buy_price if buy_price > 0.00 else self.price_data[ticker]['buy']
            # Pad by a cent, then round DOWN to the cent.
            buy_price = np.floor((buy_price + 0.01)*100)/100
            sell_price = self.price_data[ticker]['sell']
            qty = int(np.floor(max_buy_per_ticker/buy_price))
            # Only buy when at least one share fits and >1 cent of profit remains.
            if qty > 0 and np.floor((sell_price - buy_price)*100)/100 > 0.01:
                try:
                    # submiting stop limit at once:
                    # r = self.api.submit_order(side="buy", symbol=ticker, type="limit", limit_price=buy_price, qty=qty, time_in_force="day", order_class="oto", take_profit={"limit_price": sell_price})
                    # simple buy
                    r = self.api.submit_order(side="buy", symbol=ticker, type="limit", limit_price=buy_price, qty=qty, time_in_force="day")
                except Exception as e:
                    # Best-effort: a rejected order should not abort the batch.
                    pass
            else:
                # Redistribute this ticker's unused allocation over the rest.
                non_buyable += 1
                max_buy_per_ticker = float(buying_power)/(len(self.buyable_tickers)-non_buyable)
        return
    def _sell_stocks(self):
        '''Place limit sell orders (a cent under the live ask) for every open
        position flagged in sellable_tickers.'''
        owned_tickers = [(position.symbol, position.qty) for position in self.api.list_positions()]
        if len(owned_tickers) == 0:
            return
        for ticker, qty in owned_tickers:
            if ticker not in self.sellable_tickers:
                continue
            try:
                sell_price = float(self.api.get_latest_quote(ticker).ap)
                # A cent below the ask, rounded down, to encourage a quick fill.
                sell_price = np.floor((sell_price - 0.01) * 100) / 100
                r = self.api.submit_order(side="sell", symbol=ticker, type="limit", limit_price=sell_price, qty=qty, time_in_force="day")
            except Exception as e:
                # Best-effort: a rejected order should not abort the batch.
                pass
        return
    def _liquidate_holdings(self):
        '''Cancel all open orders and attempt to sell every open position.'''
        owned_tickers = [position.symbol for position in self.api.list_positions()]
        self.sellable_tickers = owned_tickers
        self.api.cancel_all_orders()
        self._sell_stocks()
        return
    def _get_global_trending_tickers(self):
        '''Scan the full ticker universe on daily bars and keep symbols that are
        in an SMA uptrend with sufficient slope and daily range; results are
        written to ticker_list.csv and stored in self.all_tickers.'''
        def __linear_fit(x, y):
            '''Least-squares linear fit. Returns tuple (slope, intercept).'''
            x = np.array(x)
            y = np.array(y)
            slope = (((np.average(x) * np.average(y)) - np.average(x * y)) / (
                    (np.average(x) * np.average(x)) - np.average(x * x)))
            intercept = np.average(y) - slope * np.average(x)
            return slope, intercept
        def __calculate_avg_dialy_range_percent(df, look_back_days):
            '''Average daily (high-low)/low percentage over the last N days.'''
            highs = np.array(df['high'][(look_back_days * -1):])
            lows = np.array(df['low'][(look_back_days * -1):])
            delta = (highs - lows) / lows * 100
            return np.average(delta)
        current_date = datetime.datetime.now()
        start_time = (current_date - timedelta(days=40)).strftime('%Y-%m-%d')
        end_time = (current_date).strftime('%Y-%m-%d')
        trending_tickers = []
        trending_slope = []
        trending_range = []
        col_list = ["Symbols"]
        df = pd.read_csv("full_ticker_list.csv", usecols=col_list)
        all_tickers = df['Symbols'].tolist()
        symbol_set = [all_tickers[i:i + 100] for i in range(0, len(all_tickers), 100)]
        print('\nFinding Trending Stocks...')
        for chunk in symbol_set:
            all_data = self.api.get_barset(chunk, timeframe='day', start=start_time, end=end_time, limit=100).df
            for symbol in chunk:
                print(f'Symbol: {symbol}')
                df_1_day = all_data[symbol].dropna()
                if df_1_day.shape[0] == 0:
                    continue
                sma_20 = ta.sma(df_1_day["close"], length=20)
                sma_50 = ta.sma(df_1_day["close"], length=50)
                last_sma_20 = sma_20.array[-1]
                last_sma_50 = sma_50.array[-1]
                last_close = df_1_day['close'][-1]
                slope_percent_per_day = \
                    __linear_fit([1, 2, 3, 4, 5], (df_1_day['close'][-5:] / df_1_day['close'][-5]).array)[0] * 100 # normalize price to percent for global comparison
                average_daily_range = __calculate_avg_dialy_range_percent(df_1_day, 5)
                sma_uptrend = True if (last_sma_50 < last_sma_20) else False
                price_uptrend = True if (last_sma_20 < last_close) else False
                # SMAs must be separating (20 pulling away from 50), not converging.
                divergent_moving_avg = True if (sma_20.array[-1] - sma_50.array[-1]) > (
                        sma_20.array[-2] - sma_50.array[-2]) else False
                five_day_slope_above_co = True if slope_percent_per_day >= 0.15 else False
                average_daily_range_above_co = True if average_daily_range >= 2.5 else False
                if sma_uptrend and price_uptrend and divergent_moving_avg and five_day_slope_above_co and average_daily_range_above_co:
                    trending_tickers.append(symbol)
                    trending_slope.append(slope_percent_per_day)
                    trending_range.append(average_daily_range)
        return_dict = {'Symbols': trending_tickers, '%_per_day': trending_slope, 'Average_%daily_range': trending_range}
        df = pd.DataFrame(return_dict)
        df.sort_values('%_per_day', ascending=False, inplace=True)
        df.reset_index(drop=True, inplace=True)
        df.to_csv('ticker_list.csv')
        self.all_tickers = trending_tickers
        return
def run_bot():
    """Run the scalper for one trading day.

    Polls the Alpaca clock in a loop: trades every 5 minutes while the market
    is open, waits while it is closed, and liquidates + exits once the session
    is over (>= 19:30 UTC / 15:30 EST).
    """
    scalper = market_scalper()
    while True:
        now_UTC = datetime.datetime.utcnow()  # 09:30EST = 13:30UTC ; 16:00EST = 20:00UTC
        market_open = scalper.api.get_clock().is_open
        if market_open:
            scalper.update()
            print('5 min pause...')
            time.sleep(300)
        elif now_UTC.hour + (now_UTC.minute / 60) >= 19.5:
            # Session over: liquidate holdings and stop for the day.
            print('Market Closed: cleaning up...')
            scalper.close()
            break
        else:
            # Market closed but the session has not ended (pre-open, holiday,
            # or a halt). The original code had no branch for a closed market
            # between 13:59 and 19:30 UTC, which busy-spun the API with no
            # sleep; this fallback always backs off for 60s.
            print('Market not open, trying again in 60s')
            time.sleep(60)
    print('Script done for the day, exiting now.')
    return
if __name__ == '__main__':
    print('start')
    run_bot()  # blocks until the market session ends for the day
    print('complete...')
| [
"numpy.ceil",
"alpaca_trade_api.rest.REST",
"pandas.read_csv",
"datetime.datetime.utcnow",
"numpy.average",
"numpy.floor",
"time.sleep",
"datetime.datetime.now",
"pandas_ta.sma",
"numpy.array",
"pandas.DataFrame",
"datetime.timedelta"
] | [((422, 515), 'alpaca_trade_api.rest.REST', 'REST', ([], {'key_id': 'APCA_API_KEY_ID', 'secret_key': 'APCA_API_SECRET_KEY', 'base_url': 'APCA_API_BASE_URL'}), '(key_id=APCA_API_KEY_ID, secret_key=APCA_API_SECRET_KEY, base_url=\n APCA_API_BASE_URL)\n', (426, 515), False, 'from alpaca_trade_api.rest import REST\n'), ((2017, 2054), 'pandas_ta.sma', 'ta.sma', (["df_60_min['close']"], {'length': '(20)'}), "(df_60_min['close'], length=20)\n", (2023, 2054), True, 'import pandas_ta as ta\n'), ((2072, 2109), 'pandas_ta.sma', 'ta.sma', (["df_60_min['close']"], {'length': '(50)'}), "(df_60_min['close'], length=50)\n", (2078, 2109), True, 'import pandas_ta as ta\n'), ((3876, 3899), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3897, 3899), False, 'import datetime\n'), ((5030, 5073), 'pandas.DataFrame', 'pd.DataFrame', (["{'Symbols': trending_tickers}"], {}), "({'Symbols': trending_tickers})\n", (5042, 5073), True, 'import pandas as pd\n'), ((5654, 5677), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5675, 5677), False, 'import datetime\n'), ((11422, 11445), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11443, 11445), False, 'import datetime\n'), ((11710, 11763), 'pandas.read_csv', 'pd.read_csv', (['"""full_ticker_list.csv"""'], {'usecols': 'col_list'}), "('full_ticker_list.csv', usecols=col_list)\n", (11721, 11763), True, 'import pandas as pd\n'), ((13808, 13833), 'pandas.DataFrame', 'pd.DataFrame', (['return_dict'], {}), '(return_dict)\n', (13820, 13833), True, 'import pandas as pd\n'), ((14128, 14154), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (14152, 14154), False, 'import datetime\n'), ((3507, 3532), 'numpy.ceil', 'np.ceil', (['(stop_limit * 100)'], {}), '(stop_limit * 100)\n', (3514, 3532), True, 'import numpy as np\n'), ((10743, 10754), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (10751, 10754), True, 'import numpy as np\n'), ((10771, 10782), 'numpy.array', 
'np.array', (['y'], {}), '(y)\n', (10779, 10782), True, 'import numpy as np\n'), ((11205, 11247), 'numpy.array', 'np.array', (["df['high'][look_back_days * -1:]"], {}), "(df['high'][look_back_days * -1:])\n", (11213, 11247), True, 'import numpy as np\n'), ((11269, 11310), 'numpy.array', 'np.array', (["df['low'][look_back_days * -1:]"], {}), "(df['low'][look_back_days * -1:])\n", (11277, 11310), True, 'import numpy as np\n'), ((11380, 11397), 'numpy.average', 'np.average', (['delta'], {}), '(delta)\n', (11390, 11397), True, 'import numpy as np\n'), ((14355, 14370), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (14365, 14370), False, 'import time\n'), ((7162, 7185), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7183, 7185), False, 'import datetime\n'), ((8618, 8652), 'numpy.floor', 'np.floor', (['((buy_price + 0.01) * 100)'], {}), '((buy_price + 0.01) * 100)\n', (8626, 8652), True, 'import numpy as np\n'), ((8734, 8774), 'numpy.floor', 'np.floor', (['(max_buy_per_ticker / buy_price)'], {}), '(max_buy_per_ticker / buy_price)\n', (8742, 8774), True, 'import numpy as np\n'), ((10964, 10977), 'numpy.average', 'np.average', (['y'], {}), '(y)\n', (10974, 10977), True, 'import numpy as np\n'), ((12317, 12353), 'pandas_ta.sma', 'ta.sma', (["df_1_day['close']"], {'length': '(20)'}), "(df_1_day['close'], length=20)\n", (12323, 12353), True, 'import pandas_ta as ta\n'), ((12379, 12415), 'pandas_ta.sma', 'ta.sma', (["df_1_day['close']"], {'length': '(50)'}), "(df_1_day['close'], length=50)\n", (12385, 12415), True, 'import pandas_ta as ta\n'), ((14595, 14609), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (14605, 14609), False, 'import time\n'), ((3937, 3955), 'datetime.timedelta', 'timedelta', ([], {'days': '(40)'}), '(days=40)\n', (3946, 3955), False, 'from datetime import timedelta\n'), ((5715, 5733), 'datetime.timedelta', 'timedelta', ([], {'days': '(40)'}), '(days=40)\n', (5724, 5733), False, 'from datetime import timedelta\n'), 
((9951, 9986), 'numpy.floor', 'np.floor', (['((sell_price - 0.01) * 100)'], {}), '((sell_price - 0.01) * 100)\n', (9959, 9986), True, 'import numpy as np\n'), ((10839, 10856), 'numpy.average', 'np.average', (['(x * y)'], {}), '(x * y)\n', (10849, 10856), True, 'import numpy as np\n'), ((10920, 10937), 'numpy.average', 'np.average', (['(x * x)'], {}), '(x * x)\n', (10930, 10937), True, 'import numpy as np\n'), ((10988, 11001), 'numpy.average', 'np.average', (['x'], {}), '(x)\n', (10998, 11001), True, 'import numpy as np\n'), ((11483, 11501), 'datetime.timedelta', 'timedelta', ([], {'days': '(40)'}), '(days=40)\n', (11492, 11501), False, 'from datetime import timedelta\n'), ((4716, 4911), 'pandas.DataFrame', 'pd.DataFrame', (["{'open': df_calc['open']['open'], 'high': df_calc['high']['high'], 'low':\n df_calc['low']['low'], 'close': df_calc['close']['close'], 'volume':\n df_calc['volume']['volume']}"], {}), "({'open': df_calc['open']['open'], 'high': df_calc['high'][\n 'high'], 'low': df_calc['low']['low'], 'close': df_calc['close'][\n 'close'], 'volume': df_calc['volume']['volume']})\n", (4728, 4911), True, 'import pandas as pd\n'), ((8801, 8841), 'numpy.floor', 'np.floor', (['((sell_price - buy_price) * 100)'], {}), '((sell_price - buy_price) * 100)\n', (8809, 8841), True, 'import numpy as np\n'), ((10806, 10819), 'numpy.average', 'np.average', (['x'], {}), '(x)\n', (10816, 10819), True, 'import numpy as np\n'), ((10822, 10835), 'numpy.average', 'np.average', (['y'], {}), '(y)\n', (10832, 10835), True, 'import numpy as np\n'), ((10887, 10900), 'numpy.average', 'np.average', (['x'], {}), '(x)\n', (10897, 10900), True, 'import numpy as np\n'), ((10903, 10916), 'numpy.average', 'np.average', (['x'], {}), '(x)\n', (10913, 10916), True, 'import numpy as np\n')] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval utils"""
import io as sysio
import math
import numpy as np
from numba import cuda
from numba import float32 as numba_float32
from numba import jit as numba_jit
@numba_jit(nopython=True)
def div_up(m, n):
    """Ceiling integer division of m by n (for positive n)."""
    quotient = m // n
    has_remainder = m % n > 0
    return quotient + (1 if has_remainder else 0)
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
    """Signed area of triangle (a, b, c): half the 2D cross product of (a-c) and (b-c)."""
    acx = a[0] - c[0]
    acy = a[1] - c[1]
    bcx = b[0] - c[0]
    bcy = b[1] - c[1]
    return (acx * bcy - acy * bcx) / 2.0
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
    """Area of a convex polygon (interleaved x,y points) by fan triangulation.

    Triangulates from the first vertex and sums absolute triangle areas.
    """
    total = 0.0
    for idx in range(num_of_inter - 2):
        p1 = int_pts[2 * idx + 2:2 * idx + 4]
        p2 = int_pts[2 * idx + 4:2 * idx + 6]
        total += abs(trangle_area(int_pts[:2], p1, p2))
    return total
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
    """Sort polygon vertices in place into a consistent angular order.

    int_pts holds interleaved (x, y) pairs; num_of_inter is the vertex
    count.  Each vertex is keyed by a monotonic proxy of its angle around
    the centroid, then ordered with an insertion sort (cheap for the small
    vertex counts produced by quadrilateral intersection).
    """
    if num_of_inter > 0:
        # Centroid of the polygon, used as the angular reference point.
        center = cuda.local.array((2,), dtype=numba_float32)
        center[:] = 0.0
        for i in range(num_of_inter):
            center[0] += int_pts[2 * i]
            center[1] += int_pts[2 * i + 1]
        center[0] /= num_of_inter
        center[1] /= num_of_inter
        v = cuda.local.array((2,), dtype=numba_float32)
        vs = cuda.local.array((16,), dtype=numba_float32)
        for i in range(num_of_inter):
            # Unit vector from the centroid to this vertex.
            v[0] = int_pts[2 * i] - center[0]
            v[1] = int_pts[2 * i + 1] - center[1]
            d = math.sqrt(v[0] * v[0] + v[1] * v[1])
            v[0] = v[0] / d
            v[1] = v[1] / d
            # Collapse the direction into a single sortable scalar: the x
            # component for the upper half-plane, reflected to -2 - x for
            # the lower half-plane (avoids calling atan2 on the device).
            if v[1] < 0:
                v[0] = -2 - v[0]
            vs[i] = v[0]
        # Insertion sort on the angle keys, carrying the (x, y) pairs along.
        for i in range(1, num_of_inter):
            if vs[i - 1] > vs[i]:
                temp = vs[i]
                tx = int_pts[2 * i]
                ty = int_pts[2 * i + 1]
                j = i
                while j > 0 and vs[j - 1] > temp:
                    vs[j] = vs[j - 1]
                    int_pts[j * 2] = int_pts[j * 2 - 2]
                    int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
                    j -= 1
                vs[j] = temp
                int_pts[j * 2] = tx
                int_pts[j * 2 + 1] = ty
@cuda.jit('(float32[:], float32[:], int32, int32, float32[:])',
          device=True,
          inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
    """Intersect edge i of quad pts1 with edge j of quad pts2.

    pts1/pts2 hold interleaved (x, y) corners of two quadrilaterals.  If
    the two edges properly cross, the crossing point is written into
    temp_pts[0:2] and True is returned; otherwise returns False.
    """
    # Edge endpoints: a->b is edge i of pts1, c->d is edge j of pts2
    # (corner index wraps modulo 4).
    a = cuda.local.array((2,), dtype=numba_float32)
    b = cuda.local.array((2,), dtype=numba_float32)
    c = cuda.local.array((2,), dtype=numba_float32)
    d = cuda.local.array((2,), dtype=numba_float32)
    a[0] = pts1[2 * i]
    a[1] = pts1[2 * i + 1]
    b[0] = pts1[2 * ((i + 1) % 4)]
    b[1] = pts1[2 * ((i + 1) % 4) + 1]
    c[0] = pts2[2 * j]
    c[1] = pts2[2 * j + 1]
    d[0] = pts2[2 * ((j + 1) % 4)]
    d[1] = pts2[2 * ((j + 1) % 4) + 1]
    ba0 = b[0] - a[0]
    ba1 = b[1] - a[1]
    da0 = d[0] - a[0]
    ca0 = c[0] - a[0]
    da1 = d[1] - a[1]
    ca1 = c[1] - a[1]
    # Orientation tests (cross-product signs): the segments cross iff c and
    # d lie on opposite sides of line a-b AND a and b lie on opposite sides
    # of line c-d.
    acd = da1 * ca0 > ca1 * da0
    bcd = (d[1] - b[1]) * (c[0] - b[0]) > (c[1] - b[1]) * (d[0] - b[0])
    if acd != bcd:
        abc = ca1 * ba0 > ba1 * ca0
        abd = da1 * ba0 > ba1 * da0
        if abc != abd:
            # Solve the 2x2 linear system of the two line equations
            # (determinant form) for the crossing point.
            dc0 = d[0] - c[0]
            dc1 = d[1] - c[1]
            abba = a[0] * b[1] - b[0] * a[1]
            cddc = c[0] * d[1] - d[0] * c[1]
            dh = ba1 * dc0 - ba0 * dc1
            dx = abba * dc0 - ba0 * cddc
            dy = abba * dc1 - ba1 * cddc
            temp_pts[0] = dx / dh
            temp_pts[1] = dy / dh
            return True
    return False
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
    """Test whether (pt_x, pt_y) lies inside the quadrilateral `corners`.

    Projects the vector A->P onto the two edges A->B and A->D leaving the
    first corner; the point is inside when both projections fall within
    the squared edge lengths.
    """
    ab_x = corners[2] - corners[0]
    ab_y = corners[3] - corners[1]
    ad_x = corners[6] - corners[0]
    ad_y = corners[7] - corners[1]
    ap_x = pt_x - corners[0]
    ap_y = pt_y - corners[1]
    dot_ab_ab = ab_x * ab_x + ab_y * ab_y
    dot_ab_ap = ab_x * ap_x + ab_y * ap_y
    dot_ad_ad = ad_x * ad_x + ad_y * ad_y
    dot_ad_ap = ad_x * ap_x + ad_y * ap_y
    return dot_ab_ab >= dot_ab_ap >= 0 and dot_ad_ad >= dot_ad_ap >= 0
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
    """Collect the vertices of the intersection region of two quads.

    Writes interleaved (x, y) vertices into int_pts (unsorted) and returns
    their count: corners of either quad contained in the other, plus every
    edge/edge crossing point.
    """
    num_of_inter = 0
    # Corners of one quad that lie inside the other are intersection vertices.
    for i in range(4):
        if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
            int_pts[num_of_inter * 2] = pts1[2 * i]
            int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
            num_of_inter += 1
        if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
            int_pts[num_of_inter * 2] = pts2[2 * i]
            int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
            num_of_inter += 1
    # Every crossing of an edge of pts1 with an edge of pts2 is also a vertex.
    temp_pts = cuda.local.array((2,), dtype=numba_float32)
    for i in range(4):
        for j in range(4):
            has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
            if has_pts:
                int_pts[num_of_inter * 2] = temp_pts[0]
                int_pts[num_of_inter * 2 + 1] = temp_pts[1]
                num_of_inter += 1
    return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
    """Fill `corners` (8 floats) with the rotated corners of `rbbox`.

    rbbox layout: (center_x, center_y, x_size, y_size, angle).  Corners are
    generated clockwise around the center and rotated clockwise by `angle`.
    """
    rot = rbbox[4]
    cos_r = math.cos(rot)
    sin_r = math.sin(rot)
    cx = rbbox[0]
    cy = rbbox[1]
    half_x = rbbox[2] / 2
    half_y = rbbox[3] / 2
    local_x = cuda.local.array((4,), dtype=numba_float32)
    local_y = cuda.local.array((4,), dtype=numba_float32)
    # Axis-aligned corners relative to the center, in clockwise order.
    local_x[0] = -half_x
    local_x[1] = -half_x
    local_x[2] = half_x
    local_x[3] = half_x
    local_y[0] = -half_y
    local_y[1] = half_y
    local_y[2] = half_y
    local_y[3] = -half_y
    # Rotate and translate each corner into world coordinates.
    for idx in range(4):
        corners[2 * idx] = cos_r * local_x[idx] + sin_r * local_y[idx] + cx
        corners[2 * idx +
                1] = -sin_r * local_x[idx] + cos_r * local_y[idx] + cy
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
    """Intersection area of two rotated boxes.

    Expands both boxes to corner form, gathers the intersection polygon's
    vertices, sorts them angularly, and integrates the polygon area.
    """
    quad1 = cuda.local.array((8,), dtype=numba_float32)
    quad2 = cuda.local.array((8,), dtype=numba_float32)
    overlap_pts = cuda.local.array((16,), dtype=numba_float32)
    rbbox_to_corners(quad1, rbbox1)
    rbbox_to_corners(quad2, rbbox2)
    num_pts = quadrilateral_intersection(quad1, quad2, overlap_pts)
    sort_vertex_in_convex_polygon(overlap_pts, num_pts)
    return area(overlap_pts, num_pts)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def dev_rotate_iou_eval(rbox1, rbox2, criterion=-1):
    """Rotated overlap ratio between two boxes.

    criterion -1: standard IoU; 0: intersection over rbox1's area;
    1: intersection over rbox2's area; anything else returns the raw
    intersection area.
    """
    a1 = rbox1[2] * rbox1[3]
    a2 = rbox2[2] * rbox2[3]
    overlap = inter(rbox1, rbox2)
    if criterion == -1:
        return overlap / (a1 + a2 - overlap)
    if criterion == 0:
        return overlap / a1
    if criterion == 1:
        return overlap / a2
    return overlap
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)',
          fastmath=False)
def rotate_iou_kernel_eval(n,
                           k,
                           dev_boxes,
                           dev_query_boxes,
                           dev_iou,
                           criterion=-1):
    """CUDA kernel: tiled rotated IoU between n boxes and k query boxes.

    dev_boxes / dev_query_boxes are flattened (count * 5) float32 arrays;
    dev_iou is the flattened (n * k) output.  Each block processes a
    64 x 64 tile; its boxes are staged into shared memory before the
    per-thread IoU loop.
    """
    threads_per_block = 8 * 8
    row_start = cuda.blockIdx.x
    col_start = cuda.blockIdx.y
    tx = cuda.threadIdx.x
    # Clip the tile at the array boundaries.
    row_size = min(n - row_start * threads_per_block, threads_per_block)
    col_size = min(k - col_start * threads_per_block, threads_per_block)
    block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba_float32)
    block_qboxes = cuda.shared.array(shape=(64 * 5,), dtype=numba_float32)
    dev_query_box_idx = threads_per_block * col_start + tx
    dev_box_idx = threads_per_block * row_start + tx
    # Each thread copies one 5-float box of its tile into shared memory.
    if tx < col_size:
        block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
        block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
        block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
        block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
        block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
    if tx < row_size:
        block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
        block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
        block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
        block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
        block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
    # All shared-memory writes must land before any thread reads them.
    cuda.syncthreads()
    if tx < row_size:
        for i in range(col_size):
            offset = row_start * threads_per_block * k + col_start * threads_per_block + tx * k + i
            dev_iou[offset] = dev_rotate_iou_eval(block_qboxes[i * 5:i * 5 + 5],
                                                  block_boxes[tx * 5:tx * 5 + 5],
                                                  criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
    """Rotated box IoU computed on the GPU.

    boxes: (N, 5) and query_boxes: (K, 5), each row
    (center_x, center_y, x_size, y_size, angle).  Returns an (N, K) float32
    overlap matrix.  `criterion` is forwarded to the device kernel
    (-1 = symmetric IoU); `device_id` selects the CUDA device.
    """
    boxes = boxes.astype(np.float32)
    query_boxes = query_boxes.astype(np.float32)
    n_boxes = boxes.shape[0]
    k_qboxes = query_boxes.shape[0]
    iou = np.zeros((n_boxes, k_qboxes), dtype=np.float32)
    # Empty inputs: return the all-zero matrix without launching a kernel.
    if n_boxes == 0 or k_qboxes == 0:
        return iou
    threads_per_block = 8 * 8
    cuda.select_device(device_id)
    blockspergrid = (div_up(n_boxes, threads_per_block), div_up(k_qboxes, threads_per_block))
    stream = cuda.stream()
    # auto_synchronize waits for all async copies/kernels before exiting.
    with stream.auto_synchronize():
        boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
        query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
        iou_dev = cuda.to_device(iou.reshape([-1]), stream)
        rotate_iou_kernel_eval[blockspergrid, threads_per_block, stream](n_boxes, k_qboxes, boxes_dev,
                                                                         query_boxes_dev, iou_dev, criterion)
        iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
    return iou.astype(boxes.dtype)
@numba_jit
def get_thresholds(scores, num_gt, num_sample_pts=41):
    """Pick score thresholds that step recall roughly evenly.

    Note: `scores` is sorted in place (then traversed in descending order).
    Returns the list of chosen score thresholds.
    """
    scores.sort()
    scores = scores[::-1]
    current_recall = 0
    thresholds = []
    last = len(scores) - 1
    for idx, score in enumerate(scores):
        l_recall = (idx + 1) / num_gt
        r_recall = (idx + 2) / num_gt if idx < last else l_recall
        # Skip while the next sample would still be closer to the target
        # recall step than this one.
        if (r_recall - current_recall) < (current_recall - l_recall) and idx < last:
            continue
        thresholds.append(score)
        current_recall += 1 / (num_sample_pts - 1.0)
    return thresholds
def _clean_gt_data(anno, current_cls_name, difficulty):
"""clean gt data"""
min_height = [40, 25, 25]
max_occlusion = [0, 1, 2]
max_truncation = [0.15, 0.3, 0.5]
num = len(anno['name'])
num_valid = 0
dc_bboxes, ignored = [], []
for i in range(num):
bbox = anno['bbox'][i]
name = anno['name'][i].lower()
height = abs(bbox[3] - bbox[1])
if name == current_cls_name:
valid_class = 1
elif ((current_cls_name == "pedestrian" and name == "person_sitting")
or (current_cls_name == "car" and name == "van")):
valid_class = 0
else:
valid_class = -1
ignore = False
if ((anno["occluded"][i] > max_occlusion[difficulty])
or (anno["truncated"][i] > max_truncation[difficulty])
or (height <= min_height[difficulty])):
ignore = True
if valid_class == 1 and not ignore:
ignored.append(0)
num_valid += 1
elif valid_class == 0 or (ignore and (valid_class == 1)):
ignored.append(1)
else:
ignored.append(-1)
if anno["name"][i] == "DontCare":
dc_bboxes.append(bbox)
return num_valid, ignored, dc_bboxes
def _clean_dt_data(anno, current_cls_name, difficulty):
"""clean dt data"""
min_height = [40, 25, 25]
num = len(anno['name'])
ignored = []
for i in range(num):
if anno["name"][i].lower() == current_cls_name:
valid_class = 1
else:
valid_class = -1
height = abs(anno["bbox"][i, 3] - anno["bbox"][i, 1])
if height < min_height[difficulty]:
ignored.append(1)
elif valid_class == 1:
ignored.append(0)
else:
ignored.append(-1)
return ignored
def clean_data(gt_anno, dt_anno, current_class, difficulty):
    """Classify GT and detection boxes of one image for evaluation.

    Returns (num_valid_gt, ignored_gt, ignored_dt, dc_bboxes).
    """
    class_names = ['car', 'pedestrian', 'cyclist', 'van',
                   'person_sitting', 'car', 'tractor', 'trailer']
    cls_name = class_names[current_class].lower()
    num_valid_gt, ignored_gt, dc_bboxes = _clean_gt_data(
        gt_anno, cls_name, difficulty)
    ignored_dt = _clean_dt_data(dt_anno, cls_name, difficulty)
    return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
@numba_jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
    """Pairwise 2D box overlap between boxes (n, 4) and query_boxes (k, 4).

    criterion -1: IoU; 0: intersection over box area; 1: intersection over
    query-box area.  Returns an (n, k) array in boxes.dtype.
    """
    n_boxes = boxes.shape[0]
    k_qboxes = query_boxes.shape[0]
    overlaps = np.zeros((n_boxes, k_qboxes), dtype=boxes.dtype)
    for k in range(k_qboxes):
        qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
                     (query_boxes[k, 3] - query_boxes[k, 1]))
        for n in range(n_boxes):
            # Intersection width/height; non-positive means no overlap.
            iw = (min(boxes[n, 2], query_boxes[k, 2]) -
                  max(boxes[n, 0], query_boxes[k, 0]))
            if iw <= 0:
                continue
            ih = (min(boxes[n, 3], query_boxes[k, 3]) -
                  max(boxes[n, 1], query_boxes[k, 1]))
            if ih <= 0:
                continue
            if criterion == -1:
                ua = ((boxes[n, 2] - boxes[n, 0]) *
                      (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
            elif criterion == 0:
                ua = ((boxes[n, 2] - boxes[n, 0]) *
                      (boxes[n, 3] - boxes[n, 1]))
            elif criterion == 1:
                ua = qbox_area
            else:
                ua = 1.0
            overlaps[n, k] = iw * ih / ua
    return overlaps
def bev_box_overlap(boxes, qboxes, criterion=-1):
    """Rotated overlap of boxes in bird's-eye view (delegates to the GPU kernel)."""
    return rotate_iou_gpu_eval(boxes, qboxes, criterion)
@numba_jit(nopython=True, parallel=True)
def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):
    """Refine BEV rotated intersections in `rinc` into 3D overlaps, in place.

    ONLY supports overlap in CAMERA coordinates (not lidar): index 1 is the
    vertical axis and index 4 the height.  On entry rinc[i, j] holds the
    BEV intersection area; on exit it holds the 3D overlap ratio.
    """
    n_boxes, k_qboxes = boxes.shape[0], qboxes.shape[0]
    for i in range(n_boxes):
        for j in range(k_qboxes):
            if rinc[i, j] <= 0:
                continue
            # Vertical (height) extent shared by the two boxes.
            ih = (min(boxes[i, 1], qboxes[j, 1]) -
                  max(boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))
            if ih > 0:
                vol1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
                vol2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
                inc = ih * rinc[i, j]
                if criterion == -1:
                    ua = (vol1 + vol2 - inc)
                elif criterion == 0:
                    ua = vol1
                elif criterion == 1:
                    ua = vol2
                else:
                    ua = 1.0
                rinc[i, j] = inc / ua
            else:
                rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1):
    """3D overlap of camera-frame boxes (n, 7) vs (k, 7).

    Computes the raw BEV rotated intersection area (criterion 2), then
    refines it with the vertical extent in place.
    """
    bev_cols = [0, 2, 3, 5, 6]
    rinc = rotate_iou_gpu_eval(boxes[:, bev_cols],
                               qboxes[:, bev_cols], 2)
    d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)
    return rinc
@numba_jit(nopython=True)
def compute_statistics_jit(overlaps, gt_datas, dt_datas,
                           ignored_gt, ignored_det,
                           dc_bboxes, metric, min_overlap,
                           thresh=0., compute_fp=False, compute_aos=False):
    """Match detections to ground truth and count tp/fp/fn for one image.

    overlaps is indexed [det, gt].  gt_datas rows are (bbox4, alpha);
    dt_datas rows are (bbox4, alpha, ..., score).  ignored_gt/ignored_det
    use 0 = counted, 1 = ignored, -1 = irrelevant (see clean_data).
    Returns (tp, fp, fn, similarity, thresholds) where thresholds holds the
    scores of the matched true positives.
    """
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    dt_alphas = dt_datas[:, 4]
    gt_alphas = gt_datas[:, 4]
    dt_bboxes = dt_datas[:, :4]
    assigned_detection = [False] * det_size
    ignored_threshold = [False] * det_size
    # In the FP-counting pass, detections below the score threshold do not
    # participate in matching at all.
    if compute_fp:
        for i in range(det_size):
            if dt_scores[i] < thresh:
                ignored_threshold[i] = True
    # Using a large negative number to filter the cases with no detections
    # for counting False Positives
    no_detection = -10000000
    tp, fp, fn, similarity = 0, 0, 0, 0
    thresholds = np.zeros((gt_size,))
    thresh_idx = 0
    delta = np.zeros((gt_size,))
    delta_idx = 0
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = no_detection
        max_overlap = 0
        assigned_ignored_det = False
        # Find the best unassigned detection for this ground-truth box.
        for j in range(det_size):
            if ignored_det[j] == -1 or assigned_detection[j] or ignored_threshold[j]:
                continue
            overlap = overlaps[j, i]
            dt_score = dt_scores[j]
            # First pass (compute_fp False): pick the highest-scoring match.
            if not compute_fp and overlap > min_overlap and dt_score > valid_detection:
                det_idx = j
                valid_detection = dt_score
            # FP pass: prefer the highest-overlap non-ignored detection...
            elif (compute_fp and overlap > min_overlap
                  and (overlap > max_overlap or assigned_ignored_det) and ignored_det[j] == 0):
                max_overlap = overlap
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = False
            # ...falling back to an ignored detection if nothing else matched.
            elif (compute_fp and overlap > min_overlap
                  and (valid_detection == no_detection) and ignored_det[j] == 1):
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = True
        if valid_detection == no_detection and ignored_gt[i] == 0:
            fn += 1
        elif (valid_detection != no_detection
              and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            assigned_detection[det_idx] = True
        elif valid_detection != no_detection:
            # only a tp add a threshold.
            tp += 1
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            if compute_aos:
                delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
                delta_idx += 1
            assigned_detection[det_idx] = True
    if compute_fp:
        # Every remaining relevant, unmatched detection is a false positive.
        for i in range(det_size):
            if (not (assigned_detection[i] or ignored_det[i] == -1
                     or ignored_det[i] == 1 or ignored_threshold[i])):
                fp += 1
        # Detections overlapping "DontCare" regions are removed from the FP
        # count (2D bbox metric only).
        nstuff = 0
        if metric == 0:
            overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
            for i in range(dc_bboxes.shape[0]):
                for j in range(det_size):
                    if (assigned_detection[j] or ignored_det[j] == -1
                            or ignored_det[j] == 1 or ignored_threshold[j]):
                        continue
                    if overlaps_dt_dc[j, i] > min_overlap:
                        assigned_detection[j] = True
                        nstuff += 1
        fp -= nstuff
        if compute_aos:
            # Orientation similarity: (1 + cos(delta)) / 2 per tp, 0 per fp.
            tmp = np.zeros((fp + delta_idx,))
            for i in range(delta_idx):
                tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
            if tp > 0 or fp > 0:
                similarity = np.sum(tmp)
            else:
                similarity = -1
    return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
    """Split `num` examples into `num_part` equal chunks plus a remainder.

    Returns a list of chunk sizes; a trailing remainder chunk is appended
    when `num` is not divisible by `num_part`.
    """
    quotient, remainder = divmod(num, num_part)
    if remainder == 0:
        return [quotient] * num_part
    return [quotient] * num_part + [remainder]
@numba_jit(nopython=True)
def fused_compute_statistics(overlaps,
                             pr,
                             gt_nums,
                             dt_nums,
                             dc_nums,
                             gt_datas,
                             dt_datas,
                             dontcares,
                             ignored_gts,
                             ignored_dets,
                             metric,
                             min_overlap,
                             thresholds,
                             compute_aos=False):
    """Accumulate tp/fp/fn/similarity into `pr` for a fused batch of images.

    The per-image arrays are concatenated; gt_nums/dt_nums/dc_nums hold the
    per-image counts used to slice them back apart.  pr has shape
    (len(thresholds), 4) holding [tp, fp, fn, similarity] per threshold and
    is updated in place.
    """
    gt_num = 0
    dt_num = 0
    dc_num = 0
    for i in range(gt_nums.shape[0]):
        for t, thresh in enumerate(thresholds):
            # Slice this image's rows out of the fused arrays.
            overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:gt_num + gt_nums[i]]
            gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
            dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
            ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
            ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
            dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
            tp, fp, fn, similarity, _ = compute_statistics_jit(
                overlap,
                gt_data,
                dt_data,
                ignored_gt,
                ignored_det,
                dontcare,
                metric,
                min_overlap=min_overlap,
                thresh=thresh,
                compute_fp=True,
                compute_aos=compute_aos
            )
            pr[t, 0] += tp
            pr[t, 1] += fp
            pr[t, 2] += fn
            # similarity == -1 marks "undefined for this image" (no tp/fp).
            if similarity != -1:
                pr[t, 3] += similarity
        gt_num += gt_nums[i]
        dt_num += dt_nums[i]
        dc_num += dc_nums[i]
def _stack_bev_boxes(annos_part):
    """Stack bird's-eye-view boxes (x, z, w, l, rotation_y) from annotations."""
    loc = np.concatenate([a["location"][:, [0, 2]] for a in annos_part], 0)
    dims = np.concatenate([a["dimensions"][:, [0, 2]] for a in annos_part], 0)
    rots = np.concatenate([a["rotation_y"] for a in annos_part], 0)
    return np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)


def _stack_3d_boxes(annos_part):
    """Stack full 3D boxes (location, dimensions, rotation_y) from annotations."""
    loc = np.concatenate([a["location"] for a in annos_part], 0)
    dims = np.concatenate([a["dimensions"] for a in annos_part], 0)
    rots = np.concatenate([a["rotation_y"] for a in annos_part], 0)
    return np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)


def _get_parted_overlaps(gt_annos, dt_annos, split_parts, metric):
    """Compute one overlap matrix per part between GT and detections.

    metric: 0 = 2D bbox, 1 = BEV, 2 = 3D.  Returns a list of
    (gt_in_part, dt_in_part) overlap arrays, one per entry of split_parts.
    Raises ValueError for an unknown metric.
    """
    parted_overlaps = []
    example_idx = 0
    for num_part in split_parts:
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        if metric == 0:
            gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
            dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
            overlap_part = image_box_overlap(gt_boxes, dt_boxes)
        elif metric == 1:
            overlap_part = bev_box_overlap(
                _stack_bev_boxes(gt_annos_part),
                _stack_bev_boxes(dt_annos_part)).astype(np.float64)
        elif metric == 2:
            overlap_part = d3_box_overlap(
                _stack_3d_boxes(gt_annos_part),
                _stack_3d_boxes(dt_annos_part)).astype(np.float64)
        else:
            raise ValueError("unknown metric")
        parted_overlaps.append(overlap_part)
        example_idx += num_part
    return parted_overlaps
def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):
    """Fast IoU between all GT and detection annotations, computed in parts.

    Must be used in the CAMERA coordinate system.  gt_annos/dt_annos come
    from get_label_annos() in kitti_common.py; metric is 0 (bbox), 1 (bev)
    or 2 (3d); num_parts controls the batch size of the fast path.
    Returns (overlaps, parted_overlaps, total_gt_num, total_dt_num) where
    overlaps holds one (gt, dt) matrix per image.
    """
    assert len(gt_annos) == len(dt_annos)
    total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
    total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
    split_parts = get_split_parts(len(gt_annos), num_parts)
    parted_overlaps = _get_parted_overlaps(gt_annos, dt_annos, split_parts, metric)
    # Slice the fused per-part matrices back into per-image blocks.
    overlaps = []
    example_idx = 0
    for part_idx, num_part in enumerate(split_parts):
        gt_offset = 0
        dt_offset = 0
        for i in range(num_part):
            gt_count = total_gt_num[example_idx + i]
            dt_count = total_dt_num[example_idx + i]
            overlaps.append(
                parted_overlaps[part_idx][gt_offset:gt_offset + gt_count,
                                          dt_offset:dt_offset + dt_count]
            )
            gt_offset += gt_count
            dt_offset += dt_count
        example_idx += num_part
    return overlaps, parted_overlaps, total_gt_num, total_dt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
    """Build per-image arrays for the statistics kernels.

    Returns (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
    dontcares, total_dc_num, total_num_valid_gt).  GT rows are
    (bbox4, alpha); detection rows are (bbox4, alpha, score).
    """
    gt_datas_list = []
    dt_datas_list = []
    total_dc_num = []
    ignored_gts = []
    ignored_dets = []
    dontcares = []
    total_num_valid_gt = 0
    for gt_anno, dt_anno in zip(gt_annos, dt_annos):
        num_valid_gt, ignored_gt, ignored_det, dc_bboxes = clean_data(
            gt_anno, dt_anno, current_class, difficulty)
        ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
        ignored_dets.append(np.array(ignored_det, dtype=np.int64))
        # "DontCare" boxes become a (num_dc, 4) float64 array, possibly empty.
        if np.array(dc_bboxes).shape[0] == 0:
            dc_bboxes = np.zeros((0, 4)).astype(np.float64)
        else:
            dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
        total_dc_num.append(dc_bboxes.shape[0])
        dontcares.append(dc_bboxes)
        total_num_valid_gt += num_valid_gt
        gt_datas_list.append(np.concatenate(
            [gt_anno["bbox"], gt_anno["alpha"][..., np.newaxis]], 1))
        dt_datas_list.append(np.concatenate([
            dt_anno["bbox"], dt_anno["alpha"][..., np.newaxis],
            dt_anno["score"][..., np.newaxis]
        ], 1))
    total_dc_num = np.stack(total_dc_num, axis=0)
    return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
            dontcares, total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
               dt_annos,
               current_classes,
               difficultys,
               metric,
               min_overlaps,
               compute_aos=False,
               num_parts=50):
    """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        current_classes: int, 0: car, 1: pedestrian, 2: cyclist
        difficultys: int. eval difficulty, 0: easy, 1: normal, 2: hard
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        min_overlaps: float, min overlap. official:
            [[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
            format: [metric, class]. choose one from matrix above.
        compute_aos: bool. compute aos or not
        num_parts: int. a parameter for fast calculate algorithm
    Returns:
        dict of recall, precision and aos
    """
    if len(gt_annos) != len(dt_annos):
        raise ValueError(
            f'Number of elements in ground-truth and detected annotations '
            f'lists must be equal, got {len(gt_annos)} and {len(dt_annos)}.'
        )
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    # Detections are passed first, so each overlaps[i] is indexed [det, gt]
    # as compute_statistics_jit expects.
    rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
    n_sample_pts = 41
    num_minoverlap = len(min_overlaps)
    num_class = len(current_classes)
    num_difficulty = len(difficultys)
    precision = np.zeros([num_class, num_difficulty, num_minoverlap, n_sample_pts])
    recall = np.zeros([num_class, num_difficulty, num_minoverlap, n_sample_pts])
    aos = np.zeros([num_class, num_difficulty, num_minoverlap, n_sample_pts])
    for m, current_class in enumerate(current_classes):
        for l, difficulty in enumerate(difficultys):
            rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
            (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
             dontcares, total_dc_num, total_num_valid_gt) = rets
            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
                # First pass: collect matched scores to derive the score
                # thresholds used for the recall sample points.
                thresholdss = []
                for i in range(len(gt_annos)):
                    rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i],
                                                  ignored_gts[i], ignored_dets[i],
                                                  dontcares[i], metric, min_overlap=min_overlap,
                                                  thresh=0.0, compute_fp=False)
                    _, _, _, _, thresholds = rets
                    thresholdss += thresholds.tolist()
                thresholdss = np.array(thresholdss)
                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
                thresholds = np.array(thresholds)
                # pr rows accumulate [tp, fp, fn, similarity] per threshold.
                pr = np.zeros([len(thresholds), 4])
                idx = 0
                # Second pass: accumulate statistics part by part.
                for j, num_part in enumerate(split_parts):
                    gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
                    dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
                    dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
                    ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
                    ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
                    fused_compute_statistics(
                        parted_overlaps[j],
                        pr,
                        total_gt_num[idx:idx + num_part],
                        total_dt_num[idx:idx + num_part],
                        total_dc_num[idx:idx + num_part],
                        gt_datas_part,
                        dt_datas_part,
                        dc_datas_part,
                        ignored_gts_part,
                        ignored_dets_part,
                        metric,
                        min_overlap=min_overlap,
                        thresholds=thresholds,
                        compute_aos=compute_aos
                    )
                    idx += num_part
                for i in range(len(thresholds)):
                    recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
                    if compute_aos:
                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
                # Replace each sample by the running max to its right
                # (standard interpolated precision/recall envelope).
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
                    recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
                    if compute_aos:
                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
    ret_dict = {
        "recall": recall,
        "precision": precision,
        "orientation": aos,
    }
    return ret_dict
def get_map(prec):
    """11-point AP in percent: average precision over every 4th sample.

    With 41 recall sample points this picks indices 0, 4, ..., 40.
    """
    total = 0
    for sample in range(0, prec.shape[-1], 4):
        total = total + prec[..., sample]
    return total / 11 * 100
def do_eval(gt_annos,
            dt_annos,
            current_classes,
            min_overlaps,
            compute_aos=False,
            difficultys=(0, 1, 2)):
    """Run bbox/BEV/3D (and optionally AOS) evaluation and return the mAPs.

    min_overlaps: [num_minoverlap, metric, num_class].  Returns
    (map_bbox, map_bev, map_3d, map_aos); map_aos is None unless
    compute_aos is True.  Each mAP is [num_class, num_diff, num_minoverlap].
    """
    # Metric 0 = 2D bbox; orientation similarity comes from the same pass.
    bbox_ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,
                          min_overlaps, compute_aos)
    map_bbox = get_map(bbox_ret["precision"])
    map_aos = get_map(bbox_ret["orientation"]) if compute_aos else None
    # Metric 1 = bird's-eye view.
    bev_ret = eval_class(gt_annos, dt_annos, current_classes,
                         difficultys, 1, min_overlaps)
    map_bev = get_map(bev_ret["precision"])
    # Metric 2 = full 3D boxes.
    d3_ret = eval_class(gt_annos, dt_annos, current_classes,
                        difficultys, 2, min_overlaps)
    map_3d = get_map(d3_ret["precision"])
    return map_bbox, map_bev, map_3d, map_aos
def do_coco_style_eval(gt_annos, dt_annos, current_classes,
                       overlap_ranges, compute_aos):
    """COCO-style evaluation: mAP averaged over a sweep of overlap thresholds.

    overlap_ranges: [range, metric, num_class]; each column holds
    (start, stop, num) arguments for np.linspace.  Returns mAPs averaged
    over the threshold axis; map_aos is None when compute_aos is False.
    """
    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
    for metric_idx in range(overlap_ranges.shape[1]):
        for cls_idx in range(overlap_ranges.shape[2]):
            lo, hi, steps = overlap_ranges[:, metric_idx, cls_idx]
            min_overlaps[:, metric_idx, cls_idx] = np.linspace(lo, hi, int(steps))
    map_bbox, map_bev, map_3d, map_aos = do_eval(gt_annos, dt_annos, current_classes,
                                                 min_overlaps, compute_aos)
    # Average over the min-overlap axis -> [num_class, num_diff].
    map_bbox = map_bbox.mean(-1)
    map_bev = map_bev.mean(-1)
    map_3d = map_3d.mean(-1)
    if map_aos is not None:
        map_aos = map_aos.mean(-1)
    return map_bbox, map_bev, map_3d, map_aos
def print_str(value, *arg, sstream=None):
    """Render `value` (plus extra args) exactly as print() would, as a string.

    An existing StringIO may be supplied via `sstream`; it is cleared first
    so it can be reused across calls.
    """
    stream = sysio.StringIO() if sstream is None else sstream
    stream.truncate(0)
    stream.seek(0)
    print(value, *arg, file=stream)
    return stream.getvalue()
def get_official_eval_result(gt_annos, dt_annos, current_classes, difficultys=(0, 1, 2), return_data=False):
    """Run the official-style KITTI evaluation and format the results as text.

    current_classes may be class indices or class-name strings.  Returns
    the formatted result string, or (result, map_bbox, map_bev, map_3d,
    map_aos) when return_data is True.
    """
    # min_overlaps layout: [num_minoverlap, metric, class].
    min_overlaps = np.array([[
        [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
        [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
        [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]
    ]])
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
        5: 'car',
        6: 'tractor',
        7: 'trailer',
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    # Normalize class names to integer indices.
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    min_overlaps = min_overlaps[:, :, current_classes]
    result = '        Easy   Mod    Hard\n'
    # check whether alpha is valid (-10 marks "no orientation available")
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    map_bbox, map_bev, map_3d, map_aos = do_eval(gt_annos, dt_annos, current_classes,
                                                 min_overlaps, compute_aos, difficultys)
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        for i in range(min_overlaps.shape[0]):
            result += print_str(
                (f"{class_to_name[curcls]} "
                 "AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j]))
            )
            result += print_str((f"bbox AP:{map_bbox[j, 0, i]:.2f}, "
                                 f"{map_bbox[j, 1, i]:.2f}, "
                                 f"{map_bbox[j, 2, i]:.2f}"))
            result += print_str((f"bev  AP:{map_bev[j, 0, i]:.2f}, "
                                 f"{map_bev[j, 1, i]:.2f}, "
                                 f"{map_bev[j, 2, i]:.2f}"))
            result += print_str((f"3d   AP:{map_3d[j, 0, i]:.2f}, "
                                 f"{map_3d[j, 1, i]:.2f}, "
                                 f"{map_3d[j, 2, i]:.2f}"))
            if compute_aos:
                result += print_str((f"aos  AP:{map_aos[j, 0, i]:.2f}, "
                                     f"{map_aos[j, 1, i]:.2f}, "
                                     f"{map_aos[j, 2, i]:.2f}"))
    if return_data:
        return result, map_bbox, map_bev, map_3d, map_aos
    return result
def get_coco_eval_result(gt_annos, dt_annos, current_classes):
    """Run the COCO-style (threshold-sweep) evaluation and format the results.

    current_classes may be class indices or class-name strings.  Returns
    the formatted result string.
    """
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
        5: 'car',
        6: 'tractor',
        7: 'trailer',
    }
    # Per-class (start, stop, num) overlap-threshold sweep for np.linspace.
    class_to_range = {
        0: [0.5, 0.95, 10],
        1: [0.25, 0.7, 10],
        2: [0.25, 0.7, 10],
        3: [0.5, 0.95, 10],
        4: [0.25, 0.7, 10],
        5: [0.5, 0.95, 10],
        6: [0.5, 0.95, 10],
        7: [0.5, 0.95, 10],
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    # Normalize class names to integer indices.
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    overlap_ranges = np.zeros([3, 3, len(current_classes)])
    for i, curcls in enumerate(current_classes):
        # Same sweep for all three metrics of a class.
        overlap_ranges[:, :, i] = np.array(class_to_range[curcls])[:, np.newaxis]
    result = ''
    # check whether alpha is valid (-10 marks "no orientation available")
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    map_bbox, map_bev, map_3d, map_aos = do_coco_style_eval(gt_annos, dt_annos, current_classes,
                                                            overlap_ranges, compute_aos)
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        # Reorder (start, stop, num) -> (start, num, stop), then replace the
        # middle entry by the step size so the header reads start:step:stop.
        o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
        o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
        result += print_str(
            (f"{class_to_name[curcls]} "
             "coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range))
        )
        result += print_str((f"bbox AP:{map_bbox[j, 0]:.2f}, "
                             f"{map_bbox[j, 1]:.2f}, "
                             f"{map_bbox[j, 2]:.2f}"))
        result += print_str((f"bev  AP:{map_bev[j, 0]:.2f}, "
                             f"{map_bev[j, 1]:.2f}, "
                             f"{map_bev[j, 2]:.2f}"))
        result += print_str((f"3d   AP:{map_3d[j, 0]:.2f}, "
                             f"{map_3d[j, 1]:.2f}, "
                             f"{map_3d[j, 2]:.2f}"))
        if compute_aos:
            result += print_str((f"aos  AP:{map_aos[j, 0]:.2f}, "
                                 f"{map_aos[j, 1]:.2f}, "
                                 f"{map_aos[j, 2]:.2f}"))
    return result
| [
"numba.cuda.select_device",
"math.sqrt",
"numba.cuda.jit",
"numba.cuda.local.array",
"numba.cuda.shared.array",
"math.cos",
"numpy.stack",
"numpy.zeros",
"numba.jit",
"numba.cuda.stream",
"numpy.array",
"numpy.concatenate",
"numpy.sum",
"numpy.max",
"numpy.cos",
"io.StringIO",
"math.... | [((839, 863), 'numba.jit', 'numba_jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (848, 863), True, 'from numba import jit as numba_jit\n'), ((934, 1008), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], float32[:], float32[:])"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], float32[:], float32[:])', device=True, inline=True)\n", (942, 1008), False, 'from numba import cuda\n'), ((1144, 1201), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], int32)"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], int32)', device=True, inline=True)\n", (1152, 1201), False, 'from numba import cuda\n'), ((1478, 1535), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], int32)"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], int32)', device=True, inline=True)\n", (1486, 1535), False, 'from numba import cuda\n'), ((2916, 3008), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], float32[:], int32, int32, float32[:])"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], float32[:], int32, int32, float32[:])', device=True,\n inline=True)\n", (2924, 3008), False, 'from numba import cuda\n'), ((4313, 4381), 'numba.cuda.jit', 'cuda.jit', (['"""(float32, float32, float32[:])"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32, float32, float32[:])', device=True, inline=True)\n", (4321, 4381), False, 'from numba import cuda\n'), ((4846, 4920), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], float32[:], float32[:])"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], float32[:], float32[:])', device=True, inline=True)\n", (4854, 4920), False, 'from numba import cuda\n'), ((5868, 5930), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], float32[:])"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], float32[:])', device=True, inline=True)\n", (5876, 5930), False, 'from numba import cuda\n'), ((6763, 6825), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], float32[:])"""'], {'device': 
'(True)', 'inline': '(True)'}), "('(float32[:], float32[:])', device=True, inline=True)\n", (6771, 6825), False, 'from numba import cuda\n'), ((7415, 7484), 'numba.cuda.jit', 'cuda.jit', (['"""(float32[:], float32[:], int32)"""'], {'device': '(True)', 'inline': '(True)'}), "('(float32[:], float32[:], int32)', device=True, inline=True)\n", (7423, 7484), False, 'from numba import cuda\n'), ((7889, 7978), 'numba.cuda.jit', 'cuda.jit', (['"""(int64, int64, float32[:], float32[:], float32[:], int32)"""'], {'fastmath': '(False)'}), "('(int64, int64, float32[:], float32[:], float32[:], int32)',\n fastmath=False)\n", (7897, 7978), False, 'from numba import cuda\n'), ((14115, 14139), 'numba.jit', 'numba_jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (14124, 14139), True, 'from numba import jit as numba_jit\n'), ((15526, 15565), 'numba.jit', 'numba_jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (15535, 15565), True, 'from numba import jit as numba_jit\n'), ((16882, 16906), 'numba.jit', 'numba_jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (16891, 16906), True, 'from numba import jit as numba_jit\n'), ((20991, 21015), 'numba.jit', 'numba_jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (21000, 21015), True, 'from numba import jit as numba_jit\n'), ((3128, 3171), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), dtype=numba_float32)\n', (3144, 3171), False, 'from numba import cuda\n'), ((3180, 3223), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), dtype=numba_float32)\n', (3196, 3223), False, 'from numba import cuda\n'), ((3232, 3275), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), dtype=numba_float32)\n', (3248, 3275), False, 'from numba import cuda\n'), ((3284, 3327), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), 
dtype=numba_float32)\n', (3300, 3327), False, 'from numba import cuda\n'), ((5496, 5539), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), dtype=numba_float32)\n', (5512, 5539), False, 'from numba import cuda\n'), ((6086, 6101), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (6094, 6101), False, 'import math\n'), ((6114, 6129), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (6122, 6129), False, 'import math\n'), ((6232, 6275), 'numba.cuda.local.array', 'cuda.local.array', (['(4,)'], {'dtype': 'numba_float32'}), '((4,), dtype=numba_float32)\n', (6248, 6275), False, 'from numba import cuda\n'), ((6292, 6335), 'numba.cuda.local.array', 'cuda.local.array', (['(4,)'], {'dtype': 'numba_float32'}), '((4,), dtype=numba_float32)\n', (6308, 6335), False, 'from numba import cuda\n'), ((6884, 6927), 'numba.cuda.local.array', 'cuda.local.array', (['(8,)'], {'dtype': 'numba_float32'}), '((8,), dtype=numba_float32)\n', (6900, 6927), False, 'from numba import cuda\n'), ((6943, 6986), 'numba.cuda.local.array', 'cuda.local.array', (['(8,)'], {'dtype': 'numba_float32'}), '((8,), dtype=numba_float32)\n', (6959, 6986), False, 'from numba import cuda\n'), ((7014, 7058), 'numba.cuda.local.array', 'cuda.local.array', (['(16,)'], {'dtype': 'numba_float32'}), '((16,), dtype=numba_float32)\n', (7030, 7058), False, 'from numba import cuda\n'), ((8522, 8577), 'numba.cuda.shared.array', 'cuda.shared.array', ([], {'shape': '(64 * 5,)', 'dtype': 'numba_float32'}), '(shape=(64 * 5,), dtype=numba_float32)\n', (8539, 8577), False, 'from numba import cuda\n'), ((8597, 8652), 'numba.cuda.shared.array', 'cuda.shared.array', ([], {'shape': '(64 * 5,)', 'dtype': 'numba_float32'}), '(shape=(64 * 5,), dtype=numba_float32)\n', (8614, 8652), False, 'from numba import cuda\n'), ((9529, 9547), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (9545, 9547), False, 'from numba import cuda\n'), ((10204, 10251), 'numpy.zeros', 'np.zeros', 
(['(n_boxes, k_qboxes)'], {'dtype': 'np.float32'}), '((n_boxes, k_qboxes), dtype=np.float32)\n', (10212, 10251), True, 'import numpy as np\n'), ((10343, 10372), 'numba.cuda.select_device', 'cuda.select_device', (['device_id'], {}), '(device_id)\n', (10361, 10372), False, 'from numba import cuda\n'), ((10481, 10494), 'numba.cuda.stream', 'cuda.stream', ([], {}), '()\n', (10492, 10494), False, 'from numba import cuda\n'), ((14305, 14353), 'numpy.zeros', 'np.zeros', (['(n_boxes, k_qboxes)'], {'dtype': 'boxes.dtype'}), '((n_boxes, k_qboxes), dtype=boxes.dtype)\n', (14313, 14353), True, 'import numpy as np\n'), ((17794, 17814), 'numpy.zeros', 'np.zeros', (['(gt_size,)'], {}), '((gt_size,))\n', (17802, 17814), True, 'import numpy as np\n'), ((17846, 17866), 'numpy.zeros', 'np.zeros', (['(gt_size,)'], {}), '((gt_size,))\n', (17854, 17866), True, 'import numpy as np\n'), ((27723, 27753), 'numpy.stack', 'np.stack', (['total_dc_num'], {'axis': '(0)'}), '(total_dc_num, axis=0)\n', (27731, 27753), True, 'import numpy as np\n'), ((29490, 29557), 'numpy.zeros', 'np.zeros', (['[num_class, num_difficulty, num_minoverlap, n_sample_pts]'], {}), '([num_class, num_difficulty, num_minoverlap, n_sample_pts])\n', (29498, 29557), True, 'import numpy as np\n'), ((29571, 29638), 'numpy.zeros', 'np.zeros', (['[num_class, num_difficulty, num_minoverlap, n_sample_pts]'], {}), '([num_class, num_difficulty, num_minoverlap, n_sample_pts])\n', (29579, 29638), True, 'import numpy as np\n'), ((29649, 29716), 'numpy.zeros', 'np.zeros', (['[num_class, num_difficulty, num_minoverlap, n_sample_pts]'], {}), '([num_class, num_difficulty, num_minoverlap, n_sample_pts])\n', (29657, 29716), True, 'import numpy as np\n'), ((34177, 34218), 'numpy.zeros', 'np.zeros', (['[10, *overlap_ranges.shape[1:]]'], {}), '([10, *overlap_ranges.shape[1:]])\n', (34185, 34218), True, 'import numpy as np\n'), ((35249, 35392), 'numpy.array', 'np.array', (['[[[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 
0.7, \n 0.7, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]]]'], {}), '([[[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7], [0.7, 0.5, 0.5, 0.7, \n 0.5, 0.7, 0.7, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]]])\n', (35257, 35392), True, 'import numpy as np\n'), ((1676, 1719), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), dtype=numba_float32)\n', (1692, 1719), False, 'from numba import cuda\n'), ((1946, 1989), 'numba.cuda.local.array', 'cuda.local.array', (['(2,)'], {'dtype': 'numba_float32'}), '((2,), dtype=numba_float32)\n', (1962, 1989), False, 'from numba import cuda\n'), ((2003, 2047), 'numba.cuda.local.array', 'cuda.local.array', (['(16,)'], {'dtype': 'numba_float32'}), '((16,), dtype=numba_float32)\n', (2019, 2047), False, 'from numba import cuda\n'), ((27374, 27453), 'numpy.concatenate', 'np.concatenate', (["[gt_annos[i]['bbox'], gt_annos[i]['alpha'][..., np.newaxis]]", '(1)'], {}), "([gt_annos[i]['bbox'], gt_annos[i]['alpha'][..., np.newaxis]], 1)\n", (27388, 27453), True, 'import numpy as np\n'), ((27473, 27595), 'numpy.concatenate', 'np.concatenate', (["[dt_annos[i]['bbox'], dt_annos[i]['alpha'][..., np.newaxis], dt_annos[i][\n 'score'][..., np.newaxis]]", '(1)'], {}), "([dt_annos[i]['bbox'], dt_annos[i]['alpha'][..., np.newaxis],\n dt_annos[i]['score'][..., np.newaxis]], 1)\n", (27487, 27595), True, 'import numpy as np\n'), ((34956, 34972), 'io.StringIO', 'sysio.StringIO', ([], {}), '()\n', (34970, 34972), True, 'import io as sysio\n'), ((2198, 2234), 'math.sqrt', 'math.sqrt', (['(v[0] * v[0] + v[1] * v[1])'], {}), '(v[0] * v[0] + v[1] * v[1])\n', (2207, 2234), False, 'import math\n'), ((20438, 20465), 'numpy.zeros', 'np.zeros', (['(fp + delta_idx,)'], {}), '((fp + delta_idx,))\n', (20446, 20465), True, 'import numpy as np\n'), ((23146, 23199), 'numpy.concatenate', 'np.concatenate', (["[a['bbox'] for a in gt_annos_part]", '(0)'], {}), "([a['bbox'] for a in gt_annos_part], 0)\n", (23160, 23199), True, 'import 
numpy as np\n'), ((23223, 23276), 'numpy.concatenate', 'np.concatenate', (["[a['bbox'] for a in dt_annos_part]", '(0)'], {}), "([a['bbox'] for a in dt_annos_part], 0)\n", (23237, 23276), True, 'import numpy as np\n'), ((26937, 26973), 'numpy.array', 'np.array', (['ignored_gt'], {'dtype': 'np.int64'}), '(ignored_gt, dtype=np.int64)\n', (26945, 26973), True, 'import numpy as np\n'), ((27003, 27040), 'numpy.array', 'np.array', (['ignored_det'], {'dtype': 'np.int64'}), '(ignored_det, dtype=np.int64)\n', (27011, 27040), True, 'import numpy as np\n'), ((38947, 38979), 'numpy.array', 'np.array', (['class_to_range[curcls]'], {}), '(class_to_range[curcls])\n', (38955, 38979), True, 'import numpy as np\n'), ((39606, 39638), 'numpy.array', 'np.array', (['class_to_range[curcls]'], {}), '(class_to_range[curcls])\n', (39614, 39638), True, 'import numpy as np\n'), ((20628, 20639), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (20634, 20639), True, 'import numpy as np\n'), ((23386, 23454), 'numpy.concatenate', 'np.concatenate', (["[a['location'][:, [0, 2]] for a in gt_annos_part]", '(0)'], {}), "([a['location'][:, [0, 2]] for a in gt_annos_part], 0)\n", (23400, 23454), True, 'import numpy as np\n'), ((23474, 23544), 'numpy.concatenate', 'np.concatenate', (["[a['dimensions'][:, [0, 2]] for a in gt_annos_part]", '(0)'], {}), "([a['dimensions'][:, [0, 2]] for a in gt_annos_part], 0)\n", (23488, 23544), True, 'import numpy as np\n'), ((23564, 23623), 'numpy.concatenate', 'np.concatenate', (["[a['rotation_y'] for a in gt_annos_part]", '(0)'], {}), "([a['rotation_y'] for a in gt_annos_part], 0)\n", (23578, 23623), True, 'import numpy as np\n'), ((23647, 23705), 'numpy.concatenate', 'np.concatenate', (['[loc, dims, rots[..., np.newaxis]]'], {'axis': '(1)'}), '([loc, dims, rots[..., np.newaxis]], axis=1)\n', (23661, 23705), True, 'import numpy as np\n'), ((23725, 23793), 'numpy.concatenate', 'np.concatenate', (["[a['location'][:, [0, 2]] for a in dt_annos_part]", '(0)'], {}), 
"([a['location'][:, [0, 2]] for a in dt_annos_part], 0)\n", (23739, 23793), True, 'import numpy as np\n'), ((23813, 23883), 'numpy.concatenate', 'np.concatenate', (["[a['dimensions'][:, [0, 2]] for a in dt_annos_part]", '(0)'], {}), "([a['dimensions'][:, [0, 2]] for a in dt_annos_part], 0)\n", (23827, 23883), True, 'import numpy as np\n'), ((23903, 23962), 'numpy.concatenate', 'np.concatenate', (["[a['rotation_y'] for a in dt_annos_part]", '(0)'], {}), "([a['rotation_y'] for a in dt_annos_part], 0)\n", (23917, 23962), True, 'import numpy as np\n'), ((23986, 24044), 'numpy.concatenate', 'np.concatenate', (['[loc, dims, rots[..., np.newaxis]]'], {'axis': '(1)'}), '([loc, dims, rots[..., np.newaxis]], axis=1)\n', (24000, 24044), True, 'import numpy as np\n'), ((30688, 30709), 'numpy.array', 'np.array', (['thresholdss'], {}), '(thresholdss)\n', (30696, 30709), True, 'import numpy as np\n'), ((30816, 30836), 'numpy.array', 'np.array', (['thresholds'], {}), '(thresholds)\n', (30824, 30836), True, 'import numpy as np\n'), ((24172, 24229), 'numpy.concatenate', 'np.concatenate', (["[a['location'] for a in gt_annos_part]", '(0)'], {}), "([a['location'] for a in gt_annos_part], 0)\n", (24186, 24229), True, 'import numpy as np\n'), ((24249, 24308), 'numpy.concatenate', 'np.concatenate', (["[a['dimensions'] for a in gt_annos_part]", '(0)'], {}), "([a['dimensions'] for a in gt_annos_part], 0)\n", (24263, 24308), True, 'import numpy as np\n'), ((24328, 24387), 'numpy.concatenate', 'np.concatenate', (["[a['rotation_y'] for a in gt_annos_part]", '(0)'], {}), "([a['rotation_y'] for a in gt_annos_part], 0)\n", (24342, 24387), True, 'import numpy as np\n'), ((24411, 24469), 'numpy.concatenate', 'np.concatenate', (['[loc, dims, rots[..., np.newaxis]]'], {'axis': '(1)'}), '([loc, dims, rots[..., np.newaxis]], axis=1)\n', (24425, 24469), True, 'import numpy as np\n'), ((24489, 24546), 'numpy.concatenate', 'np.concatenate', (["[a['location'] for a in dt_annos_part]", '(0)'], {}), 
"([a['location'] for a in dt_annos_part], 0)\n", (24503, 24546), True, 'import numpy as np\n'), ((24566, 24625), 'numpy.concatenate', 'np.concatenate', (["[a['dimensions'] for a in dt_annos_part]", '(0)'], {}), "([a['dimensions'] for a in dt_annos_part], 0)\n", (24580, 24625), True, 'import numpy as np\n'), ((24645, 24704), 'numpy.concatenate', 'np.concatenate', (["[a['rotation_y'] for a in dt_annos_part]", '(0)'], {}), "([a['rotation_y'] for a in dt_annos_part], 0)\n", (24659, 24704), True, 'import numpy as np\n'), ((24728, 24786), 'numpy.concatenate', 'np.concatenate', (['[loc, dims, rots[..., np.newaxis]]'], {'axis': '(1)'}), '([loc, dims, rots[..., np.newaxis]], axis=1)\n', (24742, 24786), True, 'import numpy as np\n'), ((27053, 27072), 'numpy.array', 'np.array', (['dc_bboxes'], {}), '(dc_bboxes)\n', (27061, 27072), True, 'import numpy as np\n'), ((27112, 27128), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (27120, 27128), True, 'import numpy as np\n'), ((27186, 27208), 'numpy.stack', 'np.stack', (['dc_bboxes', '(0)'], {}), '(dc_bboxes, 0)\n', (27194, 27208), True, 'import numpy as np\n'), ((31008, 31060), 'numpy.concatenate', 'np.concatenate', (['gt_datas_list[idx:idx + num_part]', '(0)'], {}), '(gt_datas_list[idx:idx + num_part], 0)\n', (31022, 31060), True, 'import numpy as np\n'), ((31097, 31149), 'numpy.concatenate', 'np.concatenate', (['dt_datas_list[idx:idx + num_part]', '(0)'], {}), '(dt_datas_list[idx:idx + num_part], 0)\n', (31111, 31149), True, 'import numpy as np\n'), ((31186, 31234), 'numpy.concatenate', 'np.concatenate', (['dontcares[idx:idx + num_part]', '(0)'], {}), '(dontcares[idx:idx + num_part], 0)\n', (31200, 31234), True, 'import numpy as np\n'), ((31275, 31326), 'numpy.concatenate', 'np.concatenate', (['ignored_dets[idx:idx + num_part]', '(0)'], {}), '(ignored_dets[idx:idx + num_part], 0)\n', (31289, 31326), True, 'import numpy as np\n'), ((31366, 31416), 'numpy.concatenate', 'np.concatenate', (['ignored_gts[idx:idx + 
num_part]', '(0)'], {}), '(ignored_gts[idx:idx + num_part], 0)\n', (31380, 31416), True, 'import numpy as np\n'), ((32549, 32588), 'numpy.max', 'np.max', (['precision[m, l, k, i:]'], {'axis': '(-1)'}), '(precision[m, l, k, i:], axis=-1)\n', (32555, 32588), True, 'import numpy as np\n'), ((32630, 32666), 'numpy.max', 'np.max', (['recall[m, l, k, i:]'], {'axis': '(-1)'}), '(recall[m, l, k, i:], axis=-1)\n', (32636, 32666), True, 'import numpy as np\n'), ((20542, 20558), 'numpy.cos', 'np.cos', (['delta[i]'], {}), '(delta[i])\n', (20548, 20558), True, 'import numpy as np\n'), ((32745, 32778), 'numpy.max', 'np.max', (['aos[m, l, k, i:]'], {'axis': '(-1)'}), '(aos[m, l, k, i:], axis=-1)\n', (32751, 32778), True, 'import numpy as np\n')] |
import numpy as np
from enum import Enum, auto
class SolverState(Enum):
    """Phases of the SimplexSolver state machine, in execution order.

    Values match the original ``auto()`` numbering (1..5) so any code
    comparing or serializing ``.value`` behaves identically.
    """

    PRESOLVE = 1       # build basis, detect whether phase 1 is needed
    PHASE1_STEP = 2    # iterate phase-1 simplex (artificial objective)
    DRIVEOUT_STEP = 3  # remove artificial variables left in the basis
    PHASE2_STEP = 4    # iterate standard / phase-2 simplex
    DONE = 5           # terminal state (optimum, unbounded, or infeasible)
class SimplexSolver:
    """Linear-program solver using the (two-phase) Simplex method.

    The tableau ``__S`` is a ``numpy.matrix``: row 0 is the objective row and
    column 0 holds the right-hand-side bounds.  ``__vars`` maps variable names
    to tableau column indices; variables entered via :meth:`addConstraint` are
    split into positive (``name``) and negative (``*neg*name``) parts so that
    free variables fit the non-negativity assumption.  Solving is driven as a
    state machine over ``SolverState`` so iterations can be stepped externally.
    """
    def __stripReturnChar(self, str):
        # Drop a single trailing newline, if present.
        # NOTE(review): the parameter name shadows the builtin ``str``.
        if str[-1] == '\n':
            return str[:-1]
        return str
    '''
    Reads MPS file to solve LP problem. Currently does not support RANGES and BOUNDS
    mpsName - file location string of MPS file
    '''
    def readMps(self, mpsName):
        # NOTE(review): assumes a UTF-16 encoded MPS file -- unusual for MPS;
        # confirm against the files this is actually fed.
        with open(mpsName, encoding='utf16') as f:
            mode = ""          # current MPS section name (ROWS/COLUMNS/RHS/...)
            rowMap = {}        # row name -> tableau row index
            rowCount = 1
            varCount = 1
            objectiveRow = ""  # name of the (single) objective row
            for line in f:
                if len(line) == 0:
                    continue
                # Section headers start at column 0; data lines are indented.
                if line[0] != ' ':
                    mode = line.strip().upper()
                    continue
                if mode == "ROWS":
                    boundType = line[0:4].upper()
                    rowName = self.__stripReturnChar(line[4:].upper()).strip()
                    if boundType != " N ":
                        # Constraint row: append a tableau row, and for
                        # inequalities also a slack/surplus column.
                        self.__S = np.insert(self.__S, self.__S.shape[0], 0, axis=0)
                        rowMap[rowName] = rowCount
                        rowCount += 1
                        lessThan = boundType == " L "
                        moreThan = boundType == " G "
                        if lessThan:
                            slackVarName = "*slack-{}*".format(self.__slackVars)
                            slackVar = 1.0
                        elif moreThan:
                            slackVarName = "*surplus-{}*".format(self.__slackVars)
                            slackVar = -1.0
                        # NOTE(review): for an equality (" E ") row neither
                        # branch above runs, so slackVar/slackVarName stay
                        # unbound -- harmless only because the guard below
                        # skips them in that case.
                        if lessThan or moreThan:
                            varLen = len(self.__vars) + 1
                            self.__vars[slackVarName] = varLen
                            self.__S = np.insert(self.__S, self.__S.shape[1], 0, axis=1)
                            self.__S[self.__S.shape[0] - 1, self.__S.shape[1] - 1] = slackVar
                            self.__slackVars += 1
                    else:
                        # Free (" N ") row: the first one becomes the objective.
                        if objectiveRow != "":
                            continue
                        objectiveRow = rowName
                        rowMap[rowName] = 0
                elif mode == "COLUMNS":
                    # Fixed-width fields: variable name, then up to two
                    # (row name, value) pairs per line.
                    varName = line[4:12].upper().strip()
                    if varName not in self.__vars:
                        self.__vars[varName] = varCount
                        self.__S = np.insert(self.__S, varCount, 0, axis=1)
                        varCount += 1
                    rowName1 = line[12:22].upper().strip()
                    value1 = float(self.__stripReturnChar(line[22:min(36,len(line))].strip()))
                    self.__S[rowMap[rowName1], self.__vars[varName]] = value1
                    if len(line) >= 47:
                        rowName2 = line[36:47].upper().strip()
                        value2 = float(self.__stripReturnChar(line[47:].strip()))
                        self.__S[rowMap[rowName2], self.__vars[varName]] = value2
                elif mode == "RHS":
                    # Right-hand sides go into tableau column 0.
                    rowName1 = line[12:22].upper().strip()
                    value1 = float(self.__stripReturnChar(line[22:min(36,len(line))].strip()))
                    self.__S[rowMap[rowName1], 0] = value1
                    if len(line) >= 47:
                        rowName2 = line[36:47].upper().strip()
                        value2 = float(self.__stripReturnChar(line[47:].strip()))
                        self.__S[rowMap[rowName2], 0] = value2
                else:
                    # RANGES, BOUNDS, etc. are not implemented.
                    print("{} not supported".format(mode))
                    break
        self.status = "not started"
    '''Split term to coefficient and var'''
    def __splitToCoeffAndVar(self, x):
        # Split a term like "2.0*x1" into ['2.0', 'x1'].  A bare variable
        # gets an implicit coefficient of '1' (or '-1'); an empty term maps
        # to the sentinel '*null*' so the caller can filter it out.
        split = x.split('*')
        if len(split) == 1:
            x = x.strip()
            if len(x) == 0:
                return ['0', '*null*']
            if x[0] == '-':
                return ['-1', x[1:].strip()]
            return ['1', x]
        elif len(split) == 2:
            if split[0][0] == '-':
                split[0] = '-' + split[0][1:].strip()
            return [split[0].strip(), split[1].strip()]
        else:
            raise ValueError('Illegal amount of multiplication characters')
    '''Add a row to table from string, be it objective row or constraint row'''
    def __setRow(self, str, objective=False):
        # NOTE(review): the parameter name shadows the builtin ``str``.
        #If not objective row, determine equality/inequality sign and bound
        if not objective:
            pair = str.split('=')
            if len(pair) != 2:
                raise ValueError("Malformed constraint expression, should have equality character (=)")
            str = pair[0]
            bound = float(pair[1])
            # '<=' / '>=' leave '<' or '>' as the last char before '='.
            lessThan = str[-1] == '<'
            moreThan = str[-1] == '>'
            #Remove inequality sign
            if lessThan or moreThan:
                str = str[:-1].strip()
        #Separate linear expression into terms and split coefficients and variables
        # "a - b" is rewritten as "a + -b" so splitting on '+' keeps signs.
        coeffAndVar = list( \
            map( \
                lambda x: [float(x[0]), x[1]], \
                map(self.__splitToCoeffAndVar, str.replace("-", "+-").split('+')) \
            ) \
        )
        coeffAndVar = list(filter(lambda x: x[1] != '*null*', coeffAndVar))
        #For one variable constraints, prune infeasible variables
        if not objective:
            varDuplicationRequired = True
            if len(coeffAndVar) == 1:
                posCoeff = coeffAndVar[0][0] >= 0.0
                posBound = bound >= 0.0
                if posCoeff == posBound and posCoeff == moreThan:
                    # Sign constraint forces the negative part to zero.
                    varDuplicationRequired = False
                    if "*neg*" + coeffAndVar[0][1] in self.__vars:
                        self.__varsToDelete.append(self.__vars["*neg*" + coeffAndVar[0][1]])
                elif posBound == moreThan:
                    # Sign constraint forces the positive part to zero.
                    varDuplicationRequired = False
                    if coeffAndVar[0][1] in self.__vars:
                        self.__varsToDelete.append(self.__vars[coeffAndVar[0][1]])
            #Simplex table assumes variables are non-negative, so such constraints can be skipped
            if not varDuplicationRequired and bound == 0.0:
                return
        #Split variable to positive and negative variables, add column if not already in table
        for x in coeffAndVar:
            if x[1] not in self.__vars:
                varLen = len(self.__vars) + 2
                self.__vars[x[1]] = varLen - 1
                self.__vars["*neg*" + x[1]] = varLen
                self.__S = np.insert(self.__S, self.__S.shape[1], 0, axis=1)
                self.__S = np.insert(self.__S, self.__S.shape[1], 0, axis=1)
        #Add slack/surplus variable
        if not objective:
            if lessThan:
                slackVarName = "*slack-{}*".format(self.__slackVars)
            elif moreThan:
                slackVarName = "*surplus-{}*".format(self.__slackVars)
            if lessThan or moreThan:
                varLen = len(self.__vars) + 1
                self.__vars[slackVarName] = varLen
                self.__S = np.insert(self.__S, self.__S.shape[1], 0, axis=1)
        # Build the new tableau row: bound in column 0, x = pos - neg split.
        row = [0] * (len(self.__vars) + 1)
        row[0] = bound if not objective else 0.0
        for x in coeffAndVar:
            row[self.__vars[x[1]]] = x[0]
            row[self.__vars["*neg*" + x[1]]] = -x[0]
        if not objective:
            if lessThan:
                row[self.__vars[slackVarName]] = 1.0
            if moreThan:
                row[self.__vars[slackVarName]] = -1.0
        #If row is objective, replace first row. Otherwise add new row
        if objective:
            self.__S[0,:] = np.array(row)
        else:
            self.__S = np.insert(self.__S, self.__S.shape[0], row, axis=0)
        if not objective and (lessThan or moreThan):
            self.__slackVars += 1
    '''
    Core execution of Simplex algorithm.
    Set twoPhase to True to execute first phase of two-phase Simplex
    Returns True if simplex loop halts succesfully, False if more iterations needed
    or LP is unbounded, in which case isDone is set to True.
    '''
    def __coreSimplex(self, twoPhase=False):
        # Ratio-test helper: entries with a ~zero denominator map to +inf so
        # they can never be selected as the pivot row.
        # NOTE(review): the literal 10e-6 is 1e-5 -- confirm intended tolerance.
        divide = np.vectorize(lambda a,b: a/b if b > 10e-6 else np.inf)
        objRow = 1 if twoPhase else 0
        # Optimality test: all reduced costs in the objective row non-negative.
        negRow0 = self.__S[objRow,1:] >= 0.0
        if np.all(negRow0):
            self.status = "optimum"
            return True
        #TODO: different strategies for choosing pivot column
        # Bland-style choice: first column with a negative reduced cost.
        pivotColIdx = 1 + np.where(negRow0 == False)[1][0]
        #End TODO
        pivotCol = self.__S[:, pivotColIdx][:,0]
        #If all pivot column has non-positive entries, problem is unbounded
        if np.all(pivotCol[objRow+1:] <= 0):
            self.status = "unbounded"
            self.__state = SolverState.DONE
            self.isDone = True
            return False
        # Minimum-ratio test selects the leaving row, then a standard
        # Gauss-Jordan pivot normalizes it and eliminates the column elsewhere.
        pivotRowIdx = objRow + 1 + np.argmin(divide(self.__S[objRow+1:,0], pivotCol[objRow+1:]))
        self.__basis[pivotRowIdx-objRow-1] = pivotColIdx-1
        self.__S[pivotRowIdx] = self.__S[pivotRowIdx] / pivotCol[pivotRowIdx]
        for i in range(self.__S.shape[0]):
            if i == pivotRowIdx:
                continue
            self.__S[i] -= pivotCol[i]*self.__S[pivotRowIdx]
        return False
    '''Clear SimplexSolver object'''
    def clear(self):
        # Reset all solver state; mirrors the attribute set built in __init__
        # (except __stateMethods, which is bound once and never changes).
        self.__S = np.matrix([0.0])
        self.__vars = {}
        self.__invVars = []
        self.__slackVars = 0
        self.__varsToDelete = []
        self.__basis = np.array([])
        self.__missingBasis = 0
        self.__state = SolverState.PRESOLVE
        self.opt = np.inf
        self.optVars = {}
        self.status = "objective not set"
        self.isDone = False
    '''
    Define objective for LP program using a string.
    Statement should be separated to terms via plus or minus signs,
    each term should have floating number as coefficient on the left side
    and a variable name on the right side, joined by multiplication sign.
    Example: 2.0*x1 + 3.6e10*x2 - x3 + x4 - 7.8*x5
    '''
    def setObjective(self, str):
        self.__setRow(str, objective=True)
        self.status = "not started"
    '''
    Add a constraint to LP program using a string.
    Statement should be separated to terms via plus or minus signs,
    each term should have floating number as coefficient on the left side
    and a variable name on the right side, joined by multiplication sign.
    Constraint should end with ( <= | = | => ) and a single floating number.
    Example: 2.0*x1 + 3.6e10*x2 - x3 + x4 - 7.8*x5 <= 6.6
    '''
    def addConstraint(self, str):
        self.__setRow(str, objective=False)
    '''Before starting Simplex algorithm, subtract basis rows from the objective row'''
    def __subtractBasisFromObjectiveRow(self, twoPhase=False):
        # Eliminate each basic variable's column from the objective row so
        # reduced costs are expressed over the current basis.
        objRow = 1 if twoPhase else 0
        for idx,col in enumerate(self.__basis):
            self.__S[objRow,:] = self.__S[objRow,:] - self.__S[objRow,col+1]*self.__S[objRow+idx+1,:]
    '''Calculation up to core simplex execution'''
    def __presolveStep(self):
        self.status = "preprocessing"
        #Flip signs of a row if rhs is negative
        negativeRows = 1 + np.where(self.__S[1:,0] < 0)[0]
        self.__S[negativeRows,:] = -1.0 * self.__S[negativeRows,:]
        #Delete unneeded variables and their corresponding columns
        self.__S = np.delete(self.__S, self.__varsToDelete, 1)
        #Invert vars dictionary
        self.__invVars = [""] * (len(self.__vars) + 1)
        for var, idx in self.__vars.items():
            self.__invVars[idx] = var
        self.__invVars = np.array(self.__invVars)
        self.__invVars = np.delete(self.__invVars, self.__varsToDelete, 0)
        #Simplex table without objective row and boundary column
        SnoZero = self.__S[1:,1:]
        colSize = SnoZero.shape[0]
        #Detect unit columns as basis
        # For each row i, look for a column equal to the i-th unit vector;
        # rows without one are marked -1 and need artificial variables.
        unit = np.zeros(colSize)
        self.__basis = np.full(colSize, -1)
        for i in range(colSize):
            unit[i] = 1.0
            for j in range(SnoZero.shape[1]):
                if np.all(np.transpose(SnoZero[:,j]) == unit):
                    self.__basis[i] = j
                    break
            unit[i] = 0
        self.__missingBasis = np.count_nonzero(self.__basis < 0)
        #Couldn't find whole basis, do two phase Simplex
        if self.__missingBasis > 0:
            # Insert the phase-1 objective row (row 1) and one artificial
            # unit column per basis-less row, shifting existing basis indices.
            self.__S = np.insert(self.__S, 1, 0, axis=0)
            unit = np.zeros(self.__S.shape[0])
            unit[1] = 1
            j = 0
            for idx,col in enumerate(self.__basis):
                if col >= 0:
                    self.__basis[idx] += self.__missingBasis
                    continue
                unit[idx+2] = 1
                self.__S = np.insert(self.__S, j+1, unit, axis=1)
                self.__basis[idx] = j
                unit[idx+2] = 0
                j += 1
            self.__subtractBasisFromObjectiveRow(True)
            self.__state = SolverState.PHASE1_STEP
            self.status = "calculating phase 1 Simplex"
        else:
            self.__subtractBasisFromObjectiveRow()
            self.__state = SolverState.PHASE2_STEP
            self.status = "calculating"
    '''Phase 1 Simplex in two-phase Simplex execution'''
    def __phase1Step(self):
        if self.__coreSimplex(True):
            #Feasible set empty, end solving
            # Phase-1 optimum > 0 means the artificials cannot be driven to
            # zero, i.e. the original LP has no feasible point.
            if self.__S[1,0] > 10e-6:
                self.__state = SolverState.DONE
                self.status = "feasible set empty"
                self.isDone = True
            else:
                self.__state = SolverState.DRIVEOUT_STEP
                self.status = "driving out artificial variables"
    '''
    Iteration of driving out artificial variables in case after
    phase 1 there are still artificial variables in basis
    '''
    def __driveoutStep(self):
        if np.all(self.__basis >= self.__missingBasis):
            # No artificials left in the basis: drop the phase-1 objective
            # row and artificial columns, then switch to phase 2.
            self.__S = np.delete(self.__S, 1, 0)
            self.__S = np.delete(self.__S, range(1, 1+self.__missingBasis), 1)
            self.__basis -= self.__missingBasis
            self.__subtractBasisFromObjectiveRow()
            self.__state = SolverState.PHASE2_STEP
            self.status = "calculating phase 2 Simplex"
        else:
            #Artificial variable in basis, need to drive it out
            # One artificial is pivoted out per call; the state machine
            # re-enters this step until none remain.
            artificialsInBasis = 2 + np.where(self.__basis < self.__missingBasis)[0]
            for pivotRowIdx in artificialsInBasis:
                nonzeroEntires = np.where(self.__S[pivotRowIdx,(1+self.__missingBasis):] != 0)[1]
                if len(nonzeroEntires) == 0:
                    continue
                # NOTE(review): if every nonzero entry is already basic,
                # pivotColIdx stays unbound and the next line raises
                # NameError -- confirm this case cannot occur here.
                for x in nonzeroEntires:
                    if x not in self.__basis:
                        pivotColIdx = 1 + self.__missingBasis + x
                        break
                self.__basis[pivotRowIdx-2] = pivotColIdx-1
                self.__S[pivotRowIdx] = self.__S[pivotRowIdx] / self.__S[pivotRowIdx, pivotColIdx]
                for i in range(self.__S.shape[0]):
                    if i == pivotRowIdx:
                        continue
                    self.__S[i] -= self.__S[i, pivotColIdx]*self.__S[pivotRowIdx]
                break
    '''Standard Simplex or phase 2 Simplex for two-phase Simplex execution. Gathers results on succesful execution'''
    def __phase2Step(self):
        if self.__coreSimplex():
            #Gather results
            resultVector = self.__S[:,0]
            self.opt = -resultVector[0,0]
            self.optVars = {}
            for idx, var in enumerate(self.__basis):
                self.optVars[self.__invVars[var+1]] = resultVector[idx+1,0]
            # Non-basic variables take value 0 in the optimal solution.
            for var in self.__vars:
                if var not in self.optVars:
                    self.optVars[var] = 0.0
            self.__state = SolverState.DONE
            self.status = "done"
            self.isDone = True
    '''Dummy method in case iteration called on solver that is done'''
    def __doneStep(self):
        pass
    def __init__(self):
        self.__S = np.matrix([0.0]) #Simplex table
        self.__vars = {} #Dictionary of variable names to table column index
        self.__invVars = [] #Reverse of vars
        self.__slackVars = 0 #Counter of slack/surplus variables
        self.__varsToDelete = [] #Variables with infeasible constraints to delete
        self.__basis = np.array([]) #Basis of current solution as array of variable indices
        self.__missingBasis = 0 #Amount of rows not assigned to basis
        self.__state = SolverState.PRESOLVE #State to enable external loop
        self.__stateMethods = { \
            SolverState.PRESOLVE: self.__presolveStep, \
            SolverState.PHASE1_STEP: self.__phase1Step, \
            SolverState.DRIVEOUT_STEP: self.__driveoutStep, \
            SolverState.PHASE2_STEP: self.__phase2Step, \
            SolverState.DONE: self.__doneStep \
        }
        self.opt = np.inf #Optimum value after solving LP
        self.optVars = {} #Variable substitutions making up optimum value
        self.status = "objective not set" #Human readable status
        self.isDone = False #Solver has ended its execution
    '''One step of solving process, could be used to analyze solving process from outside this class'''
    def solveStep(self):
        self.__stateMethods[self.__state]()
    '''Solve call'''
    def solve(self):
        # Run the state machine until a terminal state is reached.
        while self.__state is not SolverState.DONE:
            self.solveStep()
| [
"numpy.insert",
"numpy.transpose",
"enum.auto",
"numpy.where",
"numpy.delete",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.full",
"numpy.all",
"numpy.matrix",
"numpy.vectorize"
] | [((88, 94), 'enum.auto', 'auto', ([], {}), '()\n', (92, 94), False, 'from enum import Enum, auto\n'), ((113, 119), 'enum.auto', 'auto', ([], {}), '()\n', (117, 119), False, 'from enum import Enum, auto\n'), ((140, 146), 'enum.auto', 'auto', ([], {}), '()\n', (144, 146), False, 'from enum import Enum, auto\n'), ((165, 171), 'enum.auto', 'auto', ([], {}), '()\n', (169, 171), False, 'from enum import Enum, auto\n'), ((183, 189), 'enum.auto', 'auto', ([], {}), '()\n', (187, 189), False, 'from enum import Enum, auto\n'), ((7407, 7464), 'numpy.vectorize', 'np.vectorize', (['(lambda a, b: a / b if b > 1e-05 else np.inf)'], {}), '(lambda a, b: a / b if b > 1e-05 else np.inf)\n', (7419, 7464), True, 'import numpy as np\n'), ((7550, 7565), 'numpy.all', 'np.all', (['negRow0'], {}), '(negRow0)\n', (7556, 7565), True, 'import numpy as np\n'), ((7882, 7916), 'numpy.all', 'np.all', (['(pivotCol[objRow + 1:] <= 0)'], {}), '(pivotCol[objRow + 1:] <= 0)\n', (7888, 7916), True, 'import numpy as np\n'), ((8511, 8527), 'numpy.matrix', 'np.matrix', (['[0.0]'], {}), '([0.0])\n', (8520, 8527), True, 'import numpy as np\n'), ((8656, 8668), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8664, 8668), True, 'import numpy as np\n'), ((10511, 10554), 'numpy.delete', 'np.delete', (['self.__S', 'self.__varsToDelete', '(1)'], {}), '(self.__S, self.__varsToDelete, 1)\n', (10520, 10554), True, 'import numpy as np\n'), ((10738, 10762), 'numpy.array', 'np.array', (['self.__invVars'], {}), '(self.__invVars)\n', (10746, 10762), True, 'import numpy as np\n'), ((10786, 10835), 'numpy.delete', 'np.delete', (['self.__invVars', 'self.__varsToDelete', '(0)'], {}), '(self.__invVars, self.__varsToDelete, 0)\n', (10795, 10835), True, 'import numpy as np\n'), ((11014, 11031), 'numpy.zeros', 'np.zeros', (['colSize'], {}), '(colSize)\n', (11022, 11031), True, 'import numpy as np\n'), ((11053, 11073), 'numpy.full', 'np.full', (['colSize', '(-1)'], {}), '(colSize, -1)\n', (11060, 11073), True, 'import numpy 
as np\n'), ((11324, 11358), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.__basis < 0)'], {}), '(self.__basis < 0)\n', (11340, 11358), True, 'import numpy as np\n'), ((12790, 12833), 'numpy.all', 'np.all', (['(self.__basis >= self.__missingBasis)'], {}), '(self.__basis >= self.__missingBasis)\n', (12796, 12833), True, 'import numpy as np\n'), ((14759, 14775), 'numpy.matrix', 'np.matrix', (['[0.0]'], {}), '([0.0])\n', (14768, 14775), True, 'import numpy as np\n'), ((15142, 15154), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15150, 15154), True, 'import numpy as np\n'), ((6895, 6908), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (6903, 6908), True, 'import numpy as np\n'), ((6940, 6991), 'numpy.insert', 'np.insert', (['self.__S', 'self.__S.shape[0]', 'row'], {'axis': '(0)'}), '(self.__S, self.__S.shape[0], row, axis=0)\n', (6949, 6991), True, 'import numpy as np\n'), ((11467, 11500), 'numpy.insert', 'np.insert', (['self.__S', '(1)', '(0)'], {'axis': '(0)'}), '(self.__S, 1, 0, axis=0)\n', (11476, 11500), True, 'import numpy as np\n'), ((11516, 11543), 'numpy.zeros', 'np.zeros', (['self.__S.shape[0]'], {}), '(self.__S.shape[0])\n', (11524, 11543), True, 'import numpy as np\n'), ((12854, 12879), 'numpy.delete', 'np.delete', (['self.__S', '(1)', '(0)'], {}), '(self.__S, 1, 0)\n', (12863, 12879), True, 'import numpy as np\n'), ((5872, 5921), 'numpy.insert', 'np.insert', (['self.__S', 'self.__S.shape[1]', '(0)'], {'axis': '(1)'}), '(self.__S, self.__S.shape[1], 0, axis=1)\n', (5881, 5921), True, 'import numpy as np\n'), ((5943, 5992), 'numpy.insert', 'np.insert', (['self.__S', 'self.__S.shape[1]', '(0)'], {'axis': '(1)'}), '(self.__S, self.__S.shape[1], 0, axis=1)\n', (5952, 5992), True, 'import numpy as np\n'), ((6368, 6417), 'numpy.insert', 'np.insert', (['self.__S', 'self.__S.shape[1]', '(0)'], {'axis': '(1)'}), '(self.__S, self.__S.shape[1], 0, axis=1)\n', (6377, 6417), True, 'import numpy as np\n'), ((10331, 10360), 'numpy.where', 'np.where', 
(['(self.__S[1:, 0] < 0)'], {}), '(self.__S[1:, 0] < 0)\n', (10339, 10360), True, 'import numpy as np\n'), ((11770, 11810), 'numpy.insert', 'np.insert', (['self.__S', '(j + 1)', 'unit'], {'axis': '(1)'}), '(self.__S, j + 1, unit, axis=1)\n', (11779, 11810), True, 'import numpy as np\n'), ((7703, 7729), 'numpy.where', 'np.where', (['(negRow0 == False)'], {}), '(negRow0 == False)\n', (7711, 7729), True, 'import numpy as np\n'), ((13250, 13294), 'numpy.where', 'np.where', (['(self.__basis < self.__missingBasis)'], {}), '(self.__basis < self.__missingBasis)\n', (13258, 13294), True, 'import numpy as np\n'), ((13372, 13434), 'numpy.where', 'np.where', (['(self.__S[pivotRowIdx, 1 + self.__missingBasis:] != 0)'], {}), '(self.__S[pivotRowIdx, 1 + self.__missingBasis:] != 0)\n', (13380, 13434), True, 'import numpy as np\n'), ((1024, 1073), 'numpy.insert', 'np.insert', (['self.__S', 'self.__S.shape[0]', '(0)'], {'axis': '(0)'}), '(self.__S, self.__S.shape[0], 0, axis=0)\n', (1033, 1073), True, 'import numpy as np\n'), ((11189, 11216), 'numpy.transpose', 'np.transpose', (['SnoZero[:, j]'], {}), '(SnoZero[:, j])\n', (11201, 11216), True, 'import numpy as np\n'), ((1655, 1704), 'numpy.insert', 'np.insert', (['self.__S', 'self.__S.shape[1]', '(0)'], {'axis': '(1)'}), '(self.__S, self.__S.shape[1], 0, axis=1)\n', (1664, 1704), True, 'import numpy as np\n'), ((2173, 2213), 'numpy.insert', 'np.insert', (['self.__S', 'varCount', '(0)'], {'axis': '(1)'}), '(self.__S, varCount, 0, axis=1)\n', (2182, 2213), True, 'import numpy as np\n')] |
"""Hands-on tour of basic NumPy features: creation, joining, set ops, math, persistence."""
import numpy as np

# --- creating arrays ---
print("\nsingle dimentional array")
n1 = np.array([10, 20, 30, 40, 50])
print(type(n1))
print(n1)

print("\nmulti dimentional array")
n2 = np.array([[10, 20, 30, 40, 50], [1, 2, 3, 4, 5]])
print(type(n2))
print(n2)

print("\ninitializing numpy array as zeroes")
n3 = np.zeros((1, 5))
print(type(n3))
print(n3)
print("Once more...")
n4 = np.zeros((5, 5))
print(type(n4))
print(n4)

# fill every cell with the same value
n5 = np.full((5, 2), '$')
print(type(n5))
print(n5)

# ranged construction
n6 = np.arange(10, 20)
print("\ninitialize numpy array within a range")
print(n6)
n7 = np.arange(10, 50, 5)
print("\ninitialize numpy array within range but with step")
print(n7)
n8 = np.random.randint(low=1, high=100, size=6)
print("\ninitialize numpy array with random numbers")
print(n8)

# --- shapes ---
print("\nchecking the shape of numpy array")
n9 = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
n10 = np.array([[1, 2, 3, 4], [5, 6, 7, 9]])
print(n9.shape)
print(n10.shape)

# --- joining arrays ---
print("\nJoining numpy arrays\n1.vstack\t2.hstack\t3.column_stack")
n11 = np.array([10, 20, 30, 40])
n12 = np.array([90, 80, 70, 60])
print("example of joining using vstack")
nv = np.vstack((n11, n12))
print(nv)
print("example of joining using hstack")
nh = np.hstack((n11, n12))
print(nh)
print("example of joining using column_stack")
nc = np.column_stack((n11, n12))
print(nc)

# --- set operations ---
n13 = np.array([1, 5, 9, 4, 7, 3])
n14 = np.array([15, 1, 5, 9, 8, 2])
print("intersaction")
n15 = np.intersect1d(n13, n14)
print(n15)
print("first_array - second_array ; here - means exclude the elements")
n16 = np.setdiff1d(n13, n14)
print(n16)

# --- element-wise sums across arrays ---
print("\nAddition of numpy arrays - n17,n18")
n17 = np.array([10, 20, 30, 40, 50])
n18 = np.array([5, 7, 9, 11, 13])
sum1 = np.sum([n17, n18])
print(sum1)
sum2 = np.sum([n17, n18], axis=0)  # elements number must be same
print(f"When axis = 0\n {sum2}")
sum3 = np.sum([n17, n18], axis=1)  # elements number must be same
print("When axis = 1\n", sum3)

# --- scalar broadcasting ---
arr = np.array([10, 20, 30, 40, 50])
print("Basic Addition")
arr = arr + 5
print(arr)
print("Basic Subtraction")
arr = arr - 5
print(arr)
print("Basic Multiplication")
arr = arr * 2
print(arr)
print("Basic Division")
arr = arr / 5
print(arr)

# --- basic statistics ---
print("\nNumpy Math Functions")
arr1 = np.mean(arr)
print(f"Median of arr: {arr1}")
arr2 = np.median(arr)
print(f"Median of arr: {arr2}")
arr3 = np.std(arr)
print(f"Standard Deviation of arr : {arr3}")

# --- persistence ---
print("\nSave & Load Numpy Array")
n19 = np.random.randint(low=1, high=100, size=10)
print("Now i want to save this array for performing operation later")
np.save('i_will_use_later', n19)
print("Okay,saved it")
print("Now i want to Load that saved array")
use_it = np.load('i_will_use_later.npy')
print(use_it)
| [
"numpy.intersect1d",
"numpy.mean",
"numpy.median",
"numpy.hstack",
"numpy.column_stack",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"numpy.setdiff1d",
"numpy.vstack",
"numpy.sum",
"numpy.std",
"numpy.full",
"numpy.save",
"numpy.arange",
"numpy.load"
] | [((73, 103), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50]'], {}), '([10, 20, 30, 40, 50])\n', (81, 103), True, 'import numpy as np\n'), ((183, 232), 'numpy.array', 'np.array', (['[[10, 20, 30, 40, 50], [1, 2, 3, 4, 5]]'], {}), '([[10, 20, 30, 40, 50], [1, 2, 3, 4, 5]])\n', (191, 232), True, 'import numpy as np\n'), ((338, 354), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (346, 354), True, 'import numpy as np\n'), ((408, 424), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (416, 424), True, 'import numpy as np\n'), ((502, 522), 'numpy.full', 'np.full', (['(5, 2)', '"""$"""'], {}), "((5, 2), '$')\n", (509, 522), True, 'import numpy as np\n'), ((604, 621), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (613, 621), True, 'import numpy as np\n'), ((710, 730), 'numpy.arange', 'np.arange', (['(10)', '(50)', '(5)'], {}), '(10, 50, 5)\n', (719, 730), True, 'import numpy as np\n'), ((813, 855), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(100)', 'size': '(6)'}), '(low=1, high=100, size=6)\n', (830, 855), True, 'import numpy as np\n'), ((941, 986), 'numpy.array', 'np.array', (['[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]'], {}), '([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])\n', (949, 986), True, 'import numpy as np\n'), ((993, 1031), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 9]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 9]])\n', (1001, 1031), True, 'import numpy as np\n'), ((1152, 1178), 'numpy.array', 'np.array', (['[10, 20, 30, 40]'], {}), '([10, 20, 30, 40])\n', (1160, 1178), True, 'import numpy as np\n'), ((1185, 1211), 'numpy.array', 'np.array', (['[90, 80, 70, 60]'], {}), '([90, 80, 70, 60])\n', (1193, 1211), True, 'import numpy as np\n'), ((1258, 1279), 'numpy.vstack', 'np.vstack', (['(n11, n12)'], {}), '((n11, n12))\n', (1267, 1279), True, 'import numpy as np\n'), ((1336, 1357), 'numpy.hstack', 'np.hstack', (['(n11, n12)'], {}), '((n11, n12))\n', (1345, 1357), True, 'import numpy as np\n'), 
((1420, 1447), 'numpy.column_stack', 'np.column_stack', (['(n11, n12)'], {}), '((n11, n12))\n', (1435, 1447), True, 'import numpy as np\n'), ((1509, 1537), 'numpy.array', 'np.array', (['[1, 5, 9, 4, 7, 3]'], {}), '([1, 5, 9, 4, 7, 3])\n', (1517, 1537), True, 'import numpy as np\n'), ((1546, 1575), 'numpy.array', 'np.array', (['[15, 1, 5, 9, 8, 2]'], {}), '([15, 1, 5, 9, 8, 2])\n', (1554, 1575), True, 'import numpy as np\n'), ((1606, 1630), 'numpy.intersect1d', 'np.intersect1d', (['n13', 'n14'], {}), '(n13, n14)\n', (1620, 1630), True, 'import numpy as np\n'), ((1720, 1742), 'numpy.setdiff1d', 'np.setdiff1d', (['n13', 'n14'], {}), '(n13, n14)\n', (1732, 1742), True, 'import numpy as np\n'), ((1821, 1851), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50]'], {}), '([10, 20, 30, 40, 50])\n', (1829, 1851), True, 'import numpy as np\n'), ((1858, 1885), 'numpy.array', 'np.array', (['[5, 7, 9, 11, 13]'], {}), '([5, 7, 9, 11, 13])\n', (1866, 1885), True, 'import numpy as np\n'), ((1893, 1911), 'numpy.sum', 'np.sum', (['[n17, n18]'], {}), '([n17, n18])\n', (1899, 1911), True, 'import numpy as np\n'), ((1931, 1957), 'numpy.sum', 'np.sum', (['[n17, n18]'], {'axis': '(0)'}), '([n17, n18], axis=0)\n', (1937, 1957), True, 'import numpy as np\n'), ((2030, 2056), 'numpy.sum', 'np.sum', (['[n17, n18]'], {'axis': '(1)'}), '([n17, n18], axis=1)\n', (2036, 2056), True, 'import numpy as np\n'), ((2157, 2187), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50]'], {}), '([10, 20, 30, 40, 50])\n', (2165, 2187), True, 'import numpy as np\n'), ((2447, 2459), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (2454, 2459), True, 'import numpy as np\n'), ((2499, 2513), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (2508, 2513), True, 'import numpy as np\n'), ((2553, 2564), 'numpy.std', 'np.std', (['arr'], {}), '(arr)\n', (2559, 2564), True, 'import numpy as np\n'), ((2666, 2709), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(100)', 'size': '(10)'}), 
'(low=1, high=100, size=10)\n', (2683, 2709), True, 'import numpy as np\n'), ((2778, 2810), 'numpy.save', 'np.save', (['"""i_will_use_later"""', 'n19'], {}), "('i_will_use_later', n19)\n", (2785, 2810), True, 'import numpy as np\n'), ((2887, 2918), 'numpy.load', 'np.load', (['"""i_will_use_later.npy"""'], {}), "('i_will_use_later.npy')\n", (2894, 2918), True, 'import numpy as np\n')] |
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from autoencoder.autoencoder import AE_Upsampling_Sample_TypeA as AE_Upsampling_Sample_TypeA
from autoencoder.autoencoder import AE_Upsampling_Sample_TypeB as AE_Upsampling_Sample_TypeB
from autoencoder.autoencoder import AE_Upsampling_rezise as AE_Upsampling_rezise
import cv2
import time
def main(filenames, autoencoderModel, savePredictionName, saveModelName, num_epochs=50, batch_size=5, filesSize=None):
    """Train one of the upsampling autoencoders and persist its test-set reconstructions.

    Args:
        filenames: Either a list of three .npy paths (train/val/test) or a
            single in-memory 3-D array holding all splits back to back.
        autoencoderModel: One of 'AE_Upsampling_rezise',
            'AE_Upsampling_Sample_TypeB', 'AE_Upsampling_Sample_TypeA'.
        savePredictionName: Basename (no extension) for the decoded test images (.npy).
        saveModelName: Basename (no extension) for the trained Keras model (.h5).
        num_epochs: Number of training epochs.
        batch_size: Training batch size.
        filesSize: [train_size, val_size] split sizes — required only when
            `filenames` is an in-memory array. (Default changed from the
            mutable `[]` to None; path-based calls are unaffected.)

    Raises:
        ValueError: If `autoencoderModel` is unknown, or `filesSize` is
            missing/too short when an in-memory array is supplied.
    """
    # Reproducibility + quiet TF logging; this code targets TF 2.x only.
    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')
    # Load the three splits, either from disk or by slicing the given array.
    if isinstance(filenames[0], str):
        Z_train = np.load(filenames[0])
        Z_val = np.load(filenames[1])
        Z_test = np.load(filenames[2])
        print('All data are loaded')
    else:
        # Previously a "to do": validate the split sizes before slicing.
        if filesSize is None or len(filesSize) < 2:
            raise ValueError(
                'filesSize must supply [train_size, val_size] when passing an in-memory array')
        Z_train = filenames[:filesSize[0], :, :]
        Z_val = filenames[filesSize[0]:(filesSize[0] + filesSize[1]), :, :]
        Z_test = filenames[(filesSize[0] + filesSize[1]):, :, :]
    Z_train_crop, Z_val_crop, Z_test_crop = Z_train.astype(
        np.float32), Z_val.astype(np.float32), Z_test.astype(np.float32)
    # Add the trailing channel dimension expected by the conv layers.
    Z_train_crop = np.reshape(Z_train_crop, (len(
        Z_train_crop), Z_train_crop.shape[1], Z_train_crop.shape[2], 1))
    Z_val_crop = np.reshape(
        Z_val_crop, (len(Z_val_crop), Z_val_crop.shape[1], Z_val_crop.shape[2], 1))
    Z_test_crop = np.reshape(
        Z_test_crop, (len(Z_test_crop), Z_test_crop.shape[1], Z_test_crop.shape[2], 1))
    # Select the architecture; fail loudly on an unknown name instead of
    # hitting a NameError further down.
    model_registry = {
        'AE_Upsampling_rezise': AE_Upsampling_rezise,
        'AE_Upsampling_Sample_TypeB': AE_Upsampling_Sample_TypeB,
        'AE_Upsampling_Sample_TypeA': AE_Upsampling_Sample_TypeA,
    }
    if autoencoderModel not in model_registry:
        raise ValueError(f'Unknown autoencoder model: {autoencoderModel!r}')
    autoencoder = model_registry[autoencoderModel]()
    print(autoencoder._model.summary())
    # Convolutional implementation
    autoencoder.train(Z_train_crop, Z_val_crop, batch_size, num_epochs)
    decoded_imgs = autoencoder.getDecodedImage(Z_test_crop)
    np.save(savePredictionName + '.npy', decoded_imgs)
    autoencoder._model.save(saveModelName + '.h5')
| [
"tensorflow.random.set_seed",
"autoencoder.autoencoder.AE_Upsampling_rezise",
"numpy.random.seed",
"autoencoder.autoencoder.AE_Upsampling_Sample_TypeB",
"autoencoder.autoencoder.AE_Upsampling_Sample_TypeA",
"numpy.load",
"numpy.save",
"tensorflow.__version__.startswith"
] | [((632, 654), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(22)'], {}), '(22)\n', (650, 654), True, 'import tensorflow as tf\n'), ((659, 677), 'numpy.random.seed', 'np.random.seed', (['(22)'], {}), '(22)\n', (673, 677), True, 'import numpy as np\n'), ((734, 765), 'tensorflow.__version__.startswith', 'tf.__version__.startswith', (['"""2."""'], {}), "('2.')\n", (759, 765), True, 'import tensorflow as tf\n'), ((2376, 2426), 'numpy.save', 'np.save', (["(savePredictionName + '.npy')", 'decoded_imgs'], {}), "(savePredictionName + '.npy', decoded_imgs)\n", (2383, 2426), True, 'import numpy as np\n'), ((865, 886), 'numpy.load', 'np.load', (['filenames[0]'], {}), '(filenames[0])\n', (872, 886), True, 'import numpy as np\n'), ((903, 924), 'numpy.load', 'np.load', (['filenames[1]'], {}), '(filenames[1])\n', (910, 924), True, 'import numpy as np\n'), ((942, 963), 'numpy.load', 'np.load', (['filenames[2]'], {}), '(filenames[2])\n', (949, 963), True, 'import numpy as np\n'), ((1803, 1825), 'autoencoder.autoencoder.AE_Upsampling_rezise', 'AE_Upsampling_rezise', ([], {}), '()\n', (1823, 1825), True, 'from autoencoder.autoencoder import AE_Upsampling_rezise as AE_Upsampling_rezise\n'), ((1951, 1979), 'autoencoder.autoencoder.AE_Upsampling_Sample_TypeB', 'AE_Upsampling_Sample_TypeB', ([], {}), '()\n', (1977, 1979), True, 'from autoencoder.autoencoder import AE_Upsampling_Sample_TypeB as AE_Upsampling_Sample_TypeB\n'), ((2105, 2133), 'autoencoder.autoencoder.AE_Upsampling_Sample_TypeA', 'AE_Upsampling_Sample_TypeA', ([], {}), '()\n', (2131, 2133), True, 'from autoencoder.autoencoder import AE_Upsampling_Sample_TypeA as AE_Upsampling_Sample_TypeA\n')] |
import argparse
from timeit import default_timer
from typing import Tuple
import numpy as np
from dgl import DGLGraph
from dgl.data import RedditDataset
from ogb.nodeproppred import DglNodePropPredDataset
from sklearn.metrics import accuracy_score, log_loss
from torch import Tensor
from xgboost import XGBClassifier
from pcapass import PCAPass
def process_dataset(
    dataset: str,
    dataset_root: str,
    reverse_edges: bool,
    self_loop: bool,
) -> Tuple[DGLGraph, Tensor, Tensor, Tensor, Tensor]:
    """Load a node-classification dataset and normalize its graph structure.

    Args:
        dataset: 'Reddit' for the DGL Reddit dataset; any other value is
            treated as an OGB node-property-prediction dataset name.
        dataset_root: Directory where the raw dataset is stored/downloaded.
        reverse_edges: If True, add a reversed copy of every edge (makes a
            directed graph effectively undirected).
        self_loop: If True, normalize to exactly one self-loop per node;
            if False, strip self-loops entirely.

    Returns:
        (graph, labels, train_idx, valid_idx, test_idx). For Reddit the
        index entries are boolean masks; for OGB they are index tensors.
    """
    if dataset == 'Reddit':
        data = RedditDataset(raw_dir=dataset_root)
        g = data[0]
        labels = g.ndata['label']
        train_idx = g.ndata['train_mask']
        valid_idx = g.ndata['val_mask']
        test_idx = g.ndata['test_mask']
    else:
        # Bug fix: this branch previously read the module-global `args`
        # instead of the function parameters, so any caller not backed by
        # that global would break (or silently load the wrong dataset).
        data = DglNodePropPredDataset(name=dataset, root=dataset_root)
        split_idx = data.get_idx_split()
        train_idx = split_idx['train']
        valid_idx = split_idx['valid']
        test_idx = split_idx['test']
        g, labels = data[0]
    if reverse_edges:
        src, dst = g.all_edges()
        g.add_edges(dst, src)
    if self_loop:
        g = g.remove_self_loop().add_self_loop()
    else:
        g = g.remove_self_loop()
    return g, labels, train_idx, valid_idx, test_idx
def preprocess_features(
    pcapass: PCAPass,
    g: DGLGraph,
    node_feats: Tensor,
) -> Tuple[Tensor, float]:
    """Run the PCAPass embedding over the graph and report wall-clock time.

    Returns:
        (embedded node features, elapsed seconds).
    """
    t0 = default_timer()
    embeddings = pcapass(g, node_feats)
    elapsed = default_timer() - t0
    return embeddings, elapsed
def train(
    model: XGBClassifier,
    train_feats: Tensor,
    train_labels: Tensor,
    valid_feats: Tensor,
    valid_labels: Tensor,
    early_stopping: int = 10,
) -> float:
    """Fit the classifier with early stopping on the validation split.

    Returns:
        Wall-clock seconds spent in `model.fit`.
    """
    begin = default_timer()
    # Early stopping monitors mlogloss on the validation set.
    model.fit(
        train_feats,
        train_labels,
        eval_set=[(valid_feats, valid_labels)],
        early_stopping_rounds=early_stopping,
    )
    return default_timer() - begin
def evaluate(
    model: XGBClassifier,
    eval_feats: Tensor,
    eval_labels: Tensor,
    train_split_labels: Tensor,
) -> Tuple[float, float, float]:
    """Score a fitted classifier on one data split.

    Args:
        model: Fitted XGBClassifier (early stopping must have run so that
            `best_iteration` is set).
        eval_feats: Features of the split being scored.
        eval_labels: Ground-truth labels for the split.
        train_split_labels: Full class set seen at training time, so that
            `log_loss` uses a consistent class ordering even if some
            classes are absent from this split.

    Returns:
        (log-loss, accuracy, wall-clock seconds spent evaluating).
        Annotation fixed: the original declared `Tuple[float]` for a
        3-tuple return.
    """
    start = default_timer()
    # Only use trees up to (and including) the best early-stopping round.
    logits = model.predict_proba(
        eval_feats, iteration_range=(0, model.best_iteration + 1))
    predictions = np.argmax(logits, axis=1)
    loss = log_loss(eval_labels, logits, labels=train_split_labels)
    accuracy = accuracy_score(eval_labels, predictions)
    stop = default_timer()
    eval_time = stop - start
    return loss, accuracy, eval_time
def run(args: argparse.ArgumentParser) -> Tuple[float]:
    """Execute one full pipeline run: load the dataset, embed nodes with
    PCAPass, fit an XGBoost classifier, and score validation/test splits.

    Returns:
        (validation accuracy, test accuracy).
    """
    g, labels, train_idx, valid_idx, test_idx = process_dataset(
        args.dataset,
        args.dataset_root,
        args.reverse_edges,
        args.self_loop,
    )
    pcapass = PCAPass(args.khop, args.hidden_feats, seed=args.seed)
    print(f'## Started PCAPass preprocessing ##')
    node_feats, pcapass_time = preprocess_features(pcapass, g, g.ndata['feat'])
    print(f'## Finished PCAPass preprocessing. Time: {pcapass_time:.2f} ##')
    # Slice the embedded features and labels per split.
    train_feats, train_labels = node_feats[train_idx], labels[train_idx]
    valid_feats, valid_labels = node_feats[valid_idx], labels[valid_idx]
    test_feats, test_labels = node_feats[test_idx], labels[test_idx]
    # log_loss needs the full set of classes observed at training time.
    train_split_labels = np.unique(train_labels)
    model = XGBClassifier(
        n_estimators=args.n_estimators,
        max_depth=args.max_depth,
        learning_rate=args.lr,
        verbosity=1,
        objective='multi:softprob',
        eval_metric='mlogloss',
        booster='gbtree',
        tree_method='hist',
        gamma=args.gamma,
        min_child_weight=args.min_child_weight,
        max_delta_step=args.max_delta_step,
        subsample=args.subsample,
        colsample_bytree=args.colsample_bytree,
        colsample_bylevel=args.colsample_bylevel,
        colsample_bynode=args.colsample_bynode,
        base_score=0.5,
        random_state=args.seed,
        use_label_encoder=False,
    )
    print(f'## Started XGBoost training ##')
    training_time = train(model, train_feats, train_labels,
                          valid_feats, valid_labels)
    print(f'## Finished XGBoost training. Time: {training_time:.2f} ##')
    print(f'## Started valid inference ##')
    valid_loss, valid_accuracy, valid_time = evaluate(
        model, valid_feats, valid_labels, train_split_labels)
    print(f'## Finished valid inference. Loss: {valid_loss:.2f} '
          f'Accuracy: {valid_accuracy:.4f} Time: {valid_time:.2f} ##')
    print(f'## Started test inference ##')
    test_loss, test_accuracy, test_time = evaluate(
        model, test_feats, test_labels, train_split_labels)
    print(f'## Finished test inference. Loss: {test_loss:.2f} '
          f'Accuracy: {test_accuracy:.4f} Time: {test_time:.2f} ##')
    return valid_accuracy, test_accuracy
def run_submission(args: argparse.ArgumentParser) -> None:
    """Repeat `run` for seeds 0..9 and print mean ± std accuracies."""
    valid_accuracies, test_accuracies = [], []
    for seed in range(10):
        print(f'## Started seed: {seed} ##')
        args.seed = seed
        valid_acc, test_acc = run(args)
        valid_accuracies.append(valid_acc)
        test_accuracies.append(test_acc)
        print(f'## Finished seed: {args.seed} ##')
    # Aggregate across seeds.
    valid_accuracy_mean, valid_accuracy_std = np.mean(valid_accuracies), np.std(valid_accuracies)
    test_accuracy_mean, test_accuracy_std = np.mean(test_accuracies), np.std(test_accuracies)
    print('## Submission results ##')
    print(f'Valid Accuracy: {valid_accuracy_mean} ± {valid_accuracy_std} '
          f'Test Accuracy: {test_accuracy_mean} ± {test_accuracy_std}')
if __name__ == '__main__':
    # CLI: dataset selection, graph preprocessing toggles, PCAPass
    # embedding hyper-parameters, and the XGBoost booster knobs.
    parser = argparse.ArgumentParser('PCAPass + XGBoost')
    parser.add_argument('--dataset', default='ogbn-products', type=str,
                        choices=['ogbn-arxiv', 'ogbn-products', 'reddit'])
    parser.add_argument('--dataset-root', default='dataset', type=str)
    parser.add_argument('--reverse-edges', default=False,
                        action=argparse.BooleanOptionalAction)
    parser.add_argument('--self-loop', default=True,
                        action=argparse.BooleanOptionalAction)
    # PCAPass embedding
    parser.add_argument('--khop', default=11, type=int)
    parser.add_argument('--hidden-feats', default=100, type=int)
    # XGBoost
    parser.add_argument('--n-estimators', default=7000, type=int)
    parser.add_argument('--max-depth', default=6, type=int)
    parser.add_argument('--lr', default=0.1, type=float)
    parser.add_argument('--gamma', default=0, type=float)
    parser.add_argument('--min-child-weight', default=1, type=float)
    parser.add_argument('--max-delta-step', default=0, type=float)
    parser.add_argument('--subsample', default=1, type=float)
    parser.add_argument('--colsample-bytree', default=1, type=float)
    parser.add_argument('--colsample-bylevel', default=1, type=float)
    parser.add_argument('--colsample-bynode', default=1, type=float)
    parser.add_argument('--seed', default=13, type=int)
    parser.add_argument('--submission', default=False,
                        action=argparse.BooleanOptionalAction)
    args = parser.parse_args()
    # --submission averages ten seeds; otherwise a single run with --seed.
    (run_submission if args.submission else run)(args)
| [
"numpy.mean",
"pcapass.PCAPass",
"numpy.unique",
"argparse.ArgumentParser",
"timeit.default_timer",
"numpy.argmax",
"ogb.nodeproppred.DglNodePropPredDataset",
"sklearn.metrics.log_loss",
"numpy.std",
"dgl.data.RedditDataset",
"sklearn.metrics.accuracy_score",
"xgboost.XGBClassifier"
] | [((1431, 1446), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1444, 1446), False, 'from timeit import default_timer\n'), ((1491, 1506), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1504, 1506), False, 'from timeit import default_timer\n'), ((1762, 1777), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1775, 1777), False, 'from timeit import default_timer\n'), ((1949, 1964), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1962, 1964), False, 'from timeit import default_timer\n'), ((2178, 2193), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (2191, 2193), False, 'from timeit import default_timer\n'), ((2314, 2339), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (2323, 2339), True, 'import numpy as np\n'), ((2352, 2408), 'sklearn.metrics.log_loss', 'log_loss', (['eval_labels', 'logits'], {'labels': 'train_split_labels'}), '(eval_labels, logits, labels=train_split_labels)\n', (2360, 2408), False, 'from sklearn.metrics import accuracy_score, log_loss\n'), ((2424, 2464), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['eval_labels', 'predictions'], {}), '(eval_labels, predictions)\n', (2438, 2464), False, 'from sklearn.metrics import accuracy_score, log_loss\n'), ((2477, 2492), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (2490, 2492), False, 'from timeit import default_timer\n'), ((2805, 2858), 'pcapass.PCAPass', 'PCAPass', (['args.khop', 'args.hidden_feats'], {'seed': 'args.seed'}), '(args.khop, args.hidden_feats, seed=args.seed)\n', (2812, 2858), False, 'from pcapass import PCAPass\n'), ((3354, 3377), 'numpy.unique', 'np.unique', (['train_labels'], {}), '(train_labels)\n', (3363, 3377), True, 'import numpy as np\n'), ((3391, 3927), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'n_estimators': 'args.n_estimators', 'max_depth': 'args.max_depth', 'learning_rate': 'args.lr', 'verbosity': '(1)', 'objective': '"""multi:softprob"""', 'eval_metric': 
'"""mlogloss"""', 'booster': '"""gbtree"""', 'tree_method': '"""hist"""', 'gamma': 'args.gamma', 'min_child_weight': 'args.min_child_weight', 'max_delta_step': 'args.max_delta_step', 'subsample': 'args.subsample', 'colsample_bytree': 'args.colsample_bytree', 'colsample_bylevel': 'args.colsample_bylevel', 'colsample_bynode': 'args.colsample_bynode', 'base_score': '(0.5)', 'random_state': 'args.seed', 'use_label_encoder': '(False)'}), "(n_estimators=args.n_estimators, max_depth=args.max_depth,\n learning_rate=args.lr, verbosity=1, objective='multi:softprob',\n eval_metric='mlogloss', booster='gbtree', tree_method='hist', gamma=\n args.gamma, min_child_weight=args.min_child_weight, max_delta_step=args\n .max_delta_step, subsample=args.subsample, colsample_bytree=args.\n colsample_bytree, colsample_bylevel=args.colsample_bylevel,\n colsample_bynode=args.colsample_bynode, base_score=0.5, random_state=\n args.seed, use_label_encoder=False)\n", (3404, 3927), False, 'from xgboost import XGBClassifier\n'), ((5345, 5370), 'numpy.mean', 'np.mean', (['valid_accuracies'], {}), '(valid_accuracies)\n', (5352, 5370), True, 'import numpy as np\n'), ((5396, 5420), 'numpy.std', 'np.std', (['valid_accuracies'], {}), '(valid_accuracies)\n', (5402, 5420), True, 'import numpy as np\n'), ((5447, 5471), 'numpy.mean', 'np.mean', (['test_accuracies'], {}), '(test_accuracies)\n', (5454, 5471), True, 'import numpy as np\n'), ((5496, 5519), 'numpy.std', 'np.std', (['test_accuracies'], {}), '(test_accuracies)\n', (5502, 5519), True, 'import numpy as np\n'), ((5751, 5795), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""PCAPass + XGBoost"""'], {}), "('PCAPass + XGBoost')\n", (5774, 5795), False, 'import argparse\n'), ((533, 568), 'dgl.data.RedditDataset', 'RedditDataset', ([], {'raw_dir': 'dataset_root'}), '(raw_dir=dataset_root)\n', (546, 568), False, 'from dgl.data import RedditDataset\n'), ((778, 843), 'ogb.nodeproppred.DglNodePropPredDataset', 'DglNodePropPredDataset', ([], 
{'name': 'args.dataset', 'root': 'args.dataset_root'}), '(name=args.dataset, root=args.dataset_root)\n', (800, 843), False, 'from ogb.nodeproppred import DglNodePropPredDataset\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank
from platforms.platform import get_platform
from foundations import paths
import json
import os
import datasets.registry
import copy
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import tqdm
import seaborn as sns
import pandas as pd
class Branch(base.Branch):
    """Lottery branch that compares a per-layer property of the LTH mask
    against the distribution of that property over randomly shuffled masks
    (optionally also a mask transferred from another domain), then renders
    the distributions as histograms with the LTH value overlaid.
    """

    def branch_function(self, seed: int, property: str = 'features_erank',
                        trials: int = 10000, conv_layers: bool = False, rand_data: str = 'natural',
                        no_activation: bool= False,
                        cross_domain_path: str = 'none',
                        cross_domain_data: str = 'none',
                        path_all_trials: str = 'none'):
        """Measure `property` for the LTH mask and for `trials` random mask
        shuffles; persist the values to JSON and save a histogram PDF.

        Args:
            seed: Base RNG seed; trial t shuffles with seed + t.
            property: Which statistic to compute: 'weight_erank',
                'weight_frobenius', 'features_erank', 'activation',
                'features_spectral', 'features_frobenius', or
                'features_spectral_fro_ratio'.
            trials: Number of random shuffled masks to sample.
            conv_layers: Forwarded to the feature-property helpers.
            rand_data: Input fed to feature-based properties: 'natural'
                (a real training batch), 'uniform' in [-1, 1), or
                'gaussian'.
            no_activation: Forwarded to the feature-property helpers.
            cross_domain_path: If not 'none', also evaluate a mask/model
                checkpoint from another run located under this path.
            cross_domain_data: Dataset name ('cifar10'/'cifar100') used to
                rewrite the model name for the cross-domain checkpoint.
            path_all_trials: If not 'none', skip computation and load a
                pre-computed (trials x layers) array from this .npy file.
        """
        # Load the LTH mask for this pruning level and reconstruct both the
        # unpruned original model and the LTH-pruned model from level_0 weights.
        mask = Mask.load(self.level_root)
        start_step = self.lottery_desc.str_to_step('0ep')
        base_model = models.registry.load(self.level_root.replace(f'level_{self.level}', 'level_0'), start_step,
                                             self.lottery_desc.model_hparams)
        orig_model = PrunedModel(copy.deepcopy(base_model), Mask.ones_like(base_model))
        lth_model = PrunedModel(copy.deepcopy(base_model), mask)
        # Keep only prunable tensors; state_dict keys carry a 'model.' prefix,
        # hence the k[6:] strip. Conv weights are excluded from orig_tensors.
        prunable_tensors = set(orig_model.prunable_layer_names)
        orig_tensors = {k: v for k, v in orig_model.state_dict().items() if k[6:] in prunable_tensors and
                        k not in orig_model.prunable_conv_names}
        lth_tensors = {k: v for k, v in lth_model.state_dict().items() if k[6:] in prunable_tensors}
        rand_properties = []
        active_properties = []
        if path_all_trials == 'none':
            # Compute from scratch only if no cached properties file exists.
            if not get_platform().exists(paths.properties(self.branch_root, property)):
                # One batch of real inputs; optionally replace with noise.
                train_loader = datasets.registry.get(self.lottery_desc.dataset_hparams, train=True)
                input = list(train_loader)[0][0]
                if rand_data == 'uniform':
                    input = 2 * torch.rand_like(input) - 1
                elif rand_data == 'gaussian':
                    input = torch.randn_like(input)
                # Per-layer property values for the LTH-pruned model.
                if property == 'weight_erank':
                    lth_properties = [erank(v) for k, v in lth_tensors.items()]
                elif property == 'weight_frobenius':
                    lth_properties = [v.norm().item() for k, v in lth_tensors.items()]
                elif property == 'features_erank':
                    lth_properties = feature_erank(lth_model, input, conv_layers, no_activation)
                elif property == 'activation':
                    train_loader = datasets.registry.get( self.lottery_desc.dataset_hparams, train=True)
                    input = list(train_loader)[0][0]
                    lth_properties = activation(lth_model, input, conv_layers, no_activation)
                elif property == 'features_spectral':
                    lth_properties = features_spectral(lth_model, input, conv_layers, no_activation)
                elif property == 'features_frobenius':
                    lth_properties = features_frobenius(lth_model, input, conv_layers, no_activation)
                elif property == 'features_spectral_fro_ratio':
                    lth_properties = features_spectral_fro_ratio(lth_model, input, conv_layers, no_activation)
                # Unknown property name: fail fast.
                else: raise ValueError(f'Invalid property: {property}')
                cross_domain_prop = None
                if cross_domain_path != 'none':
                    # Load the cross-domain mask + model checkpoint and
                    # compute the same property for it.
                    path = os.path.join(cross_domain_path, f'level_{self.level}', 'main')
                    cross_mask = Mask.load(path)
                    start_step = self.lottery_desc.str_to_step('0ep')
                    state_step = start_step
                    # Rewrite the model name so registry.load builds the
                    # architecture matching the cross-domain dataset.
                    if cross_domain_data == 'cifar100':
                        self.lottery_desc.model_hparams.model_name = self.lottery_desc.model_hparams.model_name.replace('cifar', 'cifar100')
                    elif cross_domain_data == 'cifar10':
                        self.lottery_desc.model_hparams.model_name = self.lottery_desc.model_hparams.model_name.replace('cifar100',
                                                                                                                        'cifar')
                    cross_model = PrunedModel(models.registry.load(path, state_step, self.lottery_desc.model_hparams), cross_mask)
                    if property == 'features_erank':
                        cross_domain_prop = feature_erank(cross_model, input, conv_layers, no_activation)
                    elif property == 'activation':
                        cross_domain_prop = activation(cross_model, input, conv_layers, no_activation)
                    elif property == 'features_spectral':
                        cross_domain_prop = features_spectral(cross_model, input, conv_layers, no_activation)
                    elif property == 'features_frobenius':
                        cross_domain_prop = features_frobenius(cross_model, input, conv_layers, no_activation)
                    elif property == 'features_spectral_fro_ratio':
                        cross_domain_prop = features_spectral_fro_ratio(cross_model, input, conv_layers, no_activation)
                # Sample the null distribution: shuffle the LTH mask per
                # trial and measure the same property on each shuffled model.
                for t in tqdm.tqdm(range(trials)):
                    # Deterministic shuffle per trial (seed + t).
                    rand_mask = Mask(shuffle_state_dict(mask, seed=seed + t))
                    rand_model = PrunedModel(copy.deepcopy(base_model), rand_mask)
                    # curr_base_model = copy.deepcopy(base_model)
                    # active_mask = Mask.ones_like(curr_base_model)
                    # curr_model = PrunedModel(curr_base_model, active_mask)
                    # for i, (name, param) in enumerate(orig_tensors.items()):
                    #     name = name[6:]
                    #     features = [input] if len(orig_model.prunable_conv_names) == 0 else []
                    #     features.extend(curr_model.intermediate(input))
                    #     active_mask[name] = generate_mask_active(param, mask[name].float().mean().item(), seed+t, features[i]).int()
                    #     curr_model = PrunedModel(curr_base_model, active_mask)
                    if property == 'features_erank':
                        rand_properties.append(feature_erank(rand_model, input, conv_layers, no_activation))
                        # active_properties.append(feature_erank(curr_model, input, conv_layers))
                    elif property == 'activation':
                        rand_properties.append(activation(rand_model, input))
                        # active_properties.append(activation(curr_model, input))
                    elif property == 'weight_frobenius':
                        rand_properties.append([v.norm().item() for k, v in rand_model.state_dict().items() if k[6:] in prunable_tensors])
                    elif property == 'weight_erank':
                        rand_properties.append([erank(v) for k, v in rand_model.state_dict().items() if k[6:] in prunable_tensors])
                        # rand_properties.append(weight_erank({k: v for k, v in rand_model.state_dict().items() if k[6:] in prunable_tensors}))
                        # active_properties.append(weight_erank({k: v for k, v in curr_model.state_dict().items() if k[6:] in prunable_tensors}))
                    elif property == 'features_spectral':
                        rand_properties.append(features_spectral(rand_model, input, conv_layers, no_activation))
                    elif property == 'features_frobenius':
                        rand_properties.append(features_frobenius(rand_model, input, conv_layers, no_activation))
                    elif property == 'features_spectral_fro_ratio':
                        rand_properties.append(features_spectral_fro_ratio(rand_model, input, conv_layers, no_activation))
                # Persist computed properties (primary process only).
                # NOTE(review): 'active' is written as a copy of 'random';
                # the active-mask code above is commented out — confirm intent.
                if not get_platform().is_primary_process: return
                if not get_platform().exists(self.branch_root): get_platform().makedirs(self.branch_root)
                with open(paths.properties(self.branch_root, property), 'w') as f:
                    json.dump({'lth': lth_properties, 'random': rand_properties, 'active': rand_properties, 'cross_lth': cross_domain_prop}, f)
            else: # cached properties file already exists: reuse it
                with open(paths.properties(self.branch_root, property), 'r') as f:
                    propeties_all = json.load(f)
                lth_properties = propeties_all['lth']
                rand_properties = propeties_all['random']
                # rand_properties = [[p1, p2, p3, p4+0.05] for p1, p2, p3, p4 in rand_properties]
                if 'cross_lth' in propeties_all.keys():
                    cross_domain_prop = propeties_all['cross_lth']
                else:
                    cross_domain_prop = None
        else:
            # Pre-computed trials supplied as a .npy file (rows become
            # per-layer columns after the transpose).
            # with open(path_all_trials) as f:
            #     log = json.load(f)
            # lth_properties = log['lth']
            import numpy as np
            log = np.load(path_all_trials)
            rand_properties = log.T
            # NOTE(review): lth_properties stays unbound here for any
            # property other than 'weight_erank' — the plotting loop below
            # would raise. Confirm this path is only used with weight_erank.
            if property == 'weight_erank':
                lth_properties = [erank(v) for k, v in lth_tensors.items()]
            cross_domain_prop = None
        # Pick which layers to plot and human-readable legend labels for
        # known architectures; default is every layer, numeric labels.
        new_labels = []
        layers = [i for i in range(len(rand_properties[0]))]
        if self.desc.lottery_desc.model_hparams.model_name == 'cifar_conv6':
            layers = [1, 4, 7, 10]
            new_labels = [
                'conv1',
                'conv4',
                'conv6',
                'last fc'
            ]
        if self.desc.lottery_desc.model_hparams.model_name == 'cifar_conv2':
            new_labels = [
                'conv2',
                'fc1',
                'fc2',
                'fc3'
            ]
        if self.desc.lottery_desc.model_hparams.model_name == 'cifar_vgg_19':
            layers = [3, 6, 11, 16, 21, 22]
            new_labels = [
                '1st pool',
                '2nd pool',
                '3rd pool',
                '4th pool',
                '5th pool',
                'fc',
            ]
        # Long-format frame: one row per (trial, layer) property value.
        data = pd.concat(
            [pd.DataFrame({'layer': [layers[i]], property.replace('_', ' '): layer_prop})
             for prop in rand_properties for i, layer_prop in enumerate(prop)],
            ignore_index=True
        )
        sns.set_theme(style='white')
        # sns.set_palette("Reds")
        f = sns.displot(data=data, x=property.replace('_', ' '), hue='layer', bins=int(100), palette='dark', stat="probability")
        # Overlay per-layer LTH values (dashed) and, if available, the
        # cross-domain values (solid), color-matched to the histograms.
        for i in range(len(rand_properties[0])):
            plt.axvline(lth_properties[i], linestyle='dashed', linewidth=1, color=f'C{i}')
            if cross_domain_prop:
                plt.axvline(cross_domain_prop[i], linewidth=1, color=f'C{i}')
        if len(new_labels) > 0:
            for t, x in zip(f.legend.texts, new_labels): t.set_text(x)
        f.fig.savefig(os.path.join(self.branch_root, f'property_{property}_sns.pdf'))
        # colors_list = ['blue', 'green', 'magenta', 'red', 'brown', 'cyan', 'purple', 'grey', 'orange', 'pink', 'lime']
        # for i in range(len(rand_properties[0])):
        #     rand_i = [p[i] for p in rand_properties]
        #     # active_i = [p[i] for p in active_properties]
        #     # plt.hist([rand_i, active_i], bins=int(trials/100), label=[f'random l{i+1}', f'active l{i+1}'])
        #     plt.hist(rand_i, bins=int(100), label=f'random l{i+1}', color=colors_list[i])
        #     plt.axvline(lth_properties[i], linestyle='dashed', linewidth=1, color=colors_list[i])
        #     if cross_domain_prop:
        #         plt.axvline(cross_domain_prop[i], linewidth=1, color=colors_list[i])
        # plt.legend()
        # plt.savefig(os.path.join(self.branch_root, f'property_{property}.pdf'))

    @staticmethod
    def description():
        # Short human-readable description shown in the branch registry.
        return "Calculate histogram of properties."

    @staticmethod
    def name():
        # Registry key used to select this branch on the command line.
        return 'property_distribution'
| [
"utils.tensor_utils.erank",
"copy.deepcopy",
"utils.tensor_utils.features_frobenius",
"json.dump",
"platforms.platform.get_platform",
"torch.rand_like",
"utils.tensor_utils.features_spectral",
"matplotlib.use",
"pruning.mask.Mask.ones_like",
"pruning.mask.Mask.load",
"utils.tensor_utils.features... | [((689, 710), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (703, 710), False, 'import matplotlib\n'), ((1267, 1293), 'pruning.mask.Mask.load', 'Mask.load', (['self.level_root'], {}), '(self.level_root)\n', (1276, 1293), False, 'from pruning.mask import Mask\n'), ((11066, 11094), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""white"""'}), "(style='white')\n", (11079, 11094), True, 'import seaborn as sns\n'), ((1573, 1598), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (1586, 1598), False, 'import copy\n'), ((1600, 1626), 'pruning.mask.Mask.ones_like', 'Mask.ones_like', (['base_model'], {}), '(base_model)\n', (1614, 1626), False, 'from pruning.mask import Mask\n'), ((1660, 1685), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (1673, 1685), False, 'import copy\n'), ((9689, 9713), 'numpy.load', 'np.load', (['path_all_trials'], {}), '(path_all_trials)\n', (9696, 9713), True, 'import numpy as np\n'), ((11319, 11397), 'matplotlib.pyplot.axvline', 'plt.axvline', (['lth_properties[i]'], {'linestyle': '"""dashed"""', 'linewidth': '(1)', 'color': 'f"""C{i}"""'}), "(lth_properties[i], linestyle='dashed', linewidth=1, color=f'C{i}')\n", (11330, 11397), True, 'import matplotlib.pyplot as plt\n'), ((11635, 11697), 'os.path.join', 'os.path.join', (['self.branch_root', 'f"""property_{property}_sns.pdf"""'], {}), "(self.branch_root, f'property_{property}_sns.pdf')\n", (11647, 11697), False, 'import os\n'), ((11448, 11509), 'matplotlib.pyplot.axvline', 'plt.axvline', (['cross_domain_prop[i]'], {'linewidth': '(1)', 'color': 'f"""C{i}"""'}), "(cross_domain_prop[i], linewidth=1, color=f'C{i}')\n", (11459, 11509), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2213), 'foundations.paths.properties', 'paths.properties', (['self.branch_root', 'property'], {}), '(self.branch_root, property)\n', (2185, 2213), False, 'from foundations import paths\n'), ((4071, 4133), 
'os.path.join', 'os.path.join', (['cross_domain_path', 'f"""level_{self.level}"""', '"""main"""'], {}), "(cross_domain_path, f'level_{self.level}', 'main')\n", (4083, 4133), False, 'import os\n'), ((4167, 4182), 'pruning.mask.Mask.load', 'Mask.load', (['path'], {}), '(path)\n', (4176, 4182), False, 'from pruning.mask import Mask\n'), ((8774, 8901), 'json.dump', 'json.dump', (["{'lth': lth_properties, 'random': rand_properties, 'active':\n rand_properties, 'cross_lth': cross_domain_prop}", 'f'], {}), "({'lth': lth_properties, 'random': rand_properties, 'active':\n rand_properties, 'cross_lth': cross_domain_prop}, f)\n", (8783, 8901), False, 'import json\n'), ((9058, 9070), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9067, 9070), False, 'import json\n'), ((9827, 9835), 'utils.tensor_utils.erank', 'erank', (['v'], {}), '(v)\n', (9832, 9835), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((2147, 2161), 'platforms.platform.get_platform', 'get_platform', ([], {}), '()\n', (2159, 2161), False, 'from platforms.platform import get_platform\n'), ((2541, 2564), 'torch.randn_like', 'torch.randn_like', (['input'], {}), '(input)\n', (2557, 2564), False, 'import torch\n'), ((2701, 2709), 'utils.tensor_utils.erank', 'erank', (['v'], {}), '(v)\n', (2706, 2709), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((5042, 5103), 'utils.tensor_utils.feature_erank', 'feature_erank', (['cross_model', 'input', 'conv_layers', 'no_activation'], {}), '(cross_model, input, conv_layers, no_activation)\n', (5055, 5103), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, 
features_spectral_fro_ratio, erank\n'), ((5947, 5986), 'utils.tensor_utils.shuffle_state_dict', 'shuffle_state_dict', (['mask'], {'seed': '(seed + t)'}), '(mask, seed=seed + t)\n', (5965, 5986), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((6033, 6058), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (6046, 6058), False, 'import copy\n'), ((8522, 8536), 'platforms.platform.get_platform', 'get_platform', ([], {}), '()\n', (8534, 8536), False, 'from platforms.platform import get_platform\n'), ((8697, 8741), 'foundations.paths.properties', 'paths.properties', (['self.branch_root', 'property'], {}), '(self.branch_root, property)\n', (8713, 8741), False, 'from foundations import paths\n'), ((8965, 9009), 'foundations.paths.properties', 'paths.properties', (['self.branch_root', 'property'], {}), '(self.branch_root, property)\n', (8981, 9009), False, 'from foundations import paths\n'), ((2440, 2462), 'torch.rand_like', 'torch.rand_like', (['input'], {}), '(input)\n', (2455, 2462), False, 'import torch\n'), ((2971, 3030), 'utils.tensor_utils.feature_erank', 'feature_erank', (['lth_model', 'input', 'conv_layers', 'no_activation'], {}), '(lth_model, input, conv_layers, no_activation)\n', (2984, 3030), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((5199, 5257), 'utils.tensor_utils.activation', 'activation', (['cross_model', 'input', 'conv_layers', 'no_activation'], {}), '(cross_model, input, conv_layers, no_activation)\n', (5209, 5257), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((6893, 
6953), 'utils.tensor_utils.feature_erank', 'feature_erank', (['rand_model', 'input', 'conv_layers', 'no_activation'], {}), '(rand_model, input, conv_layers, no_activation)\n', (6906, 6953), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((8587, 8601), 'platforms.platform.get_platform', 'get_platform', ([], {}), '()\n', (8599, 8601), False, 'from platforms.platform import get_platform\n'), ((8628, 8642), 'platforms.platform.get_platform', 'get_platform', ([], {}), '()\n', (8640, 8642), False, 'from platforms.platform import get_platform\n'), ((3273, 3329), 'utils.tensor_utils.activation', 'activation', (['lth_model', 'input', 'conv_layers', 'no_activation'], {}), '(lth_model, input, conv_layers, no_activation)\n', (3283, 3329), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((5360, 5425), 'utils.tensor_utils.features_spectral', 'features_spectral', (['cross_model', 'input', 'conv_layers', 'no_activation'], {}), '(cross_model, input, conv_layers, no_activation)\n', (5377, 5425), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((7151, 7180), 'utils.tensor_utils.activation', 'activation', (['rand_model', 'input'], {}), '(rand_model, input)\n', (7161, 7180), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((3421, 3484), 'utils.tensor_utils.features_spectral', 'features_spectral', (['lth_model', 'input', 'conv_layers', 'no_activation'], {}), '(lth_model, input, 
conv_layers, no_activation)\n', (3438, 3484), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((5529, 5595), 'utils.tensor_utils.features_frobenius', 'features_frobenius', (['cross_model', 'input', 'conv_layers', 'no_activation'], {}), '(cross_model, input, conv_layers, no_activation)\n', (5547, 5595), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((3577, 3641), 'utils.tensor_utils.features_frobenius', 'features_frobenius', (['lth_model', 'input', 'conv_layers', 'no_activation'], {}), '(lth_model, input, conv_layers, no_activation)\n', (3595, 3641), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((5708, 5783), 'utils.tensor_utils.features_spectral_fro_ratio', 'features_spectral_fro_ratio', (['cross_model', 'input', 'conv_layers', 'no_activation'], {}), '(cross_model, input, conv_layers, no_activation)\n', (5735, 5783), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((3743, 3816), 'utils.tensor_utils.features_spectral_fro_ratio', 'features_spectral_fro_ratio', (['lth_model', 'input', 'conv_layers', 'no_activation'], {}), '(lth_model, input, conv_layers, no_activation)\n', (3770, 3816), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((7561, 7569), 'utils.tensor_utils.erank', 'erank', (['v'], {}), '(v)\n', (7566, 
7569), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((8040, 8104), 'utils.tensor_utils.features_spectral', 'features_spectral', (['rand_model', 'input', 'conv_layers', 'no_activation'], {}), '(rand_model, input, conv_layers, no_activation)\n', (8057, 8104), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((8212, 8277), 'utils.tensor_utils.features_frobenius', 'features_frobenius', (['rand_model', 'input', 'conv_layers', 'no_activation'], {}), '(rand_model, input, conv_layers, no_activation)\n', (8230, 8277), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n'), ((8394, 8468), 'utils.tensor_utils.features_spectral_fro_ratio', 'features_spectral_fro_ratio', (['rand_model', 'input', 'conv_layers', 'no_activation'], {}), '(rand_model, input, conv_layers, no_activation)\n', (8421, 8468), False, 'from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank\n')] |
import sys
import logging
import json
from collections import OrderedDict
from pathlib import Path
import matplotlib as mpl
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.special import erf # noqa:E0611
import lmfit
import brewer2mpl
from colorama import Fore, init
import sira.tools.siraplot as spl
# Initialise colorama so ANSI colour codes also work on Windows consoles.
init()
rootLogger = logging.getLogger(__name__)
# Non-interactive backend: figures are written to file, never displayed.
# NOTE(review): mpl.use() is called after pyplot was imported above; recent
# matplotlib versions allow switching here, older ones may ignore it -- confirm.
mpl.use('agg')
# Render floats in pandas output with thousands separators and 4 decimals.
pd.options.display.float_format = '{:,.4f}'.format
# -----------------------------------------------------------------------------
# For plots: using the brewer2 color maps by <NAME>
# -----------------------------------------------------------------------------
set2 = brewer2mpl.get_map('Set2', 'qualitative', 5).mpl_colors
# Marker shapes cycled through when plotting successive damage states.
markers = ['o', '^', 's', 'D', 'x', '+']
class color:
    """ANSI SGR escape sequences for styling console text.

    Adapted from: https://stackoverflow.com/a/17303428
    Terminate any styled span with ``color.END`` to reset the terminal.
    """
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    END = '\033[0m'
# =============================================================================
#
# PROBABILITY of EXCEEDANCE MODEL FITTING
#
# -----------------------------------------------------------------------------
# LOGNORMAL CURVE FITTING
#
# Parameters in scipy LOGNORMAL distribution:
#
# shape = sigma = log(s) s = geometric standard deviation
# sigma = standard deviation of log(X)
#
# scale = M = exp(mu) M = geometric mean == median
# mu = mean of log(X) = log(scale)
#
# location (keyword 'loc') shifts the distribution to the left or right.
# Unless data contain negative values, the location parameter is fixed at 0.
# During curve fitting, this can be set by using floc=0.
#
# Note on the covariance matrix returned by scipy.optimize.curve_fit:
# The square root of the diagonal values are the 1-sigma uncertainties of
# the fit parameters.
# -----------------------------------------------------------------------------
def lognormal_cdf(x, median, beta, loc=0):
    """Lognormal CDF evaluated at `x` (scalar or array-like).

    `median` is the scale parameter (geometric mean), `beta` the standard
    deviation of log(X), and `loc` shifts the distribution along x.
    """
    x = np.asarray(x)
    z = (np.log(x - loc) - np.log(median)) / (beta * np.sqrt(2))
    return 0.5 * (1.0 + erf(z))
def normal_cdf(x, mean, stddev):
    """Normal (Gaussian) CDF with the given mean and standard deviation."""
    z = (x - mean) / (stddev * np.sqrt(2))
    return 0.5 * (1 + erf(z))
def rayleigh_cdf(x, loc, scale):
    """Rayleigh CDF evaluated at `x`, delegating to scipy.stats."""
    frozen = stats.rayleigh(loc=loc, scale=scale)
    return frozen.cdf(x)
# ====================================================================================
def display_dict(pdict, width=4, level=0, init_space=0):
    """Pretty-print a (possibly nested) dict of parameters to stdout.

    Floats are rendered to four decimals; nested dicts recurse with one
    extra indentation level of `width` spaces.
    """
    for key, value in pdict.items():
        # Render the value once up front; floats get fixed precision.
        if isinstance(value, float):
            rendered = f"{value:<.4f}"
        else:
            rendered = f"{str(value):<12}"
        is_mapping = isinstance(value, dict)
        # Top-level scalar entries: bracketed key on its own line, then value.
        if not is_mapping and level < 1:
            print(f"{' ' * init_space}[{key}]")
            print(f"{' ' * (init_space + width)}{rendered}")
        if is_mapping:
            # Section header, then recurse one level deeper.
            print(f"{' ' * (init_space + width * level)}[{key}]")
            display_dict(value, level=level + 1, init_space=init_space)
        elif level > 0:
            # Nested scalar entries: aligned "key = value" rows.
            print(f"{' ' * (init_space + width * level)}{str(key):<10} = {rendered}")
def fit_cdf_model(x_sample, y_sample, dist, params_est="undefined", tag=None):
    """
    Fit a CDF model to array-like sample data using `lmfit.Model`.

    :param x_sample: hazard intensity values (array-like)
    :param y_sample: probability-of-exceedance values at `x_sample`
    :param dist: distribution name (case-insensitive): one of
        normal / gaussian / normal_cdf, lognormal / lognormal_cdf,
        rayleigh / rayleigh_cdf
    :param params_est: "undefined" to derive initial parameter estimates
        from the data, or an `lmfit.Parameters`-like mapping providing
        value / min / max per parameter (normal and lognormal only)
    :param tag: optional label printed in the fit-report header
    :raises ValueError: if `dist` is not a supported distribution name
    :returns: OrderedDict with keys:
        'function'        -- fitted CDF function name (str)
        'parameters'      -- fitted parameter values (OrderedDict)
        'fit_statistics'  -- chi-square and number of function evaluations
    """
    # Locate x-values where the y-values are changing
    x_sample = np.asarray(x_sample)
    y_sample = np.asarray(y_sample)
    change_ndx = np.where(y_sample[:-1] != y_sample[1:])[0]
    change_ndx = list(change_ndx)
    # NOTE(review): if y_sample is constant, change_ndx is empty and the
    # indexing below raises IndexError -- assumes at least one transition.
    if not (change_ndx[-1] == len(y_sample)):
        change_ndx.insert(len(change_ndx), change_ndx[-1] + 1)
    if change_ndx[0] != 0:
        change_ndx.insert(0, change_ndx[0] - 1)
    # xs: sample x-values bracketing the transitions; used only to derive
    # the initial parameter estimates below.
    xs = x_sample[change_ndx]
    # -------------------------------------------------------------------------
    # NORMAL CDF -- set up model and params
    # -------------------------------------------------------------------------
    if dist.lower() in ["normal", "gaussian", "normal_cdf"]:
        func = normal_cdf
        model_dist = lmfit.Model(func)
        model_params = model_dist.make_params()
        if params_est == "undefined":
            # Seed the fit from the transition region of the data.
            model_params.add(
                'mean',
                value=np.mean(xs), min=min(xs), max=np.mean(xs) * 2)
            model_params.add(
                'stddev',
                value=np.std(xs), min=0, max=np.mean(xs) * 0.9)
        else:
            # Caller-supplied estimates (e.g. from a crossover-correction pass).
            param = 'mean'
            model_params.add(
                param,
                value=params_est[param].value,
                min=params_est[param].min, max=params_est[param].max)
            param = 'stddev'
            model_params.add(
                param,
                value=params_est[param].value,
                min=params_est[param].min, max=params_est[param].max)
    # -------------------------------------------------------------------------
    # LOGNORMAL CDF -- set up model and params
    # -------------------------------------------------------------------------
    elif dist.lower() in ["lognormal", "lognormal_cdf"]:
        func = lognormal_cdf
        model_dist = lmfit.Model(func)
        init_med = np.mean(xs)
        init_lsd = abs(np.std(xs))
        model_params = model_dist.make_params()
        if params_est == "undefined":
            model_params.add('median', value=init_med, min=min(xs), max=init_med * 2)
            model_params.add('beta', value=init_lsd, min=sys.float_info.min, max=init_med * 2)
            # Location is pinned at zero (see module header notes).
            model_params.add('loc', value=0, vary=False)
        else:
            param = 'median'
            model_params.add(
                param,
                value=params_est[param].value,
                min=params_est[param].min, max=params_est[param].max)
            param = 'beta'
            model_params.add(
                param,
                value=params_est[param].value,
                min=params_est[param].min, max=params_est[param].max)
            model_params.add('loc', value=0, vary=False)
    # -------------------------------------------------------------------------
    # RAYLEIGH CDF -- set up model and params
    # -------------------------------------------------------------------------
    elif dist.lower() in ["rayleigh", "rayleigh_cdf"]:
        func = rayleigh_cdf
        model_dist = lmfit.Model(func)
        init_loc = xs[0]
        init_scale = np.std(xs)
        model_params = model_dist.make_params()
        # NOTE(review): `params_est` is ignored for the Rayleigh branch --
        # confirm whether caller-supplied estimates should apply here too.
        model_params.add('loc', value=init_loc, min=None, max=None)
        model_params.add('scale', value=init_scale, min=None, max=None)
    else:
        raise ValueError(f"The distribution {dist} is not supported.")
    # -------------------------------------------------------------------------
    # Perform the fit
    # -------------------------------------------------------------------------
    fitresult = model_dist.fit(y_sample, params=model_params, x=x_sample,
                               nan_policy='omit', max_nfev=10000)
    params_odict = OrderedDict()
    params_odict['function'] = str(func.__name__).lower()
    params_odict['parameters'] = fitresult.params.valuesdict()
    params_odict['fit_statistics'] = OrderedDict()
    params_odict['fit_statistics']['chisqr'] = fitresult.chisqr
    params_odict['fit_statistics']['max_nfev'] = fitresult.nfev
    # -------------------------------------------------------------------------
    # Print a short, colourised fit report to stdout.
    func_name = params_odict['function']
    if tag is not None:
        fit_data_header = f"{Fore.YELLOW}{tag} | Distribution: {func_name}{Fore.RESET}"
    else:
        fit_data_header = f"{Fore.RESET}Distribution: {func_name}{Fore.RESET}"
    print("\n" + "-" * 88)
    print(fit_data_header)
    print("-" * 88)
    # print(fitresult.fit_report(modelpars=fitresult.params))
    display_dict(params_odict)
    return params_odict
# ====================================================================================
def fit_prob_exceed_model(
        xdata,
        ydata_2d,
        system_limit_states,
        config_data_dict,
        output_path,
        distribution='lognormal_cdf',
        CROSSOVER_THRESHOLD=0.005,
        CROSSOVER_CORRECTION=True):
    """
    Fit probability-of-exceedance (fragility) models to simulated data.

    :param xdata: hazard intensity values (array-like)
    :param ydata_2d: probability of exceedance, shaped
        (num_damage_states x num_hazard_points)
    :param system_limit_states: damage state names; index 0 is assumed to
        be the "DS0 - No Damage" state and is not fitted
    :param config_data_dict: keyword configuration forwarded to the plots
    :param output_path: directory for the JSON model file and the figures
    :param distribution: name of the CDF family to fit
    :param CROSSOVER_THRESHOLD: tolerance for the crossover check between
        successive damage-state curves
    :param CROSSOVER_CORRECTION: whether to attempt refitting curves that
        cross over
    :returns: dict mapping damage-state index -> fitted model parameters
    """
    num_states = len(system_limit_states)
    fitted_params_dict = {ds: {} for ds in range(1, num_states)}
    xdata = [float(x) for x in xdata]
    rule_heavy = "=" * 88
    rule_light = '-' * 88
    rootLogger.info(
        f"\n\n{Fore.BLUE}Fitting system FRAGILITY data...{Fore.RESET}")
    # Fit one CDF per damage state, skipping the "no damage" state at index 0.
    for ds_ndx in range(1, num_states):
        ds_level = system_limit_states[ds_ndx]
        fitted_params_dict[ds_ndx] = fit_cdf_model(
            xdata, ydata_2d[ds_ndx], dist=distribution,
            tag=f"Limit State: {ds_level}")
        print(f"\n{rule_heavy}\n")
    # With at least two fitted damage states, check for and correct crossover.
    if CROSSOVER_CORRECTION and (num_states - 1 >= 2):
        fitted_params_dict = correct_crossover(
            system_limit_states,
            xdata,
            ydata_2d,
            fitted_params_dict,
            CROSSOVER_THRESHOLD)
    # Log the fitted set, then persist it as JSON.
    model_json = json.dumps(
        {"system_fragility_model": fitted_params_dict}, default=str, indent=4)
    rootLogger.info(
        f"\n{rule_heavy}\n"
        f"\n{color.YELLOW}{color.BOLD}Set of Fitted Models:{color.END}\n"
        f"{model_json}\n"
        f"\n{rule_light}\n")
    json_file = Path(output_path, 'system_model_fragility.json')
    with open(json_file, 'w', encoding='utf-8') as fh:
        json.dump(
            {"system_fragility_model": fitted_params_dict},
            fh, ensure_ascii=False, indent=4)
    # Produce the three standard fragility figures: data only, model only,
    # and model overlaid with the focal hazard scenario events.
    plot_specs = [
        ('fig_sys_pe_DATA.png', True, False, False),
        ('fig_sys_pe_MODEL.png', False, True, False),
        ('fig_sys_pe_MODEL_with_scenarios.png', False, True, True),
    ]
    for fname, show_data, show_model, show_events in plot_specs:
        plot_data_model(xdata,
                        ydata_2d,
                        system_limit_states,
                        fitted_params_dict,
                        output_path,
                        file_name=fname,
                        PLOT_DATA=show_data,
                        PLOT_MODEL=show_model,
                        PLOT_EVENTS=show_events,
                        **config_data_dict)
    return fitted_params_dict
# ====================================================================================
def get_distribution_func(function_name):
    """
    Return the CDF function matching `function_name` (case-insensitive).

    :raises ValueError: if the name is not a supported distribution
    """
    key = function_name.lower()
    if key in ("normal", "normal_cdf"):
        return normal_cdf
    if key in ("rayleigh", "rayleigh_cdf"):
        return rayleigh_cdf
    if key in ("lognormal", "lognormal_cdf"):
        return lognormal_cdf
    raise ValueError(f"The distribution {function_name} is not supported.")
def plot_data_model(x_vals,
                    y_vals,
                    SYS_DS,
                    model_params,
                    out_path,
                    file_name='fig_sys_pe.png',
                    PLOT_DATA=True,
                    PLOT_MODEL=True,
                    PLOT_EVENTS=False,
                    **conf_data):
    """
    Plot fragility data points, fitted model curves, and/or scenario events.

    :param x_vals: hazard intensity values (array-like)
    :param y_vals: 2D array of exceedance probabilities, one row per entry
        in `SYS_DS` (row 0, the "no damage" state, is not plotted)
    :param SYS_DS: damage state names
    :param model_params: per-damage-state fitted model dicts, each with
        'function' and 'parameters' entries
    :param out_path: directory the figure is written into
    :param file_name: output figure file name
    :param PLOT_DATA: draw the simulated data points
    :param PLOT_MODEL: draw the fitted model curves
    :param PLOT_EVENTS: mark the focal hazard scenario intensities
    :param conf_data: expects keys model_name, x_param, x_unit,
        scenario_metrics, scneario_names (key spelling matches callers)
    :raises AttributeError: if all three PLOT_* flags are False
    """
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # At least one layer must be requested, otherwise there is nothing to draw.
    if sum([PLOT_DATA, PLOT_MODEL, PLOT_EVENTS]) == 0:
        raise AttributeError
    model_name = conf_data.get('model_name')
    x_param = conf_data.get('x_param')
    x_unit = conf_data.get('x_unit')
    scenario_metrics = conf_data.get('scenario_metrics')
    scneario_names = conf_data.get('scneario_names')
    # NOTE(review): the 'seaborn-darkgrid' style name was removed in newer
    # matplotlib releases ('seaborn-v0_8-darkgrid') -- confirm pinned version.
    plt.style.use('seaborn-darkgrid')
    mpl.rc('grid', linewidth=0.7)
    mpl.rc('font', family='sans-serif')
    colours = spl.ColourPalettes()
    # One colour per damage state, taken from the tail of the palette.
    COLR_DS = colours.FiveLevels[-1 * len(SYS_DS):]
    fig = plt.figure(figsize=(9, 5))
    ax = fig.add_subplot(111)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # [Plot 1 of 3] The Data Points
    if PLOT_DATA:
        spl.add_legend_subtitle("DATA")
        for i in range(1, len(SYS_DS)):
            ax.plot(x_vals, y_vals[i],
                    label=SYS_DS[i], clip_on=False, color=COLR_DS[i],
                    linestyle='', alpha=0.6, marker=markers[i - 1],
                    markersize=3, markeredgewidth=1, markeredgecolor=None,
                    zorder=10)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # [Plot 2 of 3] The Fitted Model
    if PLOT_MODEL:
        spl.add_legend_subtitle("FITTED MODEL")
        xmax = max(x_vals)
        # Evaluate each fitted CDF on a dense grid for smooth curves.
        xformodel = np.linspace(0, xmax, 101, endpoint=True)
        dmg_mdl_arr = np.zeros((len(SYS_DS), len(xformodel)))
        for dx in range(1, len(SYS_DS)):
            function_name = model_params[dx]['function']
            params = model_params[dx]['parameters']
            distribution = get_distribution_func(function_name)
            dmg_mdl_arr[dx] = distribution(xformodel, **params)
            ax.plot(xformodel, dmg_mdl_arr[dx],
                    label=SYS_DS[dx], clip_on=False, color=COLR_DS[dx],
                    alpha=0.65, linestyle='-', linewidth=1.6,
                    zorder=9)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # [Plot 3 of 3] The Scenario Events
    if PLOT_EVENTS:
        spl.add_legend_subtitle("EVENTS")
        for i, haz in enumerate(scenario_metrics):
            event_num = str(i + 1)
            event_intensity_str = "{:.3f}".format(float(haz))
            event_color = colours.BrewerSpectral[i]
            # NOTE(review): this try/except catches ValueError only; a
            # missing scenario name raises IndexError/TypeError instead --
            # confirm which failure this was meant to guard against.
            try:
                event_label = event_num + ". " + \
                    scneario_names[i] + " : " + \
                    event_intensity_str
            except ValueError:
                event_label = event_num + " : " + event_intensity_str
            # Invisible marker at y=0 purely to register the legend entry.
            ax.plot(float(haz), 0,
                    label=event_label,
                    color=event_color,
                    marker='',
                    markersize=2,
                    linestyle='-',
                    zorder=11)
            # Open circle above the axes marking the event intensity.
            ax.plot(float(haz), 1.04,
                    label='',
                    clip_on=False,
                    color=event_color,
                    marker='o',
                    fillstyle='none',
                    markersize=12,
                    linestyle='-',
                    markeredgewidth=1.0,
                    zorder=11)
            # Event number inside the circle, with an arrow down to the axis.
            ax.annotate(
                event_num, # event_intensity_str,
                xy=(float(haz), 0), xycoords='data',
                xytext=(float(haz), 1.038), textcoords='data',
                ha='center', va='center', rotation=0,
                size=8, fontweight='bold',
                color=event_color,
                annotation_clip=False,
                bbox=dict(boxstyle='round, pad=0.2', fc='yellow', alpha=0.0),
                path_effects=[
                    PathEffects.withStroke(linewidth=2, foreground="w")],
                arrowprops=dict(
                    arrowstyle='-|>, head_length=0.5, head_width=0.3',
                    shrinkA=3.0,
                    shrinkB=0.0,
                    connectionstyle='arc3,rad=0.0',
                    color=event_color,
                    alpha=0.8,
                    linewidth=1.0,
                    linestyle="-",
                    path_effects=[
                        PathEffects.withStroke(linewidth=2.5, foreground="w")
                    ]
                ),
                zorder=11
            )
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Axis limits, ticks, labels and title.
    ax.set_axisbelow('line')
    outfig = Path(out_path, file_name)
    figtitle = 'System Fragility: ' + model_name
    x_lab = x_param + ' (' + x_unit + ')'
    y_lab = 'P($D_s$ > $d_s$)'
    y_tick_pos = np.linspace(0.0, 1.0, num=6, endpoint=True)
    y_tick_val = ['{:.1f}'.format(i) for i in y_tick_pos]
    x_tick_pos = np.linspace(0.0, max(x_vals), num=6, endpoint=True)
    x_tick_val = ['{:.2f}'.format(i) for i in x_tick_pos]
    ax.set_title(figtitle, loc='center', y=1.06, fontweight='bold', size=11)
    ax.set_xlabel(x_lab, size=10, labelpad=10)
    ax.set_ylabel(y_lab, size=10, labelpad=10)
    ax.set_xlim(0, max(x_tick_pos))
    ax.set_xticks(x_tick_pos)
    ax.set_xticklabels(x_tick_val, size=9)
    ax.set_ylim(0, max(y_tick_pos))
    ax.set_yticks(y_tick_pos)
    ax.set_yticklabels(y_tick_val, size=9)
    ax.margins(0, 0)
    ax.tick_params(axis='both', pad=7)
    # Shrink current axis width by 15%
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])
    # Put a legend to the right of the current axis
    ax.legend(title='', loc='upper left', ncol=1,
              bbox_to_anchor=(1.02, 1.0), frameon=0, prop={'size': 9})
    # NOTE(review): format='jpg' forces JPEG bytes even though every caller
    # passes a '.png' file name -- confirm whether this mismatch is intended.
    plt.savefig(outfig,
                format='jpg', dpi=300, bbox_inches='tight')
    plt.close(fig)
# ====================================================================================
def correct_crossover(
        SYS_DS,
        xdata,
        ydata_2d,
        fitted_params_set,
        CROSSOVER_THRESHOLD=0.005):
    """
    Correct crossovers between fitted curves of successive damage states.

    A fragility curve for a higher damage state must not exceed the curve
    of the preceding (lower) damage state. Where the fitted curves violate
    this, the higher curve is refitted with constrained parameter bounds.
    This function works only for lognormal cdf's.

    :param SYS_DS: damage state names; index 0 is the "no damage" state
    :param xdata: hazard intensity values used for the original fits
    :param ydata_2d: probability-of-exceedance data, one row per damage state
    :param fitted_params_set: dict of fitted model dicts, keyed by damage
        state index, as produced by `fit_cdf_model`
    :param CROSSOVER_THRESHOLD: tolerance below which curve differences
        are ignored
    :returns: `fitted_params_set`, mutated in place with any refitted models
    """
    msg_check_crossover = \
        f"\nChecking for crossover [ THRESHOLD = {str(CROSSOVER_THRESHOLD)} ]"
    rootLogger.info(Fore.GREEN + msg_check_crossover + Fore.RESET)
    params_pe = lmfit.Parameters()
    # Compare each damage state (from the second fitted one) with its
    # immediate predecessor.
    ds_iter = iter(range(2, len(SYS_DS)))
    for dx in ds_iter:
        x_sample = xdata
        y_sample = ydata_2d[dx]
        function_name = fitted_params_set[dx]['function']
        distribution = get_distribution_func(function_name)
        # param_1 / param_2 are the location-like and spread-like parameters
        # (e.g. median and beta for the lognormal fit).
        param_names = list(fitted_params_set[dx]['parameters'].keys())
        param_1 = param_names[0]
        param_2 = param_names[1]
        # --------------------------------------------------------------------------
        # The "higher" (more severe) damage-state curve under scrutiny.
        params_hi = fitted_params_set[dx]['parameters']
        y_model_hi = distribution(x_sample, **params_hi)
        mu_hi = fitted_params_set[dx]['parameters'][param_1]
        sd_hi = fitted_params_set[dx]['parameters'][param_2]
        # NOTE(review): both refit bounds use 2x param_1 (the location-like
        # parameter) as MAX, including for param_2 -- confirm intended.
        MAX = 2 * params_hi[param_1]
        params_pe.add(param_1, value=params_hi[param_1], min=0, max=MAX)
        params_pe.add(param_2, value=params_hi[param_2], min=0, max=MAX)
        # --------------------------------------------------------------------------
        # The "lower" (less severe) damage-state curve it must not exceed.
        params_lo = fitted_params_set[dx - 1]['parameters']
        y_model_lo = distribution(x_sample, **params_lo)
        mu_lo = fitted_params_set[dx - 1]['parameters'][param_1]
        sd_lo = fitted_params_set[dx - 1]['parameters'][param_2]
        # NOTE(review): abs(max(diff)) triggers whenever the curves differ by
        # more than the threshold anywhere (the lower-DS curve normally lies
        # above); a strict crossover test would be min(diff) < -threshold --
        # confirm the intended gate.
        if abs(max(y_model_lo - y_model_hi)) > CROSSOVER_THRESHOLD:
            # Test if higher curve is co-incident with, or exceeds lower curve
            # Note: `loc` param for lognorm assumed zero
            if (mu_hi <= mu_lo):
                cx_msg_1 = f"\n {Fore.MAGENTA}*** Mean of higher curve too low: "\
                           f"resampling{Fore.RESET}"
                cx_msg_2 = f"{Fore.MAGENTA}{param_1}: {str(mu_hi)} {str(mu_lo)} {Fore.RESET}"
                rootLogger.info(cx_msg_1)
                rootLogger.info(cx_msg_2)
                # Constrain the location parameter to at least the lower
                # curve's value, then refit.
                params_pe.add(param_1, value=mu_hi, min=mu_lo)
                fitted_params_set[dx] = fit_cdf_model(
                    x_sample, y_sample, dist=function_name, params_est=params_pe,
                    tag=f"Limit State: {SYS_DS[dx]} | crossover correction attempt")
                # Refresh the parameter values from the refitted model before
                # the spread tests below.
                (mu_hi, sd_hi) = (
                    fitted_params_set[dx]['parameters'][param_1],
                    fitted_params_set[dx]['parameters'][param_2])
            # Thresholds for testing top or bottom crossover
            delta_top = sd_lo - (mu_hi - mu_lo)
            delta_btm = sd_lo + (mu_hi - mu_lo)
            # Test for top crossover: resample if crossover detected
            if (sd_hi < sd_lo) and (sd_hi <= delta_top):
                rootLogger.info(
                    "%s*** Attempting to correct upper crossover%s",
                    Fore.MAGENTA, Fore.RESET)
                params_pe.add(param_2, value=sd_hi, min=delta_top)
                fitted_params_set[dx] = fit_cdf_model(
                    x_sample, y_sample, dist=function_name, params_est=params_pe,
                    tag=f"Limit State: {SYS_DS[dx]} | crossover correction attempt")
            # Test for bottom crossover: resample if crossover detected
            elif sd_hi >= delta_btm:
                rootLogger.info(
                    "%s*** Attempting to correct lower crossover%s",
                    Fore.MAGENTA, Fore.RESET)
                params_pe.add(param_2, value=sd_hi, max=delta_btm)
                fitted_params_set[dx] = fit_cdf_model(
                    x_sample, y_sample, dist=function_name, params_est=params_pe,
                    tag=f"Limit State: {SYS_DS[dx]} | crossover correction attempt")
    ############################################################################
    return fitted_params_set
# ====================================================================================
| [
"logging.getLogger",
"numpy.sqrt",
"brewer2mpl.get_map",
"numpy.log",
"scipy.stats.rayleigh.cdf",
"matplotlib.rc",
"matplotlib.patheffects.withStroke",
"lmfit.Parameters",
"colorama.init",
"lmfit.Model",
"numpy.mean",
"pathlib.Path",
"numpy.where",
"json.dumps",
"numpy.asarray",
"matpl... | [((410, 416), 'colorama.init', 'init', ([], {}), '()\n', (414, 416), False, 'from colorama import Fore, init\n'), ((430, 457), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (447, 457), False, 'import logging\n'), ((458, 472), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {}), "('agg')\n", (465, 472), True, 'import matplotlib as mpl\n'), ((746, 790), 'brewer2mpl.get_map', 'brewer2mpl.get_map', (['"""Set2"""', '"""qualitative"""', '(5)'], {}), "('Set2', 'qualitative', 5)\n", (764, 790), False, 'import brewer2mpl\n'), ((2186, 2199), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2196, 2199), True, 'import numpy as np\n'), ((2462, 2505), 'scipy.stats.rayleigh.cdf', 'stats.rayleigh.cdf', (['x'], {'loc': 'loc', 'scale': 'scale'}), '(x, loc=loc, scale=scale)\n', (2480, 2505), False, 'from scipy import stats\n'), ((3880, 3900), 'numpy.asarray', 'np.asarray', (['x_sample'], {}), '(x_sample)\n', (3890, 3900), True, 'import numpy as np\n'), ((3916, 3936), 'numpy.asarray', 'np.asarray', (['y_sample'], {}), '(y_sample)\n', (3926, 3936), True, 'import numpy as np\n'), ((7507, 7520), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7518, 7520), False, 'from collections import OrderedDict\n'), ((7679, 7692), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7690, 7692), False, 'from collections import OrderedDict\n'), ((10680, 10765), 'json.dumps', 'json.dumps', (["{'system_fragility_model': fitted_params_dict}"], {'default': 'str', 'indent': '(4)'}), "({'system_fragility_model': fitted_params_dict}, default=str,\n indent=4)\n", (10690, 10765), False, 'import json\n'), ((11021, 11069), 'pathlib.Path', 'Path', (['output_path', '"""system_model_fragility.json"""'], {}), "(output_path, 'system_model_fragility.json')\n", (11025, 11069), False, 'from pathlib import Path\n'), ((14183, 14216), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (14196, 14216), 
True, 'import matplotlib.pyplot as plt\n'), ((14221, 14250), 'matplotlib.rc', 'mpl.rc', (['"""grid"""'], {'linewidth': '(0.7)'}), "('grid', linewidth=0.7)\n", (14227, 14250), True, 'import matplotlib as mpl\n'), ((14255, 14290), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'family': '"""sans-serif"""'}), "('font', family='sans-serif')\n", (14261, 14290), True, 'import matplotlib as mpl\n'), ((14306, 14326), 'sira.tools.siraplot.ColourPalettes', 'spl.ColourPalettes', ([], {}), '()\n', (14324, 14326), True, 'import sira.tools.siraplot as spl\n'), ((14390, 14416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (14400, 14416), True, 'import matplotlib.pyplot as plt\n'), ((18234, 18259), 'pathlib.Path', 'Path', (['out_path', 'file_name'], {}), '(out_path, file_name)\n', (18238, 18259), False, 'from pathlib import Path\n'), ((18401, 18444), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(6)', 'endpoint': '(True)'}), '(0.0, 1.0, num=6, endpoint=True)\n', (18412, 18444), True, 'import numpy as np\n'), ((19396, 19459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfig'], {'format': '"""jpg"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(outfig, format='jpg', dpi=300, bbox_inches='tight')\n", (19407, 19459), True, 'import matplotlib.pyplot as plt\n'), ((19480, 19494), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19489, 19494), True, 'import matplotlib.pyplot as plt\n'), ((20058, 20076), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (20074, 20076), False, 'import lmfit\n'), ((3954, 3993), 'numpy.where', 'np.where', (['(y_sample[:-1] != y_sample[1:])'], {}), '(y_sample[:-1] != y_sample[1:])\n', (3962, 3993), True, 'import numpy as np\n'), ((4561, 4578), 'lmfit.Model', 'lmfit.Model', (['func'], {}), '(func)\n', (4572, 4578), False, 'import lmfit\n'), ((11132, 11227), 'json.dump', 'json.dump', (["{'system_fragility_model': fitted_params_dict}", 'f'], {'ensure_ascii': 
'(False)', 'indent': '(4)'}), "({'system_fragility_model': fitted_params_dict}, f, ensure_ascii=\n False, indent=4)\n", (11141, 11227), False, 'import json\n'), ((14588, 14619), 'sira.tools.siraplot.add_legend_subtitle', 'spl.add_legend_subtitle', (['"""DATA"""'], {}), "('DATA')\n", (14611, 14619), True, 'import sira.tools.siraplot as spl\n'), ((15087, 15126), 'sira.tools.siraplot.add_legend_subtitle', 'spl.add_legend_subtitle', (['"""FITTED MODEL"""'], {}), "('FITTED MODEL')\n", (15110, 15126), True, 'import sira.tools.siraplot as spl\n'), ((15174, 15214), 'numpy.linspace', 'np.linspace', (['(0)', 'xmax', '(101)'], {'endpoint': '(True)'}), '(0, xmax, 101, endpoint=True)\n', (15185, 15214), True, 'import numpy as np\n'), ((15918, 15951), 'sira.tools.siraplot.add_legend_subtitle', 'spl.add_legend_subtitle', (['"""EVENTS"""'], {}), "('EVENTS')\n", (15941, 15951), True, 'import sira.tools.siraplot as spl\n'), ((5633, 5650), 'lmfit.Model', 'lmfit.Model', (['func'], {}), '(func)\n', (5644, 5650), False, 'import lmfit\n'), ((5670, 5681), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (5677, 5681), True, 'import numpy as np\n'), ((5705, 5715), 'numpy.std', 'np.std', (['xs'], {}), '(xs)\n', (5711, 5715), True, 'import numpy as np\n'), ((6819, 6836), 'lmfit.Model', 'lmfit.Model', (['func'], {}), '(func)\n', (6830, 6836), False, 'import lmfit\n'), ((6883, 6893), 'numpy.std', 'np.std', (['xs'], {}), '(xs)\n', (6889, 6893), True, 'import numpy as np\n'), ((4741, 4752), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (4748, 4752), True, 'import numpy as np\n'), ((4866, 4876), 'numpy.std', 'np.std', (['xs'], {}), '(xs)\n', (4872, 4876), True, 'import numpy as np\n'), ((2241, 2256), 'numpy.log', 'np.log', (['(x - loc)'], {}), '(x - loc)\n', (2247, 2256), True, 'import numpy as np\n'), ((2259, 2273), 'numpy.log', 'np.log', (['median'], {}), '(median)\n', (2265, 2273), True, 'import numpy as np\n'), ((2278, 2288), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2285, 2288), 
True, 'import numpy as np\n'), ((2402, 2412), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2409, 2412), True, 'import numpy as np\n'), ((4771, 4782), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (4778, 4782), True, 'import numpy as np\n'), ((4889, 4900), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (4896, 4900), True, 'import numpy as np\n'), ((17502, 17553), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(2)', 'foreground': '"""w"""'}), "(linewidth=2, foreground='w')\n", (17524, 17553), True, 'import matplotlib.patheffects as PathEffects\n'), ((17977, 18030), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(2.5)', 'foreground': '"""w"""'}), "(linewidth=2.5, foreground='w')\n", (17999, 18030), True, 'import matplotlib.patheffects as PathEffects\n')] |
from __future__ import print_function, division
#import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#import tensorflow
#tensorflow.logging.set_verbosity(tensorflow.logging.WARN)
from tensorflow.python.keras import backend as k
import scipy
from scipy.misc import imsave
from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from tensorflow.python.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.layers.convolutional import UpSampling2D, Conv2D
from tensorflow.python.keras.models import Sequential, Model
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.applications import VGG19
from tensorflow.python.keras.preprocessing import image
import tensorflow as tf
import datetime
import cv2
import sys
from dataloader_new import DataLoader
import numpy as np
import os
from tiramisu import Tiramisu
from tensorflow.python.keras.applications.vgg19 import preprocess_input
def feature_extract(x):
    """Run the feature-extractor model on ``x`` and return its predictions.

    NOTE(review): ``model_extractfeatures`` is not defined anywhere in this
    module (only ``self.vggmodel`` exists, inside ``Pix2Pix``); calling this
    function as written raises NameError -- confirm where the extractor is
    meant to be created.
    """
    print(x)
    fc2_features = model_extractfeatures.predict(x,steps=1)
    return fc2_features
def preprocess(img):
    """Resize an image tensor to VGG19's 224x224 input and apply VGG19 preprocessing.

    Both the incoming and the resized tensors are printed for debugging,
    matching the original behaviour.
    """
    print(img)
    vgg_sized = tf.image.resize_images(img, (224, 224))
    print(vgg_sized)
    prepared = preprocess_input(vgg_sized)
    return prepared
HUBER_DELTA = 0.5
def smoothL1(y_true, y_pred):
    """Huber-style smooth-L1 loss, summed over every element.

    Quadratic for absolute errors below HUBER_DELTA, linear above it.
    """
    err = k.abs(y_true - y_pred)
    quadratic = 0.5 * err ** 2
    linear = HUBER_DELTA * (err - 0.5 * HUBER_DELTA)
    return k.sum(k.switch(err < HUBER_DELTA, quadratic, linear))
def total_loss(y_true, y_pred):
    """Perceptual (VGG feature) MSE plus smooth-L1 pixel loss.

    NOTE(review): depends on ``feature_extract`` (whose extractor model is
    undefined at module level) and on ``k.eval``, which requires the tensors
    to be evaluable in the active session -- confirm this is ever called.
    """
    f1=preprocess(y_true)
    f2=preprocess(y_pred)
    fx1=feature_extract(f1)
    fx2=feature_extract(f2)
    # Perceptual term: mean squared difference between VGG feature maps.
    loss1 = tf.reduce_mean(tf.squared_difference(fx1, fx2))
    # Pixel term: smooth-L1 between the raw images.
    loss2=smoothL1(y_true,y_pred)
    return k.eval(loss1)+k.eval(loss2)
###################################################################3
class Pix2Pix():
    """Conditional GAN (pix2pix-style) for image-to-image translation.

    Generator: a Tiramisu (FC-DenseNet) network.  Discriminator: a PatchGAN.
    Training combines an adversarial MSE loss, a smooth-L1 pixel loss and a
    VGG19-feature (perceptual) loss trained through a second combined model.
    """
    def __init__(self):
        # Input shape
        self.img_rows = 256
        self.img_cols = 256
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # Configure data loader
        self.dataset_name = 'facades'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))
        # Calculate output shape of D (PatchGAN): the discriminator halves
        # the resolution four times, so one validity score per 16x16 patch.
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)
        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64
        optimizer = Adam(0.0002, 0.5)
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        #-------------------------
        # Construct Computational
        # Graph of Generator
        #-------------------------
        # Build the generator
        self.generator = self.build_generator()
        # Input images and their conditioning images
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # By conditioning on B generate a fake version of A
        fake_A = self.generator(img_B)
        # For the combined model we will only train the generator
        #self.discriminator.trainable = False
        # Discriminators determines validity of translated images / condition pairs
        valid = self.discriminator([fake_A, img_B])
        # Combined model: trains the generator to fool D (mse on validity)
        # and to stay close to the target image (smooth-L1, weight 100).
        self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])
        self.combined.compile(loss=['mse', smoothL1],
                              loss_weights=[1, 100],
                              optimizer=optimizer)
        ################# Perceptual loss and L1 loss ######################
        # Frozen VGG19 (no top) used as a fixed feature extractor; weights
        # are loaded from a local .h5 file that must be present on disk.
        self.vggmodel=VGG19(weights="vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False)
        #print(vggmodel.get_layer('block4_pool'))
        #print(self.combined.output[1])
        #print(vggmodel.get_layer('block4_pool').output)
        #lossOut = vggmodel(inputs=self.combined.output[1], output = vggmodel.get_layer('block4_pool').output)
        # VGG features of the generated image (combined.output[1] == fake_A).
        lossOut = self.vggmodel(inputs=self.combined.output[1])
        self.vggmodel.trainable = False
        for l in self.vggmodel.layers:
            l.trainable = False
        # Second combined model: matches VGG features of the generated image
        # against those of the real image (perceptual loss).
        self.vgg_combined = Model(inputs=self.combined.input, outputs=lossOut)
        self.vgg_combined.compile(loss='mse',optimizer='adam')
        valid.trainable = False
        #self.combined.load_weights("Weights/199.h5")
    def build_generator(self):
        """Build and return the Tiramisu (FC-DenseNet) generator network."""
        # Layers per dense block, down path / bottleneck / up path.
        layer_per_block = [4, 4, 4, 4, 4, 15, 4, 4, 4, 4, 4]
        tiramisu = Tiramisu(layer_per_block)
        tiramisu.summary()
        #d0 = Input(shape=self.img_shape)
        return tiramisu
    def build_discriminator(self):
        """Build the PatchGAN discriminator over (image, condition) pairs."""
        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # Concatenate image and conditioning image by channels to produce input
        combined_imgs = Concatenate(axis=-1)([img_A, img_B])
        d1 = d_layer(combined_imgs, self.df, bn=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)
        # One validity score per patch (PatchGAN output).
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return Model([img_A, img_B], validity)
    def train(self, epochs, batch_size=1, sample_interval=50):
        """Alternate discriminator / generator updates over the data loader.

        Saves sample images every ``sample_interval`` batches and the
        combined model's weights after every epoch.
        """
        start_time = datetime.datetime.now()
        # Adversarial loss ground truths
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)
        for epoch in range(epochs):
            for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):
                # ---------------------
                #  Train Discriminator
                # ---------------------
                # Condition on B and generate a translated version
                fake_A = self.generator.predict(imgs_B)
                # Train the discriminators (original images = real / generated = Fake)
                d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid)
                d_loss_fake = self.discriminator.train_on_batch([fake_A, imgs_B], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                # -----------------
                #  Train Generator
                # -----------------
                # Train the generators
                g_loss=self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])
                # Perceptual step: pull generated-image VGG features toward
                # the real image's features.
                full_vgg = self.vggmodel.predict(imgs_A)
                vgg_loss = self.vgg_combined.train_on_batch([imgs_A, imgs_B], full_vgg)
                elapsed_time = datetime.datetime.now() - start_time
                # Plot the progress
                print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s" % (epoch, epochs,
                                                                        batch_i, self.data_loader.n_batches,
                                                                        d_loss[0], 100*d_loss[1],
                                                                        g_loss[0],
                                                                        elapsed_time))
                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    self.sample_images(epoch, batch_i)
            self.combined.save_weights("Weights/"+str(epoch)+".h5")
    def img_to_frame(self,imgA,imgB,fakeA):
        """Tile condition / generated / original batches into one labelled frame.

        Images arrive in [-1, 1]; they are rescaled to [0, 255] and each row
        is captioned with its title via OpenCV.  Returns the composed frame.
        """
        no_images = imgA.shape[0]
        img_height = imgA.shape[1]
        img_width = imgA.shape[2]
        pad = 20
        title_pad=20
        pad_top = pad+title_pad
        frame=np.zeros((no_images*(img_height+pad_top),no_images*(img_width+pad),3))
        count=0
        gen_imgs = np.concatenate([imgB, fakeA, imgA])
        # Rescale from [-1, 1] to [0, 1].
        gen_imgs = 0.5 * gen_imgs + 0.5
        titles = ['Condition', 'Generated', 'Original']
        for r in range(no_images):
            for c in range(no_images):
                im = gen_imgs[count]
                count=count+1
                y0 = r*(img_height+pad_top) + pad//2
                x0 = c*(img_width+pad) + pad//2
                # print(frame[y0:y0+img_height,x0:x0+img_width,:].shape)
                frame[y0:y0+img_height,x0:x0+img_width,:] = im*255
                frame = cv2.putText(frame, titles[r], (x0, y0-title_pad//4), cv2.FONT_HERSHEY_COMPLEX, .5, (255,255,255))
        return frame
    def sample_images(self, epoch, batch_i):
        """Write haze / dehazed / original samples plus a tiled comparison frame."""
        os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
        os.makedirs('images/dehazed', exist_ok=True)
        os.makedirs('images/haze', exist_ok=True)
        os.makedirs('images/original',exist_ok=True)
        r, c = 3, 3
        imgs_A, imgs_B, or_A, or_B = self.data_loader.load_data(batch_size=3, is_testing=True)
        fake_A = self.generator.predict(imgs_B)
        # Undo the [-1, 1] normalisation before writing to disk.
        cv2.imwrite("images/dehazed"+"/"+"Img:"+str(epoch)+"_"+str(batch_i)+".jpg",(fake_A[0]*0.5+0.5)*255)
        cv2.imwrite("images/haze"+"/"+"Img:"+str(epoch)+"_"+str(batch_i)+".jpg",(or_B[0]*0.5+0.5)*255)
        cv2.imwrite("images/original"+"/"+"Img:"+str(epoch)+"_"+str(batch_i)+".jpg",(or_A[0]*0.5+0.5)*255)
        frame=self.img_to_frame(imgs_A,imgs_B,fake_A)
        cv2.imwrite("images/"+self.dataset_name+"/"+"Img:"+str(epoch)+"_"+str(batch_i)+".png",frame)
        #imsave("images/"+self.dataset_name+"/"+"Scipy:Img:"+str(epoch)+"_"+str(batch_i)+".png",frame )
if __name__ == '__main__':
    # Train the GAN for 200 epochs, saving sample images every 200 batches.
    gan = Pix2Pix()
    gan.train(epochs=200, batch_size=1, sample_interval=200)
| [
"tensorflow.image.resize_images",
"tensorflow.python.keras.backend.sum",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.models.Model",
"tensorflow.squared_difference",
"tensorflow.python.keras.backend.eval",
"tensorflow.python.keras.optimizers.Adam",
"tensorflow.python.keras.backend.... | [((1276, 1315), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1298, 1315), True, 'import tensorflow as tf\n'), ((1548, 1580), 'tensorflow.python.keras.applications.vgg19.preprocess_input', 'preprocess_input', (['resized_images'], {}), '(resized_images)\n', (1564, 1580), False, 'from tensorflow.python.keras.applications.vgg19 import preprocess_input\n'), ((1652, 1674), 'tensorflow.python.keras.backend.abs', 'k.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1657, 1674), True, 'from tensorflow.python.keras import backend as k\n'), ((1684, 1762), 'tensorflow.python.keras.backend.switch', 'k.switch', (['(x < HUBER_DELTA)', '(0.5 * x ** 2)', '(HUBER_DELTA * (x - 0.5 * HUBER_DELTA))'], {}), '(x < HUBER_DELTA, 0.5 * x ** 2, HUBER_DELTA * (x - 0.5 * HUBER_DELTA))\n', (1692, 1762), True, 'from tensorflow.python.keras import backend as k\n'), ((1774, 1782), 'tensorflow.python.keras.backend.sum', 'k.sum', (['x'], {}), '(x)\n', (1779, 1782), True, 'from tensorflow.python.keras import backend as k\n'), ((1951, 1982), 'tensorflow.squared_difference', 'tf.squared_difference', (['fx1', 'fx2'], {}), '(fx1, fx2)\n', (1972, 1982), True, 'import tensorflow as tf\n'), ((2029, 2042), 'tensorflow.python.keras.backend.eval', 'k.eval', (['loss1'], {}), '(loss1)\n', (2035, 2042), True, 'from tensorflow.python.keras import backend as k\n'), ((2043, 2056), 'tensorflow.python.keras.backend.eval', 'k.eval', (['loss2'], {}), '(loss2)\n', (2049, 2056), True, 'from tensorflow.python.keras import backend as k\n'), ((2445, 2532), 'dataloader_new.DataLoader', 'DataLoader', ([], {'dataset_name': 'self.dataset_name', 'img_res': '(self.img_rows, self.img_cols)'}), '(dataset_name=self.dataset_name, img_res=(self.img_rows, self.\n img_cols))\n', (2455, 2532), False, 'from dataloader_new import DataLoader\n'), ((2825, 2842), 'tensorflow.python.keras.optimizers.Adam', 'Adam', (['(0.0002)', 
'(0.5)'], {}), '(0.0002, 0.5)\n', (2829, 2842), False, 'from tensorflow.python.keras.optimizers import Adam\n'), ((3345, 3372), 'tensorflow.python.keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (3350, 3372), False, 'from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((3389, 3416), 'tensorflow.python.keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (3394, 3416), False, 'from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((3801, 3854), 'tensorflow.python.keras.models.Model', 'Model', ([], {'inputs': '[img_A, img_B]', 'outputs': '[valid, fake_A]'}), '(inputs=[img_A, img_B], outputs=[valid, fake_A])\n', (3806, 3854), False, 'from tensorflow.python.keras.models import Sequential, Model\n'), ((4121, 4210), 'tensorflow.python.keras.applications.VGG19', 'VGG19', ([], {'weights': '"""vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5"""', 'include_top': '(False)'}), "(weights='vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n include_top=False)\n", (4126, 4210), False, 'from tensorflow.python.keras.applications import VGG19\n'), ((4669, 4719), 'tensorflow.python.keras.models.Model', 'Model', ([], {'inputs': 'self.combined.input', 'outputs': 'lossOut'}), '(inputs=self.combined.input, outputs=lossOut)\n', (4674, 4719), False, 'from tensorflow.python.keras.models import Sequential, Model\n'), ((4994, 5019), 'tiramisu.Tiramisu', 'Tiramisu', (['layer_per_block'], {}), '(layer_per_block)\n', (5002, 5019), False, 'from tiramisu import Tiramisu\n'), ((5498, 5525), 'tensorflow.python.keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (5503, 5525), False, 'from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((5542, 5569), 'tensorflow.python.keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), 
'(shape=self.img_shape)\n', (5547, 5569), False, 'from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((5968, 5999), 'tensorflow.python.keras.models.Model', 'Model', (['[img_A, img_B]', 'validity'], {}), '([img_A, img_B], validity)\n', (5973, 5999), False, 'from tensorflow.python.keras.models import Sequential, Model\n'), ((6086, 6109), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6107, 6109), False, 'import datetime\n'), ((6168, 6208), 'numpy.ones', 'np.ones', (['((batch_size,) + self.disc_patch)'], {}), '((batch_size,) + self.disc_patch)\n', (6175, 6208), True, 'import numpy as np\n'), ((6224, 6265), 'numpy.zeros', 'np.zeros', (['((batch_size,) + self.disc_patch)'], {}), '((batch_size,) + self.disc_patch)\n', (6232, 6265), True, 'import numpy as np\n'), ((8475, 8560), 'numpy.zeros', 'np.zeros', (['(no_images * (img_height + pad_top), no_images * (img_width + pad), 3)'], {}), '((no_images * (img_height + pad_top), no_images * (img_width + pad), 3)\n )\n', (8483, 8560), True, 'import numpy as np\n'), ((8581, 8616), 'numpy.concatenate', 'np.concatenate', (['[imgB, fakeA, imgA]'], {}), '([imgB, fakeA, imgA])\n', (8595, 8616), True, 'import numpy as np\n'), ((9295, 9354), 'os.makedirs', 'os.makedirs', (["('images/%s' % self.dataset_name)"], {'exist_ok': '(True)'}), "('images/%s' % self.dataset_name, exist_ok=True)\n", (9306, 9354), False, 'import os\n'), ((9363, 9407), 'os.makedirs', 'os.makedirs', (['"""images/dehazed"""'], {'exist_ok': '(True)'}), "('images/dehazed', exist_ok=True)\n", (9374, 9407), False, 'import os\n'), ((9416, 9457), 'os.makedirs', 'os.makedirs', (['"""images/haze"""'], {'exist_ok': '(True)'}), "('images/haze', exist_ok=True)\n", (9427, 9457), False, 'import os\n'), ((9466, 9511), 'os.makedirs', 'os.makedirs', (['"""images/original"""'], {'exist_ok': '(True)'}), "('images/original', exist_ok=True)\n", (9477, 9511), False, 'import os\n'), ((5675, 5695), 
'tensorflow.python.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5686, 5695), False, 'from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((5896, 5947), 'tensorflow.python.keras.layers.convolutional.Conv2D', 'Conv2D', (['(1)'], {'kernel_size': '(4)', 'strides': '(1)', 'padding': '"""same"""'}), "(1, kernel_size=4, strides=1, padding='same')\n", (5902, 5947), False, 'from tensorflow.python.keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((5269, 5331), 'tensorflow.python.keras.layers.convolutional.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'f_size', 'strides': '(2)', 'padding': '"""same"""'}), "(filters, kernel_size=f_size, strides=2, padding='same')\n", (5275, 5331), False, 'from tensorflow.python.keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((5361, 5381), 'tensorflow.python.keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5370, 5381), False, 'from tensorflow.python.keras.layers.advanced_activations import LeakyReLU\n'), ((9119, 9228), 'cv2.putText', 'cv2.putText', (['frame', 'titles[r]', '(x0, y0 - title_pad // 4)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(255, 255, 255)'], {}), '(frame, titles[r], (x0, y0 - title_pad // 4), cv2.\n FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))\n', (9130, 9228), False, 'import cv2\n'), ((5424, 5456), 'tensorflow.python.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (5442, 5456), False, 'from tensorflow.python.keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((6940, 6972), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (6946, 6972), True, 'import numpy as np\n'), ((7420, 7443), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7441, 7443), False, 'import datetime\n')] |
# Copyright (c) IBM Corp. 2018. All Rights Reserved.
# Project name: Constrained Exploration and Recovery from Experience Shaping
# This project is licensed under the MIT License, see LICENSE
import numpy as np
class QPSolver(object):
    '''
    A base class to interface with QP solvers.

    Objective terms (``add_obj``), linear equality constraints (``add_eq``)
    and linear inequality constraints (``add_ineq``) are accumulated in
    lists and stacked into single matrices/vectors by the ``build_*``
    methods -- or all at once by ``update``.  Solver-specific subclasses
    override the ``*_solver_specific`` hooks and implement ``solve``.
    '''
    def __init__(self, n_var=None, verbose=False):
        # n_var may be left as None; it is then inferred from the first
        # matrix passed through check_mat_vec().
        self.n_var = n_var
        self.verbose = verbose
        self.reset()

    def reset(self, do_reset_obj=True, do_reset_eq=True, do_reset_ineq=True):
        '''Clear the selected groups (objective / equalities / inequalities).'''
        if do_reset_obj:
            self.reset_obj()
        if do_reset_eq:
            self.reset_eq()
        if do_reset_ineq:
            self.reset_ineq()

    def update(self):
        '''Rebuild all stacked matrices, then run the solver-specific update.'''
        self.build_obj()
        self.build_eq()
        self.build_ineq()
        self.update_solver_specific()

    def reset_eq(self):
        '''Drop all accumulated equality constraints.'''
        self.eq_mat_list = []
        self.eq_vec_list = []
        self.eq_mat = None
        self.eq_vec = None
        self.n_eq = 0
        self.reset_eq_solver_specific()

    def reset_ineq(self):
        '''Drop all accumulated inequality constraints.'''
        self.ineq_mat_list = []
        self.ineq_vec_list = []
        self.ineq_mat = None
        self.ineq_vec = None
        self.n_ineq = 0
        self.reset_ineq_solver_specific()

    def reset_obj(self):
        '''Drop all accumulated objective terms.'''
        self.obj_mat_list = []
        self.obj_vec_list = []
        self.obj_mat = None
        self.obj_vec = None
        self.n_obj = 0
        self.reset_obj_solver_specific()

    def check_mat_vec(self, mat, vec):
        '''
        Ensure that mat and vec are numpy arrays and of appropriate dimensions.

        mat must be 2-D with n_var columns (n_var is inferred from the first
        matrix seen when it was not supplied to the constructor); vec must be
        a column vector with as many rows as mat.  Returns the converted pair.
        '''
        mat = np.array(mat)
        vec = np.array(vec)
        if self.n_var is None:
            self.n_var = mat.shape[1]
        else:
            assert mat.shape[1] == self.n_var, 'Invalid constraint matrix size {0} for {1} variables'.format(mat.shape, self.n_var)
        assert mat.ndim == 2, 'Invalid constraint matrix dimensions: expected 2, got {0}'.format(mat.ndim)
        assert vec.ndim == 2, 'Invalid constraint vector dimensions: expected 2, got {0}'.format(vec.ndim)
        assert mat.shape[0] == vec.shape[0], 'Inconsistent constraint matrix and vector sizes'
        # Bug fix: this message previously formatted mat.shape, reporting the
        # wrong array when the *vector* had more than one column.
        assert vec.shape[1] == 1, 'Invalid constraint vector size {0}, should have one column'.format(vec.shape)
        return mat, vec

    def add_obj(self, mat, vec, build=False):
        '''Add a quadratic objective term (square mat, column vec).'''
        mat, vec = self.check_mat_vec(mat, vec)
        assert mat.shape[0] == mat.shape[1], 'Invalid objective matrix shape {0}, should be square'.format(mat.shape)
        self.obj_mat_list.append(mat)
        self.obj_vec_list.append(vec)
        if build:
            self.build_obj()

    def build_obj(self):
        '''Sum all objective terms into obj_mat / obj_vec (requires >= 1 term).'''
        self.n_obj = len(self.obj_mat_list)
        assert self.n_obj > 0, 'No objective terms to build'
        self.obj_mat = sum(self.obj_mat_list)
        self.obj_vec = sum(self.obj_vec_list)
        self.build_obj_solver_specific()

    def add_eq(self, mat, vec, build=False):
        '''Add equality constraints mat @ x == vec.'''
        mat, vec = self.check_mat_vec(mat, vec)
        self.eq_mat_list.append(mat)
        self.eq_vec_list.append(vec)
        if build:
            self.build_eq()

    def build_eq(self):
        '''Stack all equality constraints row-wise into eq_mat / eq_vec.'''
        if len(self.eq_mat_list) > 0:
            self.eq_mat = np.concatenate(self.eq_mat_list, axis=0)
            self.eq_vec = np.concatenate(self.eq_vec_list, axis=0)
            self.n_eq = self.eq_mat.shape[0]
        else:
            self.eq_mat = None
            self.eq_vec = None
            self.n_eq = 0
        self.build_eq_solver_specific()

    def add_ineq(self, mat, vec, build=False):
        '''Add inequality constraints; (None, None) is accepted as a no-op.'''
        if (mat is None) or (vec is None):
            assert (mat is None) and (vec is None), 'Constraint incomplete: mat={0}, vec={1}'.format(mat, vec)
            return
        mat, vec = self.check_mat_vec(mat, vec)
        # Silently skip empty (0-row) constraint blocks.
        n_ineq_loc = mat.shape[0]
        if n_ineq_loc > 0:
            self.ineq_mat_list.append(mat)
            self.ineq_vec_list.append(vec)
        if build:
            self.build_ineq()

    def build_ineq(self):
        '''Stack all inequality constraints row-wise into ineq_mat / ineq_vec.'''
        if len(self.ineq_mat_list) > 0:
            self.ineq_mat = np.concatenate(self.ineq_mat_list, axis=0)
            self.ineq_vec = np.concatenate(self.ineq_vec_list, axis=0)
            self.n_ineq = self.ineq_mat.shape[0]
        else:
            self.ineq_mat = None
            self.ineq_vec = None
            self.n_ineq = 0
        self.build_ineq_solver_specific()

    # --- Hooks for solver-specific subclasses (no-ops by default) ---

    def reset_obj_solver_specific(self):
        pass

    def reset_eq_solver_specific(self):
        pass

    def reset_ineq_solver_specific(self):
        pass

    def build_obj_solver_specific(self):
        pass

    def build_eq_solver_specific(self):
        pass

    def build_ineq_solver_specific(self):
        pass

    def update_solver_specific(self):
        pass

    def solve(self):
        '''Solve the QP; must be implemented by a solver-specific subclass.'''
        raise NotImplementedError()
| [
"numpy.array",
"numpy.concatenate"
] | [((1588, 1601), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (1596, 1601), True, 'import numpy as np\n'), ((1616, 1629), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (1624, 1629), True, 'import numpy as np\n'), ((3163, 3203), 'numpy.concatenate', 'np.concatenate', (['self.eq_mat_list'], {'axis': '(0)'}), '(self.eq_mat_list, axis=0)\n', (3177, 3203), True, 'import numpy as np\n'), ((3230, 3270), 'numpy.concatenate', 'np.concatenate', (['self.eq_vec_list'], {'axis': '(0)'}), '(self.eq_vec_list, axis=0)\n', (3244, 3270), True, 'import numpy as np\n'), ((4025, 4067), 'numpy.concatenate', 'np.concatenate', (['self.ineq_mat_list'], {'axis': '(0)'}), '(self.ineq_mat_list, axis=0)\n', (4039, 4067), True, 'import numpy as np\n'), ((4096, 4138), 'numpy.concatenate', 'np.concatenate', (['self.ineq_vec_list'], {'axis': '(0)'}), '(self.ineq_vec_list, axis=0)\n', (4110, 4138), True, 'import numpy as np\n')] |
from numpy import cos, cosh, sin, sinh
import numpy as np
import matplotlib.pyplot as plt
import time
'''
TASK 15
'''
# Roots of the cantilever-beam characteristic equation (mode eigenvalues).
# NOTE(review): r3 appears truncated to 3 decimals while r1/r2 carry full
# precision -- confirm whether more digits were intended.
r1 = 1.87527632324985
r2 = 4.69409122046058
r3 = 7.855
# Modal amplitudes for the first three modes.
q1 = 1.77748462
q2 = -0.84753308
q3 = -0.01671216
#r1 and q1 derived in previous task
def phi(x, r):
    """Cantilever-beam mode shape for eigenvalue r, evaluated at position x."""
    ratio = (cos(r) + cosh(r)) / (sin(r) + sinh(r))
    return sin(r*x) + sinh(r*x) + ratio * (cos(r*x) - cosh(r*x))
def psi(s, qi, qj, qk, ri, rj, rk):
    """Deflection shape at s: weighted sum of the first three mode shapes."""
    return qi * phi(s, ri) + qj * phi(s, rj) + qk * phi(s, rk)
def trans_trapezoid(b=1, n=100):
    """Trapezoid-rule integral of sin(psi(s)) over [0, b] with n panels.

    Gives the transverse deflection w(b) for the modal coefficients
    q1..q3 / r1..r3 defined at module level.
    """
    a = 0
    ds = (b - a) / n
    half_step = ds / 2
    # Endpoint contributions first, then the doubled interior samples.
    acc = sin(psi(a, q1, q2, q3, r1, r2, r3)) + sin(psi(b, q1, q2, q3, r1, r2, r3))
    for i in range(1, n):
        acc += 2 * sin(psi(a + i * ds, q1, q2, q3, r1, r2, r3))
    return half_step * acc
def long_trapezoid(b=1, n=100):
    """Trapezoid-rule integral of cos(psi(s)) over [0, b] with n panels.

    Gives the arc-length projection along the beam axis; the longitudinal
    deflection is this value minus b.
    """
    a = 0
    ds = (b - a) / n
    half_step = ds / 2
    # Endpoint contributions first, then the doubled interior samples.
    acc = cos(psi(a, q1, q2, q3, r1, r2, r3)) + cos(psi(b, q1, q2, q3, r1, r2, r3))
    for i in range(1, n):
        acc += 2 * cos(psi(a + i * ds, q1, q2, q3, r1, r2, r3))
    return half_step * acc
xs = np.arange(0, 1.01, 0.01) #101 data points
# Sample both deflection components along the beam.
trans_deflection = []
long_deflection = []
for x in xs:
    trans_deflection.append(trans_trapezoid(x))
    # Longitudinal displacement: integrated axial projection minus x itself.
    long_deflection.append(long_trapezoid(x) - x)
#plot trans deflection
plt.plot(xs, np.array(trans_deflection))
plt.xlabel("Position along the beam, x [m]")
plt.ylabel("Transverse deflection of the beam, w(x) [m]")
plt.grid(color='k', linestyle='--', linewidth=0.5)
plt.title("Transverse deflection at various positions along the beam")
plt.show()
#plot long deflection
plt.plot(xs, np.array(long_deflection))
plt.xlabel("Position along the beam, x [m]")
plt.ylabel("Longitudinal deflection of the beam, u(x) [m]")
plt.grid(color='k', linestyle='--', linewidth=0.5)
plt.title("Longitudinal deflection at various positions along the beam")
plt.show()
#plot overall deflection
# 3-D curve combining both components against position.
ax = plt.figure().add_subplot(projection='3d')
ax.plot(xs, long_deflection, trans_deflection, label="overall deflection of the beam")
ax.set_xlabel("Position along the beam, x [m]")
ax.set_zlabel("w(x) [m]")
ax.set_ylabel("u(x) [m]")
ax.legend()
plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sinh",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.cosh",
"numpy.sin",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1015, 1039), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.01)'], {}), '(0, 1.01, 0.01)\n', (1024, 1039), True, 'import numpy as np\n'), ((1279, 1323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position along the beam, x [m]"""'], {}), "('Position along the beam, x [m]')\n", (1289, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1381), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Transverse deflection of the beam, w(x) [m]"""'], {}), "('Transverse deflection of the beam, w(x) [m]')\n", (1334, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1432), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""k"""', 'linestyle': '"""--"""', 'linewidth': '(0.5)'}), "(color='k', linestyle='--', linewidth=0.5)\n", (1390, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1433, 1503), 'matplotlib.pyplot.title', 'plt.title', (['"""Transverse deflection at various positions along the beam"""'], {}), "('Transverse deflection at various positions along the beam')\n", (1442, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1514), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1512, 1514), True, 'import matplotlib.pyplot as plt\n'), ((1578, 1622), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position along the beam, x [m]"""'], {}), "('Position along the beam, x [m]')\n", (1588, 1622), True, 'import matplotlib.pyplot as plt\n'), ((1623, 1682), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Longitudinal deflection of the beam, u(x) [m]"""'], {}), "('Longitudinal deflection of the beam, u(x) [m]')\n", (1633, 1682), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1733), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""k"""', 'linestyle': '"""--"""', 'linewidth': '(0.5)'}), "(color='k', linestyle='--', linewidth=0.5)\n", (1691, 1733), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1806), 'matplotlib.pyplot.title', 'plt.title', (['"""Longitudinal deflection at various positions along the beam"""'], 
{}), "('Longitudinal deflection at various positions along the beam')\n", (1743, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1817), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1815, 1817), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2098, 2100), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1277), 'numpy.array', 'np.array', (['trans_deflection'], {}), '(trans_deflection)\n', (1259, 1277), True, 'import numpy as np\n'), ((1551, 1576), 'numpy.array', 'np.array', (['long_deflection'], {}), '(long_deflection)\n', (1559, 1576), True, 'import numpy as np\n'), ((1849, 1861), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1859, 1861), True, 'import matplotlib.pyplot as plt\n'), ((288, 298), 'numpy.sin', 'sin', (['(r * x)'], {}), '(r * x)\n', (291, 298), False, 'from numpy import cos, cosh, sin, sinh\n'), ((299, 310), 'numpy.sinh', 'sinh', (['(r * x)'], {}), '(r * x)\n', (303, 310), False, 'from numpy import cos, cosh, sin, sinh\n'), ((352, 362), 'numpy.cos', 'cos', (['(r * x)'], {}), '(r * x)\n', (355, 362), False, 'from numpy import cos, cosh, sin, sinh\n'), ((361, 372), 'numpy.cosh', 'cosh', (['(r * x)'], {}), '(r * x)\n', (365, 372), False, 'from numpy import cos, cosh, sin, sinh\n'), ((313, 319), 'numpy.cos', 'cos', (['r'], {}), '(r)\n', (316, 319), False, 'from numpy import cos, cosh, sin, sinh\n'), ((322, 329), 'numpy.cosh', 'cosh', (['r'], {}), '(r)\n', (326, 329), False, 'from numpy import cos, cosh, sin, sinh\n'), ((332, 338), 'numpy.sin', 'sin', (['r'], {}), '(r)\n', (335, 338), False, 'from numpy import cos, cosh, sin, sinh\n'), ((341, 348), 'numpy.sinh', 'sinh', (['r'], {}), '(r)\n', (345, 348), False, 'from numpy import cos, cosh, sin, sinh\n')] |
import sys
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QMessageBox
import numpy as np
from fotf import *
#gui
from pyGui import fotfviewergui, createnewfotfgui
STATUSBAR_TIME = 5000
#__all__ = ['loadsets','gg1','gg2','gg3']
class FotfViewForm(QMainWindow, fotfviewergui.Ui_MainWindow_fotfviewer):
    """Main window for working with fractional-order transfer functions (FOTF).

    Lets the user add/edit/delete FOTF systems in a combo box and run
    analyses on the selected system: console view, Oustaloup approximation,
    stability test, Bode plot and step-response simulation.  Line-edit
    validators keep boolean flags up to date and enable/disable the
    corresponding action buttons.
    """
    def __init__(self):
        QMainWindow.__init__(self)
        fotfviewergui.Ui_MainWindow_fotfviewer.__init__(self)
        self.setWindowIcon(QIcon('index.png'))
        self.setupUi(self)
        # Checks for frequency domain
        self.lowerFreq = self.higherFreq = self.freqDataPoints = True
        # Checks for time Domain
        self._input = self._STOPTIME = self._STARTTIME = self._greaterthan = self._stepok = True
        #Personal Edits and Method calls/Subscribed Events
        self.foregroundRole()
        self.reloadAllFOTransFunc()
        # Wire buttons to their handlers.
        self.pushButton_AddFotf.clicked.connect(self.addnewFotf)
        self.pushButton_EditFOTF.clicked.connect(self.editFOTF)
        self.pushButton_DeleteFOTF.clicked.connect(self.deleteFOTF)
        self.pushButtonViewInConsole.clicked.connect(self.ViewInConsole)
        self.pushButtonGetOustaloop.clicked.connect(self.OustaloopModel)
        self.pushButton_StabilityTest.clicked.connect(self.StabilityTest)
        self.pushButton_BodePlot.clicked.connect(self.BodePlot)
        self.pushButtonSimulate.clicked.connect(self.Step)
        self.comboBoxFOTF.currentIndexChanged.connect(self.ComboBoxFOTFempty)
        # Wire line-edit validators (frequency- and time-domain inputs).
        self.lineEdit_LowerFreq.editingFinished.connect(self._LowerFreq)
        self.lineEdit_HigherFreq.editingFinished.connect(self._HigherFreq)
        self.lineEdit_FreqDataPoints.editingFinished.connect(self._FreqDataPoints)
        self.lineEdit_inputTime.editingFinished.connect(self._isinputok)
        self.lineEdit_StartTime.editingFinished.connect(self._isstarttimeok)
        self.lineEdit_StepTime.editingFinished.connect(self._issteptimeok)
        self.lineEdit_StopTime.editingFinished.connect(self._isstoptimeok)
        # Guards against stacking multiple error dialogs (see _ShowError).
        self.isDialogActive =False
        self.show()

    def Exit(self):
        """Ask for confirmation and terminate the application if confirmed."""
        reply = QMessageBox.question(self, "Exit?", "Would you like to exit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            sys.exit()

    def reloadAllFOTransFunc(self):
        """Repopulate the combo box with the built-in example FOTF systems."""
        g1, g2, g3,g4 = loadsets()
        # NOTE(review): relies on locals() to enumerate g1..g4 — fragile; verify.
        xvalues = list(locals().items())
        self.comboBoxFOTF.clear()
        for i, j in xvalues:
            if isinstance(j, FOTransFunc):
                self.comboBoxFOTF.addItem(i, j)
        #self.comboBoxTimeDomainType.addItems(["Step","Impulse"])
        self.comboBoxTimeDomainType.addItems(["Step"])

    def addnewFotf(self):
        """Open the 'create new FOTF' dialog and add the entered system."""
        createnew = newfotfgui()
        createnew.exec_()
        try:
            sysname, zero, pole, dt = createnew.lineEditSysName.text(), createnew.lineEdit_ZeroPoly.text(),\
                                      createnew.lineEdit_PolePoly.text(), createnew.lineEdit_DelayText.text()
            if (sysname or zero or pole or dt) != "":
                self.comboBoxFOTF.addItem(createnew.lineEditSysName.text(),
                                          newfotf(createnew.lineEdit_ZeroPoly.text(),
                                                  createnew.lineEdit_PolePoly.text(),
                                                  createnew.lineEdit_DelayText.text()))
                self.comboBoxFOTF.setCurrentIndex(self.comboBoxFOTF.count()-1)
        except:
            self.statusbar.showMessage('pyfomcon.addnewFotf: FOTF Addition Failed', STATUSBAR_TIME)
            print('\nfofopdtguiclass._addData: FOFOPDT Addition Failed\n')

    def editFOTF(self):
        """Open the dialog pre-filled with the current system and apply edits."""
        createnew = newfotfgui()
        createnew.foregroundRole()
        createnew.lineEditSysName.setText(self.comboBoxFOTF.currentText())
        currentindex = self.comboBoxFOTF.currentIndex()
        x = self.comboBoxFOTF.currentData()
        num, nnum,den,nden, dt = fotfparam(x)
        # Render the zero polynomial: full string form only when coefficient
        # and exponent vectors are non-trivial and consistent in size.
        if num.size > 1 and 1 < nnum.size == num.size:
            Zeros= poly2str(num,nnum)
        else:
            a = num**nnum
            Zeros = str(a[0])
        if den.size > 1 and 1 < nden.size == den.size:
            Poles = poly2str(den,nden)
        else:
            b = den**nden
            Poles = str(b[0])
        createnew.lineEdit_ZeroPoly.setText(Zeros),
        createnew.lineEdit_PolePoly.setText(Poles),
        createnew.lineEdit_DelayText.setText(str(dt))
        createnew.exec_()
        _sysname = createnew.lineEditSysName.text()
        _zero = createnew.lineEdit_ZeroPoly.text()
        _pole = createnew.lineEdit_PolePoly.text()
        _dt = createnew.lineEdit_DelayText.text()
        self.comboBoxFOTF.setCurrentIndex(currentindex)
        if _sysname != "":
            self.comboBoxFOTF.setCurrentText(_sysname)
            if _zero != "" and _pole != "" and _dt != "":
                try:
                    self.comboBoxFOTF.setItemData(currentindex, newfotf(_zero,_pole,float(_dt)))
                except:
                    QMessageBox.question(self, 'Error',
                                         "Input values are not correct.\nEDIT ABORTED!",
                                         QMessageBox.StandardButtons(QMessageBox.Ok))
            else:
                QMessageBox.question(self, 'Error',
                                     "Input values are not correct.\nEDIT ABORTED!",
                                     QMessageBox.StandardButtons(QMessageBox.Ok))
        else:
            QMessageBox.question(self, 'Error',
                                 "System Name Empty!.\nEDIT ABORTED!",
                                 QMessageBox.StandardButtons(QMessageBox.Ok))

    def deleteFOTF(self):
        """Remove the currently selected system from the combo box."""
        self.comboBoxFOTF.removeItem(self.comboBoxFOTF.currentIndex())

    def ViewInConsole(self):
        """Print the selected FOTF system to stdout."""
        x = self.comboBoxFOTF.itemData(self.comboBoxFOTF.currentIndex())
        sysname = self.comboBoxFOTF.currentText()
        if x != None and isinstance(x,FOTransFunc):
            self.statusbar.showMessage('View Console for Transfer Function of {}'.format(sysname), STATUSBAR_TIME)
            print( sysname + ':')
            print(x)

    def OustaloopModel(self):
        """Print the Oustaloup approximation of the selected system."""
        x = self.comboBoxFOTF.itemData(self.comboBoxFOTF.currentIndex())
        if x != None and isinstance(x,FOTransFunc):
            print(self.comboBoxFOTF.currentText() + '.Oustaloop():')
            print(x.oustaloop())

    def StabilityTest(self):
        """Run and plot the stability test for the selected system."""
        x = self.comboBoxFOTF.itemData(self.comboBoxFOTF.currentIndex())
        if x != None and isinstance(x, FOTransFunc):
            x.isstable(doPlot=True)

    def BodePlot(self):
        """Plot the frequency response over the user-specified exponent range."""
        x = self.comboBoxFOTF.itemData(self.comboBoxFOTF.currentIndex())
        if isinstance(x,FOTransFunc):
            try:
                lowExp = int(self.lineEdit_LowerFreq.text())
                highExp = int(self.lineEdit_HigherFreq.text())
                dataPoints= int(self.lineEdit_FreqDataPoints.text())
                x.freqresp(lowExp,highExp,dataPoints)
            except:
                pass
        else:
            QMessageBox.question(self, 'Error',"There is no FOTF object in the Combo Box, Use the 'Add' button",
                                 QMessageBox.StandardButtons(QMessageBox.Ok))

    def Step(self):
        """Simulate the selected system's step response over the time grid."""
        #TODO:check that stop is greater than start
        start = float(self.lineEdit_StartTime.text())
        stop = float(self.lineEdit_StopTime.text())
        step = float(self.lineEdit_StepTime.text())
        intnumofsteps = int((stop-start)/step)
        t = np.linspace(start,stop,intnumofsteps)
        # Constant input vector at the user-supplied amplitude.
        u = np.ones_like(t) * float(self.lineEdit_inputTime.text())
        # NOTE(review): local name 'sys' shadows the imported sys module here.
        sys = self.comboBoxFOTF.itemData(self.comboBoxFOTF.currentIndex())
        if self.comboBoxTimeDomainType.currentText() == "Step":
            sys.step(t, u, output= False, plot=True)
        elif self.comboBoxTimeDomainType.currentText() == "Impulse":
            #TODO: Code for Impulse time domain
            pass

    def ComboBoxFOTFempty(self):
        """Enable/disable action buttons depending on whether systems exist."""
        if self.comboBoxFOTF.count() == 0:
            self.pushButtonSimulate.setDisabled(True)
            self.pushButtonGetOustaloop.setDisabled(True)
            self.pushButton_StabilityTest.setDisabled(True)
            self.pushButton_BodePlot.setDisabled(True)
            self.pushButtonViewInConsole.setDisabled(True)
            self.pushButton_DeleteFOTF.setDisabled(True)
            self.pushButton_EditFOTF.setDisabled(True)
        else:
            self.pushButtonGetOustaloop.setDisabled(False)
            self.pushButton_StabilityTest.setDisabled(False)
            self.pushButtonViewInConsole.setDisabled(False)
            self.pushButton_DeleteFOTF.setDisabled(False)
            self.pushButton_EditFOTF.setDisabled(False)
            # Simulate/Bode buttons additionally depend on valid inputs.
            self._FreqCheck()
            self._TimeCheck()

    def _ShowError(self, message, obj = None, obj2 = None):
        """Show a single error dialog and select the offending line edit.

        isDialogActive prevents a second dialog from stacking while one is
        open; the deliberate raise-inside-try resets the flag afterwards.
        """
        try:
            if self.isDialogActive == False:
                self.isDialogActive = True
                if obj != None:
                    obj.setCursorPosition(0)
                    obj.setSelection(0, len(obj.text()))
                self.statusbar.showMessage('Error: '+message, STATUSBAR_TIME)
                raise ValueError(QMessageBox.question(self, 'Error', message, QMessageBox.StandardButtons(QMessageBox.Ok)))
        except:
            self.isDialogActive = False

    # FREQUENCY DOMAIN VARIABLES CHECKS
    def _LowerFreq(self):
        """Lower frequency exponent must be a negative integer."""
        if int(self.lineEdit_LowerFreq.text()) < 0:
            self.lowerFreq =True
        else:
            self.lowerFreq = False
            self._ShowError("'freq exponent (-int)' must be a -ve integer", self.lineEdit_LowerFreq)
        self._FreqCheck()

    def _HigherFreq(self):
        """Higher frequency exponent must be a positive integer."""
        if int(self.lineEdit_HigherFreq.text()) > 0:
            self.higherFreq = True
        else:
            self.higherFreq = False
            self._ShowError("'freq exponent (int)' must be a +ve integer",self.lineEdit_HigherFreq)
        self._FreqCheck()

    def _FreqDataPoints(self):
        """Frequency sweep must use at least 500 data points."""
        if int(self.lineEdit_FreqDataPoints.text()) >= 500:
            self.freqDataPoints = True
        else:
            self.freqDataPoints = False
            self._ShowError("'Data points (int)' must be a +ve integer >= 500", self.lineEdit_FreqDataPoints)
        self._FreqCheck()

    def _FreqCheck(self):
        """Enable the Bode-plot button only when all frequency inputs are valid."""
        if self.lowerFreq and self.higherFreq and self.freqDataPoints:
            self.pushButton_BodePlot.setEnabled(True)
        else:
            self.pushButton_BodePlot.setEnabled(False)

    #TIME DOMAIN VARIABLES CHECKS
    def _issteptimeok(self):
        """Step size must lie in (0, 0.087]."""
        if 0 < float(self.lineEdit_StepTime.text()) <= 0.087:
            self._stepok = True
        else:
            self._stepok = False
            self._ShowError('0 < "Step(s)" < 0.087', self.lineEdit_StepTime)
        self._TimeCheck()

    def _isinputok(self):
        """Input amplitude must be non-negative."""
        if float(self.lineEdit_inputTime.text()) < 0:
            self._input = False
            self._ShowError('"input" must be > 0',self.lineEdit_inputTime)
        else:
            self._input = True
        self._TimeCheck()

    def _isstarttimeok(self):
        """Start time must be non-negative."""
        if float(self.lineEdit_StartTime.text()) < 0:
            self._STARTTIME =False
            self._ShowError('"Start(s)" must be > 0',self.lineEdit_StartTime)
        else:
            self._STARTTIME = True
        self._TimeCheck()

    def _isstoptimeok(self):
        """Stop time must be non-negative."""
        if float(self.lineEdit_StopTime.text()) < 0:
            self._STOPTIME = False
            self._ShowError('"Stop)" must be > 0',self.lineEdit_StopTime)
        else:
            self._STOPTIME = True
        self._TimeCheck()

    def _TimeCheck(self):
        """Enable the Simulate button only when all time-domain inputs are valid."""
        if float(self.lineEdit_StartTime.text()) < float(self.lineEdit_StopTime.text()):
            self._greaterthan =True
        else:
            self._greaterthan = False
            self._ShowError('"Stop(s)" must be > Start(s)')
        if self._input and self._STOPTIME and self._STARTTIME and self._greaterthan and self._stepok:
            self.pushButtonSimulate.setEnabled(True)
        else:
            self.pushButtonSimulate.setEnabled(False)

    def closeEvent(self,event):
        """Confirm before closing the main window; exit the process on Yes."""
        reply = QMessageBox.question(self, "Exit?", "Are you sure you want to exit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
            sys.exit()
        else:
            event.ignore()
class newfotfgui(QDialog, createnewfotfgui.Ui_dialogCreateNewFOTF):
    """Modal dialog for entering a new fractional-order transfer function.

    Four fields are validated live (system name, zero polynomial, pole
    polynomial, delay); the OK button is enabled only while all four
    validation flags are True.
    """
    def __init__(self):
        QDialog.__init__(self)
        createnewfotfgui.Ui_dialogCreateNewFOTF.__init__(self)
        self.setWindowIcon(QIcon('index.png'))
        self.setupUi(self)
        self.lineEditSysName.textChanged.connect(self._checkSysName)
        self.lineEdit_ZeroPoly.textChanged.connect(self._zeroPolyChanged)
        self.lineEdit_PolePoly.textChanged.connect(self._polePolyChanged)
        self.lineEdit_DelayText.textChanged.connect(self._delayChanged)
        self.pushButtonOK.clicked.connect(self.close)
        self.pushButtonCancel.clicked.connect(self.close)
        # Validation flags; OK is enabled only when all four are True.
        self.sysnamecheck = self.zeroCheck = self.poleCheck = False
        self.delayCheck = True  # in gui initial delay is always 0
        self.show()

    #region Button OK Check
    def _checkOkButton(self):
        """Enable OK only when every field has passed validation."""
        if self.sysnamecheck and self.zeroCheck and self.delayCheck and self.poleCheck:
            self.pushButtonOK.setEnabled(True)
        else:
            self.pushButtonOK.setEnabled(False)
    #endregion

    #region lineEdit Values Check
    def _checkSysName(self):
        """System name must contain at least one non-space character."""
        try:
            self.sysnamecheck = len(self.lineEditSysName.text().strip(" ")) >= 1
        except Exception:
            self.sysnamecheck = False
        self._checkOkButton()

    def _zeroPolyChanged(self):
        """Validate the zero-polynomial string via str2poly."""
        try:
            self.zeroCheck = str2poly(self.lineEdit_ZeroPoly.text())
        except Exception:
            self.zeroCheck = False
        self._checkOkButton()

    def _delayChanged(self):
        """Delay must parse as a non-negative float."""
        try:
            self.delayCheck = float(self.lineEdit_DelayText.text()) >= 0
        except Exception:
            self.delayCheck = False
        self._checkOkButton()

    def _polePolyChanged(self):
        """Validate the pole-polynomial string via str2poly.

        Bug fix: this handler previously validated lineEdit_ZeroPoly
        (copy-paste error), so an invalid pole polynomial was never
        detected and OK could be enabled with bad input.
        """
        try:
            self.poleCheck = str2poly(self.lineEdit_PolePoly.text())
        except Exception:
            self.poleCheck = False
        self._checkOkButton()
    #endregion

    def closeEvent(self, event):
        """Confirm OK/Cancel; clear all fields when the user cancels."""
        sender = self.sender().text()
        close = QMessageBox.question(self, "{0}?".format(sender),
                                     "Are you sure you would like to '{0}' this form?".format(sender),
                                     QMessageBox.Yes | QMessageBox.No)
        if close == QMessageBox.Yes:
            if sender == "OK":
                pass
            else:
                self.lineEditSysName.clear()
                self.lineEdit_ZeroPoly.clear()
                self.lineEdit_PolePoly.clear()
                self.lineEdit_DelayText.clear()
            event.accept()
        else:
            event.ignore()
def loadsets():
    """Build and return the four built-in example FOTF systems as a tuple."""
    systems = (gg1(), gg2(), gg3(), gg4())
    return systems
def gg1():
    """Example system G1: static gain over a two-term fractional denominator."""
    model = newfotf(1., '14994s^{1.31}+6009.5s^{0.97}+1.69', 0)
    return model
def gg2():
    """Example system G2: unity numerator, fractional second-order denominator."""
    model = newfotf(1., '0.8s^{2.2}+0.5s^{0.9}+1', 0)
    return model
def gg3():
    """Example system G3: fractional zero and a five-term fractional denominator."""
    model = newfotf('-2s^{0.63}+4', '2s^{3.501}+3.8s^{2.42}+2.6s^{1.798}+2.5s^{1.31}+1.5', 0)
    return model
def gg4():
    """Example system G4: first-order zero over a fractional denominator."""
    model = newfotf('s+1', 's^2.5+0.5s^1.5+100', 0)
    return model
if __name__ == "__main__":
app = QApplication(sys.argv)
fomcon = FotfViewForm()
app.exec_() | [
"numpy.ones_like",
"pyGui.fotfviewergui.Ui_MainWindow_fotfviewer.__init__",
"PyQt5.QtWidgets.QMessageBox.question",
"numpy.linspace",
"PyQt5.QtWidgets.QMessageBox.StandardButtons",
"pyGui.createnewfotfgui.Ui_dialogCreateNewFOTF.__init__",
"sys.exit",
"sys.step"
] | [((408, 461), 'pyGui.fotfviewergui.Ui_MainWindow_fotfviewer.__init__', 'fotfviewergui.Ui_MainWindow_fotfviewer.__init__', (['self'], {}), '(self)\n', (455, 461), False, 'from pyGui import fotfviewergui, createnewfotfgui\n'), ((2143, 2260), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Exit?"""', '"""Would you like to exit?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, 'Exit?', 'Would you like to exit?', QMessageBox.\n Yes | QMessageBox.No, QMessageBox.No)\n", (2163, 2260), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((7595, 7634), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'intnumofsteps'], {}), '(start, stop, intnumofsteps)\n', (7606, 7634), True, 'import numpy as np\n'), ((12233, 12357), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Exit?"""', '"""Are you sure you want to exit?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, 'Exit?', 'Are you sure you want to exit?', \n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n", (12253, 12357), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((12613, 12667), 'pyGui.createnewfotfgui.Ui_dialogCreateNewFOTF.__init__', 'createnewfotfgui.Ui_dialogCreateNewFOTF.__init__', (['self'], {}), '(self)\n', (12661, 12667), False, 'from pyGui import fotfviewergui, createnewfotfgui\n'), ((2305, 2315), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2313, 2315), False, 'import sys\n'), ((7645, 7660), 'numpy.ones_like', 'np.ones_like', (['t'], {}), '(t)\n', (7657, 7660), True, 'import numpy as np\n'), ((7854, 7893), 'sys.step', 'sys.step', (['t', 'u'], {'output': '(False)', 'plot': '(True)'}), '(t, u, output=False, plot=True)\n', (7862, 7893), False, 'import sys\n'), ((12429, 12439), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12437, 12439), False, 'import sys\n'), ((5690, 5733), 'PyQt5.QtWidgets.QMessageBox.StandardButtons', 'QMessageBox.StandardButtons', (['QMessageBox.Ok'], {}), 
'(QMessageBox.Ok)\n', (5717, 5733), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((7259, 7302), 'PyQt5.QtWidgets.QMessageBox.StandardButtons', 'QMessageBox.StandardButtons', (['QMessageBox.Ok'], {}), '(QMessageBox.Ok)\n', (7286, 7302), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((5479, 5522), 'PyQt5.QtWidgets.QMessageBox.StandardButtons', 'QMessageBox.StandardButtons', (['QMessageBox.Ok'], {}), '(QMessageBox.Ok)\n', (5506, 5522), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((9326, 9369), 'PyQt5.QtWidgets.QMessageBox.StandardButtons', 'QMessageBox.StandardButtons', (['QMessageBox.Ok'], {}), '(QMessageBox.Ok)\n', (9353, 9369), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((5242, 5285), 'PyQt5.QtWidgets.QMessageBox.StandardButtons', 'QMessageBox.StandardButtons', (['QMessageBox.Ok'], {}), '(QMessageBox.Ok)\n', (5269, 5285), False, 'from PyQt5.QtWidgets import QMessageBox\n')] |
import numpy as np
import tensorflow as tf
from .. import activations
from .base import BaseModel
from ..history import History
from ..utils import linear, conv2d
class DQNAgent(BaseModel):
    """Deep Q-Network agent built on the (pre-1.0 style) TensorFlow graph API.

    Constructs a prediction network and a target network (optionally with
    dueling heads), a clipped-TD-error RMSProp optimizer, and provides
    training-update and evaluation helpers.  Double-Q and dueling variants
    are gated by the double_q / dueling flags set in __init__.
    """
    @property
    def name(self):
        return 'DQN'

    def __init__(self, args, env, min_reward=-1.0, max_reward=1.0, sess=None):
        args['memory_size'] = 30 * args.scale # 300,000
        args['observation_space'] = env.observation_space
        # Plain DQN by default; subclasses/variants may flip these flags.
        self.double_q = False
        self.dueling = False
        self.history = History(args)
        super(DQNAgent, self).__init__(args, env, min_reward, max_reward, sess)

    def greedy(self, s_t):
        """Return the argmax action of the prediction network for state s_t."""
        return self.q_action.eval({self.s_t: [s_t]})[0]

    def get_q_update(self, s_t, action, reward, s_t_plus_1, terminal, op_list, return_q2_max=False):
        """Compute TD targets for a batch and run the given ops.

        Returns the results of op_list, with the discounted max next-state
        Q appended when return_q2_max is True.
        """
        if self.double_q:
            # Double Q-learning
            # Select next actions with the online net, evaluate with the target net.
            pred_action = self.q_action.eval({self.s_t: s_t_plus_1})
            q_t_plus_1_with_pred_action = self.target_q_with_idx.eval({
                self.target_s_t: s_t_plus_1,
                self.target_q_idx: [[idx, pred_a] for idx, pred_a in enumerate(pred_action)]
            })
            max_q_t_plus_1 = (1. - terminal) * self.discount * q_t_plus_1_with_pred_action
            target_q_t = max_q_t_plus_1 + reward
        else:
            q_t_plus_1 = self.target_q.eval({self.target_s_t: s_t_plus_1})
            # Coerce terminal flags to float so (1 - terminal) masks the bootstrap.
            terminal = np.array(terminal) + 0.
            max_q_t_plus_1 = (1. - terminal) * self.discount * np.max(q_t_plus_1, axis=1)
            target_q_t = max_q_t_plus_1 + reward
        result = self.sess.run(op_list, {
            self.target_q_t: target_q_t,
            self.action: action,
            self.s_t: s_t,
            self.learning_rate_step: self.step,
        })
        if return_q2_max:
            result += [max_q_t_plus_1]
        return result

    def init_history(self, screen):
        """Fill the frame history with copies of the initial screen."""
        for _ in range(self.history_length):
            self.history.add(screen)

    def update_history(self, screen):
        """Append one screen to the frame history."""
        self.history.add(screen)

    def get_state(self, screen):
        """Return the stacked frame history as the current state."""
        return self.history.get()

    def _build(self, args):
        """Build prediction/target networks and the training op in the graph."""
        # initializer = tf.contrib.layers.xavier_initializer()
        initializer = tf.truncated_normal_initializer(0, 0.02)
        activation_fn = activations.get(args.activation)
        # training network
        with tf.variable_scope('prediction'):
            # Input placeholder layout depends on data_format (NHWC vs NCHW);
            # the history axis is folded into the channel axis via reshape.
            if self.data_format == 'NHWC':
                self.s_t = tf.placeholder('float32', [None, self.screen_height, self.screen_width, self.num_channels,
                                                       self.history_length], name='s_t')
                inpt = tf.reshape(self.s_t,
                                  [-1, self.screen_height, self.screen_width, self.history_length * self.num_channels])
            else:
                self.s_t = tf.placeholder('float32', [None, self.history_length, self.num_channels, self.screen_height,
                                                       self.screen_width], name='s_t')
                inpt = tf.reshape(self.s_t,
                                  [-1, self.history_length * self.num_channels, self.screen_height, self.screen_width])
            layer_count = -1
            # Convolutional trunk: one conv layer per configured unit/kernel/stride.
            for output_dim, kernel, strides in zip(args.num_conv_units, args.kernel_size, args.kernel_strides):
                layer_count += 1
                scope = 'l' + str(layer_count)
                inpt, self.w[scope + '_w'], self.w[scope + '_b'] = conv2d(inpt, output_dim, kernel,
                                                                          strides, initializer,
                                                                          activation_fn, self.data_format,
                                                                          scope=scope)
            shape = inpt.get_shape().as_list()
            inpt = tf.reshape(inpt, [-1, np.prod(shape[1:])]) # flatten the layer
            if self.dueling:
                # Dueling head: separate value and advantage streams.
                self.value_hid, self.w['l4_val_w'], self.w['l4_val_b'] = linear(inpt, args.num_hidden[0],
                                                                                activation_fn=activation_fn,
                                                                                scope='value_hid')
                self.adv_hid, self.w['l4_adv_w'], self.w['l4_adv_b'] = linear(inpt, args.num_hidden[0],
                                                                              activation_fn=activation_fn,
                                                                              scope='adv_hid')
                self.value, self.w['val_w_out'], self.w['val_w_b'] = linear(self.value_hid, 1,
                                                                            scope='value_out')
                self.advantage, self.w['adv_w_out'], self.w['adv_w_b'] = linear(self.adv_hid,
                                                                                self.num_actions,
                                                                                scope='adv_out')
                # Average Dueling
                self.q = self.value + (self.advantage -
                                       tf.reduce_mean(self.advantage, reduction_indices=1, keep_dims=True))
            else:
                for output_dim in args.num_hidden:
                    layer_count += 1
                    scope = 'l' + str(layer_count)
                    inpt, self.w[scope + '_w'], self.w[scope + '_b'] = linear(inpt, output_dim,
                                                                              activation_fn=activation_fn,
                                                                              scope=scope)
                self.q, self.w['q_w'], self.w['q_b'] = linear(inpt, self.num_actions, scope='q')
            self.q_action = tf.argmax(self.q, dimension=1)
            # Per-action average-Q histograms for TensorBoard.
            q_summary = []
            avg_q = tf.reduce_mean(self.q, 0)
            for idx in range(self.num_actions):
                q_summary.append(tf.histogram_summary('q/%s' % idx, avg_q[idx]))
            self.q_summary = tf.merge_summary(q_summary, 'q_summary')
        # target network
        with tf.variable_scope('target'):
            # Mirror of the prediction network; weights stored in self.t_w.
            if self.data_format == 'NHWC':
                self.target_s_t = tf.placeholder('float32',
                                                [None, self.screen_height, self.screen_width, self.num_channels,
                                                 self.history_length], name='s_t')
                inpt = tf.reshape(self.target_s_t,
                                  [-1, self.screen_height, self.screen_width, self.history_length * self.num_channels])
            else:
                self.target_s_t = tf.placeholder('float32',
                                                [None, self.history_length, self.num_channels, self.screen_height,
                                                 self.screen_width], name='s_t')
                inpt = tf.reshape(self.target_s_t,
                                  [-1, self.history_length * self.num_channels, self.screen_height, self.screen_width])
            layer_count = -1
            for output_dim, kernel, strides in zip(args.num_conv_units, args.kernel_size, args.kernel_strides):
                layer_count += 1
                scope = 'l' + str(layer_count)
                inpt, self.t_w[scope + '_w'], self.t_w[scope + '_b'] = conv2d(inpt, output_dim, kernel,
                                                                              strides, initializer,
                                                                              activation_fn, self.data_format,
                                                                              scope=scope)
            shape = inpt.get_shape().as_list()
            inpt = tf.reshape(inpt, [-1, np.prod(shape[1:])]) # flatten the layer
            if self.dueling:
                self.t_value_hid, self.t_w['l4_val_w'], self.t_w['l4_val_b'] = linear(inpt, args.num_hidden[0],
                                                                                      activation_fn=activation_fn,
                                                                                      scope='value_hid')
                self.t_adv_hid, self.t_w['l4_adv_w'], self.t_w['l4_adv_b'] = linear(inpt, args.num_hidden[0],
                                                                                    activation_fn=activation_fn,
                                                                                    scope='adv_hid')
                self.t_value, self.t_w['val_w_out'], self.t_w['val_w_b'] = linear(self.t_value_hid, 1,
                                                                                  scope='value_out')
                self.t_advantage, self.t_w['adv_w_out'], self.t_w['adv_w_b'] = linear(self.t_adv_hid, self.num_actions,
                                                                                      scope='adv_out')
                # Average Dueling
                self.target_q = self.t_value + (self.t_advantage -
                                                tf.reduce_mean(self.t_advantage, reduction_indices=1, keep_dims=True))
            else:
                for output_dim in args.num_hidden:
                    layer_count += 1
                    scope = 'l' + str(layer_count)
                    inpt, self.t_w[scope + '_w'], self.t_w[scope + '_b'] = linear(inpt, output_dim,
                                                                                  activation_fn=activation_fn,
                                                                                  scope=scope)
                self.target_q, self.t_w['q_w'], self.t_w['q_b'] = linear(inpt, self.num_actions, scope='q')
            # Gather target-Q values at online-net-chosen indices (Double-Q path).
            self.target_q_idx = tf.placeholder('int32', [None, None], 'outputs_idx')
            self.target_q_with_idx = tf.gather_nd(self.target_q, self.target_q_idx)
        # optimizer
        with tf.variable_scope('optimizer'):
            self.target_q_t = tf.placeholder('float32', [None], name='target_q_t')
            self.action = tf.placeholder('int64', [None], name='action')
            action_one_hot = tf.one_hot(self.action, self.num_actions, 1.0, 0.0, name='action_one_hot')
            q_acted = tf.reduce_sum(self.q * action_one_hot, reduction_indices=1, name='q_acted')
            # Clipped TD error (Huber-like clipping of the raw delta).
            self.delta = self.target_q_t - q_acted
            self.clipped_delta = tf.clip_by_value(self.delta, self.min_delta, self.max_delta, name='clipped_delta')
            self.loss = tf.reduce_mean(tf.square(self.clipped_delta), name='loss')
            # Exponentially decayed learning rate with a hard floor.
            self.learning_rate_step = tf.placeholder('int64', None, name='learning_rate_step')
            self.learning_rate_op = tf.maximum(self.learning_rate_minimum,
                                               tf.train.exponential_decay(
                                                   self.learning_rate,
                                                   self.learning_rate_step,
                                                   self.learning_rate_decay_step,
                                                   self.learning_rate_decay,
                                                   staircase=True))
            self.optim = tf.train.RMSPropOptimizer(
                self.learning_rate_op, momentum=0.95, epsilon=0.01).minimize(self.loss)

    def play(self, n_step=10000, n_episode=100, test_ep=None, render=False):
        """Run evaluation episodes with a fixed exploration rate.

        When render is False the environment monitor records the run.
        """
        if test_ep is None:
            test_ep = self.ep_end
        test_history = History(self.config)
        if not render:
            self.env.start_monitor()
        from tqdm import tqdm
        best_reward, best_idx = 0, 0
        for idx in range(n_episode):
            screen, reward, action, terminal = self.env.new_random_game()
            current_reward = 0
            # Seed the history with the first screen.
            for _ in range(test_history.length):
                test_history.add(screen)
            for _ in tqdm(range(n_step), ncols=70):
                # 1. predict
                action = self.predict(test_history.get(), test_ep)
                # 2. act
                screen, reward, terminal = self.env.act(action, is_training=False)
                # 3. observe
                test_history.add(screen)
                current_reward += reward
                if terminal:
                    break
            if current_reward > best_reward:
                best_reward = current_reward
                best_idx = idx
            print("=" * 30)
            print(" [%d] Best reward : %d" % (best_idx, best_reward))
            print("=" * 30)
        if not render:
            self.env.stop_monitor()
| [
"tensorflow.one_hot",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.train.exponential_decay",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.truncated_normal_initializer",
"numpy.max",
"numpy.array",
"tensorflow.argmax",
"tensorflow... | [((2246, 2286), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', (['(0)', '(0.02)'], {}), '(0, 0.02)\n', (2277, 2286), True, 'import tensorflow as tf\n'), ((2385, 2416), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prediction"""'], {}), "('prediction')\n", (2402, 2416), True, 'import tensorflow as tf\n'), ((5871, 5901), 'tensorflow.argmax', 'tf.argmax', (['self.q'], {'dimension': '(1)'}), '(self.q, dimension=1)\n', (5880, 5901), True, 'import tensorflow as tf\n'), ((5950, 5975), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.q', '(0)'], {}), '(self.q, 0)\n', (5964, 5975), True, 'import tensorflow as tf\n'), ((6134, 6174), 'tensorflow.merge_summary', 'tf.merge_summary', (['q_summary', '"""q_summary"""'], {}), "(q_summary, 'q_summary')\n", (6150, 6174), True, 'import tensorflow as tf\n'), ((6214, 6241), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target"""'], {}), "('target')\n", (6231, 6241), True, 'import tensorflow as tf\n'), ((9863, 9915), 'tensorflow.placeholder', 'tf.placeholder', (['"""int32"""', '[None, None]', '"""outputs_idx"""'], {}), "('int32', [None, None], 'outputs_idx')\n", (9877, 9915), True, 'import tensorflow as tf\n'), ((9953, 9999), 'tensorflow.gather_nd', 'tf.gather_nd', (['self.target_q', 'self.target_q_idx'], {}), '(self.target_q, self.target_q_idx)\n', (9965, 9999), True, 'import tensorflow as tf\n'), ((10034, 10064), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (10051, 10064), True, 'import tensorflow as tf\n'), ((10096, 10148), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None]'], {'name': '"""target_q_t"""'}), "('float32', [None], name='target_q_t')\n", (10110, 10148), True, 'import tensorflow as tf\n'), ((10175, 10221), 'tensorflow.placeholder', 'tf.placeholder', (['"""int64"""', '[None]'], {'name': '"""action"""'}), "('int64', [None], name='action')\n", (10189, 10221), True, 'import tensorflow as 
tf\n'), ((10252, 10326), 'tensorflow.one_hot', 'tf.one_hot', (['self.action', 'self.num_actions', '(1.0)', '(0.0)'], {'name': '"""action_one_hot"""'}), "(self.action, self.num_actions, 1.0, 0.0, name='action_one_hot')\n", (10262, 10326), True, 'import tensorflow as tf\n'), ((10349, 10424), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.q * action_one_hot)'], {'reduction_indices': '(1)', 'name': '"""q_acted"""'}), "(self.q * action_one_hot, reduction_indices=1, name='q_acted')\n", (10362, 10424), True, 'import tensorflow as tf\n'), ((10510, 10597), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.delta', 'self.min_delta', 'self.max_delta'], {'name': '"""clipped_delta"""'}), "(self.delta, self.min_delta, self.max_delta, name=\n 'clipped_delta')\n", (10526, 10597), True, 'import tensorflow as tf\n'), ((10715, 10771), 'tensorflow.placeholder', 'tf.placeholder', (['"""int64"""', 'None'], {'name': '"""learning_rate_step"""'}), "('int64', None, name='learning_rate_step')\n", (10729, 10771), True, 'import tensorflow as tf\n'), ((1419, 1437), 'numpy.array', 'np.array', (['terminal'], {}), '(terminal)\n', (1427, 1437), True, 'import numpy as np\n'), ((1506, 1532), 'numpy.max', 'np.max', (['q_t_plus_1'], {'axis': '(1)'}), '(q_t_plus_1, axis=1)\n', (1512, 1532), True, 'import numpy as np\n'), ((2488, 2616), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, self.screen_height, self.screen_width, self.num_channels, self.\n history_length]'], {'name': '"""s_t"""'}), "('float32', [None, self.screen_height, self.screen_width,\n self.num_channels, self.history_length], name='s_t')\n", (2502, 2616), True, 'import tensorflow as tf\n'), ((2690, 2801), 'tensorflow.reshape', 'tf.reshape', (['self.s_t', '[-1, self.screen_height, self.screen_width, self.history_length * self.\n num_channels]'], {}), '(self.s_t, [-1, self.screen_height, self.screen_width, self.\n history_length * self.num_channels])\n', (2700, 2801), True, 'import tensorflow as tf\n'), 
((2876, 3004), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, self.history_length, self.num_channels, self.screen_height, self.\n screen_width]'], {'name': '"""s_t"""'}), "('float32', [None, self.history_length, self.num_channels,\n self.screen_height, self.screen_width], name='s_t')\n", (2890, 3004), True, 'import tensorflow as tf\n'), ((3078, 3189), 'tensorflow.reshape', 'tf.reshape', (['self.s_t', '[-1, self.history_length * self.num_channels, self.screen_height, self.\n screen_width]'], {}), '(self.s_t, [-1, self.history_length * self.num_channels, self.\n screen_height, self.screen_width])\n', (3088, 3189), True, 'import tensorflow as tf\n'), ((6320, 6448), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, self.screen_height, self.screen_width, self.num_channels, self.\n history_length]'], {'name': '"""s_t"""'}), "('float32', [None, self.screen_height, self.screen_width,\n self.num_channels, self.history_length], name='s_t')\n", (6334, 6448), True, 'import tensorflow as tf\n'), ((6567, 6685), 'tensorflow.reshape', 'tf.reshape', (['self.target_s_t', '[-1, self.screen_height, self.screen_width, self.history_length * self.\n num_channels]'], {}), '(self.target_s_t, [-1, self.screen_height, self.screen_width, \n self.history_length * self.num_channels])\n', (6577, 6685), True, 'import tensorflow as tf\n'), ((6767, 6895), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, self.history_length, self.num_channels, self.screen_height, self.\n screen_width]'], {'name': '"""s_t"""'}), "('float32', [None, self.history_length, self.num_channels,\n self.screen_height, self.screen_width], name='s_t')\n", (6781, 6895), True, 'import tensorflow as tf\n'), ((7014, 7131), 'tensorflow.reshape', 'tf.reshape', (['self.target_s_t', '[-1, self.history_length * self.num_channels, self.screen_height, self.\n screen_width]'], {}), '(self.target_s_t, [-1, self.history_length * self.num_channels,\n self.screen_height, 
self.screen_width])\n', (7024, 7131), True, 'import tensorflow as tf\n'), ((10633, 10662), 'tensorflow.square', 'tf.square', (['self.clipped_delta'], {}), '(self.clipped_delta)\n', (10642, 10662), True, 'import tensorflow as tf\n'), ((10894, 11042), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.learning_rate', 'self.learning_rate_step', 'self.learning_rate_decay_step', 'self.learning_rate_decay'], {'staircase': '(True)'}), '(self.learning_rate, self.learning_rate_step,\n self.learning_rate_decay_step, self.learning_rate_decay, staircase=True)\n', (10920, 11042), True, 'import tensorflow as tf\n'), ((3920, 3938), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (3927, 3938), True, 'import numpy as np\n'), ((6057, 6103), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["('q/%s' % idx)", 'avg_q[idx]'], {}), "('q/%s' % idx, avg_q[idx])\n", (6077, 6103), True, 'import tensorflow as tf\n'), ((7879, 7897), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (7886, 7897), True, 'import numpy as np\n'), ((11321, 11398), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.learning_rate_op'], {'momentum': '(0.95)', 'epsilon': '(0.01)'}), '(self.learning_rate_op, momentum=0.95, epsilon=0.01)\n', (11346, 11398), True, 'import tensorflow as tf\n'), ((5224, 5291), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.advantage'], {'reduction_indices': '(1)', 'keep_dims': '(True)'}), '(self.advantage, reduction_indices=1, keep_dims=True)\n', (5238, 5291), True, 'import tensorflow as tf\n'), ((9187, 9256), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.t_advantage'], {'reduction_indices': '(1)', 'keep_dims': '(True)'}), '(self.t_advantage, reduction_indices=1, keep_dims=True)\n', (9201, 9256), True, 'import tensorflow as tf\n')] |
import sys
if "" not in sys.path: sys.path.append("")
import os
from glob import glob
import warnings
import numpy as np
from PIL import Image
from util.general import printProgressBar, print_result, print_warning
def generate_paired_lists(cxr_paths, mask_paths, subset_n, split_masks=False):
    """
    Pair every CXR image with its mask(s) and build aligned, ordered lists.

    For each CXR file the mask list(s) are searched for paths containing the
    CXR filename stem (note: substring match, so 'img1' would also match
    'img12.png' — matches are expected to be unique per subject).
    Images without exactly one mask match are skipped and counted.

    :param cxr_paths: paths of the CXR images.
    :param mask_paths: list of mask paths, or a pair
        ``[left_mask_paths, right_mask_paths]`` when ``split_masks`` is True.
    :param subset_n: index of the sub-dataset, used to build subject names.
    :param split_masks: True when the L/R lung masks are stored separately.
    :return: ``(cxr_sort, mask_sort, subject_names, missing_masks)`` — the
        first three lists are aligned index-by-index; ``missing_masks`` counts
        images for which no (complete) mask was found.
    """
    cxr_sort = []
    mask_sort = []
    subject_names = []
    missing_masks = 0
    for subject_n, cxr_path in enumerate(cxr_paths):
        # Match masks on the CXR filename stem (substring match).
        stem = os.path.splitext(os.path.split(cxr_path)[-1])[0]
        if not split_masks:
            filename_matches = [mask_path for mask_path in mask_paths
                                if stem in mask_path]
        else:
            # Left and right masks live in separate lists; require one of each.
            left_matches = [mask_path for mask_path in mask_paths[0]
                            if stem in mask_path]
            right_matches = [mask_path for mask_path in mask_paths[1]
                             if stem in mask_path]
            if len(left_matches) == len(right_matches) == 1:
                filename_matches = [[left_matches[0], right_matches[0]]]
            else:
                warnings.warn("Missing either an R or L mask. "
                              "Omitting entire mask")
                filename_matches = []
        # filename_matches is always a list here, so the original
        # `type(...) == list` guards (and the dead `raise`) were removed.
        if len(filename_matches) == 1:
            cxr_sort.append(cxr_path)
            subject_names.append("{:d}_{:03d}".format(subset_n, subject_n))
            if not split_masks:
                mask_sort.append(filename_matches[0])
            else:
                mask_sort.append(list(filename_matches[0]))
        elif len(filename_matches) > 1:
            warnings.warn("Multiple matches found for a single subject name!")
        else:
            missing_masks += 1
    return cxr_sort, mask_sort, subject_names, missing_masks
def combine_masks(mask1_path, mask2_path):
    """
    Merge two binary mask images into a single 0/255 mask array.

    Primarily used to combine the separate L/R lung masks of the Mntg dataset.

    :param mask1_path: path of the first mask image.
    :param mask2_path: path of the second mask image.
    :return: numpy array with 255 where either mask is set, 0 elsewhere.
    :raises ValueError: if the two masks differ in shape.
    """
    mask1_img = Image.open(mask1_path)
    mask2_img = Image.open(mask2_path)
    mask1_array = np.asarray(mask1_img)
    mask2_array = np.asarray(mask2_img)
    if np.shape(mask1_array) != np.shape(mask2_array):
        raise ValueError("Masks to be combined aren't the same size")
    combined_mask = np.zeros(np.shape(mask1_array))
    # BUGFIX: the original used `mask_array is not False`, an identity test on
    # the whole array that is always True — so every pixel was set to 255.
    # Use element-wise comparison so only foreground pixels are set.
    combined_mask[mask1_array != 0] = 255
    combined_mask[mask2_array != 0] = 255
    return combined_mask
def check_for_inversion(cxr_img_norm):
    """
    Decide whether the image intensities look inverted.

    A rim (outer border) that is on average brighter than the whole image
    suggests an inverted radiograph. Used during intensity normalization.
    """
    # The rim is 1/20 of the longest image side.
    thickness = max(np.shape(cxr_img_norm)) // 20
    rim_pixels = np.concatenate([
        cxr_img_norm[:thickness, :].flatten(),
        cxr_img_norm[:, :thickness].flatten(),
        cxr_img_norm[-thickness:, :].flatten(),
        cxr_img_norm[:, -thickness:].flatten()])
    # Inverted when the border is brighter than the image as a whole.
    return np.mean(rim_pixels) > np.mean(cxr_img_norm)
def intensity_normalization(img_as_np, im_type=""):
    """
    Normalize an image or mask to uint8 values in [0, 255].

    Collapses 3-channel images to a single channel, rescales the intensity
    range, and (for CXR images) inverts the range when the rim looks brighter
    than the image body.

    :param img_as_np: 2-D array, or 3-D array whose channels are averaged.
    :param im_type: 'cxr' (inversion check applied) or 'mask'.
    :return: uint8 numpy array in [0, 255].
    :raises ValueError: on invalid dimensionality or unknown ``im_type``.
    """
    # Check for the dimensionality of the image. Some images have 3 channels
    if np.ndim(img_as_np) == 2:
        pass
    elif np.ndim(img_as_np) == 3:
        img_as_np = np.mean(img_as_np, axis=2)
    else:
        raise ValueError("Image has an invalid number of dimensions")
    # Copy into a float array (booleans become 0./1.)
    new_img_as_np = np.zeros(np.shape(img_as_np))
    new_img_as_np[:, :] = img_as_np[:, :]
    # BUGFIX: the original used `img_as_np is False` (identity test, a no-op
    # when indexing). Compare element-wise so boolean False pixels map to 0.
    new_img_as_np[np.equal(img_as_np, False)] = 0.
    # Rescale the image to the preferred intensity range.
    img_min = np.min(new_img_as_np)
    img_max = np.max(new_img_as_np)
    new_min = 0
    new_max = 255
    array_type = np.uint8
    # BUGFIX: the original divided by img_max instead of the intensity span,
    # which under-scales images whose minimum is above zero and divides by
    # zero for all-zero images. Guard the span so constant images map to 0.
    span = img_max - img_min
    if span == 0:
        span = 1
    img_as_np_corr = \
        (((new_img_as_np - img_min) / span) * (new_max - new_min)) \
        + new_min
    # If applicable, invert the image
    if im_type.lower() == "cxr":
        if check_for_inversion(img_as_np_corr):
            # Perform inversion
            img_as_np_fin = new_max - img_as_np_corr + new_min
        else:
            img_as_np_fin = img_as_np_corr
    elif im_type.lower() == "mask":
        img_as_np_fin = img_as_np_corr
    else:
        raise ValueError("The 'im_type' parameter should be a string "
                         "and either 'cxr' or 'mask'")
    return img_as_np_fin.astype(array_type)
def reshape_image(img_as_np, to_shape=(256, 256)):
    """
    Zero-pad an image to a square and resample it to ``to_shape``.

    Part of the data-normalization stage of preprocessing.
    Returns the result as a PIL Image.
    """
    # Drop the last row/column so both dimensions are even.
    rows, cols = np.shape(img_as_np)
    if rows % 2:
        img_as_np = img_as_np[:rows - 1, :]
    if cols % 2:
        img_as_np = img_as_np[:, :cols - 1]
    # Pad the shorter axis symmetrically up to a square.
    ori_shape = np.shape(img_as_np)
    side = max(ori_shape)
    pad_r = max(0, side - ori_shape[0]) // 2
    pad_c = max(0, side - ori_shape[1]) // 2
    padded = np.zeros((side, side))
    padded[pad_r:side - pad_r, pad_c:side - pad_c] = img_as_np[:, :]
    # Resample the square image to the requested size.
    square_img = Image.fromarray(padded.astype(np.uint8))
    return square_img.resize(to_shape, resample=Image.BILINEAR)
def preprocess_subject(cxr_path, mask_path, split_masks=False):
    """
    Run the full preprocessing chain for one CXR/mask pair.

    Loads the image(s), normalizes intensities and reshapes both to the
    standard size. Returns (cxr_image, mask_image) as PIL Images.
    """
    # Load the CXR image into a numpy array.
    cxr_array = np.array(Image.open(cxr_path))
    # Load the mask; L/R masks are merged into a single array first.
    if split_masks:
        mask_array = combine_masks(mask_path[0], mask_path[1])
    else:
        mask_array = np.asarray(Image.open(mask_path))
    # Normalize intensities (datatype, range, inversion for the CXR).
    cxr_normed = intensity_normalization(cxr_array, im_type="cxr")
    mask_normed = intensity_normalization(mask_array, im_type="mask")
    # Pad and resample both to the standard shape.
    return reshape_image(cxr_normed), reshape_image(mask_normed)
def preprocessing(rawDir=os.path.join("data", "raw"),
                  preprocessedDir=os.path.join("data", "preprocessed"),
                  verbose=True, rerun=False):
    """
    Main function for data preprocessing.

    It handles the file structure conversion and calls on functions
    to perform mask/image manipulation.

    :param rawDir: directory holding the raw sub-dataset folders.
    :param preprocessedDir: directory receiving the 'cxr' and 'masks' outputs.
    :param verbose: print progress information while running.
    :param rerun: when False, subjects whose output files already exist are
        skipped; when True everything is regenerated.
    """
    # If applicable, print some info regarding the processing
    if verbose:
        print("--- Performing data preprocessing --- ")
        print("Extracting data from:\t{}".format(os.path.abspath(rawDir)))
        print("Outputting data to:\t{}".format(
            os.path.abspath(preprocessedDir)))
    # Predefine known raw data structure and wanted preprocessed data structure
    cxr_dirs = ["CXR_ChinaSet", "CXR_Manual", "CXR_Mntg"]
    # A two-element sub-list means the L/R masks are stored separately.
    mask_dirs = ["mask_ChinaSet", "masks_Manual",
                 ["leftMask_Mntg", "rightMask_Mntg"]]
    new_imageDir = os.path.join(preprocessedDir, "cxr")
    new_maskDir = os.path.join(preprocessedDir, "masks")
    # If required, create new directories
    if not os.path.isdir(new_imageDir): os.mkdir(new_imageDir)
    if not os.path.isdir(new_maskDir): os.mkdir(new_maskDir)
    # Loop over sub-datasets and perform preprocessing
    for subset_n in range(len(cxr_dirs)):
        if verbose: print("\nExtracting data from subset "
                          "'{:12s}' ({:1d}/{:1d})... ".format(
                          cxr_dirs[subset_n], subset_n + 1, len(cxr_dirs)))
        # Extract CXR images
        cxr_paths = glob(os.path.join(rawDir, cxr_dirs[subset_n], "*.png"))
        # Extract masks
        if type(mask_dirs[subset_n]) == str:
            split_masks = False
            mask_paths = glob(os.path.join(rawDir,
                                           mask_dirs[subset_n], "*.png"))
        elif type(mask_dirs[subset_n]) == list:
            split_masks = True
            mask_paths = [[], []]
            mask_paths[0] = glob(os.path.join(rawDir,
                                              mask_dirs[subset_n][0], "*.png"))
            mask_paths[1] = glob(os.path.join(rawDir,
                                              mask_dirs[subset_n][1], "*.png"))
        else:
            raise ValueError("Incorrect format for mask directory paths")
        # Sort lists to ensure retention of image-mask pairs
        # Also, check whether there are as many masks as images
        cxr_sort, mask_sort, subject_names, missing_mask = \
            generate_paired_lists(cxr_paths, mask_paths, subset_n,
                                  split_masks=split_masks)
        # If applicable, initiate progress bar
        if verbose:
            print(f"(found {len(subject_names)} images)")
            printProgressBar(0, len(subject_names), length=50)
        # Loop over cxr images and perform file structure conversion
        for subject_n in range(len(subject_names)):
            cxr_path = cxr_sort[subject_n]
            mask_path = mask_sort[subject_n]
            subject_name = subject_names[subject_n]
            cxr_target = os.path.join(new_imageDir, subject_name + ".png")
            mask_target = os.path.join(new_maskDir, subject_name + ".png")
            # Skip subjects already preprocessed on a previous run
            # (unless a full rerun was requested).
            if not rerun and os.path.exists(cxr_target) \
                    and os.path.exists(mask_target):
                pass
            else:
                cxr_img, mask_img = preprocess_subject(cxr_path, mask_path,
                                                       split_masks=split_masks)
                cxr_img.save(cxr_target)
                mask_img.save(mask_target)
            if verbose: printProgressBar(subject_n + 1, len(subject_names),
                                         length=50)
    # If applicable, print result
    # NOTE(review): missing_mask only reflects the LAST subset processed;
    # earlier subsets' missing-mask counts are overwritten — confirm intended.
    if verbose:
        print("\nResult: ", end="", flush=True)
        if missing_mask == 0:
            print_result(True)
        else:
            warning = f"Missed {missing_mask} mask files"
            print_warning(warning)
    return
# Script entry point: run the full preprocessing pipeline, reusing any
# previously generated outputs (rerun=False).
if __name__ == "__main__":
    preprocessing(rerun=False)
| [
"util.general.print_result",
"numpy.array",
"sys.path.append",
"numpy.mean",
"os.path.exists",
"numpy.asarray",
"numpy.ndim",
"numpy.max",
"os.path.split",
"os.path.isdir",
"os.mkdir",
"numpy.min",
"warnings.warn",
"os.path.splitext",
"numpy.shape",
"PIL.Image.open",
"os.path.join",
... | [((34, 53), 'sys.path.append', 'sys.path.append', (['""""""'], {}), "('')\n", (49, 53), False, 'import sys\n'), ((2920, 2942), 'PIL.Image.open', 'Image.open', (['mask1_path'], {}), '(mask1_path)\n', (2930, 2942), False, 'from PIL import Image\n'), ((2959, 2981), 'PIL.Image.open', 'Image.open', (['mask2_path'], {}), '(mask2_path)\n', (2969, 2981), False, 'from PIL import Image\n'), ((3001, 3022), 'numpy.asarray', 'np.asarray', (['mask1_img'], {}), '(mask1_img)\n', (3011, 3022), True, 'import numpy as np\n'), ((3041, 3062), 'numpy.asarray', 'np.asarray', (['mask2_img'], {}), '(mask2_img)\n', (3051, 3062), True, 'import numpy as np\n'), ((4053, 4074), 'numpy.mean', 'np.mean', (['cxr_img_norm'], {}), '(cxr_img_norm)\n', (4060, 4074), True, 'import numpy as np\n'), ((4946, 4967), 'numpy.min', 'np.min', (['new_img_as_np'], {}), '(new_img_as_np)\n', (4952, 4967), True, 'import numpy as np\n'), ((4982, 5003), 'numpy.max', 'np.max', (['new_img_as_np'], {}), '(new_img_as_np)\n', (4988, 5003), True, 'import numpy as np\n'), ((6269, 6288), 'numpy.shape', 'np.shape', (['img_as_np'], {}), '(img_as_np)\n', (6277, 6288), True, 'import numpy as np\n'), ((6507, 6526), 'numpy.zeros', 'np.zeros', (['int_shape'], {}), '(int_shape)\n', (6515, 6526), True, 'import numpy as np\n'), ((7082, 7102), 'PIL.Image.open', 'Image.open', (['cxr_path'], {}), '(cxr_path)\n', (7092, 7102), False, 'from PIL import Image\n'), ((7119, 7136), 'numpy.array', 'np.array', (['cxr_img'], {}), '(cxr_img)\n', (7127, 7136), True, 'import numpy as np\n'), ((7705, 7732), 'os.path.join', 'os.path.join', (['"""data"""', '"""raw"""'], {}), "('data', 'raw')\n", (7717, 7732), False, 'import os\n'), ((7768, 7804), 'os.path.join', 'os.path.join', (['"""data"""', '"""preprocessed"""'], {}), "('data', 'preprocessed')\n", (7780, 7804), False, 'import os\n'), ((8586, 8622), 'os.path.join', 'os.path.join', (['preprocessedDir', '"""cxr"""'], {}), "(preprocessedDir, 'cxr')\n", (8598, 8622), False, 'import os\n'), ((8641, 
8679), 'os.path.join', 'os.path.join', (['preprocessedDir', '"""masks"""'], {}), "(preprocessedDir, 'masks')\n", (8653, 8679), False, 'import os\n'), ((3071, 3092), 'numpy.shape', 'np.shape', (['mask1_array'], {}), '(mask1_array)\n', (3079, 3092), True, 'import numpy as np\n'), ((3096, 3117), 'numpy.shape', 'np.shape', (['mask2_array'], {}), '(mask2_array)\n', (3104, 3117), True, 'import numpy as np\n'), ((4098, 4116), 'numpy.array', 'np.array', (['rim_list'], {}), '(rim_list)\n', (4106, 4116), True, 'import numpy as np\n'), ((4516, 4534), 'numpy.ndim', 'np.ndim', (['img_as_np'], {}), '(img_as_np)\n', (4523, 4534), True, 'import numpy as np\n'), ((4767, 4786), 'numpy.shape', 'np.shape', (['img_as_np'], {}), '(img_as_np)\n', (4775, 4786), True, 'import numpy as np\n'), ((7272, 7293), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (7282, 7293), False, 'from PIL import Image\n'), ((7315, 7335), 'numpy.asarray', 'np.asarray', (['mask_img'], {}), '(mask_img)\n', (7325, 7335), True, 'import numpy as np\n'), ((8734, 8761), 'os.path.isdir', 'os.path.isdir', (['new_imageDir'], {}), '(new_imageDir)\n', (8747, 8761), False, 'import os\n'), ((8763, 8785), 'os.mkdir', 'os.mkdir', (['new_imageDir'], {}), '(new_imageDir)\n', (8771, 8785), False, 'import os\n'), ((8797, 8823), 'os.path.isdir', 'os.path.isdir', (['new_maskDir'], {}), '(new_maskDir)\n', (8810, 8823), False, 'import os\n'), ((8825, 8846), 'os.mkdir', 'os.mkdir', (['new_maskDir'], {}), '(new_maskDir)\n', (8833, 8846), False, 'import os\n'), ((827, 862), 'os.path.split', 'os.path.split', (['cxr_paths[subject_n]'], {}), '(cxr_paths[subject_n])\n', (840, 862), False, 'import os\n'), ((3152, 3173), 'numpy.shape', 'np.shape', (['mask1_array'], {}), '(mask1_array)\n', (3160, 3173), True, 'import numpy as np\n'), ((3648, 3670), 'numpy.shape', 'np.shape', (['cxr_img_norm'], {}), '(cxr_img_norm)\n', (3656, 3670), True, 'import numpy as np\n'), ((4563, 4581), 'numpy.ndim', 'np.ndim', (['img_as_np'], {}), 
'(img_as_np)\n', (4570, 4581), True, 'import numpy as np\n'), ((4608, 4634), 'numpy.mean', 'np.mean', (['img_as_np'], {'axis': '(2)'}), '(img_as_np, axis=2)\n', (4615, 4634), True, 'import numpy as np\n'), ((9202, 9251), 'os.path.join', 'os.path.join', (['rawDir', 'cxr_dirs[subset_n]', '"""*.png"""'], {}), "(rawDir, cxr_dirs[subset_n], '*.png')\n", (9214, 9251), False, 'import os\n'), ((10739, 10788), 'os.path.join', 'os.path.join', (['new_imageDir', "(subject_name + '.png')"], {}), "(new_imageDir, subject_name + '.png')\n", (10751, 10788), False, 'import os\n'), ((10815, 10863), 'os.path.join', 'os.path.join', (['new_maskDir', "(subject_name + '.png')"], {}), "(new_maskDir, subject_name + '.png')\n", (10827, 10863), False, 'import os\n'), ((1701, 1769), 'warnings.warn', 'warnings.warn', (['"""Missing either an R or L mask. Omitting entire mask"""'], {}), "('Missing either an R or L mask. Omitting entire mask')\n", (1714, 1769), False, 'import warnings\n'), ((2351, 2417), 'warnings.warn', 'warnings.warn', (['"""Multiple matches found for a single subject name!"""'], {}), "('Multiple matches found for a single subject name!')\n", (2364, 2417), False, 'import warnings\n'), ((6012, 6031), 'numpy.shape', 'np.shape', (['img_as_np'], {}), '(img_as_np)\n', (6020, 6031), True, 'import numpy as np\n'), ((6115, 6134), 'numpy.shape', 'np.shape', (['img_as_np'], {}), '(img_as_np)\n', (6123, 6134), True, 'import numpy as np\n'), ((8202, 8225), 'os.path.abspath', 'os.path.abspath', (['rawDir'], {}), '(rawDir)\n', (8217, 8225), False, 'import os\n'), ((8288, 8320), 'os.path.abspath', 'os.path.abspath', (['preprocessedDir'], {}), '(preprocessedDir)\n', (8303, 8320), False, 'import os\n'), ((9385, 9435), 'os.path.join', 'os.path.join', (['rawDir', 'mask_dirs[subset_n]', '"""*.png"""'], {}), "(rawDir, mask_dirs[subset_n], '*.png')\n", (9397, 9435), False, 'import os\n'), ((10894, 10920), 'os.path.exists', 'os.path.exists', (['cxr_target'], {}), '(cxr_target)\n', (10908, 10920), 
False, 'import os\n'), ((10947, 10974), 'os.path.exists', 'os.path.exists', (['mask_target'], {}), '(mask_target)\n', (10961, 10974), False, 'import os\n'), ((11546, 11564), 'util.general.print_result', 'print_result', (['(True)'], {}), '(True)\n', (11558, 11564), False, 'from util.general import printProgressBar, print_result, print_warning\n'), ((11661, 11683), 'util.general.print_warning', 'print_warning', (['warning'], {}), '(warning)\n', (11674, 11683), False, 'from util.general import printProgressBar, print_result, print_warning\n'), ((9626, 9679), 'os.path.join', 'os.path.join', (['rawDir', 'mask_dirs[subset_n][0]', '"""*.png"""'], {}), "(rawDir, mask_dirs[subset_n][0], '*.png')\n", (9638, 9679), False, 'import os\n'), ((9760, 9813), 'os.path.join', 'os.path.join', (['rawDir', 'mask_dirs[subset_n][1]', '"""*.png"""'], {}), "(rawDir, mask_dirs[subset_n][1], '*.png')\n", (9772, 9813), False, 'import os\n'), ((1001, 1031), 'os.path.splitext', 'os.path.splitext', (['cxr_filename'], {}), '(cxr_filename)\n', (1017, 1031), False, 'import os\n'), ((1205, 1235), 'os.path.splitext', 'os.path.splitext', (['cxr_filename'], {}), '(cxr_filename)\n', (1221, 1235), False, 'import os\n'), ((1396, 1426), 'os.path.splitext', 'os.path.splitext', (['cxr_filename'], {}), '(cxr_filename)\n', (1412, 1426), False, 'import os\n'), ((6077, 6096), 'numpy.shape', 'np.shape', (['img_as_np'], {}), '(img_as_np)\n', (6085, 6096), True, 'import numpy as np\n'), ((6183, 6202), 'numpy.shape', 'np.shape', (['img_as_np'], {}), '(img_as_np)\n', (6191, 6202), True, 'import numpy as np\n')] |
'''
Created on Nov 1, 2016
@author: <NAME> and <NAME>
'''
import logging
import pickle
import pandas as pd
import datetime
import os, subprocess
import sklearn.metrics as skm
import numpy as np
import taggers.lample_lstm_tagger.lstm_wrapper as lstm_wrapper
from baselines.dawid_and_skene import ds, ibccvb
from evaluation.metrics import calculate_scores
from evaluation.plots import SCORE_NAMES
from baselines.hmmcrowd import HMM_crowd
from baselines.util import crowd_data, data_to_hmm_crowd_format, subset_hmm_crowd_data
from baselines import ibcc, clustering, majority_voting
from bsc import bsc
# Emit debug-level log output for all aggregation methods in this module.
logging.basicConfig(level=logging.DEBUG)
# Root directory of the datasets used by these experiments.
data_root_dir = '../../../data/bayesian_sequence_combination/'
def _append_to_csv(outputdir, method, method_idx, new_data, filename, file_identifier):
    """
    Append one method's column of results to a CSV file.

    The target file is ``<outputdir>/<filename>_<file_identifier>.csv``;
    it is created if absent, otherwise the new column is concatenated to
    the existing columns.
    """
    target = os.path.join(outputdir, '%s_%s.csv' % (filename, file_identifier))
    # One column: the selected method's values, headed by the method name.
    column = pd.DataFrame(new_data[:, method_idx], columns=[str(method).strip('[]')])
    if not os.path.isfile(target):
        combined = column
    else:
        existing = pd.read_csv(target, delimiter=',')
        combined = pd.concat((existing, column), axis=1)
    combined.to_csv(target, index=False)
class Experiment(object):
    def __init__(self, outputdir, nclasses, annotations, gold, doc_start, text,
                 annos_val=None, gold_val=None, doc_start_val=None, text_val=None,
                 gold_nocrowd=None, doc_start_nocrowd=None, text_nocrowd=None,
                 alpha0_factor=1.0, alpha0_diags=1.0, beta0_factor=0.1, begin_factor=1.0,
                 max_iter=20, crf_probs=False, rep=0, bootstrapping=False
                 ):
        '''
        :param outputdir: the directory where results, predictions and any model files will be stored
        :param annotations: rows correspond to tokens, columns to annotators.
        :param gold: class labels for computing the performance metrics. Missing values should be set to -1.
        :param doc_start: binary vector indicating the start of each sequence/document/sentence.
        :param text: strings associated with each token.
        :param annos_val: crowdsourced labels for a validation set.
        :param gold_val: for tuning hyperparameters
        :param doc_start_val:
        :param text_val:
        :param gold_nocrowd: for evaluating on a second set of data points where crowd labels are not available
        :param doc_start_nocrowd:
        :param text_nocrowd: the features for testing the model trained on crowdsourced data to classify data with no labels at all
        :param nclasses: number of classes
        :param alpha0_factor: smoothing hyperparameter for annotator models.
        :param alpha0_diags: correctness bias hyperparameter for annotator models.
        :param beta0_factor: smoothing hyperparameter for prior over class labels.
        :param begin_factor: additional correctness bias for B tokens.
        :param max_iter: maximum iterations for iterative aggregation methods.
        :param crf_probs: LSTM method produces probabilities using a CRF output layer.
        :param rep: repetition number for an experiment that is carried out multiple times; sets a different random seed for each repetition.
        :param bootstrapping: calculate performance metrics using bootstrapping for small datasets
        '''
        # The list of aggregation method names; set by the caller (or by
        # tune_alpha0) before run_methods() is invoked.
        self.methods = None
        self.num_classes = nclasses
        self.postprocess = False # previous papers did not use this so we leave out to make results comparable.
        self.random_sampling = False
        self.outputdir = outputdir
        if not os.path.exists(outputdir):
            os.mkdir(outputdir)
        # NOTE(review): this rebinds only the local variable after
        # self.outputdir was already stored, so it has no effect — confirm.
        outputdir += '/'
        # Data -------------------------------------------------
        self.annos_test = annotations
        self.gold_test = gold
        self.doc_start_test = doc_start
        self.text_test = text
        self.annos_val = annos_val
        self.gold_val = gold_val
        self.doc_start_val = doc_start_val
        self.text_val = text_val
        self.gold_nocrowd = gold_nocrowd
        self.doc_start_nocrowd = doc_start_nocrowd
        self.text_nocrowd = text_nocrowd
        # Method hyperparameters and settings ---------------------------------
        self.crf_probs = crf_probs
        self.opt_hyper = False
        self.use_lb = False
        self.alpha0_factor = alpha0_factor
        self.alpha0_diags = alpha0_diags
        self.begin_factor = begin_factor
        self.beta0_factor = beta0_factor
        self.max_internal_iter = 3
        self.max_iter = max_iter # allow all methods to use a maximum no. iterations
        # Fixed global seed, then one per-repetition seed drawn from it so
        # each rep uses a different but reproducible active-learning seed.
        np.random.seed(3849)
        self.seed = np.random.randint(1, 1000, 100)[rep] # seeds for AL
        # Results -------------------------------------------------------
        # save results from methods here. If we use compound methods, we can reuse these results in different
        # combinations of methods.
        self.aggs = {}
        self.probs = {}
        self.bootstrapping = bootstrapping
        # Score matrices are allocated lazily by calculate_experiment_scores /
        # calculate_nocrowd_scores once the method list is known.
        self.scores = None
        self.scores_nocrowd = None
    def tune_alpha0(self, alpha0diag_proposals, alpha0factor_proposals, beta0factor_proposals,
                    method, metric_idx_to_optimise=8, new_data=False):
        """
        Grid-search beta0/alpha0 hyperparameters for a single method.

        Runs the method once per (beta0, alpha0_diag, alpha0_factor) triple,
        records the chosen metric, saves the score grid to CSV and returns
        [best_score, h, i, j] — the best score and its grid indices.
        metric_idx_to_optimise selects a row of the score matrix; index 5
        (a loss-like metric) is minimised, all others are maximised.
        """
        self.methods = [method]
        # rows: (beta0, alpha0_diag) combinations; columns: alpha0_factor.
        scores = np.zeros((len(beta0factor_proposals) * len(alpha0diag_proposals), len(alpha0factor_proposals)))
        best_scores = np.zeros(4) - np.inf
        best_idxs = np.zeros(4)
        for h, beta0factor in enumerate(beta0factor_proposals):
            self.beta0_factor = beta0factor
            for i, alpha0diag in enumerate(alpha0diag_proposals):
                self.alpha0_diags = alpha0diag
                for j, alpha0factor in enumerate(alpha0factor_proposals):
                    self.alpha0_factor = alpha0factor
                    # reset saved data so that models are run again.
                    self.aggs = {}
                    self.probs = {}
                    outputdir_ij = self.outputdir + ('_%i_%i_%i_' % (h, i, j)) + method + '/'
                    # NOTE(review): run_methods' first parameter is
                    # test_on_dev, not an output directory — passing the
                    # (truthy) path string switches evaluation to the dev
                    # set and the per-setting directory is unused. Verify
                    # against the intended run_methods signature.
                    all_scores, _, _, _, _, _ = self.run_methods(outputdir_ij, new_data=new_data)
                    if metric_idx_to_optimise != 5: # maximise these scores
                        scores[(h*len(alpha0diag_proposals)) + i, j] = all_scores[metric_idx_to_optimise, :]
                    else: # minimise this score
                        scores[(h*len(alpha0diag_proposals)) + i, j] = -all_scores[metric_idx_to_optimise, :]
                    print('Scores for %f, %f, %f: %f' % (beta0factor, alpha0diag, alpha0factor,
                                                         scores[(h*len(alpha0diag_proposals)) + i, j]))
                    if scores[(h*len(alpha0diag_proposals)) + i, j] > best_scores[0]:
                        # best_scores holds the values, best_idxs the grid
                        # indices (element 0 of both is the score itself).
                        best_scores[0] = scores[(h*len(alpha0diag_proposals)) + i, j]
                        best_scores[1] = beta0factor
                        best_scores[2] = alpha0diag
                        best_scores[3] = alpha0factor
                        best_idxs[0] = scores[(h*len(alpha0diag_proposals)) + i, j]
                        best_idxs[1] = h
                        best_idxs[2] = i
                        best_idxs[3] = j
        print('Saving scores for this setting to %s' % (self.outputdir + '/%s_scores.csv' % method))
        np.savetxt(self.outputdir + '/%s_scores.csv' % method, scores, fmt='%s', delimiter=',',
                   header=str(self.methods).strip('[]'))
        np.savetxt(self.outputdir + '/%s_bestscores.csv' % method, best_scores, fmt='%s', delimiter=',',
                   header=str(self.methods).strip('[]'))
        self.aggs = {}
        self.probs = {}
        return best_idxs
    def run_methods(self, test_on_dev=False, new_data=False,
                    active_learning=False, AL_batch_fraction=0.05, max_AL_iters=10,
                    save_with_timestamp=True):
        '''
        Run the aggregation methods and evaluate them.
        :param test_on_dev: evaluate on the validation (dev) set instead of
            the test set. (The code below selects *_val data when this is
            truthy and *_test data otherwise.)
        :param new_data: set to true if all cached data files should be overwritten
        :param active_learning: set to true to run an AL simulation
        :param AL_batch_fraction: what proportion of labels are sampled at each AL iteration
        :param max_AL_iters: maximum number of AL rounds.
        :return: (scores, preds, probs, scores_nocrowd, preds_nocrowd,
            probs_nocrowd) — metrics, aggregated labels and label
            probabilities for the crowd-labelled data and, if provided, the
            additional no-crowd test data.
        '''
        if active_learning:
            print('Running active learning on the test dataset.')
        # Select which split the aggregation methods see.
        if not test_on_dev:
            self.annos_all = self.annos_test
            self.annos = self.annos_test
            self.doc_start_all = self.doc_start_test
            self.doc_start = self.doc_start_test
            self.text_all = self.text_test
            self.text = self.text_test
            self.gold = self.gold_test
        else:
            self.annos_all = self.annos_val
            self.annos = self.annos_val
            self.doc_start_all = self.doc_start_val
            self.doc_start = self.doc_start_val
            self.text_all = self.text_val
            self.text = self.text_val
            self.gold = self.gold_val
        # a second test set with no crowd labels was supplied directly
        if self.gold_nocrowd is not None and self.text_nocrowd is not None and self.doc_start_nocrowd is not None:
            self.N_nocrowd = self.gold_nocrowd.shape[0]
        else:
            self.N_nocrowd = 0
        print('Running experiment on %s set%s' % ('dev' if test_on_dev else 'test', '.' if self.N_nocrowd==0 else
                ' and predicting on additional test data with no crowd labels'))
        Ntoks = self.annos.shape[0]
        Ndocs = np.sum(self.doc_start)
        # get total annotation count
        Nannos = np.sum(self.annos_all[(self.doc_start == 1).flatten(), :] != -1)
        print('Data set: %i documents, %i tokens, %i documents without crowd labels.' % (Ndocs, Ntoks, self.N_nocrowd))
        # Result holders, one column (or slice) per method; -1 = not filled.
        preds = -np.ones((Ntoks, len(self.methods)))
        probs_allmethods = -np.ones((Ntoks, self.num_classes, len(self.methods)))
        preds_nocrowd = -np.ones((self.N_nocrowd, len(self.methods)))
        probs_allmethods_nocrowd = -np.ones((self.N_nocrowd, self.num_classes, len(self.methods)))
        # timestamp for when we started a run. Can be compared to file versions to check what was run.
        timestamp = datetime.datetime.now().strftime('started-%Y-%m-%d-%H-%M-%S')
        for method_idx, method in enumerate(self.methods):
            print('Running method: %s' % method)
            Nseen = 0
            niter = 0
            if active_learning:
                # get the number of labels to select each iteration
                self.batch_size = int(np.ceil(AL_batch_fraction * Nannos))
                np.random.seed(self.seed) # for repeating with different methods with same initial set
                # Start from no labels; the initial batch is sampled with a
                # uniform (maximum-uncertainty) document distribution.
                self.annos = None
                selected_docs, selected_toks, nselected_by_doc = self._uncertainty_sampling(
                    np.ones(Ndocs, dtype=float) / self.num_classes, None, None)
            else:
                selected_docs = None
                nselected_by_doc = None
            # Without AL this loop runs a single iteration (Nseen reaches
            # Nannos after the first pass).
            while Nseen < Nannos and niter < max_AL_iters:
                print('Learning round %i' % niter)
                # the active learning loop
                if active_learning: # treat the unseen instances similarly to the no crowd instances -- get their posterior probabilities
                    unseen_docs = np.arange(Ndocs)
                    unseen_docs = unseen_docs[np.invert(np.in1d(unseen_docs, selected_docs))]
                    # cumsum(doc_start)-1 maps each token to its document id.
                    unselected_toks = np.argwhere(np.in1d(np.cumsum(self.doc_start_test) - 1, unseen_docs)).flatten()
                    self.N_unseentoks = len(unselected_toks)
                    doc_start_unseen = self.doc_start_test[unselected_toks]
                    text_unseen = self.text_test[unselected_toks]
                else:
                    self.N_unseentoks = 0
                    doc_start_unseen = None
                    text_unseen = None
                # Cached data is only refreshed on the first round.
                agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen = \
                    self._run_method(method, timestamp, doc_start_unseen, text_unseen, selected_docs, nselected_by_doc,
                                     new_data if niter==0 else False)
                if np.any(self.gold != -1): # don't run this in the case that crowd-labelled data has no gold labels
                    if active_learning and len(agg) < len(self.gold):
                        # Stitch seen and unseen predictions back into
                        # full-length arrays before scoring.
                        agg_all = np.ones(len(self.gold))
                        agg_all[selected_toks] = agg.flatten()
                        agg_all[unselected_toks] = agg_unseen.flatten()
                        agg = agg_all
                        probs_all = np.zeros((len(self.gold), self.num_classes))
                        probs_all[selected_toks, :] = probs
                        probs_all[unselected_toks, :] = probs_unseen
                        probs = probs_all
                    self.calculate_experiment_scores(agg, probs, method_idx)
                    preds[:, method_idx] = agg.flatten()
                    probs_allmethods[:,:,method_idx] = probs
                if self.N_nocrowd > 0:
                    self.calculate_nocrowd_scores(agg_nocrowd, probs_nocrowd, method_idx)
                    preds_nocrowd[:, method_idx] = agg_nocrowd.flatten()
                    probs_allmethods_nocrowd[:,:,method_idx] = probs_nocrowd
                print('...done')
                # Save the results so far after each method has completed.
                Nseen = np.sum(self.annos[self.doc_start.flatten()==1] != -1) # update the number of documents processed so far
                print('Nseen = %i' % Nseen)
                # change the timestamps to include AL loop numbers
                if save_with_timestamp:
                    file_id = timestamp + ('-Nseen%i' % Nseen)
                else:
                    file_id = ''
                _append_to_csv(self.outputdir, method, method_idx, self.scores, 'result', file_id)
                _append_to_csv(self.outputdir, method, method_idx, self.score_std, 'result_std', file_id)
                _append_to_csv(self.outputdir, method, method_idx, preds, 'pred', file_id)
                if self.N_nocrowd > 0:
                    _append_to_csv(self.outputdir, method, method_idx, self.scores_nocrowd, 'result_nocrowd', file_id)
                    _append_to_csv(self.outputdir, method, method_idx, self.score_std_nocrowd, 'result_std_nocrowd', file_id)
                    _append_to_csv(self.outputdir, method, method_idx, preds_nocrowd, 'pred_nocrowd', file_id)
                with open(os.path.join(self.outputdir, 'probs_%s.pkl' % file_id), 'wb') as fh:
                    pickle.dump(probs_allmethods, fh)
                if self.N_nocrowd > 0:
                    with open(os.path.join(self.outputdir, 'probs_nocrowd_%s.pkl' % file_id), 'wb') as fh:
                        pickle.dump(probs_allmethods_nocrowd, fh)
                # if model is not None and not active_learning and return_model:
                #     with open(self.outputdir + 'model_%s.pkl' % method, 'wb') as fh:
                #         pickle.dump(model, fh)
                if active_learning and Nseen < Nannos:
                    # non-sequential methods just provide independent label probabilities.
                    if most_likely_seq_probs is None:
                        most_likely_seq_probs = [np.prod(seq_prob) for seq_prob in np.split(probs, self.doc_start.flatten())]
                    selected_docs, selected_toks, nselected_by_doc = self._uncertainty_sampling(
                        most_likely_seq_probs, selected_toks, selected_docs)
                    print('**** Active learning: No. annos = %i' % self.annos.shape[0])
                else:
                    selected_docs = None
                niter += 1
        return self.scores, preds, probs_allmethods, \
               self.scores_nocrowd, preds_nocrowd, probs_allmethods_nocrowd
    def _run_method(self, method, timestamp, doc_start_unseen, text_unseen, selected_docs, nselected_by_doc, new_data):
        """
        Dispatch a single aggregation method by its name prefix and return
        its outputs.

        Method names are matched on the first '_'-separated token ('best',
        'worst', 'clustering', 'majority', 'mace', 'ds', 'ibcc', 'ibcc2',
        'bsc'/'bac'), or by substring for 'HMM_crowd' and 'gt'. A
        '_thenLSTM' suffix additionally trains an LSTM on the aggregated
        labels. Results are cached in self.aggs/self.probs under the method
        name.
        NOTE(review): an unrecognised method name leaves `agg` unbound and
        raises UnboundLocalError at the end — confirm callers validate names.
        """
        # Initialise the results because not all methods will fill these in
        most_likely_seq_probs = None # some methods can compute this
        agg_unseen = np.zeros(self.N_unseentoks)
        # default maximum entropy situation
        probs_unseen = np.ones((self.N_unseentoks, self.num_classes), dtype=float) / self.num_classes
        agg_nocrowd = np.zeros(self.N_nocrowd)
        probs_nocrowd = np.ones((self.N_nocrowd, self.num_classes))
        if method.split('_')[0] == 'best':
            agg, probs = self._run_best_worker()
        elif method.split('_')[0] == 'worst':
            agg, probs = self._run_worst_worker()
        elif method.split('_')[0] == 'clustering':
            agg, probs = self._run_clustering()
        elif method.split('_')[0] == 'majority':
            agg, probs = majority_voting.MajorityVoting(self.annos, self.num_classes).vote()
        elif method.split('_')[0] == 'mace':
            agg, probs = self._run_mace(timestamp)
        elif method.split('_')[0] == 'ds':
            agg, probs = self._run_ds()
        elif method.split('_')[0] == 'ibcc':
            agg, probs = self._run_ibcc()
        elif method.split('_')[0] == 'ibcc2': # this is the newer and simpler implementation
            agg, probs = self._run_ibcc2()
        elif method.split('_')[0] == 'bsc' or method.split('_')[0] == 'bac':
            if method not in self.aggs:
                agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen \
                    = self._run_bsc(
                    method,
                    doc_start_unseen=doc_start_unseen,
                    text_unseen=text_unseen
                )
            else:
                # reuse a cached run of the same BSC variant
                agg = self.aggs[method.replace('_thenLSTM', '')]
                probs = self.probs[method.replace('_thenLSTM', '')]
        elif 'HMM_crowd' in method:
            if 'HMM_crowd' not in self.aggs:
                # we pass all annos here so they can be saved and reloaded from a single file in HMMCrowd
                # format, then the relevant subset selected from that.
                agg, probs, model = self._run_hmmcrowd(selected_docs, nselected_by_doc, overwrite_data_file=new_data)
                most_likely_seq_probs = model.res_prob
            else:
                agg = self.aggs['HMM_crowd']
                probs = self.probs['HMM_crowd']
        elif 'gt' in method:
            # oracle: use the gold labels directly with one-hot probabilities
            agg = self.gold.flatten()
            probs = np.zeros((len(self.gold), self.num_classes))
            probs[range(len(agg)), agg.astype(int)] = 1.0
        if '_thenLSTM' in method:
            agg, probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen = self._run_LSTM(
                agg, timestamp, doc_start_unseen, text_unseen)
        self.aggs[method] = agg
        self.probs[method] = probs
        return agg, probs, most_likely_seq_probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen
def calculate_experiment_scores(self, agg, probs, method_idx):
    """Score aggregated labels against the gold labels and store the results
    in column ``method_idx`` of ``self.scores`` / ``self.score_std``.

    :param agg: aggregated (hard) labels for all tokens.
    :param probs: per-token class probabilities from the aggregator.
    :param method_idx: column index of the method within ``self.methods``.
    """
    if self.scores is None:
        # Lazily allocate one column per method. score_std has 3 fewer rows
        # than scores -- presumably some score types have no bootstrap std;
        # TODO confirm against SCORE_NAMES / calculate_scores.
        self.scores = np.zeros((len(SCORE_NAMES), len(self.methods)))
        self.score_std = np.zeros((len(SCORE_NAMES) - 3, len(self.methods)))
    # NOTE(review): the first assignment target writes through a chained view
    # (self.scores[:, method_idx][:, None]); this relies on numpy view
    # semantics and is left byte-identical on purpose.
    self.scores[:, method_idx][:, None], self.score_std[:, method_idx] = calculate_scores(
        self.postprocess, agg, self.gold.flatten(), probs, self.doc_start_all,
        self.bootstrapping, print_per_class_results=True)
def calculate_nocrowd_scores(self, agg, probs, method_idx):
    """Score aggregated labels on the additional test set that has no crowd
    labels, storing results in column ``method_idx`` of the nocrowd tables.

    :param agg: aggregated (hard) labels for the no-crowd tokens.
    :param probs: per-token class probabilities for the no-crowd tokens.
    :param method_idx: column index of the method within ``self.methods``.
    """
    if self.scores_nocrowd is None:
        # For the additional test set with no crowd labels; same layout as
        # self.scores / self.score_std.
        self.scores_nocrowd = np.zeros((len(SCORE_NAMES), len(self.methods)))
        self.score_std_nocrowd = np.zeros((len(SCORE_NAMES) - 3, len(self.methods)))
    # NOTE(review): chained-view assignment target, mirroring
    # calculate_experiment_scores; left byte-identical on purpose.
    self.scores_nocrowd[:,method_idx][:,None], self.score_std_nocrowd[:, method_idx] = calculate_scores(
        self.postprocess, agg, self.gold_nocrowd.flatten(), probs, self.doc_start_nocrowd,
        self.bootstrapping, print_per_class_results=True)
# Methods -----------------------------------------------------------------
def _run_best_worker(self):
    """Oracle baseline: for every token, take the label given by the worker
    with the highest macro-averaged F1 score among those who labelled it.

    :return: (agg, probs) -- chosen labels and their one-hot probabilities.
    """
    # f1scores[i, w] holds worker w's overall F1 wherever w labelled token i
    # and -1 where w gave no label, so argmax never picks an absent worker.
    f1scores = np.zeros_like(self.annos) - 1.0
    print('F1 scores for individual workers:')
    individual_scores = []
    for w in range(self.annos.shape[1]):
        labelled = self.annos[:, w] != -1
        if not np.any(labelled):
            continue
        per_class_f1 = skm.f1_score(
            self.gold.flatten()[labelled], self.annos[labelled, w],
            labels=range(self.num_classes), average=None)
        # Average only over classes that actually occur in this worker's
        # gold-labelled subset.
        seen_classes = np.unique(self.gold[labelled]).astype(int)
        score = np.mean(per_class_f1[seen_classes])
        individual_scores.append(score)
        f1scores[labelled, w] = score
    best_idxs = np.argmax(f1scores, axis=1)
    ntoks = self.annos.shape[0]
    agg = self.annos[np.arange(ntoks), best_idxs]
    # One-hot probabilities: full confidence in the selected label.
    probs = np.zeros((self.gold.shape[0], self.num_classes))
    probs[np.arange(self.gold.shape[0]), agg.astype(int)] = 1
    return agg, probs
def _run_worst_worker(self):
    """Baseline: for every token, take the label given by the weakest
    (lowest macro-averaged F1) worker among those who labelled it.

    :return: (agg, probs) -- chosen labels and their one-hot probabilities.
    """
    # +inf where a worker gave no label, so argmin never selects an
    # absent worker.
    f1scores = np.zeros_like(self.annos) + np.inf
    for w in range(self.annos.shape[1]):
        labelled = self.annos[:, w] != -1
        if not np.any(labelled):
            continue
        per_class_f1 = skm.f1_score(
            self.gold.flatten()[labelled], self.annos[labelled, w],
            labels=range(self.num_classes), average=None)
        seen_classes = np.unique(self.gold[labelled]).astype(int)
        f1scores[labelled, w] = np.mean(per_class_f1[seen_classes])
    worst_idxs = np.argmin(f1scores, axis=1)
    agg = self.annos[np.arange(self.annos.shape[0]), worst_idxs]
    # One-hot probabilities: full confidence in the selected label.
    probs = np.zeros((self.gold.shape[0], self.num_classes))
    probs[np.arange(self.gold.shape[0]), agg.astype(int)] = 1
    return agg, probs
def _run_clustering(self):
    """Aggregate annotations with the clustering baseline.

    :return: (agg, probs) -- cluster-derived labels and their one-hot
        probabilities.
    """
    labels = clustering.Clustering(self.gold, self.annos, self.doc_start).run()
    # One-hot probabilities: full confidence in the chosen label.
    onehot = np.zeros((self.gold.shape[0], self.num_classes))
    onehot[np.arange(self.gold.shape[0]), labels.astype(int)] = 1
    return labels, onehot
def _run_ds(self):
    """Aggregate annotations with the Dawid-Skene model.

    :return: (agg, probs) -- most probable label per token and the full
        posterior class probabilities.
    """
    class_probs = ds(self.annos, self.num_classes, self.beta0_factor, self.max_iter)
    return np.argmax(class_probs, axis=1), class_probs
def _run_ibcc2(self):
    """Aggregate annotations with the newer, simpler variational IBCC
    implementation.

    :return: (agg, probs) -- most probable label per token and the full
        posterior class probabilities.
    """
    class_probs, _ = ibccvb(
        self.annos, self.num_classes, self.beta0_factor,
        self.alpha0_factor, self.alpha0_diags, self.begin_factor,
        self.max_iter)
    return np.argmax(class_probs, axis=1), class_probs
def _run_ibcc(self, use_ml=False):
    """Aggregate annotations with IBCC (independent Bayesian classifier
    combination).

    :param use_ml: if True, run maximum-likelihood IBCC with a flat prior;
        otherwise use the VB variant with priors built from beta0_factor,
        alpha0_factor and alpha0_diags.
    :return: (agg, probs) -- hard labels and posterior class probabilities.
    """
    if use_ml:
        # No prior information at all, just a small regularisation term.
        alpha0 = np.ones((self.num_classes, self.num_classes)) + 0.1
        ibc = ibcc.IBCC(nclasses=self.num_classes, nscores=self.num_classes, nu0=np.ones(self.num_classes),
                        alpha0=alpha0, uselowerbound=True, use_ml=True)
    else:
        self.ibcc_beta0 = np.ones(self.num_classes) * self.beta0_factor
        # Confusion-matrix prior: alpha0_factor mass spread over the
        # off-diagonal classes, plus a diagonal boost so the prior expects
        # workers to mostly agree with the true label.
        self.ibcc_alpha0 = (self.alpha0_factor/float(self.num_classes-1)) * np.ones((self.num_classes, self.num_classes)) \
                           + (self.alpha0_diags + self.alpha0_factor *(1-1/float(self.num_classes-1))) * np.eye(self.num_classes)
        ibc = ibcc.IBCC(nclasses=self.num_classes, nscores=self.num_classes, nu0=self.ibcc_beta0,
                        alpha0=self.ibcc_alpha0, uselowerbound=True)
    ibc.verbose = True
    ibc.max_iterations = self.max_iter
    # ibc.optimise_alpha0_diagonals = True
    if self.opt_hyper:
        probs = ibc.combine_classifications(self.annos, table_format=True, optimise_hyperparams=True,
                                            maxiter=10000)
    else:
        probs = ibc.combine_classifications(self.annos, table_format=True)  # posterior class probabilities
    agg = probs.argmax(axis=1)  # aggregated class labels
    return agg, probs
def _run_mace(self, timestamp):
    """Aggregate annotations with MACE via its external Java jar.

    Writes the annotations to a temporary CSV, invokes MACE with
    ``--distribution`` so it emits per-token label distributions, parses
    that output into a probability table, and returns hard labels plus
    the probabilities.

    :param timestamp: string used to make the temp file name unique.
    :return: (agg, probs) -- argmax labels and per-token class probabilities.
    """
    anno_path = os.path.join(self.outputdir, 'annos_tmp_%s' % timestamp)
    annotations = pd.DataFrame(self.annos)
    # BUG FIX: DataFrame.replace returns a NEW frame; the original call
    # discarded the result, so the -1 "missing label" markers leaked into the
    # CSV fed to MACE. Reassign so missing labels become NaN (empty cells).
    annotations = annotations.replace(-1, np.nan)
    annotations.to_csv(anno_path, sep=',', header=False, index=False)
    subprocess.call(['java', '-jar', './src/baselines/MACE/MACE.jar', '--distribution', '--prefix',
                     os.path.join(self.outputdir, 'mace'),
                     anno_path])  # , stdout = devnull, stderr = devnull)
    # mace.prediction holds alternating (label, probability) column pairs
    # for each token, one row per token.
    result = np.genfromtxt(os.path.join(self.outputdir, 'mace.prediction'))
    probs = np.zeros((self.gold.shape[0], self.num_classes))
    for i in range(result.shape[0]):
        for j in range(0, self.num_classes * 2, 2):
            probs[i, int(result[i, j])] = result[i, j + 1]
    os.remove(anno_path)  # clean up tmp file
    agg = np.argmax(probs, 1)
    return agg, probs
def _run_bsc(self, method, doc_start_unseen=None, text_unseen=None):
    """Run the BSC/BAC sequence-aggregation model.

    The method string encodes the configuration, e.g.
    ``bsc_<worker_model>_integrateLSTM_atEnd_noHMM``: bits after the first
    select the worker model, optional LSTM integration and whether the
    label-transition (HMM) model is used.

    :param method: underscore-separated method specifier.
    :param doc_start_unseen: doc-start markers for an extra unlabelled set, or None.
    :param text_unseen: text features for the unlabelled set, or None.
    :return: (agg, probs, pseq, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen)
    """
    method_bits = method.split('_')
    # 'noHMM' suffix disables the sequential label-transition model.
    if method_bits[-1] == 'noHMM':
        transition_model = 'None'
    else:
        transition_model = 'HMM'
    # Needs to run integrate method for task 2 as well.
    # LSTM integration: 2 = only after other components converge ('atEnd'),
    # 1 = integrated throughout, 0 = no LSTM data model.
    if len(method_bits) > 2 and 'integrateLSTM' in method_bits:
        if len(method_bits) > 3 and 'atEnd' in method_bits:
            use_LSTM = 2
        else:
            use_LSTM = 1
    else:
        use_LSTM = 0
    # 'integrateIF' enables the word-feature model (no_words=False below).
    if len(method_bits) > 2 and 'integrateIF' in method_bits:
        use_IF = True
    else:
        use_IF = False
    worker_model = method_bits[1]
    L = self.num_classes
    # IOB2 label layout: classes come in (inside, begin) pairs around one
    # outside label, hence (num_classes - 1) / 2 span types.
    num_types = (self.num_classes - 1) / 2
    outside_label = 1
    inside_labels = (np.arange(num_types) * 2 + 1).astype(int)
    inside_labels[0] = 0
    begin_labels = (np.arange(num_types) * 2 + 2).astype(int)
    data_model = []
    dev_sentences = []
    if use_LSTM > 0:
        data_model.append('LSTM')
        if self.gold_val is not None and self.doc_start_val is not None and self.text_val is not None:
            dev_sentences, _, _ = lstm_wrapper.data_to_lstm_format(self.text_val,
                                                             self.doc_start_val, self.gold_val)
    if use_IF:
        no_words = False
    else:
        no_words = True
    bsc_model = bsc.BSC(L=L, K=self.annos.shape[1], max_iter=self.max_iter,  # eps=-1,
                        inside_labels=inside_labels, outside_label=outside_label, beginning_labels=begin_labels,
                        alpha0_diags=self.alpha0_diags, alpha0_factor=self.alpha0_factor,
                        alpha0_B_factor=self.begin_factor,
                        beta0_factor=self.beta0_factor, worker_model=worker_model, tagging_scheme='IOB2',
                        data_model=data_model, transition_model=transition_model, no_words=no_words,
                        use_lowerbound=False, model_dir=self.outputdir)
    bsc_model.verbose = True
    bsc_model.max_internal_iters = self.max_internal_iter
    if self.opt_hyper:
        np.random.seed(592)  # for reproducibility
        # NOTE(review): pseq is only bound in the else-branch below, so the
        # final return would raise NameError when opt_hyper is True -- confirm
        # whether opt_hyper is ever used with this method.
        probs, agg = bsc_model.optimize(self.annos, self.doc_start, self.text, maxfun=1000,
                                        converge_workers_first=use_LSTM==2, dev_sentences=dev_sentences)
    else:
        probs, agg, pseq = bsc_model.run(self.annos, self.doc_start, self.text,
                                         converge_workers_first=use_LSTM==2, crf_probs=self.crf_probs, dev_sentences=dev_sentences)
    # Predictions for the held-out set with no crowd labels.
    if self.gold_nocrowd is not None and '_thenLSTM' not in method:
        probs_nocrowd, agg_nocrowd = bsc_model.predict(self.doc_start_nocrowd, self.text_nocrowd)
    else:
        probs_nocrowd = None
        agg_nocrowd = None
    # Predictions for completely unseen documents, if supplied.
    if '_thenLSTM' not in method and doc_start_unseen is not None and len(doc_start_unseen) > 0:
        probs_unseen, agg_unseen = bsc_model.predict(doc_start_unseen, text_unseen)
    else:
        probs_unseen = None
        agg_unseen = None
    return agg, probs, pseq, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen
def _run_hmmcrowd(self, doc_subset, nselected_by_doc,
                  overwrite_data_file=False):
    """Run the HMM-crowd baseline on a subset of the documents.

    :param doc_subset: ids of the documents to aggregate.
    :param nselected_by_doc: number of selected crowd labels per document.
    :param overwrite_data_file: if True, rebuild the cached HMMCrowd-format
        data file instead of reloading it.
    :return: (agg, probs, hc) -- flat label vector, per-token posteriors and
        the fitted model object.
    """
    # All annotations are converted/cached once, then the requested subset
    # is selected from the cached data.
    sentences, crowd_labels, nfeats = data_to_hmm_crowd_format(self.annos_all, self.text_all, self.doc_start_all,
                                                        self.outputdir, overwrite=overwrite_data_file)
    sentences, crowd_labels = subset_hmm_crowd_data(sentences, crowd_labels, doc_subset, nselected_by_doc)
    data = crowd_data(sentences, crowd_labels)
    hc = HMM_crowd(self.num_classes, nfeats, data, None, None, n_workers=self.annos_all.shape[1],
                   vb=[self.beta0_factor, self.alpha0_factor], smooth=self.alpha0_factor)
    hc.init(init_type='dw', wm_rep='cv', dw_em=5, wm_smooth=self.alpha0_factor)
    print('Running HMM-crowd inference...')
    hc.em(self.max_iter)  # performance goes down with more iterations...?!
    print('Computing most likely sequence...')
    hc.mls()
    print('HMM-crowd complete.')
    # hc.res appears to be a list of per-sentence label sequences;
    # concatenate them into one flat column vector.
    agg = np.concatenate(hc.res)[:, None]
    # Flatten the per-sentence posterior arrays into one (ntokens, nclasses)
    # probability table.
    probs = []
    for sentence_post_arr in hc.sen_posterior:
        for tok_post_arr in sentence_post_arr:
            probs.append(tok_post_arr)
    probs = np.array(probs)
    return agg.flatten(), probs, hc
def _run_LSTM(self, train_labs, timestamp, doc_start_unseen=None, text_unseen=None):
    """Train an LSTM sequence tagger on aggregated labels and predict labels
    for the main set, the no-crowd set and any unseen documents.

    :param train_labs: aggregated labels used as LSTM training targets.
    :param timestamp: string used to name the model output directory.
    :param doc_start_unseen: doc-start markers for extra unlabelled data, or None.
    :param text_unseen: text features for the unlabelled data, or None.
    :return: (agg, probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen)
    """
    # Train only on tokens that received at least one crowd label.
    valididxs = np.any(self.annos != -1, axis=1)
    labelled_sentences, IOB_map, IOB_label = lstm_wrapper.data_to_lstm_format(
        self.text[valididxs], self.doc_start[valididxs], train_labs.flatten()[valididxs], self.num_classes)
    np.random.seed(592)  # for reproducibility
    if self.gold_val is None or self.doc_start_val is None or self.text_val is None:
        # If validation set is unavailable, select a random subset of combined data to use for validation.
        # Simulates a scenario where all we have available are crowd labels.
        train_sentences, dev_sentences, self.gold_val = lstm_wrapper.split_train_to_dev(labelled_sentences)
        all_sentences = labelled_sentences
    else:
        train_sentences = labelled_sentences
        dev_sentences, _, _ = lstm_wrapper.data_to_lstm_format(self.text_val,
                                                         self.doc_start_val, self.gold_val)
        all_sentences = np.concatenate((train_sentences, dev_sentences), axis=0)
    lstm = lstm_wrapper.LSTMWrapper(os.path.join(self.outputdir, 'models_LSTM_%s' % timestamp))
    print('Running LSTM with crf probs = %s' % self.crf_probs)
    lstm.train_LSTM(all_sentences, train_sentences, dev_sentences, self.gold_val, IOB_map,
                    IOB_label, self.num_classes, freq_eval=5, n_epochs=self.max_internal_iter,
                    crf_probs=self.crf_probs, max_niter_no_imprv=2)
    # Now make predictions for all sentences. np.ones(...) supplies
    # placeholder labels -- presumably ignored at prediction time; confirm
    # against data_to_lstm_format.
    test_sentences, _, _ = lstm_wrapper.data_to_lstm_format(
        self.text, self.doc_start, np.ones(len(train_labs)), self.num_classes)
    agg, probs = lstm.predict_LSTM(test_sentences)
    if self.N_nocrowd > 0:
        test_sentences, _, _ = lstm_wrapper.data_to_lstm_format(
            self.text_nocrowd, self.doc_start_nocrowd, np.ones(self.N_nocrowd), self.num_classes)
        agg_nocrowd, probs_nocrowd = lstm.predict_LSTM(test_sentences)
    else:
        agg_nocrowd = None
        probs_nocrowd = None
    if doc_start_unseen is not None:
        N_unseen = len(doc_start_unseen)
        test_sentences, _, _ = lstm_wrapper.data_to_lstm_format(
            text_unseen, doc_start_unseen, np.ones(N_unseen), self.num_classes)
        agg_unseen, probs_unseen = lstm.predict_LSTM(test_sentences)
    else:
        agg_unseen = None
        probs_unseen = None
    return agg, probs, agg_nocrowd, probs_nocrowd, agg_unseen, probs_unseen
def _uncertainty_sampling(self, most_likely_probs, selected_toks, selected_docs):
    """Active-learning selection step.

    Chooses ``self.batch_size`` new documents to label -- at random if
    ``self.random_sampling``, otherwise the documents whose most-likely
    sequence probability is lowest -- merges them with the previously
    selected tokens/documents, and rebuilds ``self.annos``,
    ``self.doc_start`` and ``self.text`` from the full test pool.

    :param most_likely_probs: per-document probability of the aggregator's
        most likely sequence (lower = less certain).
    :param selected_toks: token indices selected in earlier rounds.
    :param selected_docs: document ids selected in earlier rounds.
    :return: (selected_docs, selected_toks, nselected_by_doc) after update.
    """
    # Tokens that still have unused crowd labels available: for previously
    # selected tokens, compare total labels in the pool vs labels already
    # consumed.
    unfinished_toks = np.ones(self.annos_test.shape[0], dtype=bool)
    if self.annos is not None:
        unfinished_toks[selected_toks] = (np.sum(self.annos_test[selected_toks] != -1, axis=1) - np.sum(self.annos != -1, axis=1)) > 0
    unfinished_toks = np.sort(np.argwhere(unfinished_toks).flatten())
    # random selection
    #new_selection = np.random.choice(unseen_docs, batch_size, replace=False)
    # probs = probs[unfinished_toks, :]
    # negentropy = np.log(probs) * probs
    # negentropy[probs == 0] = 0
    # negentropy = np.sum(negentropy, axis=1)
    # Map each unfinished token to its document index via the cumulative
    # count of document-start flags.
    docids_by_tok = (np.cumsum(self.doc_start_test) - 1)[unfinished_toks]
    if self.random_sampling:
        new_selected_docs = np.random.choice(np.unique(docids_by_tok), self.batch_size, replace=False)
    else:
        # Ndocs = np.max(docids_by_tok) + 1
        # negentropy_docs = np.zeros(Ndocs, dtype=float)
        # count_unseen_toks = np.zeros(Ndocs, dtype=float)
        #
        # # now sum up the entropy for each doc and normalise by length (otherwise we'll never label the short ones)
        # for i, _ in enumerate(unfinished_toks):
        #     # find doc ID for this tok
        #     docid = docids_by_tok[i]
        #
        #     negentropy_docs[docid] += negentropy[i]
        #     count_unseen_toks[docid] += 1
        #
        # negentropy_docs /= count_unseen_toks
        #
        # # assume that batch size is less than number of docs...
        # new_selected_docs = np.argsort(negentropy_docs)[:batch_size]
        # Least-confident sampling: pick the documents with the lowest
        # most-likely-sequence probability.
        new_selected_docs = np.argsort(most_likely_probs)[:self.batch_size]
    # Boolean mask over all test tokens belonging to the newly chosen docs.
    new_selected_toks = np.in1d(np.cumsum(self.doc_start_test) - 1, new_selected_docs)
    # new_label_count[i] = number of crowd labels to use for token i after
    # this round.
    new_label_count = np.zeros(self.annos_test.shape[0])
    new_label_count[new_selected_toks] += 1
    # Add to previously selected toks and docs.
    if self.annos is not None:
        # How many labels do we have for these tokens so far?
        current_label_count = np.sum(self.annos != -1, axis=1)
        # Add one for the new round.
        new_label_count[selected_toks] += current_label_count
        selected_docs = np.sort(np.unique(np.concatenate((selected_docs, new_selected_docs)) ))
        new_selected_toks[selected_toks] = True
        selected_toks = new_selected_toks
    else:
        selected_toks = new_selected_toks
        selected_docs = new_selected_docs
    # Per-document label counts, read off at each doc-start token.
    nselected_by_doc = new_label_count[selected_toks]
    nselected_by_doc = nselected_by_doc[self.doc_start_test[selected_toks].flatten() == 1]
    # Find the columns for each token that we should get from the full set of
    # crowdsourced annos: keep the first new_label_count[i] non-missing
    # labels per token.
    mask = np.cumsum(self.annos_test[selected_toks] != -1, axis=1) <= new_label_count[selected_toks][:, None]
    # Make a new label set, -1 meaning "not selected".
    annotations = np.zeros((np.sum(selected_toks), self.annos_test.shape[1])) - 1
    # Copy in the data from self.annos_test.
    annotations[mask] = self.annos_test[selected_toks, :][mask]
    self.annos = annotations
    self.doc_start = self.doc_start_test[selected_toks]
    self.text = self.text_test[selected_toks]
    return selected_docs, np.argwhere(selected_toks).flatten(), nselected_by_doc
| [
"taggers.lample_lstm_tagger.lstm_wrapper.split_train_to_dev",
"baselines.util.data_to_hmm_crowd_format",
"numpy.prod",
"pandas.read_csv",
"numpy.argsort",
"numpy.array",
"baselines.majority_voting.MajorityVoting",
"baselines.util.crowd_data",
"baselines.dawid_and_skene.ibccvb",
"numpy.arange",
"... | [((603, 643), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (622, 643), False, 'import logging\n'), ((815, 881), 'os.path.join', 'os.path.join', (['outputdir', "('%s_%s.csv' % (filename, file_identifier))"], {}), "(outputdir, '%s_%s.csv' % (filename, file_identifier))\n", (827, 881), False, 'import os, subprocess\n'), ((979, 1003), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (993, 1003), False, 'import os, subprocess\n'), ((1020, 1056), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (1031, 1056), True, 'import pandas as pd\n'), ((1081, 1116), 'pandas.concat', 'pd.concat', (['(data, new_data)'], {'axis': '(1)'}), '((data, new_data), axis=1)\n', (1090, 1116), True, 'import pandas as pd\n'), ((4657, 4677), 'numpy.random.seed', 'np.random.seed', (['(3849)'], {}), '(3849)\n', (4671, 4677), True, 'import numpy as np\n'), ((5503, 5514), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5511, 5514), True, 'import numpy as np\n'), ((9772, 9794), 'numpy.sum', 'np.sum', (['self.doc_start'], {}), '(self.doc_start)\n', (9778, 9794), True, 'import numpy as np\n'), ((16501, 16528), 'numpy.zeros', 'np.zeros', (['self.N_unseentoks'], {}), '(self.N_unseentoks)\n', (16509, 16528), True, 'import numpy as np\n'), ((16698, 16722), 'numpy.zeros', 'np.zeros', (['self.N_nocrowd'], {}), '(self.N_nocrowd)\n', (16706, 16722), True, 'import numpy as np\n'), ((16747, 16790), 'numpy.ones', 'np.ones', (['(self.N_nocrowd, self.num_classes)'], {}), '((self.N_nocrowd, self.num_classes))\n', (16754, 16790), True, 'import numpy as np\n'), ((21309, 21336), 'numpy.argmax', 'np.argmax', (['f1scores'], {'axis': '(1)'}), '(f1scores, axis=1)\n', (21318, 21336), True, 'import numpy as np\n'), ((21421, 21469), 'numpy.zeros', 'np.zeros', (['(self.gold.shape[0], self.num_classes)'], {}), '((self.gold.shape[0], self.num_classes))\n', (21429, 21469), True, 
'import numpy as np\n'), ((22184, 22211), 'numpy.argmin', 'np.argmin', (['f1scores'], {'axis': '(1)'}), '(f1scores, axis=1)\n', (22193, 22211), True, 'import numpy as np\n'), ((22298, 22346), 'numpy.zeros', 'np.zeros', (['(self.gold.shape[0], self.num_classes)'], {}), '((self.gold.shape[0], self.num_classes))\n', (22306, 22346), True, 'import numpy as np\n'), ((22502, 22562), 'baselines.clustering.Clustering', 'clustering.Clustering', (['self.gold', 'self.annos', 'self.doc_start'], {}), '(self.gold, self.annos, self.doc_start)\n', (22523, 22562), False, 'from baselines import ibcc, clustering, majority_voting\n'), ((22603, 22651), 'numpy.zeros', 'np.zeros', (['(self.gold.shape[0], self.num_classes)'], {}), '((self.gold.shape[0], self.num_classes))\n', (22611, 22651), True, 'import numpy as np\n'), ((22802, 22868), 'baselines.dawid_and_skene.ds', 'ds', (['self.annos', 'self.num_classes', 'self.beta0_factor', 'self.max_iter'], {}), '(self.annos, self.num_classes, self.beta0_factor, self.max_iter)\n', (22804, 22868), False, 'from baselines.dawid_and_skene import ds, ibccvb\n'), ((22883, 22907), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (22892, 22907), True, 'import numpy as np\n'), ((22981, 23113), 'baselines.dawid_and_skene.ibccvb', 'ibccvb', (['self.annos', 'self.num_classes', 'self.beta0_factor', 'self.alpha0_factor', 'self.alpha0_diags', 'self.begin_factor', 'self.max_iter'], {}), '(self.annos, self.num_classes, self.beta0_factor, self.alpha0_factor,\n self.alpha0_diags, self.begin_factor, self.max_iter)\n', (22987, 23113), False, 'from baselines.dawid_and_skene import ds, ibccvb\n'), ((23150, 23174), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (23159, 23174), True, 'import numpy as np\n'), ((24698, 24754), 'os.path.join', 'os.path.join', (['self.outputdir', "('annos_tmp_%s' % timestamp)"], {}), "(self.outputdir, 'annos_tmp_%s' % timestamp)\n", (24710, 24754), False, 'import os, 
subprocess\n'), ((24778, 24802), 'pandas.DataFrame', 'pd.DataFrame', (['self.annos'], {}), '(self.annos)\n', (24790, 24802), True, 'import pandas as pd\n'), ((25259, 25307), 'numpy.zeros', 'np.zeros', (['(self.gold.shape[0], self.num_classes)'], {}), '((self.gold.shape[0], self.num_classes))\n', (25267, 25307), True, 'import numpy as np\n'), ((25477, 25497), 'os.remove', 'os.remove', (['anno_path'], {}), '(anno_path)\n', (25486, 25497), False, 'import os, subprocess\n'), ((25534, 25553), 'numpy.argmax', 'np.argmax', (['probs', '(1)'], {}), '(probs, 1)\n', (25543, 25553), True, 'import numpy as np\n'), ((27099, 27584), 'bsc.bsc.BSC', 'bsc.BSC', ([], {'L': 'L', 'K': 'self.annos.shape[1]', 'max_iter': 'self.max_iter', 'inside_labels': 'inside_labels', 'outside_label': 'outside_label', 'beginning_labels': 'begin_labels', 'alpha0_diags': 'self.alpha0_diags', 'alpha0_factor': 'self.alpha0_factor', 'alpha0_B_factor': 'self.begin_factor', 'beta0_factor': 'self.beta0_factor', 'worker_model': 'worker_model', 'tagging_scheme': '"""IOB2"""', 'data_model': 'data_model', 'transition_model': 'transition_model', 'no_words': 'no_words', 'use_lowerbound': '(False)', 'model_dir': 'self.outputdir'}), "(L=L, K=self.annos.shape[1], max_iter=self.max_iter, inside_labels=\n inside_labels, outside_label=outside_label, beginning_labels=\n begin_labels, alpha0_diags=self.alpha0_diags, alpha0_factor=self.\n alpha0_factor, alpha0_B_factor=self.begin_factor, beta0_factor=self.\n beta0_factor, worker_model=worker_model, tagging_scheme='IOB2',\n data_model=data_model, transition_model=transition_model, no_words=\n no_words, use_lowerbound=False, model_dir=self.outputdir)\n", (27106, 27584), False, 'from bsc import bsc\n'), ((29088, 29214), 'baselines.util.data_to_hmm_crowd_format', 'data_to_hmm_crowd_format', (['self.annos_all', 'self.text_all', 'self.doc_start_all', 'self.outputdir'], {'overwrite': 'overwrite_data_file'}), '(self.annos_all, self.text_all, self.doc_start_all,\n self.outputdir, 
overwrite=overwrite_data_file)\n', (29112, 29214), False, 'from baselines.util import crowd_data, data_to_hmm_crowd_format, subset_hmm_crowd_data\n'), ((29273, 29349), 'baselines.util.subset_hmm_crowd_data', 'subset_hmm_crowd_data', (['sentences', 'crowd_labels', 'doc_subset', 'nselected_by_doc'], {}), '(sentences, crowd_labels, doc_subset, nselected_by_doc)\n', (29294, 29349), False, 'from baselines.util import crowd_data, data_to_hmm_crowd_format, subset_hmm_crowd_data\n'), ((29366, 29401), 'baselines.util.crowd_data', 'crowd_data', (['sentences', 'crowd_labels'], {}), '(sentences, crowd_labels)\n', (29376, 29401), False, 'from baselines.util import crowd_data, data_to_hmm_crowd_format, subset_hmm_crowd_data\n'), ((29415, 29584), 'baselines.hmmcrowd.HMM_crowd', 'HMM_crowd', (['self.num_classes', 'nfeats', 'data', 'None', 'None'], {'n_workers': 'self.annos_all.shape[1]', 'vb': '[self.beta0_factor, self.alpha0_factor]', 'smooth': 'self.alpha0_factor'}), '(self.num_classes, nfeats, data, None, None, n_workers=self.\n annos_all.shape[1], vb=[self.beta0_factor, self.alpha0_factor], smooth=\n self.alpha0_factor)\n', (29424, 29584), False, 'from baselines.hmmcrowd import HMM_crowd\n'), ((30186, 30201), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (30194, 30201), True, 'import numpy as np\n'), ((30354, 30386), 'numpy.any', 'np.any', (['(self.annos != -1)'], {'axis': '(1)'}), '(self.annos != -1, axis=1)\n', (30360, 30386), True, 'import numpy as np\n'), ((30591, 30610), 'numpy.random.seed', 'np.random.seed', (['(592)'], {}), '(592)\n', (30605, 30610), True, 'import numpy as np\n'), ((33038, 33083), 'numpy.ones', 'np.ones', (['self.annos_test.shape[0]'], {'dtype': 'bool'}), '(self.annos_test.shape[0], dtype=bool)\n', (33045, 33083), True, 'import numpy as np\n'), ((34849, 34883), 'numpy.zeros', 'np.zeros', (['self.annos_test.shape[0]'], {}), '(self.annos_test.shape[0])\n', (34857, 34883), True, 'import numpy as np\n'), ((3614, 3639), 'os.path.exists', 
'os.path.exists', (['outputdir'], {}), '(outputdir)\n', (3628, 3639), False, 'import os, subprocess\n'), ((3657, 3676), 'os.mkdir', 'os.mkdir', (['outputdir'], {}), '(outputdir)\n', (3665, 3676), False, 'import os, subprocess\n'), ((4698, 4729), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)', '(100)'], {}), '(1, 1000, 100)\n', (4715, 4729), True, 'import numpy as np\n'), ((5462, 5473), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5470, 5473), True, 'import numpy as np\n'), ((16596, 16655), 'numpy.ones', 'np.ones', (['(self.N_unseentoks, self.num_classes)'], {'dtype': 'float'}), '((self.N_unseentoks, self.num_classes), dtype=float)\n', (16603, 16655), True, 'import numpy as np\n'), ((20592, 20617), 'numpy.zeros_like', 'np.zeros_like', (['self.annos'], {}), '(self.annos)\n', (20605, 20617), True, 'import numpy as np\n'), ((21684, 21709), 'numpy.zeros_like', 'np.zeros_like', (['self.annos'], {}), '(self.annos)\n', (21697, 21709), True, 'import numpy as np\n'), ((23957, 24090), 'baselines.ibcc.IBCC', 'ibcc.IBCC', ([], {'nclasses': 'self.num_classes', 'nscores': 'self.num_classes', 'nu0': 'self.ibcc_beta0', 'alpha0': 'self.ibcc_alpha0', 'uselowerbound': '(True)'}), '(nclasses=self.num_classes, nscores=self.num_classes, nu0=self.\n ibcc_beta0, alpha0=self.ibcc_alpha0, uselowerbound=True)\n', (23966, 24090), False, 'from baselines import ibcc, clustering, majority_voting\n'), ((25193, 25240), 'os.path.join', 'os.path.join', (['self.outputdir', '"""mace.prediction"""'], {}), "(self.outputdir, 'mace.prediction')\n", (25205, 25240), False, 'import os, subprocess\n'), ((27870, 27889), 'numpy.random.seed', 'np.random.seed', (['(592)'], {}), '(592)\n', (27884, 27889), True, 'import numpy as np\n'), ((29974, 29996), 'numpy.concatenate', 'np.concatenate', (['hc.res'], {}), '(hc.res)\n', (29988, 29996), True, 'import numpy as np\n'), ((30974, 31025), 'taggers.lample_lstm_tagger.lstm_wrapper.split_train_to_dev', 'lstm_wrapper.split_train_to_dev', 
(['labelled_sentences'], {}), '(labelled_sentences)\n', (31005, 31025), True, 'import taggers.lample_lstm_tagger.lstm_wrapper as lstm_wrapper\n'), ((31170, 31257), 'taggers.lample_lstm_tagger.lstm_wrapper.data_to_lstm_format', 'lstm_wrapper.data_to_lstm_format', (['self.text_val', 'self.doc_start_val', 'self.gold_val'], {}), '(self.text_val, self.doc_start_val, self.\n gold_val)\n', (31202, 31257), True, 'import taggers.lample_lstm_tagger.lstm_wrapper as lstm_wrapper\n'), ((31349, 31405), 'numpy.concatenate', 'np.concatenate', (['(train_sentences, dev_sentences)'], {'axis': '(0)'}), '((train_sentences, dev_sentences), axis=0)\n', (31363, 31405), True, 'import numpy as np\n'), ((31447, 31505), 'os.path.join', 'os.path.join', (['self.outputdir', "('models_LSTM_%s' % timestamp)"], {}), "(self.outputdir, 'models_LSTM_%s' % timestamp)\n", (31459, 31505), False, 'import os, subprocess\n'), ((35119, 35151), 'numpy.sum', 'np.sum', (['(self.annos != -1)'], {'axis': '(1)'}), '(self.annos != -1, axis=1)\n', (35125, 35151), True, 'import numpy as np\n'), ((35835, 35890), 'numpy.cumsum', 'np.cumsum', (['(self.annos_test[selected_toks] != -1)'], {'axis': '(1)'}), '(self.annos_test[selected_toks] != -1, axis=1)\n', (35844, 35890), True, 'import numpy as np\n'), ((10464, 10487), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10485, 10487), False, 'import datetime\n'), ((10872, 10897), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (10886, 10897), True, 'import numpy as np\n'), ((12489, 12512), 'numpy.any', 'np.any', (['(self.gold != -1)'], {}), '(self.gold != -1)\n', (12495, 12512), True, 'import numpy as np\n'), ((20821, 20838), 'numpy.any', 'np.any', (['valididxs'], {}), '(valididxs)\n', (20827, 20838), True, 'import numpy as np\n'), ((21362, 21392), 'numpy.arange', 'np.arange', (['self.annos.shape[0]'], {}), '(self.annos.shape[0])\n', (21371, 21392), True, 'import numpy as np\n'), ((21832, 21849), 'numpy.any', 'np.any', 
(['valididxs'], {}), '(valididxs)\n', (21838, 21849), True, 'import numpy as np\n'), ((22237, 22267), 'numpy.arange', 'np.arange', (['self.annos.shape[0]'], {}), '(self.annos.shape[0])\n', (22246, 22267), True, 'import numpy as np\n'), ((23282, 23327), 'numpy.ones', 'np.ones', (['(self.num_classes, self.num_classes)'], {}), '((self.num_classes, self.num_classes))\n', (23289, 23327), True, 'import numpy as np\n'), ((23630, 23655), 'numpy.ones', 'np.ones', (['self.num_classes'], {}), '(self.num_classes)\n', (23637, 23655), True, 'import numpy as np\n'), ((25046, 25082), 'os.path.join', 'os.path.join', (['self.outputdir', '"""mace"""'], {}), "(self.outputdir, 'mace')\n", (25058, 25082), False, 'import os, subprocess\n'), ((26833, 26920), 'taggers.lample_lstm_tagger.lstm_wrapper.data_to_lstm_format', 'lstm_wrapper.data_to_lstm_format', (['self.text_val', 'self.doc_start_val', 'self.gold_val'], {}), '(self.text_val, self.doc_start_val, self.\n gold_val)\n', (26865, 26920), True, 'import taggers.lample_lstm_tagger.lstm_wrapper as lstm_wrapper\n'), ((32255, 32278), 'numpy.ones', 'np.ones', (['self.N_nocrowd'], {}), '(self.N_nocrowd)\n', (32262, 32278), True, 'import numpy as np\n'), ((32655, 32672), 'numpy.ones', 'np.ones', (['N_unseen'], {}), '(N_unseen)\n', (32662, 32672), True, 'import numpy as np\n'), ((33647, 33677), 'numpy.cumsum', 'np.cumsum', (['self.doc_start_test'], {}), '(self.doc_start_test)\n', (33656, 33677), True, 'import numpy as np\n'), ((33783, 33807), 'numpy.unique', 'np.unique', (['docids_by_tok'], {}), '(docids_by_tok)\n', (33792, 33807), True, 'import numpy as np\n'), ((34681, 34710), 'numpy.argsort', 'np.argsort', (['most_likely_probs'], {}), '(most_likely_probs)\n', (34691, 34710), True, 'import numpy as np\n'), ((34767, 34797), 'numpy.cumsum', 'np.cumsum', (['self.doc_start_test'], {}), '(self.doc_start_test)\n', (34776, 34797), True, 'import numpy as np\n'), ((10819, 10854), 'numpy.ceil', 'np.ceil', (['(AL_batch_fraction * Nannos)'], {}), 
'(AL_batch_fraction * Nannos)\n', (10826, 10854), True, 'import numpy as np\n'), ((11589, 11605), 'numpy.arange', 'np.arange', (['Ndocs'], {}), '(Ndocs)\n', (11598, 11605), True, 'import numpy as np\n'), ((14935, 14968), 'pickle.dump', 'pickle.dump', (['probs_allmethods', 'fh'], {}), '(probs_allmethods, fh)\n', (14946, 14968), False, 'import pickle\n'), ((23487, 23512), 'numpy.ones', 'np.ones', (['self.num_classes'], {}), '(self.num_classes)\n', (23494, 23512), True, 'import numpy as np\n'), ((23756, 23801), 'numpy.ones', 'np.ones', (['(self.num_classes, self.num_classes)'], {}), '((self.num_classes, self.num_classes))\n', (23763, 23801), True, 'import numpy as np\n'), ((23913, 23937), 'numpy.eye', 'np.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (23919, 23937), True, 'import numpy as np\n'), ((33166, 33218), 'numpy.sum', 'np.sum', (['(self.annos_test[selected_toks] != -1)'], {'axis': '(1)'}), '(self.annos_test[selected_toks] != -1, axis=1)\n', (33172, 33218), True, 'import numpy as np\n'), ((33221, 33253), 'numpy.sum', 'np.sum', (['(self.annos != -1)'], {'axis': '(1)'}), '(self.annos != -1, axis=1)\n', (33227, 33253), True, 'import numpy as np\n'), ((33294, 33322), 'numpy.argwhere', 'np.argwhere', (['unfinished_toks'], {}), '(unfinished_toks)\n', (33305, 33322), True, 'import numpy as np\n'), ((35306, 35356), 'numpy.concatenate', 'np.concatenate', (['(selected_docs, new_selected_docs)'], {}), '((selected_docs, new_selected_docs))\n', (35320, 35356), True, 'import numpy as np\n'), ((35998, 36019), 'numpy.sum', 'np.sum', (['selected_toks'], {}), '(selected_toks)\n', (36004, 36019), True, 'import numpy as np\n'), ((36344, 36370), 'numpy.argwhere', 'np.argwhere', (['selected_toks'], {}), '(selected_toks)\n', (36355, 36370), True, 'import numpy as np\n'), ((11107, 11134), 'numpy.ones', 'np.ones', (['Ndocs'], {'dtype': 'float'}), '(Ndocs, dtype=float)\n', (11114, 11134), True, 'import numpy as np\n'), ((14846, 14900), 'os.path.join', 'os.path.join', 
(['self.outputdir', "('probs_%s.pkl' % file_id)"], {}), "(self.outputdir, 'probs_%s.pkl' % file_id)\n", (14858, 14900), False, 'import os, subprocess\n'), ((15140, 15181), 'pickle.dump', 'pickle.dump', (['probs_allmethods_nocrowd', 'fh'], {}), '(probs_allmethods_nocrowd, fh)\n', (15151, 15181), False, 'import pickle\n'), ((26434, 26454), 'numpy.arange', 'np.arange', (['num_types'], {}), '(num_types)\n', (26443, 26454), True, 'import numpy as np\n'), ((26529, 26549), 'numpy.arange', 'np.arange', (['num_types'], {}), '(num_types)\n', (26538, 26549), True, 'import numpy as np\n'), ((11662, 11697), 'numpy.in1d', 'np.in1d', (['unseen_docs', 'selected_docs'], {}), '(unseen_docs, selected_docs)\n', (11669, 11697), True, 'import numpy as np\n'), ((15039, 15101), 'os.path.join', 'os.path.join', (['self.outputdir', "('probs_nocrowd_%s.pkl' % file_id)"], {}), "(self.outputdir, 'probs_nocrowd_%s.pkl' % file_id)\n", (15051, 15101), False, 'import os, subprocess\n'), ((15650, 15667), 'numpy.prod', 'np.prod', (['seq_prob'], {}), '(seq_prob)\n', (15657, 15667), True, 'import numpy as np\n'), ((21087, 21118), 'numpy.unique', 'np.unique', (['self.gold[valididxs]'], {}), '(self.gold[valididxs])\n', (21096, 21118), True, 'import numpy as np\n'), ((22116, 22147), 'numpy.unique', 'np.unique', (['self.gold[valididxs]'], {}), '(self.gold[valididxs])\n', (22125, 22147), True, 'import numpy as np\n'), ((17156, 17216), 'baselines.majority_voting.MajorityVoting', 'majority_voting.MajorityVoting', (['self.annos', 'self.num_classes'], {}), '(self.annos, self.num_classes)\n', (17186, 17216), False, 'from baselines import ibcc, clustering, majority_voting\n'), ((11759, 11789), 'numpy.cumsum', 'np.cumsum', (['self.doc_start_test'], {}), '(self.doc_start_test)\n', (11768, 11789), True, 'import numpy as np\n')] |
import sys
sys.path.insert(0, './condiciones_iniciales')
import initial_cond_cover as icc
import fun_ode as ode
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random  # can be removed once the "real" cover-change rates are available

# Initial conditions for soil covers, sorted by cover name.
data0_cover = sorted(icc.initial_cover())
x0_cover = [row[1] for row in data0_cover]   # initial area per cover
name_cover = [row[0] for row in data0_cover]  # cover names (used as CSV headers)
nc = len(x0_cover)

# Matrix of cover-transformation rates: cover_rates[i][j] is the rate at which
# cover i turns into cover j. A cover does not transform into itself, so the
# diagonal is zero; off-diagonal entries are placeholder random rates until the
# measured values are available. (The original comment claimed the diagonal was
# one, but the code has always set it to zero.)
cover_rates = [[0 if i == j else random.uniform(0.01, 0.02) for j in range(nc)]
               for i in range(nc)]

# INTEGRATOR CONFIGURATION
tmin = 2020
tmax = 2030
time = np.arange(tmin, tmax, 1)  # one step per year; tmax itself is excluded

x0_cover = np.array(x0_cover)
cover_rates = np.array(cover_rates)
cover_rates_t = cover_rates.transpose()
nx0 = len(x0_cover)

# Integrate the cover ODE system (odeint uses LSODA internally).
Ys = odeint(ode.differential_equations, x0_cover, time, args=(cover_rates, cover_rates_t))
Yt = np.sum(Ys, axis=1)  # total area at each time step (rows = time)

# Export time series as .csv file.
cover_time_series = pd.DataFrame(Ys, columns=name_cover)
cover_time_series.to_csv('./outputs/cover_time_series.csv', float_format='%.2f')

# OPTIONAL - PLOT TIME SERIES
# for i in range(nx0):
#     j = i + 1
#     plt.plot(time, Ys[:, i], label=name_cover[i])
#     plt.scatter(time, Ys[:, i])
# plt.plot(time, Yt, label='Total area')
# plt.legend(loc='best')
# plt.xlabel('time')
# plt.grid()
# plt.show()
# plt.show() | [
"random.uniform",
"sys.path.insert",
"scipy.integrate.odeint",
"numpy.array",
"numpy.sum",
"initial_cond_cover.initial_cover",
"pandas.DataFrame",
"numpy.arange"
] | [((12, 57), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./condiciones_iniciales"""'], {}), "(0, './condiciones_iniciales')\n", (27, 57), False, 'import sys\n'), ((992, 1016), 'numpy.arange', 'np.arange', (['tmin', 'tmax', '(1)'], {}), '(tmin, tmax, 1)\n', (1001, 1016), True, 'import numpy as np\n'), ((1029, 1047), 'numpy.array', 'np.array', (['x0_cover'], {}), '(x0_cover)\n', (1037, 1047), True, 'import numpy as np\n'), ((1062, 1083), 'numpy.array', 'np.array', (['cover_rates'], {}), '(cover_rates)\n', (1070, 1083), True, 'import numpy as np\n'), ((1172, 1261), 'scipy.integrate.odeint', 'odeint', (['ode.differential_equations', 'x0_cover', 'time'], {'args': '(cover_rates, cover_rates_t)'}), '(ode.differential_equations, x0_cover, time, args=(cover_rates,\n cover_rates_t))\n', (1178, 1261), False, 'from scipy.integrate import odeint\n'), ((1263, 1281), 'numpy.sum', 'np.sum', (['Ys'], {'axis': '(1)'}), '(Ys, axis=1)\n', (1269, 1281), True, 'import numpy as np\n'), ((1382, 1418), 'pandas.DataFrame', 'pd.DataFrame', (['Ys'], {'columns': 'name_cover'}), '(Ys, columns=name_cover)\n', (1394, 1418), True, 'import pandas as pd\n'), ((351, 370), 'initial_cond_cover.initial_cover', 'icc.initial_cover', ([], {}), '()\n', (368, 370), True, 'import initial_cond_cover as icc\n'), ((884, 910), 'random.uniform', 'random.uniform', (['(0.01)', '(0.02)'], {}), '(0.01, 0.02)\n', (898, 910), False, 'import random\n')] |
"""
INF 552 Homework 5
Part 1
Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac)
Date: 3/27/2018
Programming Language: Python 3.6
"""
import numpy as np
import imageio
N_FEATURES = 30 * 32
N_HIDDEN_SIZE = 100
weight_random_low = -1
weight_random_high = 1
TRAINING_SIZE = 184
TESTING_SIZE = 83
EPOCHS = 1000
LR = 1 # learning rate
train_size = 184
test_size = 83
epochs = 1000
TRAINING_LIST_NAME = 'downgesture_train.list'
TESTING_LIST_NAME = 'downgesture_test.list'
def getXandY(filename, sample_size):
with open(filename) as file:
training_list = file.read().splitlines()
# print(training_list)
training_set_size = len(training_list)
X = np.empty((0, N_FEATURES), float)
for sample in training_list[:sample_size]:
im = imageio.imread(sample) / 255.0
X = np.vstack((X, im.flatten()))
Y = np.zeros((training_set_size, 1))
for i in range(training_set_size):
if "down" in training_list[i]:
Y[i] = 1
Y = Y[:sample_size]
return X, Y
class NNClassifier:
def __init__(self, epochs=epochs, n_hidden_size=N_HIDDEN_SIZE, learning_rate=LR):
self.epochs = epochs
self.n_hidden_size = n_hidden_size
self.n_features = N_FEATURES
self.LR = learning_rate
self.w1, self.w2 = self._init_weights()
self.X_biased = self.S1 = self.act1 = self.S2 = self.act2= np.array([])
self.grad1 = self.grad2 = self.delta1 = self.delta2 = np.array([])
def _init_weights(self):
w1 = np.random.uniform(weight_random_low, weight_random_high,
size=self.n_hidden_size * (self.n_features + 1))
w1 = w1.reshape(self.n_features + 1, self.n_hidden_size)
w2 = np.random.uniform(weight_random_low, weight_random_high,
size=1 * (self.n_hidden_size + 1))
w2 = w2.reshape(self.n_hidden_size + 1, 1)
return w1, w2
def sigmod(self, s):
return 1.0 / (1.0 + np.exp(-s))
def sigmod_derivative(self, sig):
# return 1.0 - sig ** 2
return sig * (1.0 - sig)
def add_bias(self, X):
intercept = np.ones((X.shape[0], 1))
X_new = np.hstack((intercept, X))
return X_new
def forward(self, X):
self.X_biased = self.add_bias(X)
self.S1 = np.dot(self.X_biased, self.w1)
self.act1 = self.sigmod(self.S1)
self.act1 = self.add_bias(self.act1)
self.S2 = np.dot(self.act1, self.w2)
self.act2 = self.sigmod(self.S2)
def backprop(self, Y):
self.delta2 = (self.act2 - Y) * self.sigmod_derivative(self.act2) * 2
self.delta1 = self.sigmod_derivative(self.act1) * np.dot(self.delta2, self.w2.T)
self.delta1 = self.delta1[:, 1:]
grad2 = np.dot(self.act1.T, self.delta2) / self.delta2.shape[0]
self.w2 -= self.LR * grad2
grad1 = np.dot(self.X_biased.T, self.delta1) / self.delta1.shape[0]
self.w1 -= self.LR * grad1
def fit(self, X, Y):
for i in range(self.epochs):
self.forward(X)
self.backprop(Y)
if (i%100 == 0 ) and (i!= 0):
error = np.mean((self.act2 - Y)**2)
# print("error of {i} is {error}".format(i = i, error= error))
return self
def predict_probab(self, X):
X_biased = self.add_bias(X)
S1 = np.dot(X_biased, self.w1)
act1 = self.sigmod(S1)
act1 = self.add_bias(act1)
S2 = np.dot(act1, self.w2)
act2 = self.sigmod(S2)
probability = act2
return probability
def predict_hard(self, X):
probability = self.predict_probab(X)
prediction = np.round(probability)
return prediction
def score(self, X, Y):
prediction = self.predict_hard(X)
print("\nTrue Labels\n{a}".format(a=Y.ravel()))
print("\nPredict Labels\n{a}".format(a=prediction.ravel()))
accuracy = (prediction == Y).sum().astype(float) / len(Y)
return accuracy
X_train, Y_train = getXandY(TRAINING_LIST_NAME, train_size)
X_test, Y_test = getXandY(TESTING_LIST_NAME, test_size)
nn = NNClassifier()
nn.fit(X_train, Y_train)
# print("\nScore on training set:\n{a}".format(a=nn.score(X_train, Y_train)))
print("\nScore\n{a}".format(a=nn.score(X_test, Y_test)))
| [
"numpy.mean",
"numpy.ones",
"numpy.hstack",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.empty",
"numpy.random.uniform",
"imageio.imread",
"numpy.round"
] | [((699, 731), 'numpy.empty', 'np.empty', (['(0, N_FEATURES)', 'float'], {}), '((0, N_FEATURES), float)\n', (707, 731), True, 'import numpy as np\n'), ((874, 906), 'numpy.zeros', 'np.zeros', (['(training_set_size, 1)'], {}), '((training_set_size, 1))\n', (882, 906), True, 'import numpy as np\n'), ((1410, 1422), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1418, 1422), True, 'import numpy as np\n'), ((1485, 1497), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1493, 1497), True, 'import numpy as np\n'), ((1541, 1651), 'numpy.random.uniform', 'np.random.uniform', (['weight_random_low', 'weight_random_high'], {'size': '(self.n_hidden_size * (self.n_features + 1))'}), '(weight_random_low, weight_random_high, size=self.\n n_hidden_size * (self.n_features + 1))\n', (1558, 1651), True, 'import numpy as np\n'), ((1756, 1852), 'numpy.random.uniform', 'np.random.uniform', (['weight_random_low', 'weight_random_high'], {'size': '(1 * (self.n_hidden_size + 1))'}), '(weight_random_low, weight_random_high, size=1 * (self.\n n_hidden_size + 1))\n', (1773, 1852), True, 'import numpy as np\n'), ((2170, 2194), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2177, 2194), True, 'import numpy as np\n'), ((2211, 2236), 'numpy.hstack', 'np.hstack', (['(intercept, X)'], {}), '((intercept, X))\n', (2220, 2236), True, 'import numpy as np\n'), ((2344, 2374), 'numpy.dot', 'np.dot', (['self.X_biased', 'self.w1'], {}), '(self.X_biased, self.w1)\n', (2350, 2374), True, 'import numpy as np\n'), ((2479, 2505), 'numpy.dot', 'np.dot', (['self.act1', 'self.w2'], {}), '(self.act1, self.w2)\n', (2485, 2505), True, 'import numpy as np\n'), ((3397, 3422), 'numpy.dot', 'np.dot', (['X_biased', 'self.w1'], {}), '(X_biased, self.w1)\n', (3403, 3422), True, 'import numpy as np\n'), ((3502, 3523), 'numpy.dot', 'np.dot', (['act1', 'self.w2'], {}), '(act1, self.w2)\n', (3508, 3523), True, 'import numpy as np\n'), ((3707, 3728), 'numpy.round', 'np.round', 
(['probability'], {}), '(probability)\n', (3715, 3728), True, 'import numpy as np\n'), ((793, 815), 'imageio.imread', 'imageio.imread', (['sample'], {}), '(sample)\n', (807, 815), False, 'import imageio\n'), ((2711, 2741), 'numpy.dot', 'np.dot', (['self.delta2', 'self.w2.T'], {}), '(self.delta2, self.w2.T)\n', (2717, 2741), True, 'import numpy as np\n'), ((2799, 2831), 'numpy.dot', 'np.dot', (['self.act1.T', 'self.delta2'], {}), '(self.act1.T, self.delta2)\n', (2805, 2831), True, 'import numpy as np\n'), ((2906, 2942), 'numpy.dot', 'np.dot', (['self.X_biased.T', 'self.delta1'], {}), '(self.X_biased.T, self.delta1)\n', (2912, 2942), True, 'import numpy as np\n'), ((2006, 2016), 'numpy.exp', 'np.exp', (['(-s)'], {}), '(-s)\n', (2012, 2016), True, 'import numpy as np\n'), ((3187, 3216), 'numpy.mean', 'np.mean', (['((self.act2 - Y) ** 2)'], {}), '((self.act2 - Y) ** 2)\n', (3194, 3216), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
from numpy import pi
from ..Contour import Contour
from ..Paths import ComplexArc
from .Annulus import Annulus
class Circle(Contour):
"""
A positively oriented circle in the complex plane.
Parameters
----------
center : complex
The center of the circle.
radius : float
The radius of the circle.
Examples
--------
.. plot::
:include-source:
from cxroots import Circle
circle = Circle(center=1, radius=0.5)
circle.show()
"""
def __init__(self, center, radius):
self.center = center
self.radius = radius
self.axisName = ('r')
segments = [ComplexArc(center, radius, 0, 2*pi)]
super(Circle, self).__init__(segments)
def __str__(self):
return 'Circle: center={center.real:.3f}{center.imag:+.3f}i, radius={radius:.3f}'.format(center=self.center, radius=self.radius)
def contains(self, z):
""" Returns True if the point z lies within the contour, False if otherwise """
return abs(z - self.center) < self.radius
@property
def centralPoint(self):
return self.center
@property
def area(self):
return pi*self.radius**2
def subdivide(self, axis='r', divisionFactor=0.5):
"""
Subdivide the contour
Parameters
----------
axis : str, can only be 'r' (argument kept for consistency with 'subdivisions' method in parent Contour class)
The axis along which the line subdividing the contour is a constant.
divisionFactor : float in range (0,1), optional
Determines the point along 'axis' at which the line dividing the box is placed
Returns
-------
box1 : Annulus
With inner radius determined by the divisionFactor and outer radius equal to that of the original circle
box2 : Circle
With radius equal to the inner radius of box1
"""
if axis == 'r' or self.axisName[axis] == 'r':
box1 = Annulus(self.center, [self.radius*divisionFactor, self.radius])
box2 = Circle(self.center, self.radius*divisionFactor)
box1.segments[0] = self.segments[0]
box1.segments[1]._reversePath = box2.segments[0]
box2.segments[0]._reversePath = box1.segments[1]
for box in [box1, box2]:
box._createdBySubdivisionAxis = axis
box._parentBox = self
self._childBoxes = [box1, box2]
return box1, box2
def randomPoint(self):
""" Returns a random point inside the Circle """
r = np.random.uniform(0,self.radius)
phi = np.random.uniform(0,2*pi)
return r*exp(1j*phi) + self.center
| [
"numpy.random.uniform"
] | [((2318, 2351), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.radius'], {}), '(0, self.radius)\n', (2335, 2351), True, 'import numpy as np\n'), ((2359, 2387), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * pi)'], {}), '(0, 2 * pi)\n', (2376, 2387), True, 'import numpy as np\n')] |
"""
This module computes some 'importance' of words for a corpus with the tdf-idf-measure as a whole document.
>>> test = tdfidf('This seems to be a document about Abraham. this seems to be another document Abraham. that seems not to be a document about Abraham. An this seems to be just shit Abrahamn Obrobobom Abraham Abraham.')
>>> print (test.get_vector('this'))
0.29559878344928797
>>> print (test.importance_of_word('this'))
0.7044012165507121
>>> print (test.sentence2vec(["this","seems","not"]))
[0.70440122 0.60586829 0.90146707]
"""
import numpy as np
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
class tdfidf:
def __init__(self, corpus_lemmata):
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer(smooth_idf=False)
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
X = vectorizer.fit_transform([corpus_lemmata])
counts = X.toarray()
self.tdf_idf = transformer.fit_transform(counts)
self.tdf_idf_dict = {v:i for i, v in enumerate(vectorizer.get_feature_names())}
def get_vector(self, word):
return self.tdf_idf[0,self.tdf_idf_dict[word]]
def importance_of_word(self, word):
try:
return 1- self.tdf_idf[0,self.tdf_idf_dict[word]]
except KeyError:
# if not (word.isdigit()) and (word not in ['.',',','?','!',':',';', '-PRON-', 'a']):
# logging.warning ("'%s' not in tdfidf-vocabulary." % word)
return 0.2
def sentence2vec(self, str_list):
return np.array([self.importance_of_word(w) for w in str_list]) ** 3
def sentence2relevantwords(self, str_list, min_thr, max_thr):
return [w
for w in str_list
if self.importance_of_word(w) > min_thr
and self.importance_of_word(w) < max_thr]
def half_importance(self, str_list):
return np.array([0.5 for w in str_list])
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"logging.NullHandler",
"sklearn.feature_extraction.text.TfidfTransformer",
"logging.getLogger",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.array",
"doctest.testmod"
] | [((623, 644), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (642, 644), False, 'import logging\n'), ((2086, 2103), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (2101, 2103), False, 'import doctest\n'), ((584, 611), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (601, 611), False, 'import logging\n'), ((792, 826), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'smooth_idf': '(False)'}), '(smooth_idf=False)\n', (808, 826), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((916, 933), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (931, 933), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1999, 2034), 'numpy.array', 'np.array', (['[(0.5) for w in str_list]'], {}), '([(0.5) for w in str_list])\n', (2007, 2034), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
Synthesizes the results of fits into a single file per harmonic.
"""
import re
import os
import math
import numpy as np
import cycle
import sys
if len(sys.argv)>1:
cycidf = sys.argv[1]
else:
cycidf = cycle.select() # cycle identifier
cycdir = cycle.directory(cycidf) # cycle directory
groups = cycle.groups(cycidf, 'fits') # groups
def load(impstm):
"""Load data from fit from a sample."""
with open(impstm, 'r') as f:
f.readline()
dst = eval(f.readline())
for i in range(4):
f.readline()
dat = np.loadtxt(f).T
res = {
'j': dat[0],
'L': dat[1],
'd': dat[3],
'R': dat[4],
'D': dst,
}
if len(dat) > 5:
res['f'] = dat[5]
return res
def analyze(dat, j):
"""Analyze a model."""
msk = dat['j']==j
d = dat['d'][msk]
R = dat['R'][msk]
res = {
'avg-d': np.abs(np.mean(d)*1e18-dat['D'])/dat['D'],
'avg-R': np.mean(R),
'std-d': np.std(d)*1e18/dat['D'],
'std-R': np.std(R),
}
if 'f' in dat:
f = dat['f'][msk]
res['avg-f'] = np.mean(f)
res['std-f'] = np.std(f)
return res
def sample(impstm, j):
"""Analyze a sample."""
res = {}
for mod in ('GUW1', 'GUW2', 'W1', 'W2'):
pth = os.path.join(impstm, f"fits_data_{mod}.dat")
dat = load(pth)
res[mod] = analyze(dat, j)
return res
def adjust(tab):
"""Make the items of a column the same width."""
for j in range(len(tab[0])):
w = max([len(tab[i][j]) for i in range(len(tab))])
for i in range(len(tab)):
tab[i][j] = format(tab[i][j], f'>{w}')
for group in groups:
expdir = os.path.join(cycdir, f"synthesis_{group}")
if not os.path.isdir(expdir):
os.makedirs(expdir)
for j in (1, 2):
lines = {'avg':[], 'std':[]}
for mtd in lines:
lines[mtd].append([
f'distribution',
f'GUW1-{mtd}-f',
f'GUW1-{mtd}-d',
f'GUW2-{mtd}-d',
f'W1-{mtd}-d',
f'W2-{mtd}-d',
f'GUW1-{mtd}-R',
f'GUW2-{mtd}-R',
f'W1-{mtd}-R',
f'W2-{mtd}-R',
])
pth0 = os.path.join(cycdir, f"fits_{group}")
for smp in os.listdir(pth0):
pth1 = os.path.join(pth0, smp)
res = sample(pth1, j)
for mtd in lines:
lines[mtd].append([
smp,
f"{res['GUW1'][f'{mtd}-f']:12.7e}",
f"{res['GUW1'][f'{mtd}-d']:12.7e}",
f"{res['GUW2'][f'{mtd}-d']:12.7e}",
f"{res['W1'][f'{mtd}-d']:12.7e}",
f"{res['W2'][f'{mtd}-d']:12.7e}",
f"{res['GUW1'][f'{mtd}-R']:12.7e}",
f"{res['GUW2'][f'{mtd}-d']:12.7e}",
f"{res['W1'][f'{mtd}-R']:12.7e}",
f"{res['W2'][f'{mtd}-R']:12.7e}",
])
for mtd in lines:
adjust(lines[mtd])
with open(os.path.join(expdir, f"{mtd}_j{j}.csv"), "w") as f:
for l in lines[mtd]:
f.write("; ".join(l)+"\n")
| [
"numpy.mean",
"os.listdir",
"os.makedirs",
"cycle.groups",
"os.path.join",
"os.path.isdir",
"numpy.std",
"cycle.directory",
"numpy.loadtxt",
"cycle.select"
] | [((296, 319), 'cycle.directory', 'cycle.directory', (['cycidf'], {}), '(cycidf)\n', (311, 319), False, 'import cycle\n'), ((347, 375), 'cycle.groups', 'cycle.groups', (['cycidf', '"""fits"""'], {}), "(cycidf, 'fits')\n", (359, 375), False, 'import cycle\n'), ((253, 267), 'cycle.select', 'cycle.select', ([], {}), '()\n', (265, 267), False, 'import cycle\n'), ((1744, 1786), 'os.path.join', 'os.path.join', (['cycdir', 'f"""synthesis_{group}"""'], {}), "(cycdir, f'synthesis_{group}')\n", (1756, 1786), False, 'import os\n'), ((1003, 1013), 'numpy.mean', 'np.mean', (['R'], {}), '(R)\n', (1010, 1013), True, 'import numpy as np\n'), ((1074, 1083), 'numpy.std', 'np.std', (['R'], {}), '(R)\n', (1080, 1083), True, 'import numpy as np\n'), ((1159, 1169), 'numpy.mean', 'np.mean', (['f'], {}), '(f)\n', (1166, 1169), True, 'import numpy as np\n'), ((1193, 1202), 'numpy.std', 'np.std', (['f'], {}), '(f)\n', (1199, 1202), True, 'import numpy as np\n'), ((1342, 1386), 'os.path.join', 'os.path.join', (['impstm', 'f"""fits_data_{mod}.dat"""'], {}), "(impstm, f'fits_data_{mod}.dat')\n", (1354, 1386), False, 'import os\n'), ((1798, 1819), 'os.path.isdir', 'os.path.isdir', (['expdir'], {}), '(expdir)\n', (1811, 1819), False, 'import os\n'), ((1829, 1848), 'os.makedirs', 'os.makedirs', (['expdir'], {}), '(expdir)\n', (1840, 1848), False, 'import os\n'), ((2321, 2358), 'os.path.join', 'os.path.join', (['cycdir', 'f"""fits_{group}"""'], {}), "(cycdir, f'fits_{group}')\n", (2333, 2358), False, 'import os\n'), ((2378, 2394), 'os.listdir', 'os.listdir', (['pth0'], {}), '(pth0)\n', (2388, 2394), False, 'import os\n'), ((601, 614), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (611, 614), True, 'import numpy as np\n'), ((2415, 2438), 'os.path.join', 'os.path.join', (['pth0', 'smp'], {}), '(pth0, smp)\n', (2427, 2438), False, 'import os\n'), ((1032, 1041), 'numpy.std', 'np.std', (['d'], {}), '(d)\n', (1038, 1041), True, 'import numpy as np\n'), ((3159, 3198), 'os.path.join', 
'os.path.join', (['expdir', 'f"""{mtd}_j{j}.csv"""'], {}), "(expdir, f'{mtd}_j{j}.csv')\n", (3171, 3198), False, 'import os\n'), ((950, 960), 'numpy.mean', 'np.mean', (['d'], {}), '(d)\n', (957, 960), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import tensorflow as tf
import numpy as np
import os
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
def sample_z(mu, log_var):
eps = tf.random_normal(shape=tf.shape(mu))
res = mu + tf.exp(log_var/2)*eps
return res
def compute_kernel(x, y):
x_size = tf.shape(x)[0]
y_size = tf.shape(y)[0]
dim = tf.shape(x)[1]
tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
def compute_mmd(x, y, sigma_sqr=1.0):
x_kernel = compute_kernel(x, x)
y_kernel = compute_kernel(y, y)
xy_kernel = compute_kernel(x, y)
return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
def get_z_sample( batch_size, z_dim ):
return tf.random_normal(tf.stack([batch_size, z_dim]))
class AE_Stochastic_Mnist( object ):
def __init__(self, opts):
self.sess = tf.Session()
self.opts = opts
self.init()
self.saver = tf.train.Saver( max_to_keep = 100 )
def init( self ):
self.add_input_placeholder()
self.construct_clf()
self.construct_loss()
self.sess.run( [ tf.global_variables_initializer() ] )
os.mkdir( self.opts.cpt_path ) if not os.path.exists( self.opts.cpt_path ) else print()
def add_input_placeholder( self ):
opts= self.opts
with tf.variable_scope( "nn" ) as scope:
self.in_sample = tf.placeholder( tf.float32, [ None ] + opts.sample_shape )
self.in_label = tf.placeholder( tf.float32, [ None ] + opts.label_shape )
def construct_clf( self ):
with tf.variable_scope( "gating" ) as scope:
self.lc0 = tf.layers.dense( self.in_sample, 784, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.sigmoid, name = "gating_0" )
self.l = self.in_sample #* self.lc0
with tf.variable_scope( "nn" ) as scope:
self.l0g0 = tf.layers.dense( self.l, 256, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.relu, name = "dense_1" )
with tf.variable_scope( "gating" ) as scope:
self.l0c0 = tf.layers.dense( self.l, 256, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.sigmoid, name = "gating_1" )
self.l0 = self.l0g0 * self.l0c0
with tf.variable_scope( "nn" ) as scope:
self.l1g0 = tf.layers.dense( self.l0, 256, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.relu, name = "dense_2" )
with tf.variable_scope( "gating" ) as scope:
self.l1c0 = tf.layers.dense( self.l0, 256, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.sigmoid, name = "gating_2" )
self.l1 = self.l1g0 * self.l1c0
with tf.variable_scope( "nn" ) as scope:
self.logit = tf.layers.dense( self.l1, 10, name = "dense_out" )
self.prediction = tf.nn.softmax( self.logit )
with tf.variable_scope( "decode" ) as scope:
self.mu = tf.layers.dense( self.l1, 100, kernel_initializer=tf.random_normal_initializer, activation = None, name = "mu" )
self.logvar = tf.layers.dense( self.l1, 100, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.softplus, name = "logvar" )
self.z = sample_z( self.mu, self.logvar )
self.l2 = tf.layers.dense( self.l1, 256, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.relu, name = "dense_3" )
self.l3 = tf.layers.dense( self.l2, 256, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.relu, name = "dense_4" )
self.l4 = self.l2 = tf.layers.dense( self.l3, 784, kernel_initializer=tf.random_normal_initializer, activation = tf.nn.tanh, name = "dense_5" )
def construct_loss( self ):
self.classifier_loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2( logits = self.logit, labels = self.in_label ) )
self.nn_loss = self.classifier_loss # + tf.reduce_mean( tf.abs( self.l0g ) ) + tf.reduce_mean( tf.abs( self.l1g ) ) + tf.reduce_mean( tf.abs( self.logit ) )
self.gate_loss = self.classifier_loss + ( tf.reduce_mean( self.l0c0 ) + tf.reduce_mean( self.l1c0 ) )
self.elbo = 0.5 * tf.reduce_sum(tf.exp(self.logvar) + self.mu**2 - 1. - self.logvar, 1)
self.mmd = compute_mmd(get_z_sample( self.opts.batch_size, 100 ), self.z)
self.recon = tf.reduce_sum( tf.square( self.in_sample - self.l4 ), axis = 1 )
self.vae_loss = tf.reduce_mean( self.recon)
#self.loss = tf.reduce_mean( tf.square( self.in_label - self.prediction ) )
self.optim_nn = tf.train.AdamOptimizer( self.opts.lr, beta1 = 0.9, beta2 = 0.99 ).minimize( loss = self.nn_loss, var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='nn') )
self.optim_gate = tf.train.AdamOptimizer( self.opts.lr, beta1 = 0.9, beta2 = 0.99 ).minimize( loss = self.gate_loss, var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='gating') )
# self.optim_vae = tf.train.AdamOptimizer( self.opts.lr, beta1 = 0.9, beta2 = 0.99 ).minimize( loss = self.vae_loss, var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='nn') + \
# tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decode') )
# self.optim = tf.train.GradientDescentOptimizer( self.opts.lr ).minimize( loss = self.loss )
def train( self ):
self.loss_list = []
self.accu_list = []
for i in range( 0, self.opts.train_iter + 1 ):
in_sample, in_label = self.opts.data_source.next_batch()
# print(in_label)
# self.sess.run( self.optim_vae, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
self.sess.run( self.optim_nn, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
self.sess.run( self.optim_gate, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
if i % 100 == 0:
nn_loss = self.sess.run( self.nn_loss, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
gate_loss = self.sess.run( self.gate_loss, feed_dict = { self.in_sample : in_sample, self.in_label: in_label } )
print( "iter: ", i, "NN LOSS: ", nn_loss, "Gate LOSS: ", gate_loss )
print("-----------------")
if i % 1000 == 0:
in_sample, in_label = self.opts.data_source.get_test()
accu = self.predict( in_sample, in_label )
print( "Iter: ", i, "Accu: ", accu )
print("-------------------------------------")
self.accu_list.append( accu )
if i != 0 and i % 20000 == 0:
path = self.opts.cpt_path +"/"+ str( i )
os.mkdir( path )
path += "/model.ckpt"
self.saver.save( self.sess, path )
def predict( self, sample, label ):
res = self.sess.run( self.prediction, feed_dict = { self.in_sample : sample, self.in_label: label } )
res = np.argmax( res, axis = 1 )
true = np.argmax( label, axis = 1 )
print( res[:10] )
print( true[:10])
accu = np.sum(res == true) / res.shape[0]
return accu
| [
"tensorflow.shape",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.cast",
"os.path.exists",
"tensorflow.random_normal",
"tensorflow.Session",
"tensorflow.placeholder",
"os.mkdir",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"tensorflow.stack",
"tensorflow.variable_... | [((203, 253), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'size', 'stddev': 'xavier_stddev'}), '(shape=size, stddev=xavier_stddev)\n', (219, 253), True, 'import tensorflow as tf\n'), ((171, 192), 'tensorflow.sqrt', 'tf.sqrt', (['(in_dim / 2.0)'], {}), '(in_dim / 2.0)\n', (178, 192), True, 'import tensorflow as tf\n'), ((421, 432), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (429, 432), True, 'import tensorflow as tf\n'), ((449, 460), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (457, 460), True, 'import tensorflow as tf\n'), ((474, 485), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (482, 485), True, 'import tensorflow as tf\n'), ((554, 578), 'tensorflow.stack', 'tf.stack', (['[1, y_size, 1]'], {}), '([1, y_size, 1])\n', (562, 578), True, 'import tensorflow as tf\n'), ((645, 669), 'tensorflow.stack', 'tf.stack', (['[x_size, 1, 1]'], {}), '([x_size, 1, 1])\n', (653, 669), True, 'import tensorflow as tf\n'), ((1082, 1111), 'tensorflow.stack', 'tf.stack', (['[batch_size, z_dim]'], {}), '([batch_size, z_dim])\n', (1090, 1111), True, 'import tensorflow as tf\n'), ((1206, 1218), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1216, 1218), True, 'import tensorflow as tf\n'), ((1287, 1318), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(100)'}), '(max_to_keep=100)\n', (1301, 1318), True, 'import tensorflow as tf\n'), ((3228, 3253), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logit'], {}), '(self.logit)\n', (3241, 3253), True, 'import tensorflow as tf\n'), ((4848, 4874), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.recon'], {}), '(self.recon)\n', (4862, 4874), True, 'import tensorflow as tf\n'), ((7586, 7608), 'numpy.argmax', 'np.argmax', (['res'], {'axis': '(1)'}), '(res, axis=1)\n', (7595, 7608), True, 'import numpy as np\n'), ((7628, 7652), 'numpy.argmax', 'np.argmax', (['label'], {'axis': '(1)'}), '(label, axis=1)\n', (7637, 7652), True, 'import numpy as 
np\n'), ((315, 327), 'tensorflow.shape', 'tf.shape', (['mu'], {}), '(mu)\n', (323, 327), True, 'import tensorflow as tf\n'), ((344, 363), 'tensorflow.exp', 'tf.exp', (['(log_var / 2)'], {}), '(log_var / 2)\n', (350, 363), True, 'import tensorflow as tf\n'), ((525, 551), 'tensorflow.stack', 'tf.stack', (['[x_size, 1, dim]'], {}), '([x_size, 1, dim])\n', (533, 551), True, 'import tensorflow as tf\n'), ((616, 642), 'tensorflow.stack', 'tf.stack', (['[1, y_size, dim]'], {}), '([1, y_size, dim])\n', (624, 642), True, 'import tensorflow as tf\n'), ((745, 769), 'tensorflow.cast', 'tf.cast', (['dim', 'tf.float32'], {}), '(dim, tf.float32)\n', (752, 769), True, 'import tensorflow as tf\n'), ((930, 954), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x_kernel'], {}), '(x_kernel)\n', (944, 954), True, 'import tensorflow as tf\n'), ((957, 981), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_kernel'], {}), '(y_kernel)\n', (971, 981), True, 'import tensorflow as tf\n'), ((988, 1013), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xy_kernel'], {}), '(xy_kernel)\n', (1002, 1013), True, 'import tensorflow as tf\n'), ((1518, 1546), 'os.mkdir', 'os.mkdir', (['self.opts.cpt_path'], {}), '(self.opts.cpt_path)\n', (1526, 1546), False, 'import os\n'), ((1687, 1710), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""nn"""'], {}), "('nn')\n", (1704, 1710), True, 'import tensorflow as tf\n'), ((1752, 1806), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([None] + opts.sample_shape)'], {}), '(tf.float32, [None] + opts.sample_shape)\n', (1766, 1806), True, 'import tensorflow as tf\n'), ((1839, 1892), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([None] + opts.label_shape)'], {}), '(tf.float32, [None] + opts.label_shape)\n', (1853, 1892), True, 'import tensorflow as tf\n'), ((1942, 1969), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""gating"""'], {}), "('gating')\n", (1959, 1969), True, 'import tensorflow as tf\n'), ((2005, 2138), 
'tensorflow.layers.dense', 'tf.layers.dense', (['self.in_sample', '(784)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.sigmoid', 'name': '"""gating_0"""'}), "(self.in_sample, 784, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.sigmoid, name='gating_0')\n", (2020, 2138), True, 'import tensorflow as tf\n'), ((2206, 2229), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""nn"""'], {}), "('nn')\n", (2223, 2229), True, 'import tensorflow as tf\n'), ((2266, 2387), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l', '(256)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.relu', 'name': '"""dense_1"""'}), "(self.l, 256, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.relu, name='dense_1')\n", (2281, 2387), True, 'import tensorflow as tf\n'), ((2402, 2429), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""gating"""'], {}), "('gating')\n", (2419, 2429), True, 'import tensorflow as tf\n'), ((2466, 2591), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l', '(256)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.sigmoid', 'name': '"""gating_1"""'}), "(self.l, 256, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.sigmoid, name='gating_1')\n", (2481, 2591), True, 'import tensorflow as tf\n'), ((2647, 2670), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""nn"""'], {}), "('nn')\n", (2664, 2670), True, 'import tensorflow as tf\n'), ((2707, 2829), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l0', '(256)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.relu', 'name': '"""dense_2"""'}), "(self.l0, 256, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.relu, name='dense_2')\n", (2722, 2829), True, 'import tensorflow as tf\n'), ((2844, 2871), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""gating"""'], {}), "('gating')\n", (2861, 2871), 
True, 'import tensorflow as tf\n'), ((2908, 3034), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l0', '(256)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.sigmoid', 'name': '"""gating_2"""'}), "(self.l0, 256, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.sigmoid, name='gating_2')\n", (2923, 3034), True, 'import tensorflow as tf\n'), ((3090, 3113), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""nn"""'], {}), "('nn')\n", (3107, 3113), True, 'import tensorflow as tf\n'), ((3151, 3197), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l1', '(10)'], {'name': '"""dense_out"""'}), "(self.l1, 10, name='dense_out')\n", (3166, 3197), True, 'import tensorflow as tf\n'), ((3269, 3296), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decode"""'], {}), "('decode')\n", (3286, 3296), True, 'import tensorflow as tf\n'), ((3331, 3442), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l1', '(100)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'None', 'name': '"""mu"""'}), "(self.l1, 100, kernel_initializer=tf.\n random_normal_initializer, activation=None, name='mu')\n", (3346, 3442), True, 'import tensorflow as tf\n'), ((3471, 3596), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l1', '(100)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.softplus', 'name': '"""logvar"""'}), "(self.l1, 100, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.softplus, name='logvar')\n", (3486, 3596), True, 'import tensorflow as tf\n'), ((3674, 3796), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l1', '(256)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.relu', 'name': '"""dense_3"""'}), "(self.l1, 256, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.relu, name='dense_3')\n", (3689, 3796), True, 'import tensorflow as tf\n'), ((3820, 3942), 'tensorflow.layers.dense', 
'tf.layers.dense', (['self.l2', '(256)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.relu', 'name': '"""dense_4"""'}), "(self.l2, 256, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.relu, name='dense_4')\n", (3835, 3942), True, 'import tensorflow as tf\n'), ((3976, 4098), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.l3', '(784)'], {'kernel_initializer': 'tf.random_normal_initializer', 'activation': 'tf.nn.tanh', 'name': '"""dense_5"""'}), "(self.l3, 784, kernel_initializer=tf.\n random_normal_initializer, activation=tf.nn.tanh, name='dense_5')\n", (3991, 4098), True, 'import tensorflow as tf\n'), ((4184, 4272), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'self.logit', 'labels': 'self.in_label'}), '(logits=self.logit, labels=self.\n in_label)\n', (4226, 4272), True, 'import tensorflow as tf\n'), ((4774, 4809), 'tensorflow.square', 'tf.square', (['(self.in_sample - self.l4)'], {}), '(self.in_sample - self.l4)\n', (4783, 4809), True, 'import tensorflow as tf\n'), ((7724, 7743), 'numpy.sum', 'np.sum', (['(res == true)'], {}), '(res == true)\n', (7730, 7743), True, 'import numpy as np\n'), ((1472, 1505), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1503, 1505), True, 'import tensorflow as tf\n'), ((1556, 1590), 'os.path.exists', 'os.path.exists', (['self.opts.cpt_path'], {}), '(self.opts.cpt_path)\n', (1570, 1590), False, 'import os\n'), ((4491, 4516), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.l0c0'], {}), '(self.l0c0)\n', (4505, 4516), True, 'import tensorflow as tf\n'), ((4521, 4546), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.l1c0'], {}), '(self.l1c0)\n', (4535, 4546), True, 'import tensorflow as tf\n'), ((4993, 5052), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.opts.lr'], {'beta1': '(0.9)', 'beta2': '(0.99)'}), '(self.opts.lr, beta1=0.9, 
beta2=0.99)\n', (5015, 5052), True, 'import tensorflow as tf\n'), ((5102, 5165), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""nn"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='nn')\n", (5119, 5165), True, 'import tensorflow as tf\n'), ((5194, 5253), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.opts.lr'], {'beta1': '(0.9)', 'beta2': '(0.99)'}), '(self.opts.lr, beta1=0.9, beta2=0.99)\n', (5216, 5253), True, 'import tensorflow as tf\n'), ((5305, 5372), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""gating"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='gating')\n", (5322, 5372), True, 'import tensorflow as tf\n'), ((7315, 7329), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (7323, 7329), False, 'import os\n'), ((705, 733), 'tensorflow.square', 'tf.square', (['(tiled_x - tiled_y)'], {}), '(tiled_x - tiled_y)\n', (714, 733), True, 'import tensorflow as tf\n'), ((4600, 4619), 'tensorflow.exp', 'tf.exp', (['self.logvar'], {}), '(self.logvar)\n', (4606, 4619), True, 'import tensorflow as tf\n')] |
"""
author: ASCKSV a.k.a <NAME>
"""
import numpy as np
import cv2
import os
cap = cv2.VideoCapture(0)  # open the default webcam (device index 0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # camera frame width in pixels
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # camera frame height in pixels
fps = cap.get(cv2.CAP_PROP_FPS)  # reported camera frame rate (currently unused)
wrapper_step = 1   # pixels the freeze line advances each update
wrapper_speed = 1  # advance the line once every `wrapper_speed` frames
cv2.namedWindow("Time Warp Filter", cv2.WINDOW_AUTOSIZE)
# cv2.namedWindow("Time Warp Filter", cv2.WND_PROP_FULLSCREEN)
# cv2.setWindowProperty("Time Warp Filter", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
def time_warp(orientation=0):
    """Interactive webcam "time warp" filter.

    A line sweeps across the live frame; every pixel the line has passed
    is frozen into ``final_image`` while the rest of the frame keeps
    showing the live feed.

    :param orientation: 0 sweeps a horizontal line top-to-bottom; any
        other value sweeps a vertical line left-to-right
    :returns: the final composited BGR image (height x width x 3)

    Key bindings: q quit, r restart the sweep, i restart with the other
    orientation, p pause/resume, s save to ./timeWarp.jpg and quit.
    """
    wrapper_height = 0
    count = 0
    final_image = np.zeros((height, width, 3), np.uint8)
    use_cam = True
    pause = False
    pause_value = 0
    while True:
        count += 1
        # Capture frame-by-frame
        if use_cam:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)  # mirror so the preview matches the viewer
        # Our operations on the frame come here
        if not pause:
            if count % wrapper_speed == wrapper_speed - 1:
                wrapper_height_before = wrapper_height
                wrapper_height += wrapper_step
        else:
            wrapper_height = pause_value  # hold the line where it was paused
        if orientation == 0:
            # NOTE(review): the chained [:][:] slices are no-ops -- frame[a:b]
            # already selects full rows with all columns and channels.
            frame_freeze = frame[wrapper_height_before: wrapper_height][:][:]
            frame_remaining = frame[wrapper_height:][:][:]
            # freeze the newly-swept strip, keep showing live feed below it
            final_image[wrapper_height_before: wrapper_height, :width, :3] = frame_freeze
            final_image[wrapper_height:, :width, :3] = frame_remaining
            if wrapper_height > height:
                use_cam = False  # sweep finished: stop grabbing frames
                cv2.imshow('Time Warp Filter', final_image)
        else:
            # vertical sweep: freeze columns instead of rows
            frame_freeze = frame[:height, wrapper_height_before: wrapper_height, :3]
            frame_remaining = frame[:height, wrapper_height:, :3]
            final_image[:height, wrapper_height_before: wrapper_height, :3] = frame_freeze
            final_image[:height, wrapper_height:, :3] = frame_remaining
            if wrapper_height > width:
                use_cam = False  # sweep finished: stop grabbing frames
                cv2.imshow('Time Warp Filter', final_image)
        # overlay the moving sweep line for display only
        moving_wrapper = draw_wrapper(final_image, wrapper_height, orientation)
        cv2.imshow("Time Warp Filter", moving_wrapper)
        # key bindings
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        if k == ord('r'):
            # restart the sweep from the top/left edge
            use_cam = True
            pause = False
            wrapper_height = 0
        if k == ord('i'):
            # restart and toggle between horizontal and vertical sweep
            use_cam = True
            pause = False
            wrapper_height = 0
            orientation = (orientation + 1) % 2
        if k == ord('p'):
            pause = False if pause else True
            if pause:
                pause_value = wrapper_height  # remember where the line stopped
        if k == ord('s'):
            path = './'
            cv2.imwrite(os.path.join(path, 'timeWarp.jpg'), final_image)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    return final_image
def draw_wrapper(bg_image, wrapper_height, orientation):
    """Draw the sweep line onto ``bg_image`` at the current position.

    ``orientation`` 0 draws a horizontal line across the full frame width
    at row ``wrapper_height``; any other value draws a vertical line
    across the full frame height at that column. The line is drawn
    in-place (1 px, blue in BGR) and the same image is returned.
    """
    if orientation == 0:
        # horizontal sweep line at row `wrapper_height`
        start_point, end_point = (0, wrapper_height), (width, wrapper_height)
    else:
        # vertical sweep line at column `wrapper_height`
        start_point, end_point = (wrapper_height, 0), (wrapper_height, height)
    cv2.line(bg_image, start_point, end_point, (255, 0, 0), thickness=1)
    return bg_image
if __name__ == "__main__":
image = time_warp(orientation=0)
| [
"cv2.flip",
"cv2.line",
"os.path.join",
"cv2.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.waitKey",
"cv2.namedWindow"
] | [((90, 109), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (106, 109), False, 'import cv2\n'), ((285, 341), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Time Warp Filter"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('Time Warp Filter', cv2.WINDOW_AUTOSIZE)\n", (300, 341), False, 'import cv2\n'), ((594, 632), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (602, 632), True, 'import numpy as np\n'), ((3069, 3092), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3090, 3092), False, 'import cv2\n'), ((3449, 3524), 'cv2.line', 'cv2.line', (['bg_image', 'start_point', 'end_point', 'color'], {'thickness': 'line_thickness'}), '(bg_image, start_point, end_point, color, thickness=line_thickness)\n', (3457, 3524), False, 'import cv2\n'), ((2283, 2329), 'cv2.imshow', 'cv2.imshow', (['"""Time Warp Filter"""', 'moving_wrapper'], {}), "('Time Warp Filter', moving_wrapper)\n", (2293, 2329), False, 'import cv2\n'), ((2369, 2383), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2380, 2383), False, 'import cv2\n'), ((847, 865), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (855, 865), False, 'import cv2\n'), ((2926, 2960), 'os.path.join', 'os.path.join', (['path', '"""timeWarp.jpg"""'], {}), "(path, 'timeWarp.jpg')\n", (2938, 2960), False, 'import os\n'), ((1646, 1689), 'cv2.imshow', 'cv2.imshow', (['"""Time Warp Filter"""', 'final_image'], {}), "('Time Warp Filter', final_image)\n", (1656, 1689), False, 'import cv2\n'), ((2145, 2188), 'cv2.imshow', 'cv2.imshow', (['"""Time Warp Filter"""', 'final_image'], {}), "('Time Warp Filter', final_image)\n", (2155, 2188), False, 'import cv2\n')] |
import numpy as np
# Advent of Code 2021, day 11 (dumbo octopus flashes): load the puzzle input.
with open("data/day11.txt") as f:
    data = f.read()
# small 5x5 example grid from the puzzle description
test_data_1 = '''11111
19991
19191
19991
11111
'''
# full 10x10 example grid from the puzzle description
test_data_2 = '''5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
'''
data = data.split('\n')
data.remove('')  # drop the empty entry produced by the trailing newline
print(data)
STEPS = 100  # number of simulation steps for part 1
def add_border(data) -> np.array:
    """Convert digit-string rows into an int grid padded with a -1 border.

    The one-cell rim of -1 sentinels lets the neighbour lookups in
    ``creating_flashes`` index one cell past every edge without any
    bounds checking.

    :param data: list of equal-length strings of digits, one per grid row
    :returns: np.ndarray of shape (rows + 2, cols + 2) with -1 on the rim
    """
    grid = np.array([list(map(int, line)) for line in data])
    # np.pad replaces the original hand-built border rows/columns;
    # constant mode fills the new rim with the -1 sentinel.
    return np.pad(grid, 1, constant_values=-1)
def creating_flashes(data_array,flashed, neighbors, step):
    """Run one propagation pass of the octopus flash simulation.

    For ``step == 0`` every non-border cell gains 1 energy first, then
    the energy from all cells that have flashed so far (``flashed``) is
    spread to their neighbours. For ``step > 0`` only cells pushed above
    9 by a previous pass are collected (``new_flashed``) and their energy
    is spread. Cells holding -1 are the sentinel border and are never
    modified.

    Mutates ``data_array`` in place (and returns it) and adds flashed
    coordinates to the caller-owned ``flashed`` set.
    """
    new_flashed = set()
    for x in range(len(data_array)):
        for y in range(len(data_array[0])):
            if data_array[x][y] == -1:
                # NOTE(review): this comparison has no effect -- border
                # cells are simply left untouched.
                data_array[x][y] == -1
            else:
                if step == 0:
                    # first pass of a step: every octopus gains 1 energy
                    data_array[x][y] += 1
                    if data_array[x][y] > 9:
                        flashed.add((x,y))
                        data_array[x][y] = 0
                        new_flashed.add((x,y))
                elif step > 0:
                    # later passes: only collect cells a neighbour pushed over 9
                    if data_array[x][y] > 9:
                        flashed.add((x,y))
                        data_array[x][y] = 0
                        new_flashed.add((x,y))
    if step == 0:
        # spread energy from every cell that has flashed this step so far
        for (x,y) in flashed:
            for (dx,dy) in neighbors:
                if data_array[x+dx][y+dy] == -1:
                    data_array[x+dx][y+dy] = -1
                elif (x+dx, y+dy) not in flashed:
                    data_array[x+dx][y+dy] += 1
                elif (x+dx, y+dy) not in flashed and data_array[x+dx][y+dy] > 9:
                    # NOTE(review): unreachable -- the previous elif already
                    # matches every neighbour not in `flashed`.
                    data_array[x+dx][y+dy] = 0
                    new_flashed.add((x+dx,y+dy))
                elif (x+dx, y+dy) in flashed:
                    # a neighbour that already flashed this step stays at 0
                    data_array[x+dx][y+dy] = 0
    elif step > 0:
        # spread energy only from the cells that flashed in this pass
        for (x,y) in new_flashed:
            for (dx,dy) in neighbors:
                if data_array[x+dx][y+dy] == -1:
                    data_array[x+dx][y+dy] = -1
                elif (x+dx, y+dy) not in flashed:
                    data_array[x+dx][y+dy] += 1
                elif (x+dx, y+dy) not in flashed and data_array[x+dx][y+dy] > 9:
                    # NOTE(review): unreachable -- see the matching branch above.
                    data_array[x+dx][y+dy] = 0
                    new_flashed.add((x+dx,y+dy))
                elif (x+dx, y+dy) in flashed:
                    data_array[x+dx][y+dy] = 0
    return data_array
def squids_1(data, steps):
    """Count the total number of flashes over ``steps`` steps (part 1).

    :param data: list of digit strings, one per grid row
    :param steps: number of simulation steps to run
    :returns: total flash count accumulated over all steps
    """
    data_array = add_border(data)
    # offsets for all eight neighbouring cells (diagonals included)
    neighbors = [(-1,-1),(-1,0),(-1,1), (0,-1), (1,-1), (1,0),(1,1), (0,1)]
    flashed = set()
    counting_flashes=0
    # BUG FIX: this loop previously iterated over the global STEPS,
    # silently ignoring the `steps` parameter; use the parameter instead
    # (the module-level caller passes STEPS, so behaviour there is unchanged).
    for i in range(0, steps):
        print('--------------------------------')
        print('STEP: ', i + 1)
        counter = 0
        data_array = creating_flashes(data_array,flashed, neighbors, counter)
        still_flashes = True
        # keep propagating until no cell is left above 9
        while still_flashes:
            counter += 1
            if np.count_nonzero(data_array > 9) == 0:
                still_flashes = False
            else:
                data_array = creating_flashes(data_array,flashed, neighbors, counter)
        # NOTE(review): this label uses `i` while the STEP print above uses
        # i + 1 -- the two step numbers in the output disagree by one.
        print(f'After step {i}: \n', data_array)
        counting_flashes += len(flashed)
        flashed = set()  # reset per-step flash bookkeeping
    return counting_flashes
def squids_2(data):
    """Return the first step on which every octopus flashes at once (part 2).

    :param data: list of digit strings, one per grid row
    :returns: 1-based index of the first all-flash step
    """
    data_array = add_border(data)
    # offsets for all eight neighbouring cells (diagonals included)
    neighbors = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1)]
    flashed = set()
    counting_flashes = 0
    counting_steps = 0
    # number of non-border cells; all of them at 0 means a synchronised flash
    interior_cells = (len(data_array) - 2) ** 2
    while True:
        counting_steps += 1
        print('--------------------------------')
        print('STEP: ', counting_steps)
        # first pass: bump every octopus and record the initial flashes
        data_array = creating_flashes(data_array, flashed, neighbors, 0)
        counter = 0
        # keep propagating until no cell is left above 9
        while np.count_nonzero(data_array > 9) != 0:
            counter += 1
            data_array = creating_flashes(data_array, flashed, neighbors, counter)
        print(f'After step {counting_steps}: \n', data_array)
        counting_flashes += len(flashed)
        flashed = set()
        if np.count_nonzero(data_array == 0) == interior_cells:
            return counting_steps
# Part 1: total flashes over STEPS steps; Part 2: first step where all flash.
print(squids_1(data, STEPS))
print(squids_2(data))
| [
"numpy.count_nonzero",
"numpy.array"
] | [((658, 672), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (666, 672), True, 'import numpy as np\n'), ((4513, 4546), 'numpy.count_nonzero', 'np.count_nonzero', (['(data_array == 0)'], {}), '(data_array == 0)\n', (4529, 4546), True, 'import numpy as np\n'), ((3261, 3293), 'numpy.count_nonzero', 'np.count_nonzero', (['(data_array > 9)'], {}), '(data_array > 9)\n', (3277, 3293), True, 'import numpy as np\n'), ((4194, 4226), 'numpy.count_nonzero', 'np.count_nonzero', (['(data_array > 9)'], {}), '(data_array > 9)\n', (4210, 4226), True, 'import numpy as np\n')] |
# PINN file for Wave equation
import tensorflow as tf
import numpy as np
import time
class WaveEquation:
    """Physics-informed neural network (PINN) for the 1-D wave equation.

    Builds a TensorFlow 1.x graph whose loss is the sum of:
      * a boundary loss on the spatial derivative u_x at boundary points,
      * initial-condition losses on displacement u and velocity u_t,
      * the PDE residual u_xx - u_tt at interior collocation points.
    Training runs Adam first (``train``), then refines with L-BFGS-B via
    ``tf.contrib.opt.ScipyOptimizerInterface``.
    """
    # Initialize the class
    def __init__(self, lb, rb, X_f, X_b, X_init, layers):
        """Build the network, placeholders, losses and optimizers.

        :param lb: lower bound of the space-time domain (input scaling)
        :param rb: upper (right) bound of the space-time domain
        :param X_f: interior collocation points, columns [x, t]
        :param X_b: boundary points, columns [x, t, u_x]
        :param X_init: initial-condition points, columns [x, t, u, v]
        :param layers: list of layer widths for the dense network
        """
        self.lb = lb
        self.rb = rb
        #unpack interior collocation space-time points
        self.x_int = X_f[:,0:1]
        self.t_int = X_f[:,1:2]
        #unpack boundary space-time points and displacement values
        self.x_bnd = X_b[:,0:1]
        self.t_bnd = X_b[:,1:2]
        self.u_x_bnd = X_b[:,2:3]
        #unpack point location and intitial displacement and velocity values
        self.x_init = X_init[:,0:1]
        self.t_init = X_init[:,1:2]
        self.u_init = X_init[:,2:3]
        self.v_init = X_init[:,3:4]
        # Initialize NNs
        self.layers = layers
        self.weights, self.biases = self.initialize_NN(layers)
        # tf Placeholders
        self.x_bnd_tf = tf.placeholder(tf.float32)
        self.t_bnd_tf = tf.placeholder(tf.float32)
        self.u_x_bnd_tf = tf.placeholder(tf.float32)
        self.x_init_tf = tf.placeholder(tf.float32)
        self.t_init_tf = tf.placeholder(tf.float32)
        self.u_init_tf = tf.placeholder(tf.float32)
        self.v_init_tf = tf.placeholder(tf.float32)
        self.x_int_tf = tf.placeholder(tf.float32)
        self.t_int_tf = tf.placeholder(tf.float32)
        # tf Graphs
        _, self.v_bnd_pred, self.u_x_bnd_pred = self.net_u(self.x_bnd_tf, self.t_bnd_tf)
        self.u_init_pred, self.v_init_pred, _ = self.net_u(self.x_init_tf, self.t_init_tf)
        self.u_f_pred, self.v_f_pred, _ = self.net_u(self.x_int_tf, self.t_int_tf)
        self.f_u_pred = self.net_f_u(self.x_int_tf, self.t_int_tf)
        # Loss: mean-squared mismatch on boundary u_x, initial u and v,
        # plus the mean-squared PDE residual at collocation points.
        self.loss_bnd = tf.reduce_mean(tf.square(self.u_x_bnd_tf - self.u_x_bnd_pred))
        self.loss_u_init = tf.reduce_mean(tf.square(self.u_init_tf - self.u_init_pred))
        self.loss_v_init = tf.reduce_mean(tf.square(self.v_init_tf - self.v_init_pred))
        self.loss_resid = tf.reduce_mean(tf.square(self.f_u_pred))
        self.loss = self.loss_bnd + self.loss_u_init + self.loss_v_init + self.loss_resid
        self.lbfgs_buffer = []  # loss history recorded by the L-BFGS callback
        # Optimizers
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                method = 'L-BFGS-B',
                options = {'maxiter': 50000,
                           'maxfun': 50000,
                           'maxcor': 100,
                           'maxls': 50,
                           'ftol' : 1.0 * np.finfo(float).eps})
        self.optimizer_Adam = tf.train.AdamOptimizer()
        self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
        # tf session
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                log_device_placement=True))
        init = tf.global_variables_initializer()
        self.sess.run(init)
    def initialize_NN(self, layers):
        """Create per-layer weight and bias variables.

        Weights use Xavier initialization; biases start at 0.1.
        Returns (weights, biases) as parallel lists of tf.Variable.
        """
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0,num_layers-1):
            W = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(0.1*tf.ones([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append(W)
            biases.append(b)
        return weights, biases
    def xavier_init(self, size):
        """Return a [in_dim, out_dim] weight variable drawn from a
        truncated normal with Xavier stddev sqrt(2 / (in_dim + out_dim))."""
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
    def neural_net(self, X, weights, biases):
        """Forward pass: scale X to [-1, 1] over [lb, rb], apply tanh
        hidden layers, and finish with a linear output layer."""
        num_layers = len(weights) + 1
        H = 2.0*(X - self.lb)/(self.rb - self.lb) - 1.0
        for l in range(0,num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.nn.tanh(tf.add(tf.matmul(H,W), b))
            #H = tf.nn.relu(tf.add(tf.matmul(H, W), b))**2
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b)
        return Y
    def net_u(self, x, t):
        """Return the network displacement u(x, t) together with its
        first derivatives u_t (velocity) and u_x (spatial slope)."""
        u = self.neural_net(tf.concat([x,t],1), self.weights, self.biases)
        u_t = tf.gradients(u, t)[0]
        u_x = tf.gradients(u, x)[0]
        return u, u_t, u_x
    def net_f_u(self, x, t):
        """PDE residual of the wave equation with unit wave speed:
        f = u_xx - u_tt (zero for an exact solution)."""
        u, u_t, u_x = self.net_u(x, t)
        u_xx = tf.gradients(u_x, x)[0]
        u_tt = tf.gradients(u_t, t)[0]
        f_u = u_xx - u_tt
        return f_u
    def callback(self, loss):
        """L-BFGS step callback: append the current loss to the history."""
        self.lbfgs_buffer = np.append(self.lbfgs_buffer, loss)
    def train(self, nIter):
        """Train with ``nIter`` Adam steps, then refine with L-BFGS-B.

        Prints the component losses every 10 Adam iterations and stores
        the Adam loss history in ``self.loss_adam_buff``.
        """
        tf_dict = {self.x_bnd_tf: self.x_bnd,
                   self.t_bnd_tf: self.t_bnd,
                   self.u_x_bnd_tf: self.u_x_bnd,
                   self.x_init_tf: self.x_init,
                   self.t_init_tf: self.t_init,
                   self.u_init_tf: self.u_init,
                   self.v_init_tf: self.v_init,
                   self.x_int_tf: self.x_int,
                   self.t_int_tf: self.t_int}
        start_time = time.time()
        self.loss_adam_buff = np.zeros(nIter)
        for it in range(nIter):
            self.sess.run(self.train_op_Adam, tf_dict)
            loss_value = self.sess.run(self.loss, tf_dict)
            self.loss_adam_buff[it] = loss_value
            # Print
            if it % 10 == 0:
                elapsed = time.time() - start_time
                loss_bnd_value = self.sess.run(self.loss_bnd, tf_dict)
                loss_u_init_value = self.sess.run(self.loss_u_init, tf_dict)
                loss_v_init_value = self.sess.run(self.loss_v_init, tf_dict)
                print('It: %d, Loss: %.3e, Bnd Loss: %.3e, u_init_loss: %.3e, v_init_loss, %.3e, Time: %.2f' %
                      (it, loss_value, loss_bnd_value, loss_u_init_value,
                       loss_v_init_value, elapsed))
                start_time = time.time()
        self.optimizer.minimize(self.sess,
                                feed_dict = tf_dict,
                                fetches = [self.loss],
                                loss_callback = self.callback)
    def predict(self, XT_star):
        """Evaluate displacement and velocity at query points.

        :param XT_star: array with columns [x, t]
        :returns: (u_star, v_star) network predictions at those points
        """
        X_star = XT_star[:, 0:1]
        T_star = XT_star[:, 1:2]
        tf_dict = {self.x_int_tf: X_star, self.t_int_tf: T_star}
        u_star = self.sess.run(self.u_f_pred, tf_dict)
        v_star = self.sess.run(self.v_f_pred, tf_dict)
        return u_star, v_star
    def getWeightsBiases(self):
        """Return the current numeric values of all weights and biases."""
        weights = self.sess.run(self.weights)
        biases = self.sess.run(self.biases)
        return weights, biases
| [
"numpy.sqrt",
"tensorflow.ones",
"tensorflow.placeholder",
"numpy.append",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"tensorflow.concat",
"tensorflow.gradients",
"tensorflow.matmul",
"tensorflow.square",
"numpy.finfo",
"tensorflow.ConfigProto",
"tensorflow.train.AdamOptimizer... | [((1038, 1064), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1052, 1064), True, 'import tensorflow as tf\n'), ((1090, 1116), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1104, 1116), True, 'import tensorflow as tf\n'), ((1144, 1170), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1158, 1170), True, 'import tensorflow as tf\n'), ((1197, 1223), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1211, 1223), True, 'import tensorflow as tf\n'), ((1250, 1276), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1264, 1276), True, 'import tensorflow as tf\n'), ((1303, 1329), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1317, 1329), True, 'import tensorflow as tf\n'), ((1356, 1382), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1370, 1382), True, 'import tensorflow as tf\n'), ((1408, 1434), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1422, 1434), True, 'import tensorflow as tf\n'), ((1460, 1486), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1474, 1486), True, 'import tensorflow as tf\n'), ((3090, 3114), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (3112, 3114), True, 'import tensorflow as tf\n'), ((3414, 3447), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3445, 3447), True, 'import tensorflow as tf\n'), ((4049, 4080), 'numpy.sqrt', 'np.sqrt', (['(2 / (in_dim + out_dim))'], {}), '(2 / (in_dim + out_dim))\n', (4056, 4080), True, 'import numpy as np\n'), ((5247, 5281), 'numpy.append', 'np.append', (['self.lbfgs_buffer', 'loss'], {}), '(self.lbfgs_buffer, loss)\n', (5256, 5281), True, 'import numpy as np\n'), ((5727, 
5738), 'time.time', 'time.time', ([], {}), '()\n', (5736, 5738), False, 'import time\n'), ((5770, 5785), 'numpy.zeros', 'np.zeros', (['nIter'], {}), '(nIter)\n', (5778, 5785), True, 'import numpy as np\n'), ((1918, 1964), 'tensorflow.square', 'tf.square', (['(self.u_x_bnd_tf - self.u_x_bnd_pred)'], {}), '(self.u_x_bnd_tf - self.u_x_bnd_pred)\n', (1927, 1964), True, 'import tensorflow as tf\n'), ((2009, 2053), 'tensorflow.square', 'tf.square', (['(self.u_init_tf - self.u_init_pred)'], {}), '(self.u_init_tf - self.u_init_pred)\n', (2018, 2053), True, 'import tensorflow as tf\n'), ((2118, 2162), 'tensorflow.square', 'tf.square', (['(self.v_init_tf - self.v_init_pred)'], {}), '(self.v_init_tf - self.v_init_pred)\n', (2127, 2162), True, 'import tensorflow as tf\n'), ((2206, 2230), 'tensorflow.square', 'tf.square', (['self.f_u_pred'], {}), '(self.f_u_pred)\n', (2215, 2230), True, 'import tensorflow as tf\n'), ((4107, 4167), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev'}), '([in_dim, out_dim], stddev=xavier_stddev)\n', (4126, 4167), True, 'import tensorflow as tf\n'), ((4666, 4681), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (4675, 4681), True, 'import tensorflow as tf\n'), ((4777, 4797), 'tensorflow.concat', 'tf.concat', (['[x, t]', '(1)'], {}), '([x, t], 1)\n', (4786, 4797), True, 'import tensorflow as tf\n'), ((4854, 4872), 'tensorflow.gradients', 'tf.gradients', (['u', 't'], {}), '(u, t)\n', (4866, 4872), True, 'import tensorflow as tf\n'), ((4891, 4909), 'tensorflow.gradients', 'tf.gradients', (['u', 'x'], {}), '(u, x)\n', (4903, 4909), True, 'import tensorflow as tf\n'), ((5049, 5069), 'tensorflow.gradients', 'tf.gradients', (['u_x', 'x'], {}), '(u_x, x)\n', (5061, 5069), True, 'import tensorflow as tf\n'), ((5089, 5109), 'tensorflow.gradients', 'tf.gradients', (['u_t', 't'], {}), '(u_t, t)\n', (5101, 5109), True, 'import tensorflow as tf\n'), ((3264, 3332), 'tensorflow.ConfigProto', 
'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), '(allow_soft_placement=True, log_device_placement=True)\n', (3278, 3332), True, 'import tensorflow as tf\n'), ((6654, 6665), 'time.time', 'time.time', ([], {}), '()\n', (6663, 6665), False, 'import time\n'), ((3756, 3801), 'tensorflow.ones', 'tf.ones', (['[1, layers[l + 1]]'], {'dtype': 'tf.float32'}), '([1, layers[l + 1]], dtype=tf.float32)\n', (3763, 3801), True, 'import tensorflow as tf\n'), ((4514, 4529), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (4523, 4529), True, 'import tensorflow as tf\n'), ((6077, 6088), 'time.time', 'time.time', ([], {}), '()\n', (6086, 6088), False, 'import time\n'), ((3031, 3046), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3039, 3046), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
# Compares 2 numbers if equal, designed for floats to overcome precision errors
# Compares 2 numbers if equal, designed for floats to overcome precision errors
def almost_equal(a, b, tol=0.000001):
    """Return whether ``a`` and ``b`` differ by less than ``tol`` (absolute).

    With numpy array inputs the result is an element-wise boolean array.

    :param a: first value (scalar or numpy array)
    :param b: second value (scalar or numpy array)
    :param tol: absolute tolerance; defaults to the original 1e-6
    """
    return np.abs(a - b) < tol
# Faster implementation of np.cross() for 2 vectors (3 points) returning magnitude directly
def area_triangle(a, b, c):
return area_rectangle(a, b, c, b) # d = b
# Faster implementation of np.cross() for 2 vectors (4 points) returning magnitude directly
def area_rectangle(a, b, c, d):
return (a[0] - b[0]) * (c[1] - d[1]) - (a[1] - b[1]) * (c[0] - d[0])
| [
"numpy.abs"
] | [((160, 173), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (166, 173), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.