code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import joblib
import numpy as np
import pandas as pd
import streamlit as st
# Name of this Streamlit app script (not referenced elsewhere in this module).
APP_FILE = "app.py"
# Serialized clustering model; loaded lazily by `customer_predictor` on Predict.
MODEL_JOBLIB_FILE = "model.joblib"
def main():
    """Entry point: render the app intro, the optional sample-data table,
    and the customer-segmentation predictor form."""
    # Intro copy with the project objective and a link to the source repo.
    st.markdown(
        """
# Machine Learning App 

The main objective of this app is building a customer segmentation based on credit card 
payments behavior during the last six months to define marketing strategies. 
You can find the source code for this project in the following [Github repository](https://github.com/andreshugueth/credit_card_clustering).
"""
    )
    # Author credit; unsafe_allow_html is required to render the raw <div>.
    html_temp = """
    <div style="text-align: right"> <strong> Author: </strong> <a href=https://www.linkedin.com/in/carlosbarros7/ target="_blank"><NAME></a> </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    st.markdown('## Dataset')
    # `show_data` is the module-level DataFrame produced by `load_data()`.
    if st.checkbox('Show sample data'):
        st.write(show_data)
    customer_predictor()
def customer_predictor():
    """## Customer predictor
    A user may have to input data about the customer's finances to predict which cluster he belongs to.
    """
    st.markdown("## Customer segmentation model based on credit behavior")
    # Customer features fed to the clustering model, in the order it expects.
    balance = st.number_input("Balance")
    purchases = st.number_input("Purchases")
    cash_advance = st.number_input("Cash Advance")
    credit_limit = st.number_input("Credit Limit")
    payments = st.number_input("Payments")
    prediction = 0
    if st.button("Predict"):
        # Load the model only when a prediction is actually requested.
        model = joblib.load(MODEL_JOBLIB_FILE)
        features = [balance, purchases, cash_advance, credit_limit, payments]
        final_features = [np.array(features)]
        prediction = model.predict(final_features)
        st.balloons()
        st.success(f"The client belongs to the cluster: {prediction[0]:.0f}")
        # Cluster descriptions are rendered only once a prediction exists:
        # outside this branch `prediction` is still the int sentinel 0 and
        # `prediction[0]` would raise a TypeError on every Streamlit rerun.
        if prediction[0] == 0:
            st.markdown("""
        These kinds of customers pay a minimum amount in advance and their payment is proportional to the 
        movement of their purchases, this means that they are good customers **paying the debts** :hand: they incur 
        with their credit cards. 
        """)
        if prediction[0] == 1:
            st.markdown("""
        In this group are presented the customers who pay the **most in advance before** :ok_hand: the loan starts with 
        a balanced balance statement because their purchases are minimal compared to the other groups, 
        also it is the **second-best paying**. :hand:
        """)
        if prediction[0] == 2:
            st.markdown("""
        Customers in this cluster pay the minimum amount in advance, however, it is the **group that buys the most**:gift: :sunglasses:, 
        and it is also the **group that pays the most** :moneybag:. In other words, these types of customers are quite 
        active regarding the number of purchases they make with their credit cards. 
        """)
        if prediction[0] == 3:
            st.markdown("""These clients are the ones with the highest balance status, in addition to that, 
        they are the second group that pays the most in advance before starting their credit. 
        However, they are the customers who make the **least purchases** :sleepy: and following the same idea, 
        they are the seconds when it comes to making payments on the debt with their credit card. This 
        makes sense since they have an amount of the loan provided in advance. It can be concluded that 
        they are **conservative** and **meticulous** customers when buying. :expressionless:""")
        if prediction[0] == 4:
            st.markdown("""
        This group of customers has **low-frequency usage** :sleeping: of their credit cards since it is the second group that 
        purchases the least, in addition to that, they are customers who pay well in proportion to 
        their purchases. As for the advance payment before starting the loan, it is minimal compared to the other groups. 
        """)
@st.cache
def load_data():
    """Load the clustering sample data from `final_data.csv`, drop the CSV
    index column ('Unnamed: 0'), and return it with columns sorted
    alphabetically."""
    frame = pd.read_csv('final_data.csv')
    frame = frame.drop(['Unnamed: 0'], axis=1)
    return frame.sort_index(axis=1)
# Load the sample dataset once at import time; `main` displays it on demand.
show_data = load_data()
if __name__ == "__main__":
    main()
| [
"streamlit.checkbox",
"streamlit.markdown",
"pandas.read_csv",
"streamlit.number_input",
"streamlit.balloons",
"streamlit.button",
"streamlit.write",
"numpy.array",
"streamlit.success",
"joblib.load"
] | [((229, 592), 'streamlit.markdown', 'st.markdown', (['"""\n# Machine Learning App \n\nThe main objective of this app is building a customer segmentation based on credit card \npayments behavior during the last six months to define marketing strategies. \nYou can find the source code for this project in the following [Github repository](https://github.com/andreshugueth/credit_card_clustering).\n"""'], {}), '(\n """\n# Machine Learning App \n\nThe main objective of this app is building a customer segmentation based on credit card \npayments behavior during the last six months to define marketing strategies. \nYou can find the source code for this project in the following [Github repository](https://github.com/andreshugueth/credit_card_clustering).\n"""\n )\n', (240, 592), True, 'import streamlit as st\n'), ((779, 825), 'streamlit.markdown', 'st.markdown', (['html_temp'], {'unsafe_allow_html': '(True)'}), '(html_temp, unsafe_allow_html=True)\n', (790, 825), True, 'import streamlit as st\n'), ((831, 856), 'streamlit.markdown', 'st.markdown', (['"""## Dataset"""'], {}), "('## Dataset')\n", (842, 856), True, 'import streamlit as st\n'), ((864, 895), 'streamlit.checkbox', 'st.checkbox', (['"""Show sample data"""'], {}), "('Show sample data')\n", (875, 895), True, 'import streamlit as st\n'), ((1126, 1196), 'streamlit.markdown', 'st.markdown', (['"""## Customer segmentation model based on credit behavior"""'], {}), "('## Customer segmentation model based on credit behavior')\n", (1137, 1196), True, 'import streamlit as st\n'), ((1212, 1238), 'streamlit.number_input', 'st.number_input', (['"""Balance"""'], {}), "('Balance')\n", (1227, 1238), True, 'import streamlit as st\n'), ((1255, 1283), 'streamlit.number_input', 'st.number_input', (['"""Purchases"""'], {}), "('Purchases')\n", (1270, 1283), True, 'import streamlit as st\n'), ((1303, 1334), 'streamlit.number_input', 'st.number_input', (['"""Cash Advance"""'], {}), "('Cash Advance')\n", (1318, 1334), True, 'import 
streamlit as st\n'), ((1354, 1385), 'streamlit.number_input', 'st.number_input', (['"""Credit Limit"""'], {}), "('Credit Limit')\n", (1369, 1385), True, 'import streamlit as st\n'), ((1401, 1428), 'streamlit.number_input', 'st.number_input', (['"""Payments"""'], {}), "('Payments')\n", (1416, 1428), True, 'import streamlit as st\n'), ((1456, 1476), 'streamlit.button', 'st.button', (['"""Predict"""'], {}), "('Predict')\n", (1465, 1476), True, 'import streamlit as st\n'), ((4361, 4390), 'pandas.read_csv', 'pd.read_csv', (['"""final_data.csv"""'], {}), "('final_data.csv')\n", (4372, 4390), True, 'import pandas as pd\n'), ((905, 924), 'streamlit.write', 'st.write', (['show_data'], {}), '(show_data)\n', (913, 924), True, 'import streamlit as st\n'), ((1494, 1524), 'joblib.load', 'joblib.load', (['MODEL_JOBLIB_FILE'], {}), '(MODEL_JOBLIB_FILE)\n', (1505, 1524), False, 'import joblib\n'), ((1708, 1721), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (1719, 1721), True, 'import streamlit as st\n'), ((1730, 1799), 'streamlit.success', 'st.success', (['f"""The client belongs to the cluster: {prediction[0]:.0f}"""'], {}), "(f'The client belongs to the cluster: {prediction[0]:.0f}')\n", (1740, 1799), True, 'import streamlit as st\n'), ((1629, 1647), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1637, 1647), True, 'import numpy as np\n'), ((1844, 2205), 'streamlit.markdown', 'st.markdown', (['"""\n These kinds of customers pay a minimum amount in advance and their payment is proportional to the \n movement of their purchases, this means that they are good customers **paying the debts** :hand: they incur \n with their credit cards. \n """'], {}), '(\n """\n These kinds of customers pay a minimum amount in advance and their payment is proportional to the \n movement of their purchases, this means that they are good customers **paying the debts** :hand: they incur \n with their credit cards. 
\n """\n )\n', (1855, 2205), True, 'import streamlit as st\n'), ((2240, 2622), 'streamlit.markdown', 'st.markdown', (['"""\n In this group are presented the customers who pay the **most in advance before** :ok_hand: the loan starts with \n a balanced balance statement because their purchases are minimal compared to the other groups, \n also it is the **second-best paying**. :hand:\n """'], {}), '(\n """\n In this group are presented the customers who pay the **most in advance before** :ok_hand: the loan starts with \n a balanced balance statement because their purchases are minimal compared to the other groups, \n also it is the **second-best paying**. :hand:\n """\n )\n', (2251, 2622), True, 'import streamlit as st\n'), ((2657, 3103), 'streamlit.markdown', 'st.markdown', (['"""\n Customers in this cluster pay the minimum amount in advance, however, it is the **group that buys the most**:gift: :sunglasses:, \n and it is also the **group that pays the most** :moneybag:. In other words, these types of customers are quite \n active regarding the number of purchases they make with their credit cards. \n """'], {}), '(\n """\n Customers in this cluster pay the minimum amount in advance, however, it is the **group that buys the most**:gift: :sunglasses:, \n and it is also the **group that pays the most** :moneybag:. In other words, these types of customers are quite \n active regarding the number of purchases they make with their credit cards. \n """\n )\n', (2668, 3103), True, 'import streamlit as st\n'), ((3138, 3841), 'streamlit.markdown', 'st.markdown', (['"""These clients are the ones with the highest balance status, in addition to that, \n they are the second group that pays the most in advance before starting their credit. \n However, they are the customers who make the **least purchases** :sleepy: and following the same idea, \n they are the seconds when it comes to making payments on the debt with their credit card. 
This \n makes sense since they have an amount of the loan provided in advance. It can be concluded that \n they are **conservative** and **meticulous** customers when buying. :expressionless:"""'], {}), '(\n """These clients are the ones with the highest balance status, in addition to that, \n they are the second group that pays the most in advance before starting their credit. \n However, they are the customers who make the **least purchases** :sleepy: and following the same idea, \n they are the seconds when it comes to making payments on the debt with their credit card. This \n makes sense since they have an amount of the loan provided in advance. It can be concluded that \n they are **conservative** and **meticulous** customers when buying. :expressionless:"""\n )\n', (3149, 3841), True, 'import streamlit as st\n'), ((3876, 4330), 'streamlit.markdown', 'st.markdown', (['"""\n This group of customers has **low-frequency usage** :sleeping: of their credit cards since it is the second group that \n purchases the least, in addition to that, they are customers who pay well in proportion to \n their purchases. As for the advance payment before starting the loan, it is minimal compared to the other groups. \n """'], {}), '(\n """\n This group of customers has **low-frequency usage** :sleeping: of their credit cards since it is the second group that \n purchases the least, in addition to that, they are customers who pay well in proportion to \n their purchases. As for the advance payment before starting the loan, it is minimal compared to the other groups. \n """\n )\n', (3887, 4330), True, 'import streamlit as st\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import scipy
class PlaceCells(object):
    """Population of simulated place cells with centers tiled randomly over a
    2D box environment, built on TensorFlow tensors.

    Expects `options` to provide: Np (number of place cells), place_cell_rf
    (receptive-field width sigma), surround_scale, box_width, box_height,
    periodic (bool), and DoG (bool, difference-of-Gaussians tuning).
    """

    def __init__(self, options):
        self.Np = options.Np
        self.sigma = options.place_cell_rf
        self.surround_scale = options.surround_scale
        self.box_width = options.box_width
        self.box_height = options.box_height
        self.is_periodic = options.periodic
        self.DoG = options.DoG

        # Randomly tile place cell centers across environment.
        # Seed is fixed so the same cell layout is reproduced on every run.
        tf.random.set_seed(0)
        usx = tf.random.uniform((self.Np,), -self.box_width/2, self.box_width/2, dtype=tf.float64)
        usy = tf.random.uniform((self.Np,), -self.box_height/2, self.box_height/2, dtype=tf.float64)
        # us: [Np, 2] tensor of (x, y) cell centers.
        self.us = tf.stack([usx, usy], axis=-1)

        # # Grid place cell centers
        # us = np.mgrid[:24,:24]/24 *self.box_width - self.box_width/2
        # self.us = tf.transpose(tf.reshape(us, (2,-1)))

    def get_activation(self, pos):
        '''
        Get place cell activations for a given position.

        Args:
            pos: 2d position of shape [batch_size, sequence_length, 2].

        Returns:
            outputs: Place cell activations with shape [batch_size, sequence_length, Np].
        '''
        # Per-axis distance from every position to every cell center:
        # shape [batch, seq, Np, 2].
        d = tf.abs(pos[:, :, tf.newaxis, :] - self.us[tf.newaxis, tf.newaxis, ...])

        if self.is_periodic:
            # Wrap distances around the box edges (periodic/toroidal boundary).
            dx = tf.gather(d, 0, axis=-1)
            dy = tf.gather(d, 1, axis=-1)
            dx = tf.minimum(dx, self.box_width - dx)
            dy = tf.minimum(dy, self.box_height - dy)
            d = tf.stack([dx,dy], axis=-1)

        # Squared Euclidean distance to each center: [batch, seq, Np].
        norm2 = tf.reduce_sum(d**2, axis=-1)

        # Normalize place cell outputs with prefactor alpha=1/2/np.pi/self.sigma**2,
        # or, simply normalize with softmax, which yields same normalization on
        # average and seems to speed up training.
        outputs = tf.nn.softmax(-norm2/(2*self.sigma**2))

        if self.DoG:
            # Difference-of-Gaussians: subtract a wider surround Gaussian.
            # Again, normalize with prefactor
            # beta=1/2/np.pi/self.sigma**2/self.surround_scale, or use softmax.
            outputs -= tf.nn.softmax(-norm2/(2*self.surround_scale*self.sigma**2))

            # Shift and scale outputs so that they lie in [0,1].
            outputs += tf.abs(tf.reduce_min(outputs, axis=-1, keepdims=True))
            outputs /= tf.reduce_sum(outputs, axis=-1, keepdims=True)
        return outputs

    def get_nearest_cell_pos(self, activation, k=3):
        '''
        Decode position using centers of k maximally active place cells.

        Args:
            activation: Place cell activations of shape [batch_size, sequence_length, Np].
            k: Number of maximally active place cells with which to decode position.

        Returns:
            pred_pos: Predicted 2d position with shape [batch_size, sequence_length, 2].
        '''
        # Mean of the top-k most active cells' centers (unweighted average).
        _, idxs = tf.math.top_k(activation, k=k)
        pred_pos = tf.reduce_mean(tf.gather(self.us, idxs), axis=-2)
        return pred_pos

    def grid_pc(self, pc_outputs, res=32):
        ''' Interpolate place cell outputs onto a grid'''
        # NOTE(review): relies on scipy.interpolate being importable; a bare
        # `import scipy` does not guarantee the submodule is loaded — confirm
        # the runtime imports it (e.g. via another module) or add the import.
        coordsx = np.linspace(-self.box_width/2, self.box_width/2, res)
        coordsy = np.linspace(-self.box_height/2, self.box_height/2, res)
        grid_x, grid_y = np.meshgrid(coordsx, coordsy)
        grid = np.stack([grid_x.ravel(), grid_y.ravel()]).T

        # Convert to numpy
        us_np = self.us.numpy()
        pc_outputs = pc_outputs.numpy().reshape(-1, self.Np)

        T = pc_outputs.shape[0] #T vs transpose? What is T? (dim's?)
        pc = np.zeros([T, res, res])
        for i in range(len(pc_outputs)):
            # Scattered interpolation from the random cell centers onto the grid.
            gridval = scipy.interpolate.griddata(us_np, pc_outputs[i], grid)
            pc[i] = gridval.reshape([res, res])
        return pc

    def compute_covariance(self, res=30):
        '''Compute spatial covariance matrix of place cell outputs'''
        # res x res lattice of sample positions spanning the box.
        pos = np.array(np.meshgrid(np.linspace(-self.box_width/2, self.box_width/2, res),
                                   np.linspace(-self.box_height/2, self.box_height/2, res))).T

        pos = pos.astype(np.float32)

        #Maybe specify dimensions here again?
        pc_outputs = self.get_activation(pos)
        pc_outputs = tf.reshape(pc_outputs, (-1, self.Np))

        # Gram matrix over all sample positions, reshaped to a 4D covariance.
        C = pc_outputs@tf.transpose(pc_outputs)
        Csquare = tf.reshape(C, (res,res,res,res))

        # Average covariance over all reference positions by shifting each
        # slice so its reference point aligns at the origin, then re-center.
        Cmean = np.zeros([res,res])
        for i in range(res):
            for j in range(res):
                Cmean += np.roll(np.roll(Csquare[i,j], -i, axis=0), -j, axis=1)
        Cmean = np.roll(np.roll(Cmean, res//2, axis=0), res//2, axis=1)

        return Cmean
"tensorflow.random.uniform",
"tensorflow.reduce_min",
"numpy.roll",
"tensorflow.random.set_seed",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"scipy.interpolate.griddata",
"numpy.linspace",
"numpy.zeros",
"tensorflow.gather",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.ma... | [((509, 530), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (527, 530), True, 'import tensorflow as tf\n'), ((545, 637), 'tensorflow.random.uniform', 'tf.random.uniform', (['(self.Np,)', '(-self.box_width / 2)', '(self.box_width / 2)'], {'dtype': 'tf.float64'}), '((self.Np,), -self.box_width / 2, self.box_width / 2,\n dtype=tf.float64)\n', (562, 637), True, 'import tensorflow as tf\n'), ((644, 738), 'tensorflow.random.uniform', 'tf.random.uniform', (['(self.Np,)', '(-self.box_height / 2)', '(self.box_height / 2)'], {'dtype': 'tf.float64'}), '((self.Np,), -self.box_height / 2, self.box_height / 2,\n dtype=tf.float64)\n', (661, 738), True, 'import tensorflow as tf\n'), ((749, 778), 'tensorflow.stack', 'tf.stack', (['[usx, usy]'], {'axis': '(-1)'}), '([usx, usy], axis=-1)\n', (757, 778), True, 'import tensorflow as tf\n'), ((1277, 1348), 'tensorflow.abs', 'tf.abs', (['(pos[:, :, tf.newaxis, :] - self.us[tf.newaxis, tf.newaxis, ...])'], {}), '(pos[:, :, tf.newaxis, :] - self.us[tf.newaxis, tf.newaxis, ...])\n', (1283, 1348), True, 'import tensorflow as tf\n'), ((1631, 1661), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(d ** 2)'], {'axis': '(-1)'}), '(d ** 2, axis=-1)\n', (1644, 1661), True, 'import tensorflow as tf\n'), ((1895, 1940), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(-norm2 / (2 * self.sigma ** 2))'], {}), '(-norm2 / (2 * self.sigma ** 2))\n', (1908, 1940), True, 'import tensorflow as tf\n'), ((2885, 2915), 'tensorflow.math.top_k', 'tf.math.top_k', (['activation'], {'k': 'k'}), '(activation, k=k)\n', (2898, 2915), True, 'import tensorflow as tf\n'), ((3138, 3195), 'numpy.linspace', 'np.linspace', (['(-self.box_width / 2)', '(self.box_width / 2)', 'res'], {}), '(-self.box_width / 2, self.box_width / 2, res)\n', (3149, 3195), True, 'import numpy as np\n'), ((3210, 3269), 'numpy.linspace', 'np.linspace', (['(-self.box_height / 2)', '(self.box_height / 2)', 'res'], {}), '(-self.box_height / 2, self.box_height 
/ 2, res)\n', (3221, 3269), True, 'import numpy as np\n'), ((3291, 3320), 'numpy.meshgrid', 'np.meshgrid', (['coordsx', 'coordsy'], {}), '(coordsx, coordsy)\n', (3302, 3320), True, 'import numpy as np\n'), ((3593, 3616), 'numpy.zeros', 'np.zeros', (['[T, res, res]'], {}), '([T, res, res])\n', (3601, 3616), True, 'import numpy as np\n'), ((4250, 4287), 'tensorflow.reshape', 'tf.reshape', (['pc_outputs', '(-1, self.Np)'], {}), '(pc_outputs, (-1, self.Np))\n', (4260, 4287), True, 'import tensorflow as tf\n'), ((4355, 4390), 'tensorflow.reshape', 'tf.reshape', (['C', '(res, res, res, res)'], {}), '(C, (res, res, res, res))\n', (4365, 4390), True, 'import tensorflow as tf\n'), ((4405, 4425), 'numpy.zeros', 'np.zeros', (['[res, res]'], {}), '([res, res])\n', (4413, 4425), True, 'import numpy as np\n'), ((1396, 1420), 'tensorflow.gather', 'tf.gather', (['d', '(0)'], {'axis': '(-1)'}), '(d, 0, axis=-1)\n', (1405, 1420), True, 'import tensorflow as tf\n'), ((1438, 1462), 'tensorflow.gather', 'tf.gather', (['d', '(1)'], {'axis': '(-1)'}), '(d, 1, axis=-1)\n', (1447, 1462), True, 'import tensorflow as tf\n'), ((1480, 1515), 'tensorflow.minimum', 'tf.minimum', (['dx', '(self.box_width - dx)'], {}), '(dx, self.box_width - dx)\n', (1490, 1515), True, 'import tensorflow as tf\n'), ((1534, 1570), 'tensorflow.minimum', 'tf.minimum', (['dy', '(self.box_height - dy)'], {}), '(dy, self.box_height - dy)\n', (1544, 1570), True, 'import tensorflow as tf\n'), ((1587, 1614), 'tensorflow.stack', 'tf.stack', (['[dx, dy]'], {'axis': '(-1)'}), '([dx, dy], axis=-1)\n', (1595, 1614), True, 'import tensorflow as tf\n'), ((2107, 2174), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(-norm2 / (2 * self.surround_scale * self.sigma ** 2))'], {}), '(-norm2 / (2 * self.surround_scale * self.sigma ** 2))\n', (2120, 2174), True, 'import tensorflow as tf\n'), ((2334, 2380), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['outputs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(outputs, axis=-1, keepdims=True)\n', 
(2347, 2380), True, 'import tensorflow as tf\n'), ((2950, 2974), 'tensorflow.gather', 'tf.gather', (['self.us', 'idxs'], {}), '(self.us, idxs)\n', (2959, 2974), True, 'import tensorflow as tf\n'), ((3680, 3734), 'scipy.interpolate.griddata', 'scipy.interpolate.griddata', (['us_np', 'pc_outputs[i]', 'grid'], {}), '(us_np, pc_outputs[i], grid)\n', (3706, 3734), False, 'import scipy\n'), ((4312, 4336), 'tensorflow.transpose', 'tf.transpose', (['pc_outputs'], {}), '(pc_outputs)\n', (4324, 4336), True, 'import tensorflow as tf\n'), ((4608, 4640), 'numpy.roll', 'np.roll', (['Cmean', '(res // 2)'], {'axis': '(0)'}), '(Cmean, res // 2, axis=0)\n', (4615, 4640), True, 'import numpy as np\n'), ((2263, 2309), 'tensorflow.reduce_min', 'tf.reduce_min', (['outputs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(outputs, axis=-1, keepdims=True)\n', (2276, 2309), True, 'import tensorflow as tf\n'), ((3958, 4015), 'numpy.linspace', 'np.linspace', (['(-self.box_width / 2)', '(self.box_width / 2)', 'res'], {}), '(-self.box_width / 2, self.box_width / 2, res)\n', (3969, 4015), True, 'import numpy as np\n'), ((4038, 4097), 'numpy.linspace', 'np.linspace', (['(-self.box_height / 2)', '(self.box_height / 2)', 'res'], {}), '(-self.box_height / 2, self.box_height / 2, res)\n', (4049, 4097), True, 'import numpy as np\n'), ((4520, 4554), 'numpy.roll', 'np.roll', (['Csquare[i, j]', '(-i)'], {'axis': '(0)'}), '(Csquare[i, j], -i, axis=0)\n', (4527, 4554), True, 'import numpy as np\n')] |
import numpy as np
#--------------------------------------------------------------------------
# nx = 29
# ny = 44
# R = (30.0 / (1000.0 * 3600.0)) # [m/s]
# da = 900 # [m2]
# dur = (75 * 60) # [sec] (75 minutes or 4500 sec)
# A = (nx * ny * da)
# vol_tot = (A * R * dur)
# print( vol_tot )
#--------------------------------------------------------------------------
# dtype = 'float32'
# nx = 29
# ny = 44
# n_values = nx * ny
# grid_file = 'June_20_67_rain_uniform_30.rtg'
# grid_unit = open( grid_file, 'rb' )
# grid = np.fromfile( grid_unit, count=n_values, dtype=dtype )
# grid.byteswap( True )
# grid = grid.reshape( ny, nx )
# print('min(grid) =', grid.min())
# print('max(grid) =', grid.max())
# # Try to read again from grid_unit
# grid = np.fromfile( grid_unit, count=n_values, dtype=dtype )
# print( grid.dtype )
# print( grid.size )
# print( grid )
# # print('min(grid) =', grid.min())
# # print('max(grid) =', grid.max())
# grid_unit.close()
#--------------------------------------------------------------------------
def create_rainfall_grid():
    """Write a uniform 60 [mmph] rainfall grid to an RTG binary file.

    Creates a (ny=44, nx=29) float32 grid, byte-swaps it to the non-native
    byte order used by the RTG convention here, and writes it to
    'June_20_67_rain_uniform_60.rtg' in the current directory.
    """
    nx = 29
    ny = 44
    grid = np.zeros((ny, nx), dtype='float32') + 60.0  # [mmph]
    grid.byteswap(inplace=True)  # swap bytes in place before writing
    new_rtg_file = 'June_20_67_rain_uniform_60.rtg'
    # Context manager guarantees the file is closed even if tofile() raises.
    with open(new_rtg_file, 'wb') as rtg_unit:
        grid.tofile(rtg_unit)
# create_rainfall_grid()
#--------------------------------------------------------------------------
def create_rainfall_grid_stack():
    """Write a stack of 75 uniform 30 [mmph] rainfall grids to an RTS file.

    Each grid is (ny=44, nx=29) float32, byte-swapped before writing; 75
    identical grids are written back-to-back (one per minute for 75 minutes)
    to 'June_20_67_rain_uniform_30_75min.rts'.
    """
    nx = 29
    ny = 44
    grid = np.zeros((ny, nx), dtype='float32') + 30.0  # [mmph]
    grid.byteswap(inplace=True)  # swap bytes in place before writing
    new_rts_file = 'June_20_67_rain_uniform_30_75min.rts'
    # Context manager guarantees the file is closed even if tofile() raises.
    with open(new_rts_file, 'wb') as rts_unit:
        for k in range(75):  # 75 grids for 75 minutes
            grid.tofile(rts_unit)
# create_rainfall_grid_stack()
#--------------------------------------------------------------------------
def read_rainfall_grid():
    """Read the uniform rainfall RTG grid back into a (44, 29) array.

    Reads 44*29 float32 values from 'June_20_67_rain_uniform_60.rtg',
    byte-swaps them back to native order, reshapes to (ny, nx), prints the
    min/max, and returns the grid.

    Returns
    -------
    grid : numpy.ndarray
        (44, 29) float32 rainfall grid.
    """
    dtype = 'float32'
    nx = 29
    ny = 44
    n_values = nx * ny
    grid_file = 'June_20_67_rain_uniform_60.rtg'
    # Context manager guarantees the file is closed even if fromfile() raises.
    with open(grid_file, 'rb') as grid_unit:
        grid = np.fromfile(grid_unit, count=n_values, dtype=dtype)
    grid.byteswap(inplace=True)  # undo the byte swap applied when writing
    grid = grid.reshape(ny, nx)
    print('min(grid) =', grid.min())
    print('max(grid) =', grid.max())
    return grid
# read_rainfall_grid()
#--------------------------------------------------------------------------
| [
"numpy.fromfile",
"numpy.zeros"
] | [((2214, 2265), 'numpy.fromfile', 'np.fromfile', (['grid_unit'], {'count': 'n_values', 'dtype': 'dtype'}), '(grid_unit, count=n_values, dtype=dtype)\n', (2225, 2265), True, 'import numpy as np\n'), ((1140, 1175), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': '"""float32"""'}), "((ny, nx), dtype='float32')\n", (1148, 1175), True, 'import numpy as np\n'), ((1583, 1618), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': '"""float32"""'}), "((ny, nx), dtype='float32')\n", (1591, 1618), True, 'import numpy as np\n')] |
"""
Classes for lighting in renderer
Author: <NAME>
"""
import numpy as np
from autolab_core import RigidTransform
class Color(object):
    """Named RGB color constants.

    Each constant is a length-3 integer numpy array with channel values
    in [0, 255], ordered (red, green, blue).
    """
    WHITE = np.array((255, 255, 255))
    BLACK = np.array((0, 0, 0))
    RED = np.array((255, 0, 0))
    GREEN = np.array((0, 255, 0))
    BLUE = np.array((0, 0, 255))
class MaterialProperties(object):
    """ Struct to encapsulate material properties for
    OpenGL rendering.

    Attributes
    ----------
    color : :obj:`numpy.ndarray`
        3-array of integers between 0 and 255
    """
    def __init__(self, color=Color.WHITE,
                 ambient=0.2,
                 diffuse=0.8,
                 specular=0,
                 shininess=0):
        # Coerce the color to a uint8 RGB triple; coefficients stored as given.
        self.color = np.array(color).astype(np.uint8)
        self.ambient = ambient
        self.diffuse = diffuse
        self.specular = specular
        self.shininess = shininess

    def __str__(self):
        # One "Name: value\n" line per property, in declaration order.
        parts = ['Color: %s\n' % (str(self.color)),
                 'Ambient: %f\n' % (self.ambient),
                 'Diffuse: %f\n' % (self.diffuse),
                 'Specular: %f\n' % (self.specular),
                 'Shininess: %f\n' % (self.shininess)]
        return ''.join(parts)

    @property
    def arr(self):
        """ Returns the material properties as a contiguous numpy array.

        Layout: [r, g, b, ambient x3, 1, diffuse x3, 1, specular x3, 1,
        shininess], as float64.
        """
        ones3 = np.ones(3)
        stacked = np.hstack([self.color,
                              self.ambient * ones3, 1,
                              self.diffuse * ones3, 1,
                              self.specular * ones3, 1,
                              self.shininess])
        return stacked.astype(np.float64)
class LightingProperties(object):
    """ Struct to encapsulate lighting properties for
    OpenGL rendering.

    Attributes
    ----------
    ambient, diffuse, specular : float
        Lighting coefficients.
    T_light_camera : :obj:`RigidTransform`
        Pose of the light in the camera frame.
    cutoff : float
        Spotlight cutoff angle in degrees.
    T_light_obj : :obj:`RigidTransform` or None
        Light pose in the object frame; set by `set_pose`.
    """
    def __init__(self, ambient=0,
                 diffuse=1,
                 specular=1,
                 T_light_camera=None,
                 cutoff=180.0):
        """Initialize lighting properties.

        `T_light_camera` defaults to the identity transform (light
        co-located with the camera). The default is constructed here per
        instance rather than in the signature: a mutable default argument
        would be created once at class-definition time and shared by every
        LightingProperties instance.
        """
        self.ambient = ambient
        self.diffuse = diffuse
        self.specular = specular
        if T_light_camera is None:
            T_light_camera = RigidTransform(rotation=np.eye(3),
                                            translation=np.zeros(3),
                                            from_frame='light',
                                            to_frame='camera')
        self.T_light_camera = T_light_camera
        self.cutoff = cutoff
        # Set lazily by set_pose(); `arr` raises until then.
        self.T_light_obj = None

    def __str__(self):
        s = ''
        s += 'Ambient: %f\n' %(self.ambient)
        s += 'Diffuse: %f\n' %(self.diffuse)
        s += 'Specular: %f\n' %(self.specular)
        s += 'T_light_camera: %s\n' %(str(self.T_light_camera))
        s += 'Cutoff: %f\n' %(self.cutoff)
        return s

    def set_pose(self, T_obj_camera):
        """Cache the light pose in the object frame for the given object pose."""
        self.T_light_obj = T_obj_camera.inverse() * self.T_light_camera.as_frames('light', T_obj_camera.to_frame)

    @property
    def arr(self):
        """ Returns the lighting properties as a contiguous numpy array. """
        if self.T_light_obj is None:
            raise ValueError('Need to set pose relative to object!')
        return np.r_[self.ambient * np.ones(3), 1,
                     self.diffuse * np.ones(3), 1,
                     self.specular * np.ones(3), 1,
                     self.T_light_obj.translation,
                     self.T_light_obj.z_axis,
                     self.cutoff].astype(np.float64)
| [
"numpy.array",
"numpy.eye",
"numpy.ones",
"numpy.zeros"
] | [((150, 175), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (158, 175), True, 'import numpy as np\n'), ((188, 207), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (196, 207), True, 'import numpy as np\n'), ((220, 241), 'numpy.array', 'np.array', (['[255, 0, 0]'], {}), '([255, 0, 0])\n', (228, 241), True, 'import numpy as np\n'), ((254, 275), 'numpy.array', 'np.array', (['[0, 255, 0]'], {}), '([0, 255, 0])\n', (262, 275), True, 'import numpy as np\n'), ((288, 309), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (296, 309), True, 'import numpy as np\n'), ((743, 758), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (751, 758), True, 'import numpy as np\n'), ((1814, 1823), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1820, 1823), True, 'import numpy as np\n'), ((1884, 1895), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1892, 1895), True, 'import numpy as np\n'), ((1374, 1384), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1381, 1384), True, 'import numpy as np\n'), ((1425, 1435), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1432, 1435), True, 'import numpy as np\n'), ((1477, 1487), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1484, 1487), True, 'import numpy as np\n'), ((2970, 2980), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2977, 2980), True, 'import numpy as np\n'), ((3021, 3031), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3028, 3031), True, 'import numpy as np\n'), ((3073, 3083), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3080, 3083), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Common functions to spatialy normalize the data.
"""
# Imports
import os
import nibabel
import numpy as np
from .utils import check_version, check_command, execute_command
def scale(imfile, scaledfile, scale, check_pkg_version=False):
    """ Scale the MRI image.

    .. note:: This function is based on FSL.

    Parameters
    ----------
    imfile: str
        the input image.
    scaledfile: str
        the path to the scaled input image.
    scale: int
        the scale factor in all directions.
    check_pkg_version: bool, default False
        optionally check the package version using dpkg.

    Returns
    -------
    scaledfile, trffile: str
        the generated files.
    """
    check_version("fsl", check_pkg_version)
    check_command("flirt")
    # Derive the transform file from the basename only: splitting the whole
    # path on '.' would truncate at a dotted directory name
    # (e.g. '/home/user.name/out.nii.gz' -> '/home/user.txt').
    dirname, basename = os.path.split(scaledfile)
    trffile = os.path.join(dirname, basename.split(".")[0] + ".txt")
    cmd = ["flirt", "-in", imfile, "-ref", imfile, "-out",
           scaledfile, "-applyisoxfm", str(scale), "-omat", trffile]
    execute_command(cmd)
    return scaledfile, trffile
def bet2(imfile, brainfile, frac=0.5, cleanup=True, check_pkg_version=False):
    """ Skull stripped the MRI image.

    .. note:: This function is based on FSL.

    Parameters
    ----------
    imfile: str
        the input image.
    brainfile: str
        the path to the brain image file.
    frac: float, default 0.5
        fractional intensity threshold (0->1);smaller values give larger brain
        outline estimates
    cleanup: bool, default True
        optionnally add bias field & neck cleanup.
    check_pkg_version: bool, default False
        optionally check the package version using dpkg.

    Returns
    -------
    brainfile, maskfile: str
        the generated files.
    """
    check_version("fsl", check_pkg_version)
    check_command("bet")
    # Derive the mask file from the basename only: splitting the whole path
    # on '.' would truncate at a dotted directory name.
    dirname, basename = os.path.split(brainfile)
    maskfile = os.path.join(dirname, basename.split(".")[0] + "_mask.nii.gz")
    cmd = ["bet", imfile, brainfile, "-f", str(frac), "-R", "-m"]
    if cleanup:
        cmd.append("-B")
    execute_command(cmd)
    return brainfile, maskfile
def reorient2std(imfile, stdfile, check_pkg_version=False):
    """ Reorient the MRI image so that it matches the approximate
    orientation of the standard (MNI152) template images.

    .. note:: This function is based on FSL.

    Parameters
    ----------
    imfile: str
        the input image.
    stdfile: str
        the reoriented image file.
    check_pkg_version: bool, default False
        optionally check the package version using dpkg.

    Returns
    -------
    stdfile: str
        the generated file.
    """
    check_version("fsl", check_pkg_version)
    check_command("fslreorient2std")
    # Single FSL call: input image in, reoriented image out.
    execute_command(["fslreorient2std", imfile, stdfile])
    return stdfile
def biasfield(imfile, bfcfile, maskfile=None, nb_iterations=50,
              convergence_threshold=0.001, bspline_grid=(1, 1, 1),
              shrink_factor=1, bspline_order=3,
              histogram_sharpening=(0.15, 0.01, 200), check_pkg_version=False):
    """ Perform MRI bias field correction using N4 algorithm.
    .. note:: This function is based on ANTS.
    Parameters
    ----------
    imfile: str
        the input image.
    bfcfile: str
        the bias field corrected file.
    maskfile: str, default None
        the brain mask image.
    nb_iterations: int, default 50
        Maximum number of iterations at each level of resolution. Larger
        values will increase execution time, but may lead to better results.
    convergence_threshold: float, default 0.001
        Stopping criterion for the iterative bias estimation. Larger values
        will lead to smaller execution time.
    bspline_grid: int, default (1, 1, 1)
        Resolution of the initial bspline grid defined as a sequence of three
        numbers. The actual resolution will be defined by adding the bspline
        order (default is 3) to the resolution in each dimension specified
        here. For example, 1,1,1 will result in a 4x4x4 grid of control points.
        This parameter may need to be adjusted based on your input image.
        In the multi-resolution N4 framework, the resolution of the bspline
        grid at subsequent iterations will be doubled. The number of
        resolutions is implicitly defined by Number of iterations parameter
        (the size of this list is the number of resolutions).
    shrink_factor: int, default 1
        Defines how much the image should be upsampled before estimating the
        inhomogeneity field. Increase if you want to reduce the execution
        time. 1 corresponds to the original resolution. Larger values will
        significantly reduce the computation time.
    bspline_order: int, default 3
        Order of B-spline used in the approximation. Larger values will lead
        to longer execution times, may result in overfitting and poor result.
    histogram_sharpening: 3-tuple, default (0.15, 0.01, 200)
        A vector of up to three values. Non-zero values correspond to Bias
        Field Full Width at Half Maximum, Wiener filter noise, and Number of
        histogram bins.
    check_pkg_version: bool, default False
        optionally check the package version using dpkg.
    Returns
    -------
    bfcfile, bffile: str
        the generated files.
    """
    check_version("ants", check_pkg_version)
    check_command("N4BiasFieldCorrection")
    # Images are passed to N4 as 3D volumes.
    ndim = 3
    # Stringify the grid/sharpening values for command-line assembly below.
    bspline_grid = [str(e) for e in bspline_grid]
    histogram_sharpening = [str(e) for e in histogram_sharpening]
    # The estimated bias field is written next to the corrected image.
    bffile = bfcfile.split(".")[0] + "_field.nii.gz"
    cmd = [
        "N4BiasFieldCorrection",
        "-d", str(ndim),
        "-i", imfile,
        "-s", str(shrink_factor),
        # Grid resolution "AxBxC" combined with the spline order.
        "-b", "[{0}, {1}]".format("x".join(bspline_grid), bspline_order),
        # The same iteration count is repeated for 4 resolution levels.
        "-c", "[{0}, {1}]".format(
            "x".join([str(nb_iterations)] * 4), convergence_threshold),
        "-t", "[{0}]".format(", ".join(histogram_sharpening)),
        # -o takes both outputs: corrected image and estimated bias field.
        "-o", "[{0}, {1}]".format(bfcfile, bffile),
        "-v"]
    if maskfile is not None:
        cmd += ["-x", maskfile]
    execute_command(cmd)
    return bfcfile, bffile
def register_affine(imfile, targetfile, regfile, mask=None, cost="normmi",
                    bins=256, interp="spline", dof=9, check_pkg_version=False):
    """ Register an MRI image onto a target image with an affine transform.
    .. note:: This function is based on FSL.
    Parameters
    ----------
    imfile: str
        the moving input image.
    targetfile: str
        the reference (target) image.
    regfile: str
        path where the registered image is written.
    mask: str, default None
        white matter mask image, required by the 'bbr' cost function.
    cost: str, default 'normmi'
        registration metric: 'mutualinfo', 'corratio', 'normcorr',
        'normmi', 'leastsq', 'labeldiff' or 'bbr'.
    bins: int, default 256
        number of histogram bins.
    interp: str, default 'spline'
        interpolation method: 'trilinear', 'nearestneighbour', 'sinc'
        or 'spline'.
    dof: int, default 9
        number of affine transform degrees of freedom.
    check_pkg_version: bool, default False
        optionally check the package version using dpkg.
    Returns
    -------
    regfile, trffile: str
        the registered image and the affine transform text file.
    """
    check_version("fsl", check_pkg_version)
    check_command("flirt")
    # The affine matrix is saved alongside the registered image.
    trffile = "{0}.txt".format(regfile.split(".")[0])
    cmd = ["flirt"]
    for flag, value in (("-in", imfile), ("-ref", targetfile),
                        ("-cost", cost), ("-searchcost", cost),
                        ("-anglerep", "euler"), ("-bins", str(bins)),
                        ("-interp", interp), ("-dof", str(dof)),
                        ("-out", regfile), ("-omat", trffile),
                        ("-verbose", "1")):
        cmd.extend((flag, value))
    if cost == "bbr":
        # bbr additionally needs a white matter segmentation.
        if mask is None:
            raise ValueError("A white matter mask image is needed by the "
                             "bbr cost function.")
        cmd.extend(("-wmseg", mask))
    execute_command(cmd)
    return regfile, trffile
def apply_affine(imfile, targetfile, regfile, affines, interp="spline",
                 check_pkg_version=False):
    """ Apply one or several affine transformations to an image.
    If several transforms are given they are composed first (last one in
    the list is applied first).
    .. note:: This function is based on FSL.
    Parameters
    ----------
    imfile: nibabel.Nifti1Image
        the input image.
    targetfile: nibabel.Nifti1Image
        the target image.
    regfile: str
        path where the resampled image is written.
    affines: str or list of str
        path(s) to the affine transform text file(s) to apply.
    interp: str, default 'spline'
        interpolation method: 'trilinear', 'nearestneighbour', 'sinc'
        or 'spline'.
    check_pkg_version: bool, default False
        optionally check the package version using dpkg.
    Returns
    -------
    regfile, trffile: str
        the resampled image and the composed affine text file.
    """
    check_version("fsl", check_pkg_version)
    check_command("flirt")
    if not isinstance(affines, list):
        affines = [affines]
    elif len(affines) == 0:
        raise ValueError("No transform specified.")
    trffile = "{0}.txt".format(regfile.split(".")[0])
    # Compose the transforms: load them in reverse order and left-multiply.
    matrices = [np.loadtxt(path) for path in reversed(affines)]
    composed = matrices[0]
    for matrix in matrices[1:]:
        composed = np.dot(matrix, composed)
    np.savetxt(trffile, composed)
    execute_command(["flirt",
                     "-in", imfile,
                     "-ref", targetfile,
                     "-init", trffile,
                     "-interp", interp,
                     "-applyxfm",
                     "-out", regfile])
    return regfile, trffile
def apply_mask(imfile, maskfile, genfile):
    """ Zero out every voxel of an image outside a brain mask.
    Parameters
    ----------
    imfile: str
        the input image.
    maskfile: str
        the mask image (zero outside the brain).
    genfile: str
        path where the masked image is written.
    Returns
    -------
    genfile: str
        the generated file.
    """
    image = nibabel.load(imfile)
    mask = nibabel.load(maskfile)
    data = image.get_fdata()
    # Voxels where the mask is zero are set to zero.
    data[mask.get_fdata() == 0] = 0
    masked = nibabel.Nifti1Image(data, image.affine)
    nibabel.save(masked, genfile)
    return genfile
| [
"nibabel.save",
"nibabel.load",
"numpy.dot",
"numpy.savetxt",
"nibabel.Nifti1Image",
"numpy.loadtxt"
] | [((9689, 9716), 'numpy.savetxt', 'np.savetxt', (['trffile', 'affine'], {}), '(trffile, affine)\n', (9699, 9716), True, 'import numpy as np\n'), ((10278, 10298), 'nibabel.load', 'nibabel.load', (['imfile'], {}), '(imfile)\n', (10290, 10298), False, 'import nibabel\n'), ((10313, 10335), 'nibabel.load', 'nibabel.load', (['maskfile'], {}), '(maskfile)\n', (10325, 10335), False, 'import nibabel\n'), ((10412, 10447), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['arr', 'im.affine'], {}), '(arr, im.affine)\n', (10431, 10447), False, 'import nibabel\n'), ((10452, 10481), 'nibabel.save', 'nibabel.save', (['gen_im', 'genfile'], {}), '(gen_im, genfile)\n', (10464, 10481), False, 'import nibabel\n'), ((9662, 9684), 'numpy.dot', 'np.dot', (['matrix', 'affine'], {}), '(matrix, affine)\n', (9668, 9684), True, 'import numpy as np\n'), ((9546, 9562), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (9556, 9562), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny(image):
    """Return the Canny edge map of *image* (grayscale + 5x5 Gaussian blur first)."""
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(gray, (5, 5), 0)
    return cv2.Canny(smoothed, 50, 150)
def display_lines(image, lines):
    """Draw the given line segments (as [x1, y1, x2, y2]) with color
    [255, 0, 0] and thickness 10 on a black canvas shaped like *image*."""
    canvas = np.zeros_like(image)
    for segment in (lines if lines is not None else []):
        x1, y1, x2, y2 = segment.reshape(4)
        cv2.line(canvas, (x1, y1), (x2, y2), [255, 0, 0], 10)
    return canvas
def region_of_interest(image):
    """Mask *image* to a fixed triangular region and return the result.
    The triangle spans (200, height), (1100, height) and (550, 250) —
    tuned for the demo footage resolution.
    """
    height = image.shape[0]
    triangle = np.array([[(200, height), (1100, height), (550, 250)]])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, triangle, 255)
    return cv2.bitwise_and(image, mask)
# Load the test frame and run edge detection on a working copy.
image = cv2.imread("test_image.jpg")
lane_image = np.copy(image)  # copy so the original frame stays untouched
canny1 = canny(lane_image)
# Display the edge map until a key is pressed.
cv2.imshow('result',canny1)
cv2.waitKey(0)
'''#For video feed
cap = cv2.VideoCapture("test2.mp4")
while(cap.isOpened):
_, frame = cap.read()
canny1 = canny(frame)
cropped_image = region_of_interest(canny1)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180,100, np.array([]), minLineLength=40, maxLineGap=5)
averaged_lines = average_slope_intercept(frame, lines)
line_image = display_lines(frame, averaged_lines)
combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
cv2.imshow('result',combo_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destryAllWindows()
'''
| [
"numpy.copy",
"cv2.fillPoly",
"cv2.Canny",
"cv2.line",
"numpy.zeros_like",
"cv2.bitwise_and",
"cv2.imshow",
"numpy.array",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.waitKey",
"cv2.imread"
] | [((786, 814), 'cv2.imread', 'cv2.imread', (['"""test_image.jpg"""'], {}), "('test_image.jpg')\n", (796, 814), False, 'import cv2\n'), ((828, 842), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (835, 842), True, 'import numpy as np\n'), ((871, 899), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'canny1'], {}), "('result', canny1)\n", (881, 899), False, 'import cv2\n'), ((899, 913), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (910, 913), False, 'import cv2\n'), ((94, 133), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (106, 133), False, 'import cv2\n'), ((145, 178), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (161, 178), False, 'import cv2\n'), ((188, 212), 'cv2.Canny', 'cv2.Canny', (['blur', '(50)', '(150)'], {}), '(blur, 50, 150)\n', (197, 212), False, 'import cv2\n'), ((281, 301), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (294, 301), True, 'import numpy as np\n'), ((584, 639), 'numpy.array', 'np.array', (['[[(200, height), (1100, height), (550, 250)]]'], {}), '([[(200, height), (1100, height), (550, 250)]])\n', (592, 639), True, 'import numpy as np\n'), ((646, 666), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (659, 666), True, 'import numpy as np\n'), ((671, 704), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'polygons', '(255)'], {}), '(mask, polygons, 255)\n', (683, 704), False, 'import cv2\n'), ((724, 752), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'mask'], {}), '(image, mask)\n', (739, 752), False, 'import cv2\n'), ((434, 491), 'cv2.line', 'cv2.line', (['line_image', '(x1, y1)', '(x2, y2)', '[255, 0, 0]', '(10)'], {}), '(line_image, (x1, y1), (x2, y2), [255, 0, 0], 10)\n', (442, 491), False, 'import cv2\n')] |
import numpy as np #we use numpy for lots of things
def main():
    """Build the array y[i] = 2*i + 1 for i in 0..9 and print each entry."""
    n = 10
    # Allocate a float array of n zeros with NumPy.
    y = np.zeros(n, dtype=float)
    # Fill it: element i holds 2i + 1, stored as a float.
    for i in range(n):
        y[i] = 2.0 * float(i) + 1
    # Iterate directly over the array values and print them one per line.
    for value in y:
        print(value)
#execute the main function
if __name__=="__main__":
main() | [
"numpy.zeros"
] | [((262, 286), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (270, 286), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
File: DataSet
Date: 5/1/18
Author: <NAME> (<EMAIL>)
This file provides loading of the BraTS datasets
for ease of use in TensorFlow models.
"""
import os
import pandas as pd
import numpy as np
import nibabel as nib
from tqdm import tqdm
from BraTS.Patient import *
from BraTS.structure import *
from BraTS.modalities import *
from BraTS.load_utils import *
survival_df_cache = {} # Prevents loading CSVs more than once
class DataSubSet:
    """One slice of the BraTS data-set (train / validation / HGG / LGG).

    Lazily loads MRI and segmentation volumes from disk and caches them;
    use :meth:`drop_cache` to release the memory again.
    """
    def __init__(self, directory_map, survival_csv, data_set_type=None):
        # directory_map: patient_id -> patient directory on disk.
        self.directory_map = directory_map
        self._patient_ids = sorted(list(directory_map.keys()))
        self._survival_csv = survival_csv
        self._num_patients = len(self._patient_ids)
        self.type = data_set_type
        # Data caches
        self._mris = None
        self._segs = None
        self._patients = {}
        self._survival_df_cached = None
        self._patients_fully_loaded = False
        # Maps patient_id -> row index into the _mris/_segs arrays.
        self._id_indexer = {patient_id: i for i, patient_id in enumerate(self._patient_ids)}
    def subset(self, patient_ids):
        """
        Split this data subset into a small subset by patient ID
        :param patient_ids: The patient IDs to keep in the smaller subset
        :return: A new data subset with only the specified patients
        """
        # NOTE(review): self.type is not propagated to the new subset, and
        # "id" shadows the builtin here — confirm this is intentional.
        dir_map = {id: self.directory_map[id] for id in patient_ids}
        return DataSubSet(dir_map, self._survival_csv)
    @property
    def ids(self):
        """
        List of all patient IDs in this dataset
        Will copy the ids... so modify them all you want
        :return: Copy of the patient IDs
        """
        return list(self._patient_ids)
    @property
    def mris(self):
        # MRI volumes for every patient, loaded on first access.
        if self._mris is not None:
            return self._mris
        self._load_images()
        return self._mris
    @property
    def segs(self):
        # Segmentation volumes for every patient, loaded on first access.
        if self._segs is None:
            self._load_images()
        return self._segs
    def _load_images(self):
        # Allocate one big array per modality stack and one per segmentation.
        # mri_shape / image_shape are provided by the BraTS.modalities import.
        mris_shape = (self._num_patients,) + mri_shape
        segs_shape = (self._num_patients,) + image_shape
        self._mris = np.empty(shape=mris_shape)
        self._segs = np.empty(shape=segs_shape)
        if self._patients_fully_loaded:
            # All the patients were already loaded
            for i, patient in enumerate(tqdm(self._patients.values())):
                self._mris[i] = patient.mri_data
                self._segs[i] = patient.seg
        else:
            # Load it from scratch
            for i, patient_id in enumerate(self._patient_ids):
                patient_dir = self.directory_map[patient_id]
                load_patient_data_inplace(patient_dir, self._mris, self._segs, i)
    @property
    def patients(self):
        """
        Loads ALL of the patients from disk into patient objects
        :return: A generator yielding ALL patients, one by one
        """
        for patient_id in self.ids:
            yield self.patient(patient_id)
        # Only set once every patient has been yielded (and thus cached).
        self._patients_fully_loaded = True
    def patient(self, patient_id):
        """
        Loads only a single patient from disk
        :param patient_id: The patient ID
        :return: A Patient object loaded from disk
        """
        if patient_id not in self._patient_ids:
            raise ValueError("Patient id \"%s\" not present." % patient_id)
        # Return cached value if present
        if patient_id in self._patients:
            return self._patients[patient_id]
        # Load patient data into memory
        patient = Patient(patient_id)
        patient_dir = self.directory_map[patient_id]
        df = self._survival_df
        # Age/survival are only available for patients listed in the CSV.
        if patient_id in df.id.values:
            patient.age = float(df.loc[df.id == patient_id].age)
            patient.survival = int(df.loc[df.id == patient_id].survival)
        if self._mris is not None and self._segs is not None:
            # Load from _mris and _segs if possible
            index = self._id_indexer[patient_id]
            patient.mri = self._mris[index]
            patient.seg = self._segs[index]
        else:
            # Load the mri and segmentation data from disk
            patient.mri, patient.seg = load_patient_data(patient_dir)
        self._patients[patient_id] = patient # cache the value for later
        return patient
    def drop_cache(self):
        # Release all cached patients and image arrays.
        self._patients.clear()
        self._mris = None
        self._segs = None
    @property
    def _survival_df(self):
        # Survival CSV as a DataFrame, memoized in the module-level cache.
        if self._survival_csv in survival_df_cache:
            return survival_df_cache[self._survival_csv]
        df = load_survival(self._survival_csv)
        survival_df_cache[self._survival_csv] = df
        return df
class DataSet(object):
    """Entry point to a BraTS data-set on disk.

    Locates the training/validation directories (either given explicitly
    via ``data_set_dir`` or found under ``brats_root`` by ``year``) and
    exposes lazily-built :class:`DataSubSet` views (train, validation,
    hgg, lgg). Every lookup is cached; :meth:`drop_cache` resets them.
    """
    def __init__(self, data_set_dir=None, brats_root=None, year=None):
        if data_set_dir is not None:
            # The data-set directory was specified explicitly
            assert isinstance(data_set_dir, str)
            self._data_set_dir = data_set_dir
        elif brats_root is not None and isinstance(year, int):
            # Find the directory by specifying the year
            assert isinstance(brats_root, str)
            # Data-set directories are matched on the two-digit year.
            year_dir = find_file_containing(brats_root, str(year % 100))
            self._data_set_dir = os.path.join(brats_root, year_dir)
            self._brats_root = brats_root
            self._year = year
        else:
            # BraTS data-set location was improperly specified
            raise Exception("Specify BraTS location with \"data_set_dir\" or with \"brats_root\" and \"year\"")
        self._validation = None
        self._train = None
        self._hgg = None
        self._lgg = None
        self._dir_map_cache = None
        self._val_dir = None
        self._train_dir_cached = None
        # NOTE(review): _train_dir is resolved eagerly here, so __init__
        # raises FileNotFoundError when no training directory exists.
        self._hgg_dir = os.path.join(self._train_dir, "HGG")
        self._lgg_dir = os.path.join(self._train_dir, "LGG")
        self._train_survival_csv_cached = None
        self._validation_survival_csv_cached = None
        self._train_ids = None
        self._hgg_ids_cached = None
        self._lgg_ids_cached = None
        self._train_dir_map_cache = None
        self._validation_dir_map_cache = None
        self._hgg_dir_map_cache = None
        self._lgg_dir_map_cache = None
    def set(self, data_set_type):
        """
        Get a data subset by type
        :param data_set_type: The DataSubsetType to get
        :return: The data sub-set of interest
        """
        assert isinstance(data_set_type, DataSubsetType)
        if data_set_type == DataSubsetType.train:
            return self.train
        if data_set_type == DataSubsetType.hgg:
            return self.hgg
        if data_set_type == DataSubsetType.lgg:
            return self.lgg
        if data_set_type == DataSubsetType.validation:
            return self.validation
    @property
    def train(self):
        """
        Training data
        Loads the training data from disk, utilizing caching
        :return: A DataSubSet containing the training data, or None if
            the training files could not be found
        """
        if self._train is None:
            try:
                self._train = DataSubSet(self._train_dir_map,
                                          self._train_survival_csv,
                                          data_set_type=DataSubsetType.train)
            except FileNotFoundError:
                return None
        return self._train
    @property
    def validation(self):
        """
        Validation data
        :return: Validation data, or None if the files could not be found
        """
        if self._validation is None:
            try:
                self._validation = DataSubSet(self._validation_dir_map,
                                               self._validation_survival_csv,
                                               data_set_type=DataSubsetType.validation)
            except FileNotFoundError:
                return None
        return self._validation
    @property
    def hgg(self):
        # High-grade glioma subset (or None if its files are missing).
        if self._hgg is None:
            try:
                self._hgg = DataSubSet(self._hgg_dir_map,
                                        self._train_survival_csv,
                                        data_set_type=DataSubsetType.hgg)
            except FileNotFoundError:
                return None
        return self._hgg
    @property
    def lgg(self):
        # Low-grade glioma subset (or None if its files are missing).
        if self._lgg is None:
            try:
                self._lgg = DataSubSet(self._lgg_dir_map,
                                        self._train_survival_csv,
                                        data_set_type=DataSubsetType.lgg)
            except FileNotFoundError:
                return None
        return self._lgg
    def drop_cache(self):
        """
        Drops the cached values in the object
        :return: None
        """
        self._validation = None
        self._train = None
        self._hgg = None
        self._lgg = None
        self._dir_map_cache = None
        self._val_dir = None
        self._train_dir_cached = None
        self._train_survival_csv_cached = None
        self._validation_survival_csv_cached = None
        self._train_ids = None
        self._hgg_ids_cached = None
        self._lgg_ids_cached = None
        self._train_dir_map_cache = None
        self._validation_dir_map_cache = None
        self._hgg_dir_map_cache = None
        self._lgg_dir_map_cache = None
    @property
    def _train_survival_csv(self):
        # Path to the training survival CSV (cached after first lookup).
        if self._train_survival_csv_cached is None:
            self._train_survival_csv_cached = find_file_containing(self._train_dir, "survival")
            if self._train_survival_csv_cached is None:
                raise FileNotFoundError("Could not find survival CSV in %s" % self._train_dir)
        return self._train_survival_csv_cached
    @property
    def _validation_survival_csv(self):
        # Path to the validation survival CSV (cached after first lookup).
        if self._validation_survival_csv_cached is None:
            self._validation_survival_csv_cached = find_file_containing(self._validation_dir, "survival")
            if self._validation_survival_csv_cached is None:
                raise FileNotFoundError("Could not find survival CSV in %s" % self._validation_dir)
        return self._validation_survival_csv_cached
    @property
    def _train_dir(self):
        # Training directory, found by name within the data-set directory.
        if self._train_dir_cached is not None:
            return self._train_dir_cached
        self._train_dir_cached = find_file_containing(self._data_set_dir, "training")
        if self._train_dir_cached is None:
            raise FileNotFoundError("Could not find training directory in %s" % self._data_set_dir)
        return self._train_dir_cached
    @property
    def _validation_dir(self):
        # Validation directory, found by name within the data-set directory.
        if self._val_dir is not None:
            return self._val_dir
        self._val_dir = find_file_containing(self._data_set_dir, "validation")
        if self._val_dir is None:
            raise FileNotFoundError("Could not find validation directory in %s" % self._data_set_dir)
        return self._val_dir
    @property
    def _train_dir_map(self):
        # Training map is the union of the HGG and LGG directory maps.
        if self._train_dir_map_cache is None:
            self._train_dir_map_cache = dict(self._hgg_dir_map)
            self._train_dir_map_cache.update(self._lgg_dir_map)
        return self._train_dir_map_cache
    @property
    def _validation_dir_map(self):
        if self._validation_dir_map_cache is None:
            self._validation_dir_map_cache = self._directory_map(self._validation_dir)
        return self._validation_dir_map_cache
    @property
    def _hgg_dir_map(self):
        if self._hgg_dir_map_cache is None:
            self._hgg_dir_map_cache = self._directory_map(self._hgg_dir)
        return self._hgg_dir_map_cache
    @property
    def _lgg_dir_map(self):
        if self._lgg_dir_map_cache is None:
            self._lgg_dir_map_cache = self._directory_map(self._lgg_dir)
        return self._lgg_dir_map_cache
    @property
    def _hgg_ids(self):
        if self._hgg_ids_cached is None:
            self._hgg_ids_cached = os.listdir(self._hgg_dir)
        return self._hgg_ids_cached
    @property
    def _lgg_ids(self):
        if self._lgg_ids_cached is None:
            self._lgg_ids_cached = os.listdir(self._lgg_dir)
        return self._lgg_ids_cached
    @classmethod
    def _directory_map(cls, dir):
        # Map each sub-directory name of "dir" to its full path.
        # NOTE(review): "dir" and "file" shadow builtins here.
        return {file: os.path.join(dir, file)
                for file in os.listdir(dir)
                if os.path.isdir(os.path.join(dir, file))}
| [
"os.path.join",
"os.listdir",
"numpy.empty"
] | [((2138, 2164), 'numpy.empty', 'np.empty', ([], {'shape': 'mris_shape'}), '(shape=mris_shape)\n', (2146, 2164), True, 'import numpy as np\n'), ((2186, 2212), 'numpy.empty', 'np.empty', ([], {'shape': 'segs_shape'}), '(shape=segs_shape)\n', (2194, 2212), True, 'import numpy as np\n'), ((5800, 5836), 'os.path.join', 'os.path.join', (['self._train_dir', '"""HGG"""'], {}), "(self._train_dir, 'HGG')\n", (5812, 5836), False, 'import os\n'), ((5861, 5897), 'os.path.join', 'os.path.join', (['self._train_dir', '"""LGG"""'], {}), "(self._train_dir, 'LGG')\n", (5873, 5897), False, 'import os\n'), ((11945, 11970), 'os.listdir', 'os.listdir', (['self._hgg_dir'], {}), '(self._hgg_dir)\n', (11955, 11970), False, 'import os\n'), ((12122, 12147), 'os.listdir', 'os.listdir', (['self._lgg_dir'], {}), '(self._lgg_dir)\n', (12132, 12147), False, 'import os\n'), ((12258, 12281), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (12270, 12281), False, 'import os\n'), ((5261, 5295), 'os.path.join', 'os.path.join', (['brats_root', 'year_dir'], {}), '(brats_root, year_dir)\n', (5273, 5295), False, 'import os\n'), ((12311, 12326), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (12321, 12326), False, 'import os\n'), ((12361, 12384), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (12373, 12384), False, 'import os\n')] |
import numpy as np
import pandas as pd
import pytest
import numpy.testing as npt
import matplotlib.pyplot as plt
from pulse2percept.viz import scatter_correlation, correlation_matrix
def test_scatter_correlation():
    """Check that scatter_correlation annotates perfect correlation and
    rejects mismatched or too-short inputs."""
    x = np.arange(100)
    _, ax = plt.subplots()
    ax = scatter_correlation(x, x, ax=ax)
    # Exactly one annotation (the correlation coefficient) must be drawn.
    npt.assert_equal(len(ax.texts), 1)
    npt.assert_equal('$r$=1.000' in ax.texts[0].get_text(), True)
    # Ignore NaN:
    ax = scatter_correlation([0, 1, np.nan, 3], [0, 1, 2, 3])
    npt.assert_equal('$r$=1.000' in ax.texts[0].get_text(), True)
    # Mismatched lengths are rejected:
    with pytest.raises(ValueError):
        scatter_correlation(np.arange(10), np.arange(11))
    # A single point has no defined correlation:
    with pytest.raises(ValueError):
        scatter_correlation([1], [2])
def test_correlation_matrix():
    """Smoke-test correlation_matrix on a two-column DataFrame and check
    that non-DataFrame input is rejected."""
    frame = pd.DataFrame()
    frame['a'] = pd.Series(np.arange(100))
    frame['b'] = pd.Series(list(frame['a'][::-1]))
    _, axis = plt.subplots()
    axis = correlation_matrix(frame, ax=axis)
    with pytest.raises(TypeError):
        correlation_matrix(np.zeros((10, 20)))
| [
"numpy.zeros",
"pytest.raises",
"pulse2percept.viz.scatter_correlation",
"pandas.DataFrame",
"pulse2percept.viz.correlation_matrix",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((226, 240), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (235, 240), True, 'import numpy as np\n'), ((253, 267), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (265, 267), True, 'import matplotlib.pyplot as plt\n'), ((277, 309), 'pulse2percept.viz.scatter_correlation', 'scatter_correlation', (['x', 'x'], {'ax': 'ax'}), '(x, x, ax=ax)\n', (296, 309), False, 'from pulse2percept.viz import scatter_correlation, correlation_matrix\n'), ((462, 514), 'pulse2percept.viz.scatter_correlation', 'scatter_correlation', (['[0, 1, np.nan, 3]', '[0, 1, 2, 3]'], {}), '([0, 1, np.nan, 3], [0, 1, 2, 3])\n', (481, 514), False, 'from pulse2percept.viz import scatter_correlation, correlation_matrix\n'), ((791, 805), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (803, 805), True, 'import pandas as pd\n'), ((903, 917), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (915, 917), True, 'import matplotlib.pyplot as plt\n'), ((927, 956), 'pulse2percept.viz.correlation_matrix', 'correlation_matrix', (['df'], {'ax': 'ax'}), '(df, ax=ax)\n', (945, 956), False, 'from pulse2percept.viz import scatter_correlation, correlation_matrix\n'), ((590, 615), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (603, 615), False, 'import pytest\n'), ((684, 709), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (697, 709), False, 'import pytest\n'), ((719, 748), 'pulse2percept.viz.scatter_correlation', 'scatter_correlation', (['[1]', '[2]'], {}), '([1], [2])\n', (738, 748), False, 'from pulse2percept.viz import scatter_correlation, correlation_matrix\n'), ((830, 844), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (839, 844), True, 'import numpy as np\n'), ((966, 990), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (979, 990), False, 'import pytest\n'), ((645, 658), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (654, 658), True, 'import numpy as np\n'), ((660, 
673), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (669, 673), True, 'import numpy as np\n'), ((1019, 1037), 'numpy.zeros', 'np.zeros', (['(10, 20)'], {}), '((10, 20))\n', (1027, 1037), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Computes the result of a stats cPickle file.
A stats cPickle file has the following format:
- List of N elements, each representing a track.
- Each position (or track) contains the rank index of the covers
corresponding to this position.
The results this script computes are:
- Mean Average Precision (MAP)
- Average Rank per track
- Average Rank per clique
- Precision at k (default k=10)
Plotting:
- Rank histograms (one or two stats files)
----
Authors:
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
----
License:
This code is distributed under the GNU LESSER PUBLIC LICENSE
(LGPL, see www.gnu.org).
Copyright (c) 2012-2013 MARL@NYU.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of MARL, NYU nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import argparse
import cPickle
import numpy as np
import pylab as plt
import utils
def get_top_ranked(stats):
    """Return an array with the best (first) rank of each track.

    Tracks whose first rank is missing, NaN, or infinite are left as 0.

    Parameters
    ----------
    stats : list
        One sequence of ranks per track (first element is the best rank).
    """
    tr = np.zeros(len(stats))
    for i, s in enumerate(stats):
        try:
            # BUGFIX: the original test was `not isnan(s[0]) or s[0] != inf`,
            # which is always true (NaN != inf); isfinite rejects NaN and inf.
            if np.isfinite(s[0]):
                tr[i] = s[0]
        except (TypeError, IndexError):
            # Empty or non-indexable entries keep their 0 placeholder.
            continue
    return tr
def get_average_rank(stats):
    """Return an array with the mean rank of each track.

    Tracks whose first rank is missing, NaN, or infinite are left as 0.
    Note the guard only inspects the FIRST rank, as in the original code;
    a NaN later in the sequence still propagates into the mean.

    Parameters
    ----------
    stats : list
        One sequence of ranks per track.
    """
    tr = np.zeros(len(stats))
    for i, s in enumerate(stats):
        try:
            # BUGFIX: the original test was `not isnan(s[0]) or s[0] != inf`,
            # which is always true (NaN != inf); isfinite rejects NaN and inf.
            if np.isfinite(s[0]):
                tr[i] = np.mean(s)
        except (TypeError, IndexError):
            continue
    return tr
def average_rank_per_track(stats):
    """Mean of all individual ranks across every track.

    NaN and infinite ranks are skipped; non-iterable entries are ignored.

    Parameters
    ----------
    stats : list
        One sequence of ranks per track.
    """
    valid_ranks = []
    for s in stats:
        try:
            for rank in s:
                # BUGFIX: the original `not isnan(rank) or rank != inf` let
                # NaN ranks through (NaN != inf is true), poisoning the mean.
                if np.isfinite(rank):
                    valid_ranks.append(rank)
        except TypeError:
            continue
    return np.mean(valid_ranks)
def average_rank_per_clique(stats):
    """Average of the per-clique mean ranks.

    Cliques whose mean is NaN or infinite are excluded from the result.
    """
    clique_means = []
    for ranks in stats:
        try:
            clique_means.append(np.mean(ranks))
            # Discard the clique if its mean is not a finite number.
            if np.isnan(clique_means[-1]) or clique_means[-1] == np.inf:
                clique_means.pop()
        except:
            continue
    return np.mean(clique_means)
def precision_at_k(ranks, k):
    """Fraction of *ranks* that fall within the top k (1.0 by definition
    when k is 0)."""
    if k == 0:
        return 1.0
    hits = int(np.sum(np.asarray(ranks) <= k))
    return hits / float(k)
def average_precision(stats, q, ver=False):
    """Average precision for query *q*: mean of precision@k over the ranks
    of its covers. Returns NaN when the query has no rank list.

    ``ver`` is unused and kept only for interface compatibility.
    """
    try:
        ranks = stats[q]
        nrel = len(ranks)  # number of relevant documents
    except:
        return np.nan
    precisions = [precision_at_k(ranks, k) for k in ranks]
    return np.sum(precisions) / float(nrel)
def average_precision_at_k(stats, k):
    """Mean precision@k across all queries in *stats*."""
    return np.mean([precision_at_k(s, k) for s in stats])
def mean_average_precision(stats):
    """Mean Average Precision: mean of the per-query average precisions,
    skipping queries whose AP is NaN."""
    per_query = []
    for q in range(len(stats)):
        ap = average_precision(stats, q)
        if not np.isnan(ap):
            per_query.append(ap)
    return np.mean(per_query)
def mean_per_clique_count(stats, N=None):
    """Mean of the clique-mean ranks, bucketed by clique size n = 1..N-1.

    Index 0 of the returned array is unused and stays 0; buckets with no
    cliques of that size also stay 0.
    """
    if N is None:
        N = len(stats)
    means = np.zeros(N)
    for size in range(1, N):
        bucket = []
        for ranks in stats:
            try:
                if len(ranks) == size:
                    bucket.append(np.mean(ranks))
            except:
                continue
        if bucket:
            means[size] = np.mean(bucket)
    return means
##### PLOTTING
def compute_rank_histogram_buckets(stats):
    """Histogram of every rank in *stats*, bucketed as 1 / 2 / 3-5 / 6-10 /
    11+ and normalized into a probability density function.

    Parameters
    ----------
    stats : list
        One sequence of ranks per track; non-iterable entries are skipped.
    """
    # Flatten all rank sequences (removed a dead, commented-out alternative
    # bucketing that was left inside this function as a string literal).
    ranks = []
    for s in stats:
        try:
            ranks.extend(s)
        except TypeError:
            continue
    # Buckets: 1, 2, 3-5, 6-10, 11+
    hist = np.zeros(5)
    for r in ranks:
        if r <= 1:
            hist[0] += 1
        elif r <= 2:
            hist[1] += 1
        elif r <= 5:
            hist[2] += 1
        elif r <= 10:
            hist[3] += 1
        else:
            hist[4] += 1
    # Probability Density Function:
    hist = hist.astype(float)
    hist /= float(hist.sum())
    return hist
def plot_rank_histogram(stats, bins=5):
    """Show the normalized rank histogram of *stats* as a bar plot."""
    pdf = compute_rank_histogram_buckets(stats)
    # Plot the normalized counts as a centered bar chart.
    plt.bar(range(0, bins), pdf, align="center")
    plt.xlabel("Ranks")
    plt.ylabel("Normalized Count")
    plt.title("Rank Histogram")
    plt.xticks(range(0, 5), ("1-10", "11-25", "26-50", "51-100", "101+"))
    plt.show()
def plot_rank_histograms(stats1, stats2, bins=5, test=True):
    """Plot two rank histograms side by side for comparison.

    ``test`` only selects the labels/title (test-set vs train-set wording);
    it does not change which stats are plotted.
    """
    hist1 = compute_rank_histogram_buckets(stats1)
    hist2 = compute_rank_histogram_buckets(stats2)
    if test:
        label1 = "k-means(2045) + LDA(50)"
        label2 = "2D-FMC + PCA(200)"
        title = "Rank Histogram of the test set on the MSD"
    else:
        label1 = "k-means(2045) + LDA(200)"
        label2 = "2D-FMC + PCA(200)"
        title = "Rank Histogram of the train set"
    fig = plt.figure()
    ax = fig.gca()
    width = 0.45
    # Shift the two bar series half a bar left/right so they sit side by side.
    ax.bar(np.arange(5)-width/2, hist1, width=width, color='b',
        label=label1, align="center")
    ax.bar(np.arange(5)+width/2, hist2, width=width, color='g',
        label=label2, align="center")
    # Plot histogram as PDF
    plt.title(title)
    plt.xlabel("Ranks")
    plt.ylabel("Normalized Count")
    #plt.xticks(xrange(0,5), ("1-10", "11-25", "26-50", "51-100", "101+"))
    plt.xticks(xrange(0,5), ("1", "2", "3-5", "6-10", "11+"))
    plt.legend(loc="upper left")
    plt.show()
def plot_precision_at_k_histograms(stats1, stats2, K=[1,3,5,10], test=True):
    """Plot precision@k bars for two result sets side by side.

    ``test`` only selects the label wording. NOTE(review): K is a mutable
    default argument; harmless here since it is never mutated.
    """
    P1 = [average_precision_at_k(stats1, k) for k in K]
    P2 = [average_precision_at_k(stats2, k) for k in K]
    if test:
        label1 = "k-means(2045) + LDA(50)"
        label2 = "2D-FMC + PCA(200)"
        title = "Precision @ k of the test set on the MSD"
    else:
        label1 = "k-means(2045) + LDA(200)"
        label2 = "2D-FMC + PCA(200)"
        title = "Precision @ k of the train set"
    fig = plt.figure()
    ax = fig.gca()
    width = 0.45
    # Shift the two bar series half a bar left/right so they sit side by side.
    ax.bar(np.arange(len(K))-width/2, P1, width=width, color='0.75',
        label=label1, align="center")
    ax.bar(np.arange(len(K))+width/2, P2, width=width, color='0.9',
        label=label2, align="center", hatch='//')
    # Plot histogram as PDF
    #plt.title(title)
    plt.xlabel("k")
    plt.ylabel("Precision @ k")
    plt.xticks(xrange(0,len(K)), ("1", "3", "5", "10"))
    # Rescale the y tick labels from the 0-0.25 range to percentages.
    ylabels = np.arange(0,3.,0.5)*10
    plt.yticks(np.arange(0,3.,0.5)*.1, ylabels.astype(int))
    plt.legend(loc="upper right")
    plt.show()
def process(statsfile, k, optfile=None):
    """Load a stats pickle, log summary metrics, and plot the results.

    If ``optfile`` is given, a second stats file is loaded and the two are
    compared via precision-at-k bar plots; otherwise a single rank
    histogram of ``statsfile`` is drawn.
    """
    stats = utils.load_pickle(statsfile)
    # Set up logger
    logger = utils.configure_logger()
    # print results
    logger.info("Number of queries: %d" % len(stats))
    logger.info("Average Rank per Track: %.3f" % average_rank_per_track(stats))
    logger.info("Average Rank per Clique: %.3f" % average_rank_per_clique(stats))
    logger.info("Mean Average Precision: %.2f %%" %
                (mean_average_precision(stats) * 100))
    logger.info("Precision at %d: %.2f %%" %
                (k, average_precision_at_k(stats, k) * 100))
    if optfile is None:
        plot_rank_histogram(stats)
    else:
        stats2 = utils.load_pickle(optfile)
        #plot_rank_histograms(stats, stats2, test=False)
        plot_precision_at_k_histograms(stats, stats2, K=[1, 3, 5, 10], test=False)
def main():
    """Parse command-line arguments and run the stats analysis."""
    parser = argparse.ArgumentParser(
        description="Analyzes the stats of a stats pickle file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("statsfile", action="store", help="stats file")
    parser.add_argument(
        "-k", action="store", dest="k", default=10, type=int,
        help="Compute Precision at k",
    )
    parser.add_argument(
        "-s", action="store", dest="optfile", default=None,
        help="Optional stats file to make compare with",
    )
    args = parser.parse_args()
    # Process
    process(args.statsfile, k=args.k, optfile=args.optfile)
if __name__ == "__main__":
main() | [
"pylab.title",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.where",
"numpy.asarray",
"pylab.xlabel",
"pylab.legend",
"utils.load_pickle",
"pylab.figure",
"utils.configure_logger",
"numpy.zeros",
"numpy.sum",
"numpy.isnan",
"pylab.ylabel",
"numpy.arange",
"pylab.show"
] | [((2950, 2965), 'numpy.mean', 'np.mean', (['mean_r'], {}), '(mean_r)\n', (2957, 2965), True, 'import numpy as np\n'), ((3236, 3251), 'numpy.mean', 'np.mean', (['mean_r'], {}), '(mean_r)\n', (3243, 3251), True, 'import numpy as np\n'), ((3321, 3338), 'numpy.asarray', 'np.asarray', (['ranks'], {}), '(ranks)\n', (3331, 3338), True, 'import numpy as np\n'), ((3826, 3844), 'numpy.mean', 'np.mean', (['precision'], {}), '(precision)\n', (3833, 3844), True, 'import numpy as np\n'), ((4080, 4093), 'numpy.mean', 'np.mean', (['ma_p'], {}), '(ma_p)\n', (4087, 4093), True, 'import numpy as np\n'), ((4190, 4201), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4198, 4201), True, 'import numpy as np\n'), ((5122, 5133), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (5130, 5133), True, 'import numpy as np\n'), ((5722, 5749), 'pylab.title', 'plt.title', (['"""Rank Histogram"""'], {}), "('Rank Histogram')\n", (5731, 5749), True, 'import pylab as plt\n'), ((5754, 5773), 'pylab.xlabel', 'plt.xlabel', (['"""Ranks"""'], {}), "('Ranks')\n", (5764, 5773), True, 'import pylab as plt\n'), ((5778, 5808), 'pylab.ylabel', 'plt.ylabel', (['"""Normalized Count"""'], {}), "('Normalized Count')\n", (5788, 5808), True, 'import pylab as plt\n'), ((5887, 5897), 'pylab.show', 'plt.show', ([], {}), '()\n', (5895, 5897), True, 'import pylab as plt\n'), ((6369, 6381), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (6379, 6381), True, 'import pylab as plt\n'), ((6657, 6673), 'pylab.title', 'plt.title', (['title'], {}), '(title)\n', (6666, 6673), True, 'import pylab as plt\n'), ((6678, 6697), 'pylab.xlabel', 'plt.xlabel', (['"""Ranks"""'], {}), "('Ranks')\n", (6688, 6697), True, 'import pylab as plt\n'), ((6702, 6732), 'pylab.ylabel', 'plt.ylabel', (['"""Normalized Count"""'], {}), "('Normalized Count')\n", (6712, 6732), True, 'import pylab as plt\n'), ((6874, 6902), 'pylab.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (6884, 6902), True, 'import pylab as 
plt\n'), ((6907, 6917), 'pylab.show', 'plt.show', ([], {}), '()\n', (6915, 6917), True, 'import pylab as plt\n'), ((7412, 7424), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (7422, 7424), True, 'import pylab as plt\n'), ((7743, 7758), 'pylab.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (7753, 7758), True, 'import pylab as plt\n'), ((7763, 7790), 'pylab.ylabel', 'plt.ylabel', (['"""Precision @ k"""'], {}), "('Precision @ k')\n", (7773, 7790), True, 'import pylab as plt\n'), ((7948, 7977), 'pylab.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (7958, 7977), True, 'import pylab as plt\n'), ((7982, 7992), 'pylab.show', 'plt.show', ([], {}), '()\n', (7990, 7992), True, 'import pylab as plt\n'), ((8048, 8076), 'utils.load_pickle', 'utils.load_pickle', (['statsfile'], {}), '(statsfile)\n', (8065, 8076), False, 'import utils\n'), ((8336, 8360), 'utils.configure_logger', 'utils.configure_logger', ([], {}), '()\n', (8358, 8360), False, 'import utils\n'), ((8987, 9133), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Analyzes the stats of a stats pickle file"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Analyzes the stats of a stats pickle file', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (9010, 9133), False, 'import argparse\n'), ((3666, 3676), 'numpy.sum', 'np.sum', (['ap'], {}), '(ap)\n', (3672, 3676), True, 'import numpy as np\n'), ((4010, 4022), 'numpy.isnan', 'np.isnan', (['ap'], {}), '(ap)\n', (4018, 4022), True, 'import numpy as np\n'), ((7861, 7883), 'numpy.arange', 'np.arange', (['(0)', '(3.0)', '(0.5)'], {}), '(0, 3.0, 0.5)\n', (7870, 7883), True, 'import numpy as np\n'), ((8733, 8759), 'utils.load_pickle', 'utils.load_pickle', (['optfile'], {}), '(optfile)\n', (8750, 8759), False, 'import utils\n'), ((3358, 3378), 'numpy.where', 'np.where', (['(ranks <= k)'], {}), '(ranks <= k)\n', (3366, 3378), True, 'import numpy as np\n'), 
((4490, 4500), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (4497, 4500), True, 'import numpy as np\n'), ((6429, 6441), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6438, 6441), True, 'import numpy as np\n'), ((6532, 6544), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6541, 6544), True, 'import numpy as np\n'), ((7899, 7921), 'numpy.arange', 'np.arange', (['(0)', '(3.0)', '(0.5)'], {}), '(0, 3.0, 0.5)\n', (7908, 7921), True, 'import numpy as np\n'), ((2631, 2641), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (2638, 2641), True, 'import numpy as np\n'), ((3078, 3088), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (3085, 3088), True, 'import numpy as np\n'), ((3105, 3125), 'numpy.isnan', 'np.isnan', (['mean_r[-1]'], {}), '(mean_r[-1])\n', (3113, 3125), True, 'import numpy as np\n'), ((2334, 2348), 'numpy.isnan', 'np.isnan', (['s[0]'], {}), '(s[0])\n', (2342, 2348), True, 'import numpy as np\n'), ((2573, 2587), 'numpy.isnan', 'np.isnan', (['s[0]'], {}), '(s[0])\n', (2581, 2587), True, 'import numpy as np\n'), ((2828, 2842), 'numpy.isnan', 'np.isnan', (['rank'], {}), '(rank)\n', (2836, 2842), True, 'import numpy as np\n'), ((4386, 4396), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (4393, 4396), True, 'import numpy as np\n')] |
import warnings
import numpy as np
from . import dispatch, B, Numeric
from ..shape import unwrap_dimension
from ..types import NPDType, NPRandomState, Int
__all__ = []
@dispatch
def create_random_state(_: NPDType, seed: Int = 0):
    """Build a fresh NumPy `RandomState` seeded with `seed`."""
    rng = np.random.RandomState(seed=seed)
    return rng
@dispatch
def global_random_state(_: NPDType):
    """Return NumPy's global random state object."""
    # `np.random.random` is a bound method of the global RandomState, so
    # its `__self__` attribute is that state object itself.
    global_state = np.random.random.__self__
    return global_state
@dispatch
def set_global_random_state(state: NPRandomState):
    """Overwrite NumPy's global random state with the state of `state`."""
    target = np.random.random.__self__
    target.set_state(state.get_state())
def _warn_dtype(dtype):
    """Warn when random floats are about to be cast to an integer dtype."""
    if not B.issubdtype(dtype, np.integer):
        return
    warnings.warn("Casting random number of type float to type integer.")
@dispatch
def rand(state: NPRandomState, dtype: NPDType, *shape: Int):
    """Sample uniform values of `shape` from `state`, cast to `dtype`."""
    _warn_dtype(dtype)
    samples = state.rand(*shape)
    return state, B.cast(dtype, samples)
@dispatch
def rand(dtype: NPDType, *shape: Int):
    """Sample uniform values using NumPy's global random state."""
    state = global_random_state(dtype)
    return rand(state, dtype, *shape)[1]
@dispatch
def randn(state: NPRandomState, dtype: NPDType, *shape: Int):
    """Sample standard-normal values of `shape` from `state`, cast to `dtype`."""
    _warn_dtype(dtype)
    samples = state.randn(*shape)
    return state, B.cast(dtype, samples)
@dispatch
def randn(dtype: NPDType, *shape: Int):
    """Sample standard-normal values using NumPy's global random state."""
    state = global_random_state(dtype)
    return randn(state, dtype, *shape)[1]
@dispatch
def choice(state: NPRandomState, a: Numeric, n: Int):
    """Draw `n` elements from the first axis of `a`, with replacement."""
    num_options = unwrap_dimension(B.shape(a)[0])
    picked = a[state.choice(num_options, n, replace=True)]
    # A single draw is unwrapped to a scalar/row rather than a length-1 batch.
    if n == 1:
        return state, picked[0]
    return state, picked
@dispatch
def choice(a: Numeric, n: Int):
    """Draw `n` elements from `a` using the global random state."""
    state = global_random_state(a)
    return choice(state, a, n)[1]
| [
"warnings.warn",
"numpy.random.RandomState"
] | [((246, 278), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (267, 278), True, 'import numpy as np\n'), ((561, 630), 'warnings.warn', 'warnings.warn', (['"""Casting random number of type float to type integer."""'], {}), "('Casting random number of type float to type integer.')\n", (574, 630), False, 'import warnings\n')] |
"""
Author: <NAME>
Affiliation: NAIST & OSX
"""
from __future__ import annotations
import inspect
import random
from abc import ABC, abstractmethod, abstractstaticmethod
from itertools import count
from typing import Dict, Iterator, List, Optional, Type
import gym
import jax
import numpy as np
import structlog
from chex import PRNGKey
from tqdm import tqdm
from shinrl import ShinEnv
from .config import SolverConfig
from .history import History
class BaseSolver(ABC, History):
    """
    Base class to implement solvers. The results are treated by the inherited History class.
    # MixIn:
    Our Solver interface adopts "mixin" mechanism to realize the flexible behavior.
    The `make_mixin` method should return mixins that have necessary methods such as `evaluate` and `step` functions.
    See [shinrl/solvers/vi/discrete/solver.py] for an example implementation.
    """

    # Class-wide counter used to build unique solver ids across instances.
    _id: Iterator[int] = count(0)
    DefaultConfig = SolverConfig

    # ########## YOU NEED TO IMPLEMENT HERE ##########

    # ``abc.abstractstaticmethod`` has been deprecated since Python 3.3;
    # stacking ``@staticmethod`` on ``@abstractmethod`` is the supported,
    # exactly equivalent spelling.
    @staticmethod
    @abstractmethod
    def make_mixins(env: gym.Env, config: SolverConfig) -> List[Type[object]]:
        """Make a list of mixins from env and config"""
        pass

    @abstractmethod
    def evaluate(self) -> Dict[str, float]:
        """Evaluate the solver and return the dict of results. Called every self.config.eval_interval steps."""
        pass

    @abstractmethod
    def step(self) -> Dict[str, float]:
        """Execute the solver by one step and return the dict of results."""
        pass

    # ################################################

    @staticmethod
    def factory(
        env: gym.Env,
        config: SolverConfig,
        mixins: List[Type[object]],
    ) -> BaseSolver:
        """Instantiate a solver with mixins and initialize it."""

        # Build a concrete class whose MRO is exactly the provided mixins.
        class MixedSolver(*mixins):
            pass

        solver = MixedSolver()
        solver.mixins = mixins
        # Record the qualified names of all bound methods for logging.
        methods = inspect.getmembers(solver, predicate=inspect.ismethod)
        solver.methods_str = [method[1].__qualname__ for method in methods]
        solver.initialize(env, config)
        return solver

    def __init__(self) -> None:
        self.env_id: int = -1
        self.solver_id: str = f"{type(self).__name__}-{next(self._id)}"
        self.logger = structlog.get_logger(solver_id=self.solver_id, env_id=None)
        self.is_initialized: bool = False
        self.env = None
        self.key: PRNGKey = None
        self.mixins: List[Type] = []
        self.methods_str: List[str] = []

    def initialize(
        self,
        env: gym.Env,
        config: Optional[SolverConfig] = None,
    ) -> None:
        """Set the env and initialize the history.
        Args:
            env (gym.Env): Environment to solve..
            config (SolverConfig, optional): Configuration of an algorithm.
        """
        self.init_history()
        self.set_config(config)
        self.set_env(env)
        self.seed(self.config.seed)
        self.is_initialized = True
        if self.config.verbose:
            self.logger.info(
                "Solver is initialized.", mixins=self.mixins, methods=self.methods_str
            )

    def seed(self, seed: int = 0) -> None:
        """Seed every RNG the solver touches: JAX key, env, Python, NumPy."""
        self.key = jax.random.PRNGKey(seed)
        self.env.seed(seed)
        random.seed(seed)
        np.random.seed(seed)

    @property
    def is_shin_env(self) -> bool:
        # Unwrap gym wrappers so a wrapped ShinEnv is still recognized.
        if isinstance(self.env, gym.Wrapper):
            return isinstance(self.env.unwrapped, ShinEnv)
        else:
            return isinstance(self.env, ShinEnv)

    def set_env(self, env: gym.Env, reset: bool = True) -> None:
        """Set the environment to self.env.
        Args:
            env (gym.Env): Environment to solve.
            reset (bool): Reset the env if True
        """
        if isinstance(env.action_space, gym.spaces.Box):
            is_high_normalized = (env.action_space.high == 1.0).all()
            is_low_normalized = (env.action_space.low == -1.0).all()
            assert_msg = """
            Algorithms in ShinRL assume that the env.actions_space is in range [-1, 1].
            Please wrap the env by shinrl.NormalizeActionWrapper.
            """
            assert is_high_normalized and is_low_normalized, assert_msg
        self.env = env
        # Check discount factor
        if self.is_shin_env:
            if self.config.discount != env.config.discount:
                self.logger.warning(
                    f"env.config.discount != solver.config.discount ({env.config.discount} != {self.config.discount}). \
                        This may cause an unexpected behavior."
                )
            self.dS, self.dA, self.horizon = env.dS, env.dA, env.config.horizon
        # Reset env if necessary
        if reset:
            if isinstance(self.env, gym.wrappers.Monitor):
                # With Monitor, reset() cannot be called unless the episode is over.
                if self.env.stats_recorder.steps is None:
                    self.env.obs = self.env.reset()
                else:
                    # Finish the episode first so Monitor accepts reset().
                    done = False
                    while not done:
                        _, _, done, _ = self.env.step(self.env.action_space.sample())
                    self.env.obs = self.env.reset()
            else:
                self.env.obs = self.env.reset()
        else:
            assert hasattr(
                env, "obs"
            ), 'env must have attribute "obs". Do env.obs = obs before calling "set_env".'
        self.env_id += 1
        self.logger = structlog.get_logger(solver_id=self.solver_id, env_id=self.env_id)
        if self.config.verbose:
            self.logger.info("set_env is called.")

    def run(self) -> None:
        """
        Run the solver with the step function.
        Call self.evaluate() every [eval_interval] steps.
        """
        assert self.is_initialized, '"self.initialize" is not called.'
        num_steps = self.config.steps_per_epoch
        for _ in tqdm(range(num_steps), desc=f"Epoch {self.n_epoch}"):
            # Do evaluation
            if self.n_step % self.config.eval_interval == 0:
                eval_res = self.evaluate()
                for key, val in eval_res.items():
                    self.add_scalar(key, val)
            # Do one-step update
            step_res = self.step()
            for key, val in step_res.items():
                self.add_scalar(key, val)
            self.n_step += 1
        self.n_epoch += 1
        if self.config.verbose:
            self.logger.info(
                f"Epoch {self.n_epoch} has ended.",
                epoch_summary=self.recent_summary(num_steps),
                data=list(self.data.keys()),
            )
| [
"structlog.get_logger",
"jax.random.PRNGKey",
"inspect.getmembers",
"random.seed",
"itertools.count",
"numpy.random.seed"
] | [((915, 923), 'itertools.count', 'count', (['(0)'], {}), '(0)\n', (920, 923), False, 'from itertools import count\n'), ((1931, 1985), 'inspect.getmembers', 'inspect.getmembers', (['solver'], {'predicate': 'inspect.ismethod'}), '(solver, predicate=inspect.ismethod)\n', (1949, 1985), False, 'import inspect\n'), ((2280, 2339), 'structlog.get_logger', 'structlog.get_logger', ([], {'solver_id': 'self.solver_id', 'env_id': 'None'}), '(solver_id=self.solver_id, env_id=None)\n', (2300, 2339), False, 'import structlog\n'), ((3223, 3247), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (3241, 3247), False, 'import jax\n'), ((3284, 3301), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3295, 3301), False, 'import random\n'), ((3310, 3330), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3324, 3330), True, 'import numpy as np\n'), ((5520, 5586), 'structlog.get_logger', 'structlog.get_logger', ([], {'solver_id': 'self.solver_id', 'env_id': 'self.env_id'}), '(solver_id=self.solver_id, env_id=self.env_id)\n', (5540, 5586), False, 'import structlog\n')] |
import torch
from collections import defaultdict, OrderedDict
import numba
import numpy as np
def _group_by(keys, values) -> dict:
"""Group values by keys.
:param keys: list of keys
:param values: list of values
A key value pair i is defined by (key_list[i], value_list[i]).
:return: OrderedDict where key value pairs have been grouped by key.
"""
result = defaultdict(list)
for key, value in zip(keys.tolist(), values.tolist()):
result[tuple(key)].append(value)
for key, value in result.items():
result[key] = torch.IntTensor(sorted(value))
return OrderedDict(result)
def index_KvsAll(dataset: "Dataset", split: str, key: str):
    """Return an index for the triples in split (''train'', ''valid'', ''test'')
    from the specified key (''sp'' or ''po'' or ''so'') to the indexes of the
    remaining constituent (''o'' or ''s'' or ''p'' , respectively.)

    The index maps from `tuple' to `torch.LongTensor`.

    The index is cached in the provided dataset under name `{split}_sp_to_o` or
    `{split}_po_to_s`, or `{split}_so_to_p`. If this index is already present, does not
    recompute it.
    """
    # key -> (columns forming the key, column holding the value, value name)
    layouts = {
        "sp": ([0, 1], 2, "o"),
        "po": ([1, 2], 0, "s"),
        "so": ([0, 2], 1, "p"),
    }
    if key not in layouts:
        raise ValueError()
    key_cols, value_column, value = layouts[key]
    name = split + "_" + key + "_to_" + value
    if not dataset._indexes.get(name):
        triples = dataset.split(split)
        dataset._indexes[name] = _group_by(
            triples[:, key_cols], triples[:, value_column]
        )
        dataset.config.log(
            "{} distinct {} pairs in {}".format(len(dataset._indexes[name]), key, split),
            prefix=" ",
        )
    return dataset._indexes.get(name)
def index_KvsAll_to_torch(index):
    """Convert `index_KvsAll` indexes to pytorch tensors.

    Returns an nx2 keys tensor (rows = keys), an offset vector
    (row = starting offset in values for corresponding key),
    a values vector (entries correspond to values of original
    index)

    Afterwards, it holds:
        index[keys[i]] = values[offsets[i]:offsets[i+1]]
    """
    key_list = list(index.keys())
    value_tensors = list(index.values())
    keys = torch.tensor(key_list, dtype=torch.int)
    values = torch.cat(value_tensors)
    # Prefix sums of the per-key value counts, starting at 0.
    lengths = [len(v) for v in value_tensors]
    offsets = torch.cumsum(torch.tensor([0] + lengths, dtype=torch.int), 0)
    return keys, values, offsets
def _get_relation_types(dataset,):
    """
    Classify relations into 1-N, M-1, 1-1, M-N

    <NAME>, et al.
    "Translating embeddings for modeling multi-relational data."
    Advances in neural information processing systems. 2013.

    :return: dictionary mapping from int -> {1-N, M-1, 1-1, M-N}
    """
    # Per-relation stats table; columns 0/1 are filled from the sp->o index,
    # columns 2/3 from the po->s index, columns 4/5 hold the derived
    # "many-valued" flags for each side.
    relation_stats = torch.zeros((dataset.num_relations(), 6))
    for index, p in [
        (dataset.index("train_sp_to_o"), 1),
        (dataset.index("train_po_to_s"), 0),
    ]:
        for prefix, labels in index.items():
            # NOTE(review): this *assigns* labels.float().sum() (sum of the
            # tail ids, not their count) rather than accumulating with +=,
            # so column 0/2 ends up holding only the last key's sum.
            # The Bordes-style heuristic below presumably intends
            # "average number of tails per key" — verify against upstream.
            relation_stats[prefix[p], 0 + p * 2] = labels.float().sum()
            # Count one (s,p) / (p,o) key pair for this relation.
            relation_stats[prefix[p], 1 + p * 2] = (
                relation_stats[prefix[p], 1 + p * 2] + 1.0
            )
    # A side is flagged "many" when the col0/col1 (resp. col2/col3) ratio
    # exceeds 1.5.
    relation_stats[:, 4] = (relation_stats[:, 0] / relation_stats[:, 1]) > 1.5
    relation_stats[:, 5] = (relation_stats[:, 2] / relation_stats[:, 3]) > 1.5
    result = dict()
    for i, relation in enumerate(dataset.relation_ids()):
        result[i] = "{}-{}".format(
            "1" if relation_stats[i, 4].item() == 0 else "M",
            "1" if relation_stats[i, 5].item() == 0 else "N",
        )
    return result
def index_relation_types(dataset):
    """
    create dictionary mapping from {1-N, M-1, 1-1, M-N} -> set of relations
    """
    missing = (
        "relation_types" not in dataset._indexes
        or "relations_per_type" not in dataset._indexes
    )
    if missing:
        per_relation = _get_relation_types(dataset)
        by_type = {}
        # Invert relation -> type into type -> set of relations.
        for relation, rel_type in per_relation.items():
            by_type.setdefault(rel_type, set()).add(relation)
        dataset._indexes["relation_types"] = per_relation
        dataset._indexes["relations_per_type"] = by_type
    for rel_type, members in dataset._indexes["relations_per_type"].items():
        dataset.config.log(
            "{} relations of type {}".format(len(members), rel_type), prefix=" "
        )
def index_frequency_percentiles(dataset, recompute=False):
    """
    :return: dictionary mapping from
    {
        'subject':
        {25%, 50%, 75%, top} -> set of entities
        'relations':
        {25%, 50%, 75%, top} -> set of relations
        'object':
        {25%, 50%, 75%, top} -> set of entities
    }
    """
    # Serve the cached index unless the caller explicitly asks to recompute.
    if "frequency_percentiles" in dataset._indexes and not recompute:
        return
    # Occurrence counts per id, one column each for subject/relation/object.
    subject_stats = torch.zeros((dataset.num_entities(), 1))
    relation_stats = torch.zeros((dataset.num_relations(), 1))
    object_stats = torch.zeros((dataset.num_entities(), 1))
    for (s, p, o) in dataset.split("train"):
        subject_stats[s] += 1
        relation_stats[p] += 1
        object_stats[o] += 1
    result = dict()
    # For each argument position build the list of ids sorted by ascending
    # frequency; `j` is the single-element [count] row from .tolist(), so
    # sorting by x[1] orders ids by their count.
    for arg, stats, num in [
        (
            "subject",
            [
                i
                for i, j in list(
                    sorted(enumerate(subject_stats.tolist()), key=lambda x: x[1])
                )
            ],
            dataset.num_entities(),
        ),
        (
            "relation",
            [
                i
                for i, j in list(
                    sorted(enumerate(relation_stats.tolist()), key=lambda x: x[1])
                )
            ],
            dataset.num_relations(),
        ),
        (
            "object",
            [
                i
                for i, j in list(
                    sorted(enumerate(object_stats.tolist()), key=lambda x: x[1])
                )
            ],
            dataset.num_entities(),
        ),
    ]:
        # Slice the frequency-sorted ids into quartile buckets; "top" is the
        # most frequent quarter.
        for percentile, (begin, end) in [
            ("25%", (0.0, 0.25)),
            ("50%", (0.25, 0.5)),
            ("75%", (0.5, 0.75)),
            ("top", (0.75, 1.0)),
        ]:
            if arg not in result:
                result[arg] = dict()
            result[arg][percentile] = set(stats[int(begin * num) : int(end * num)])
    dataset._indexes["frequency_percentiles"] = result
class IndexWrapper:
    """Wraps a call to an index function so that it can be pickled"""

    def __init__(self, fun, **kwargs):
        self.fun = fun
        self.kwargs = kwargs

    def __call__(self, dataset: "Dataset", **kwargs):
        # Keyword arguments supplied at call time are deliberately ignored;
        # the arguments bound at construction time are always used.
        bound = self.kwargs
        self.fun(dataset, **bound)
def _invert_ids(dataset, obj: str):
if not f"{obj}_id_to_index" in dataset._indexes:
ids = dataset.load_map(f"{obj}_ids")
inv = {v: k for k, v in enumerate(ids)}
dataset._indexes[f"{obj}_id_to_index"] = inv
else:
inv = dataset._indexes[f"{obj}_id_to_index"]
dataset.config.log(f"Indexed {len(inv)} {obj} ids", prefix=" ")
def create_default_index_functions(dataset: "Dataset"):
    """Register the standard lazy index builders on `dataset`."""
    for split in dataset.files_of_type("triples"):
        # One KvsAll index per (split, key) combination; the split/key pair
        # is bound into the wrapper so it survives the loop (and pickling).
        for key, value in [("sp", "o"), ("po", "s"), ("so", "p")]:
            name = f"{split}_{key}_to_{value}"
            dataset.index_functions[name] = IndexWrapper(
                index_KvsAll, split=split, key=key
            )
    dataset.index_functions["relation_types"] = index_relation_types
    dataset.index_functions["relations_per_type"] = index_relation_types
    dataset.index_functions["frequency_percentiles"] = index_frequency_percentiles
    for obj in ["entity", "relation"]:
        dataset.index_functions[f"{obj}_id_to_index"] = IndexWrapper(
            _invert_ids, obj=obj
        )
@numba.njit
def where_in(x, y, not_in=False):
    """Retrieve the indices of the elements in x which are also in y.

    x and y are assumed to be 1 dimensional arrays.

    :params: not_in: if True, returns the indices of the of the elements in x
    which are not in y.
    """
    # np.isin is not supported in numba, and "i in y" raises there, so
    # membership is tested against a set built from y.
    # setting njit(parallel=True) slows down the function
    members = set(y)
    mask = np.array([element in members for element in x])
    return np.where(mask != not_in)[0]
| [
"numpy.array",
"collections.OrderedDict",
"collections.defaultdict"
] | [((390, 407), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (401, 407), False, 'from collections import defaultdict, OrderedDict\n'), ((610, 629), 'collections.OrderedDict', 'OrderedDict', (['result'], {}), '(result)\n', (621, 629), False, 'from collections import defaultdict, OrderedDict\n'), ((8275, 8311), 'numpy.array', 'np.array', (['[(i in list_y) for i in x]'], {}), '([(i in list_y) for i in x])\n', (8283, 8311), True, 'import numpy as np\n')] |
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency, fisher_exact, f_oneway
from .simulations import classifier_posterior_probabilities
from .utils.crosstabs import (crosstab_bayes_factor,
crosstab_ztest,
top_bottom_crosstab)
from .utils.validate import boolean_array, check_consistent_length
def anova(labels, results, subset_labels=None):
    """
    Returns one-way ANOVA f-statistic and p-value from
    input vectors of categorical labels and numeric results

    Parameters
    ------------
    labels : array_like
        containing categorical values like ['M', 'F']
    results : array_like
        containing real numbers
    subset_labels : list of strings, optional
        if only specific labels should be included

    Returns
    ----------
    F_onewayResult : scipy.stats object (essentially a 2-tuple)
        contains one-way f-statistic and p-value, indicating whether
        scores have same sample mean
    """
    check_consistent_length(labels, results)
    df = pd.DataFrame(list(zip(labels, results)), columns=['label', 'result'])
    if subset_labels is not None:
        df = df[df['label'].isin(subset_labels)]
    # One result vector per distinct (non-null) label value.
    groups = df['label'].dropna().unique()
    samples = [df.loc[df['label'] == group, 'result'] for group in groups]
    return f_oneway(*samples)
def bias_test_check(labels, results, category=None, test_thresh=0.5, **kwargs):
    """
    Utility function for checking if statistical tests are passed
    at a reference threshold

    Parameters
    --------
    labels : array_like
        containing categorical values like ['M', 'F']
    results : array_like
        containing real numbers
    category : string, optional
        the name of the category labels are in, e.g. 'Gender'
    test_thresh : numeric
        threshold value to test
    **kwargs : optional additional arguments for compare_groups

    Returns
    --------
    print statement indicating whether specific statistical tests pass or fail
    """
    # Fix: the original signature read ``test_thresh=0.5 **kwargs`` (missing
    # comma), which evaluated ``0.5 ** kwargs`` and raised NameError at import.
    min_props, z_ps, fisher_ps, chi_ps, bfs = compare_groups(
        labels, results, low=test_thresh, num=1, **kwargs)
    # if no category is specified, concatenate strings
    if category is None:
        category = '_vs_'.join(set(labels))[:20]
    # test if passes at test_thresh
    passes_all = True
    if min_props[test_thresh] < .8:
        passes_all = False
        print("*%s fails 4/5 test at %.2f*" % (category, test_thresh))
        print(" - %s minimum proportion at %.2f: %.3f" %
              (category, test_thresh, min_props[test_thresh]))
    if fisher_ps[test_thresh] < .05:
        passes_all = False
        print("*%s fails Fisher exact test at %.2f*" % (category, test_thresh))
        print(" - %s p-value at %.2f: %.3f" %
              (category, test_thresh, fisher_ps[test_thresh]))
    if chi_ps[test_thresh] < .05:
        passes_all = False
        print("*%s fails Chi squared test at %.2f*" % (category, test_thresh))
        print(" - %s p-value at %.2f: %.3f" %
              (category, test_thresh, chi_ps[test_thresh]))
    if z_ps[test_thresh] < .05:
        passes_all = False
        print("*%s fails z test at %.2f*" % (category, test_thresh))
        print(" - %s Z-test p-value at %.2f: %.3f" %
              (category, test_thresh, z_ps[test_thresh]))
    if bfs[test_thresh] > 3.:
        passes_all = False
        print("*%s Bayes Factor test at %.2f*" % (category, test_thresh))
        print(" - %s Bayes Factor at %.2f: %.3f" %
              (category, test_thresh, bfs[test_thresh]))
    if passes_all:
        print("*%s passes 4/5 test, Fisher p-value, Chi-Squared p-value, "
              "z-test p-value and Bayes Factor at %.2f*\n"
              % (category, test_thresh))
def make_bias_report(clf, df, feature_names, categories,
                     low=None, high=None, num=100, ref_threshold=None):
    """
    Utility function for report dictionary from
    `classifier_posterior_probabilities`. Used for plotting
    bar plots in `bias_bar_plot`

    Parameters
    -----------
    clf : sklearn clf
        fitted clf with predict object
    df : pandas DataFrame
        reference dataframe containing labeled features to test for bias
    feature_names : list of strings
        names of features used in fitting clf
    categories : list of strings
        names of categories to test for bias, e.g. ['gender']
    low, high, num : float, float, int
        range of values for thresholds
    ref_threshold : float
        cutoff value at which to generate metrics

    Returns
    --------
    out_dict : dictionary
        contains category names, average probabilities and errors by category
        of form {'gender': {'categories':['F', 'M'],
                            'averages': [.5, .5],
                            'errors': [.1, .1]}
                }
    """
    threshes, probs = classifier_posterior_probabilities(
        df, clf, feature_names, categories, low, high, num)
    # Default reference threshold: the one at the 80th-percentile position
    # of the sorted thresholds.
    if ref_threshold is None:
        ref_threshold = sorted(threshes)[int(len(threshes) * .8)]
    ref_idx = list(threshes).index(ref_threshold)
    report = {}
    for category in categories:
        # Keys in `probs` look like "<category>__<value>".
        members = [key.split('__')[1]
                   for key in probs if key.split('__')[0] == category]
        report[category] = {
            'categories': members,
            'averages': [probs[m][ref_idx][0] for m in members],
            'errors': [probs[m][ref_idx][1:] for m in members],
        }
    return report
def get_group_proportions(labels, results, low=None, high=None, num=100):
    """
    Returns pass proportions for each group present in labels, according to
    their results

    Parameters
    ------------
    labels : array_like
        contains categorical labels
    results : array_like
        contains numeric or boolean values
    low : float
        if None, will default to min(results)
    high : float
        if None, will default to max(results)
    num : int, default 100
        number of thresholds to check

    Returns
    --------
    prop_dict: dictionary
        contains {group_name : [[thresholds, pass_proportions]]}
    """
    # Fix: compare against None explicitly (as the docstring promises) so an
    # explicit 0/0.0 bound is honored; the old truthiness check replaced it.
    if low is None:
        low = min(results)
    if high is None:
        high = max(results)
    thresholds = np.linspace(low, high, num).tolist()
    groups = set(labels)
    prop_dict = defaultdict(list)
    labels_series = pd.Series(labels)  # hoisted: invariant across thresholds
    for group in groups:
        pass_props = []
        for thresh in thresholds:
            # NOTE(review): "pass" here means result <= thresh, while
            # compare_groups uses >= — confirm which convention callers expect.
            decs = [i <= thresh for i in results]
            crosstab = pd.crosstab(labels_series, pd.Series(decs))
            row = crosstab.loc[group]
            # Fix: when no sample passes a threshold the True column is
            # missing; default its count to 0 instead of raising KeyError.
            pass_prop = row.get(True, 0) / float(row.sum())
            pass_props.append(pass_prop)
        prop_dict[group].append(thresholds)
        prop_dict[group].append(pass_props)
    return prop_dict
def compare_groups(labels, results,
                   low=None, high=None, num=100,
                   comp_groups=None, print_skips=False):
    """
    Function to plot proportion of largest and smallest bias groups and
    get relative z scores

    Parameters
    --------
    labels : array_like
        contains categorical values like ['M', 'F']
    results : array_like
        contains real numbers, e.g. threshold scores or floats in (0,1)
    low : float
        lower threshold value
    high : float
        upper threshold value
    num : int
        number of thresholds to check
    comp_groups : list of strings, optional
        subset of labels to compare, e.g. ['white', 'black']
    print_skips : bool
        whether to display thresholds skipped

    Returns
    ---------
    min_props : dict
        contains (key, value) of (threshold : max group/min group proportions)
    z_ps : dict
        contains (key, value) of (threshold : p-value of two tailed z test)
    fisher_ps : dict
        contains (key, value) of (threshold : p-value of fisher exact test)
    chi_ps : dict
        contains (key, value) of (threshold : p-value of chi squared test)
    bayes_facts : dict
        contains (key, value) of (threshold : bayes factor)
    """
    # cast labels and scores to pandas Series
    df = pd.DataFrame(list(zip(labels, results)), columns=['label', 'result'])
    min_props = {}
    fisher_ps = {}
    chi_ps = {}
    z_ps = {}
    bayes_facts = {}
    if comp_groups is not None:
        df = df[df['label'].isin(comp_groups)]
    # define range of values to test over if not inputted
    # (NOTE: the range is taken from the full `results`, not the filtered df)
    if low is None:
        low = min(results)
    if high is None:
        high = max(results)
    thresholds = np.linspace(low, high, num)
    skip_thresholds = []
    for thresh in thresholds:
        # Fix: derive decisions from the (possibly filtered) frame itself.
        # The original iterated the unfiltered `results`, which raised a
        # length-mismatch ValueError whenever comp_groups dropped rows.
        df['dec'] = [i >= thresh for i in df['result']]
        # compare rates of passing across groups
        ctabs = pd.crosstab(df['label'], df['dec'])
        # skip any thresholds for which the crosstabs are one-dimensional
        if 1 in ctabs.shape:
            skip_thresholds.append(thresh)
            continue
        # Pass rate of each group relative to the best-passing group.
        normed_ctabs = ctabs.div(ctabs.sum(axis=1), axis=0)
        true_val = max(set(df['dec']))
        max_group = normed_ctabs[true_val].max()
        normed_proportions = normed_ctabs[true_val] / max_group
        min_proportion = normed_proportions.min()
        # run statistical tests
        if ctabs.shape == (2, 2):
            test_results = test_multiple(df['label'].values, df['dec'].values)
            z_pval = test_results.get('z_score')[1]
            fisher_pval = test_results.get('fisher_p')[1]
            chi2_pval = test_results.get('chi2_p')[1]
            bayes_fact = test_results.get('BF')
        else:
            # More than two groups: z/fisher use the extreme two groups only.
            top_bottom_ctabs = top_bottom_crosstab(df['label'], df['dec'])
            z_pval = crosstab_ztest(top_bottom_ctabs)[1]
            fisher_pval = fisher_exact(top_bottom_ctabs)[1]
            chi2_pval = chi2_contingency(ctabs)[1]
            bayes_fact = crosstab_bayes_factor(ctabs)
        min_props[thresh] = min_proportion
        z_ps[thresh] = z_pval
        fisher_ps[thresh] = fisher_pval
        chi_ps[thresh] = chi2_pval
        bayes_facts[thresh] = bayes_fact
    if len(skip_thresholds) > 0 and print_skips:
        print('One-dimensional thresholds were skipped: %s' % skip_thresholds)
    return min_props, z_ps, fisher_ps, chi_ps, bayes_facts
def test_multiple(labels, decisions,
                  tests=('ztest', 'fisher', 'chi2', 'BF'), display=False):
    """
    Function that returns p_values for z-score, fisher exact, and chi2 test
    of 2x2 crosstab of passing rate by labels and decisions
    See docs for z_test_ctabs, fisher_exact, chi2_contingency and
    bf_ctabs for details of specific tests
    Parameters
    ----------
    labels : array_like
        categorical labels for each corresponding value of `decision` ie. M/F
    decisions : array_like
        binary decision values, ie. True/False or 0/1
    tests : list
        a list of strings specifying the tests to run, valid options
        are 'ztest', 'fisher', 'chi2' and 'bayes'. Defaults to all four.
        -ztest: p-value for two-sided z-score for proportions
        -fisher: p-value for Fisher's exact test for proportions
        -chi2: p-value for chi-squared test of independence for proportions
        -bayes: bayes factor for independence assuming uniform prior
    display : bool
        print the results of each test in addition to returning them
    Returns
    -------
    results : dict
        dictionary of values, one for each test.
        Valid keys are: 'z_score', 'fisher_p', 'chi2_p', and 'BF'
    Examples
    --------
    >>> # no real difference between groups
    >>> labels = ['group1']*100 + ['group2']*100 + ['group3']*100
    >>> decisions = [1,0,0]*100
    >>> all_test_ctabs(dependent_ctabs)
    (0.0, 1.0, 1.0, 0.26162148804907587)
    >>> # massively biased ratio of hits/misses by group
    >>> ind_ctabs = np.array([[75,50],[25,50]])
    >>> all_test_ctabs(ind_ctabs)
    (-3.651483716701106,
    0.0004203304586999487,
    0.0004558800052056139,
    202.95548692414306)
    >>> # correcting with a biased prior
    >>> biased_prior =  np.array([[5,10],[70,10]])
    >>> all_test_ctabs(ind_ctabs, biased_prior)
    (-3.651483716701106,
    0.0004203304586999487,
    0.0004558800052056139,
    0.00012159518854984268)
    """
    decisions = boolean_array(decisions)
    crosstab = pd.crosstab(pd.Series(labels), pd.Series(decisions))
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
    # to_numpy() is the supported replacement and returns the same ndarray.
    crosstab = crosstab.to_numpy()

    # can only perform 2-group z-tests & fisher tests
    # getting crosstabs for groups with highest and lowest pass rates
    # as any difference between groups is considered biased
    tb_crosstab = top_bottom_crosstab(labels, decisions)

    results = {}
    if 'ztest' in tests:
        results['z_score'] = crosstab_ztest(tb_crosstab)
    if 'fisher' in tests:
        # although fisher's exact can be generalized to multiple groups
        # scipy is limited to shape (2, 2)
        # TODO make generalized fisher's exact test
        # returns oddsratio and p-value
        results['fisher_p'] = fisher_exact(tb_crosstab)[:2]
    if 'chi2' in tests:
        # returns chi2 test statistic and p-value
        results['chi2_p'] = chi2_contingency(crosstab)[:2]
    if 'BF' in tests:
        results['BF'] = crosstab_bayes_factor(crosstab)
    if display:
        for key in results:
            # BUG FIX: several entries are tuples (e.g. fisher_p holds
            # (oddsratio, p-value)), so "%f" would raise TypeError; use %s.
            print("%s: %s" % (key, results[key]))
    return results
def quick_bias_check(clf, df,
                     feature_names, categories,
                     thresh_pct=80, pass_ratio=.8):
    """
    Useful for generating a bias_report more quickly than make_bias_report
    simply uses np.percentile for checks
    Parameters
    -----------
    clf : sklearn clf
        fitted clf with predict object
    df : pandas DataFrame
        reference dataframe containing labeled features to test for bias
    feature_names : list of strings
        names of features used in fitting clf
    categories : list of strings
        names of categories to test for bias, e.g. ['gender', 'ethnicity']
    thresh_pct : float, default 80
        percentile in [0, 100] at which to check for pass rates
    pass_ratio : float, default .8
        cutoff which specifies whether ratio of min/max pass rates is acceptable
    Returns
    --------
    passed: bool
        indicates whether all groups have min/max pass rates >= `pass_ratio`
    bias_report : dict
        of form {'gender': {'categories':['F', 'M'],
                            'averages': [.2, .22],
                            'errors': [[.2, .2], [.22, .22]]}
                }
    min_bias_ratio : float
        min of min_max_ratios across all categories
        if this value is less than `pass_ratio`, passed == False
    """
    bdf = df.copy()
    X = bdf.loc[:, feature_names].values
    decs = clf.decision_function(X)
    bdf['score'] = decs
    min_max_ratios = []
    bias_report = {}
    for category in categories:
        # .copy() so adding the 'pass' column does not warn about writing
        # to a slice of bdf
        cat_df = bdf[bdf[category].notnull()].copy()
        cat_df['pass'] = cat_df.score > np.percentile(cat_df.score, thresh_pct)
        # BUG FIX: original referenced an undefined name `gdf` here (NameError);
        # the grouped frame is `cat_df`. Selecting the 'pass' column before
        # .mean() also keeps this working when other category columns are
        # non-numeric.
        cat_group = cat_df.groupby(category)['pass'].mean()
        cat_dict = cat_group.to_dict()
        min_max_ratios.append(cat_group.min()/float(cat_group.max()))
        bias_report[category] = {
            # materialize the dict views as lists so the report is easily
            # serializable and indexable
            'averages': list(cat_dict.values()),
            'categories': list(cat_dict.keys()),
            'errors': [[i, i] for i in cat_dict.values()]
        }
    passed = all(np.array(min_max_ratios) >= pass_ratio)
    min_bias_ratio = min(min_max_ratios)
    return passed, bias_report, min_bias_ratio
| [
"pandas.Series",
"scipy.stats.chi2_contingency",
"scipy.stats.f_oneway",
"scipy.stats.fisher_exact",
"pandas.crosstab",
"numpy.array",
"numpy.linspace",
"collections.defaultdict",
"numpy.percentile"
] | [((1443, 1467), 'scipy.stats.f_oneway', 'f_oneway', (['*score_vectors'], {}), '(*score_vectors)\n', (1451, 1467), False, 'from scipy.stats import chi2_contingency, fisher_exact, f_oneway\n'), ((6640, 6657), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6651, 6657), False, 'from collections import defaultdict\n'), ((8844, 8871), 'numpy.linspace', 'np.linspace', (['low', 'high', 'num'], {}), '(low, high, num)\n', (8855, 8871), True, 'import numpy as np\n'), ((9046, 9081), 'pandas.crosstab', 'pd.crosstab', (["df['label']", "df['dec']"], {}), "(df['label'], df['dec'])\n", (9057, 9081), True, 'import pandas as pd\n'), ((12685, 12702), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (12694, 12702), True, 'import pandas as pd\n'), ((12704, 12724), 'pandas.Series', 'pd.Series', (['decisions'], {}), '(decisions)\n', (12713, 12724), True, 'import pandas as pd\n'), ((6562, 6589), 'numpy.linspace', 'np.linspace', (['low', 'high', 'num'], {}), '(low, high, num)\n', (6573, 6589), True, 'import numpy as np\n'), ((13367, 13392), 'scipy.stats.fisher_exact', 'fisher_exact', (['tb_crosstab'], {}), '(tb_crosstab)\n', (13379, 13392), False, 'from scipy.stats import chi2_contingency, fisher_exact, f_oneway\n'), ((13499, 13525), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['crosstab'], {}), '(crosstab)\n', (13515, 13525), False, 'from scipy.stats import chi2_contingency, fisher_exact, f_oneway\n'), ((15342, 15381), 'numpy.percentile', 'np.percentile', (['cat_df.score', 'thresh_pct'], {}), '(cat_df.score, thresh_pct)\n', (15355, 15381), True, 'import numpy as np\n'), ((15816, 15840), 'numpy.array', 'np.array', (['min_max_ratios'], {}), '(min_max_ratios)\n', (15824, 15840), True, 'import numpy as np\n'), ((6827, 6844), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (6836, 6844), True, 'import pandas as pd\n'), ((6846, 6861), 'pandas.Series', 'pd.Series', (['decs'], {}), '(decs)\n', (6855, 6861), True, 'import pandas as 
pd\n'), ((10044, 10074), 'scipy.stats.fisher_exact', 'fisher_exact', (['top_bottom_ctabs'], {}), '(top_bottom_ctabs)\n', (10056, 10074), False, 'from scipy.stats import chi2_contingency, fisher_exact, f_oneway\n'), ((10102, 10125), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['ctabs'], {}), '(ctabs)\n', (10118, 10125), False, 'from scipy.stats import chi2_contingency, fisher_exact, f_oneway\n')] |
# Importing Necessary projects
import cv2
import numpy as np
# Creating the video capture object (default webcam)
cap = cv2.VideoCapture(0)
# Lower and upper bounds for yellow colour.
# NOTE: these are HSV values (the frame is converted with COLOR_BGR2HSV
# below), not RGB. Adjust them if your marker is a different colour.
Lower = np.array([20, 100, 100])
Upper = np.array([30, 255, 255])
# Kernel for the morphological operators
kernel = np.ones((5, 5), np.uint8)
# Starting point of the stroke; (-1, -1) means "no previous point yet"
x0, y0 = -1, -1
# White canvas with the same frame size used to accumulate the drawing
temp = np.ones((480, 640, 3), dtype=np.uint8)
temp = temp*255
while True:
    ret, frame = cap.read()
    # Flipping for mirror image
    frame = cv2.flip(frame, 1)
    # HSV version of the frame for easy colour detection and masking
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, Lower, Upper)
    # Morphological clean-up: erode noise, open, then dilate the result
    mask = cv2.erode(mask, kernel, iterations=2)
    final = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # BUG FIX: dilate the opened image; the original dilated the raw mask,
    # silently discarding the MORPH_OPEN result computed above.
    final = cv2.dilate(final, kernel, iterations=1)
    # Finding contours in the mask
    contours, _ = cv2.findContours(
        final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Getting the largest contour, assuming it is the object of interest
    if contours:
        cnt = max(contours, key=cv2.contourArea)
        x, y, width, height = cv2.boundingRect(cnt)
        if x0 == -1:
            # First sighting of the marker: just remember its centre
            x0, y0 = x+width//2, y+height//2
        else:
            # Drawing on the temporary masked image
            temp = cv2.line(temp, (x0, y0), (x+width//2,
                                       y+height//2), (0, 0, 255), 5)
            # To track; can be removed if necessary
            frame = cv2.line(frame, (x0, y0), (x+width//2,
                                         y+height//2), (255, 255, 255), 5)
            x0, y0 = x+width//2, y+height//2
    else:
        # Marker lost: reset so the stroke restarts when it reappears
        x0, y0 = -1, -1
    # Compose the drawing over the live frame using bitwise operations
    # BLACK FOREGROUND AND WHITE BACKGROUND
    temp_gray = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
    # WHITE FOREGROUND AND BLACK BACKGROUND
    temp_gray_inv = cv2.bitwise_not(temp_gray)
    # Drawn strokes only (red strokes on black); everything else masked out
    fg = cv2.bitwise_or(temp, temp, mask=temp_gray_inv)
    Result = cv2.bitwise_or(frame, fg)
    cv2.imshow('Result', Result)
    # ESC ends the program
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows()
| [
"numpy.ones",
"cv2.flip",
"numpy.full",
"cv2.inRange",
"cv2.erode",
"cv2.line",
"cv2.imshow",
"numpy.array",
"cv2.morphologyEx",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.bitwise_or",
"cv2.findContours",
"cv2.bitwise_not",
"cv2.dilate",
"cv2.waitKey",
"cv2.b... | [((103, 122), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (119, 122), False, 'import cv2\n'), ((255, 279), 'numpy.array', 'np.array', (['[20, 100, 100]'], {}), '([20, 100, 100])\n', (263, 279), True, 'import numpy as np\n'), ((288, 312), 'numpy.array', 'np.array', (['[30, 255, 255]'], {}), '([30, 255, 255])\n', (296, 312), True, 'import numpy as np\n'), ((369, 394), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (376, 394), True, 'import numpy as np\n'), ((516, 554), 'numpy.ones', 'np.ones', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (523, 554), True, 'import numpy as np\n'), ((2692, 2715), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2713, 2715), False, 'import cv2\n'), ((676, 694), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (684, 694), False, 'import cv2\n'), ((794, 832), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (806, 832), False, 'import cv2\n'), ((844, 874), 'cv2.inRange', 'cv2.inRange', (['hsv', 'Lower', 'Upper'], {}), '(hsv, Lower, Upper)\n', (855, 874), False, 'import cv2\n'), ((927, 964), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(2)'}), '(mask, kernel, iterations=2)\n', (936, 964), False, 'import cv2\n'), ((977, 1023), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (993, 1023), False, 'import cv2\n'), ((1036, 1074), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (1046, 1074), False, 'import cv2\n'), ((1129, 1194), 'cv2.findContours', 'cv2.findContours', (['final', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (1145, 1194), False, 'import cv2\n'), ((2081, 2119), 'cv2.cvtColor', 'cv2.cvtColor', (['temp', 'cv2.COLOR_BGR2GRAY'], {}), '(temp, 
cv2.COLOR_BGR2GRAY)\n', (2093, 2119), False, 'import cv2\n'), ((2184, 2210), 'cv2.bitwise_not', 'cv2.bitwise_not', (['temp_gray'], {}), '(temp_gray)\n', (2199, 2210), False, 'import cv2\n'), ((2263, 2303), 'numpy.full', 'np.full', (['temp.shape', '(255)'], {'dtype': 'np.uint8'}), '(temp.shape, 255, dtype=np.uint8)\n', (2270, 2303), True, 'import numpy as np\n'), ((2313, 2383), 'cv2.bitwise_or', 'cv2.bitwise_or', (['white_background', 'white_background'], {'mask': 'temp_gray_inv'}), '(white_background, white_background, mask=temp_gray_inv)\n', (2327, 2383), False, 'import cv2\n'), ((2425, 2471), 'cv2.bitwise_or', 'cv2.bitwise_or', (['temp', 'temp'], {'mask': 'temp_gray_inv'}), '(temp, temp, mask=temp_gray_inv)\n', (2439, 2471), False, 'import cv2\n'), ((2527, 2552), 'cv2.bitwise_or', 'cv2.bitwise_or', (['frame', 'fg'], {}), '(frame, fg)\n', (2541, 2552), False, 'import cv2\n'), ((2558, 2586), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'Result'], {}), "('Result', Result)\n", (2568, 2586), False, 'import cv2\n'), ((1379, 1400), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (1395, 1400), False, 'import cv2\n'), ((2622, 2636), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2633, 2636), False, 'import cv2\n'), ((1553, 1628), 'cv2.line', 'cv2.line', (['temp', '(x0, y0)', '(x + width // 2, y + height // 2)', '(0, 0, 255)', '(5)'], {}), '(temp, (x0, y0), (x + width // 2, y + height // 2), (0, 0, 255), 5)\n', (1561, 1628), False, 'import cv2\n'), ((1737, 1822), 'cv2.line', 'cv2.line', (['frame', '(x0, y0)', '(x + width // 2, y + height // 2)', '(255, 255, 255)', '(5)'], {}), '(frame, (x0, y0), (x + width // 2, y + height // 2), (255, 255, 255), 5\n )\n', (1745, 1822), False, 'import cv2\n')] |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) <NAME>.
# Distributed under the terms of the Modified BSD License.
# Public API of this example module.
__all__ = ["example_function"]
import numpy as np
def example_function(ax, data, above_color="r", below_color="k", **kwargs):
    """
    An example function that makes a scatter plot with points colored differently
    depending on if they are above or below `y=0`.
    Parameters
    ----------
    ax : matplotlib axis
        The axis to plot on.
    data : (N, 2) array-like
        The data to make a plot from
    above_color : color-like, default: 'r'
        The color of points with `y>=0`
    below_color : color-like, default: 'k'
        The color of points with `y<0`
    kwargs :
        Passed through to `ax.scatter`
    Returns
    -------
    MarkerCollection
    """
    # BUG FIX: use dtype=object so color strings are not truncated.  A plain
    # np.array of e.g. 'r' strings has fixed-width dtype '<U1', and assigning
    # a longer below_color such as 'black' would be silently clipped to 'b'.
    colors = np.array([above_color] * data.shape[0], dtype=object)
    colors[data[:, 1] < 0] = below_color
    return ax.scatter(data[:, 0], data[:, 1], c=colors, **kwargs)
| [
"numpy.array"
] | [((833, 872), 'numpy.array', 'np.array', (['([above_color] * data.shape[0])'], {}), '([above_color] * data.shape[0])\n', (841, 872), True, 'import numpy as np\n')] |
import cv2
import math
import numpy as np
from utils.pPose_nms import pose_nms
def get_3rd_point(a, b):
    """Return the point completing a right angle at `b` for segment (a, b)."""
    diff = a - b
    perpendicular = np.array([-diff[1], diff[0]], dtype=np.float32)
    return b + perpendicular
def get_dir(src_point, rot_rad):
    """Rotate `src_point` by `rot_rad` radians (counter-clockwise)."""
    sin_r, cos_r = np.sin(rot_rad), np.cos(rot_rad)
    px, py = src_point[0], src_point[1]
    return [px * cos_r - py * sin_r, px * sin_r + py * cos_r]
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=None,
                         inv=0):
    """Build the 2x3 affine matrix mapping a (rotated) source box to output_size.

    Parameters
    ----------
    center : array-like of 2 floats
        Center of the source box.
    scale : scalar or array-like of 2 floats
        Size of the source box (a scalar is broadcast to both axes).
    rot : float
        Rotation in degrees.
    output_size : (w, h)
        Target width and height.
    shift : array-like of 2 floats, optional
        Fractional shift of the source box; defaults to no shift.
        BUG FIX: the original used a module-level np.array as the default
        argument (a shared mutable default); a None sentinel avoids that.
    inv : int
        If non-zero, return the inverse transform (dst -> src).

    Returns
    -------
    2x3 affine matrix suitable for cv2.warpAffine.
    """
    if shift is None:
        shift = np.array([0, 0], dtype=np.float32)
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale])

    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    # Three point correspondences fully determine the affine transform:
    # box center, a point above the center, and a perpendicular third point.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return trans
def _center_scale_to_box(center, scale):
pixel_std = 1.0
w = scale[0] * pixel_std
h = scale[1] * pixel_std
xmin = center[0] - w * 0.5
ymin = center[1] - h * 0.5
xmax = xmin + w
ymax = ymin + h
bbox = [xmin, ymin, xmax, ymax]
return bbox
def _box_to_center_scale(x, y, w, h, aspect_ratio=1.0, scale_mult=1.25):
"""Convert box coordinates to center and scale.
adapted from https://github.com/Microsoft/human-pose-estimation.pytorch
"""
pixel_std = 1
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32)
if center[0] != -1:
scale = scale * scale_mult
return center, scale
def preprocess(bgr_img, bbox, pose_h=256, pose_w=192):
    """Crop/align one detection box and convert it to a normalized CHW array.

    Returns the normalized image (C, H, W) and the aligned box in the
    original image, as [xmin, ymin, xmax, ymax].
    """
    box_x, box_y, box_w, box_h = bbox[0], bbox[1], bbox[2], bbox[3]
    aspect = float(pose_w) / pose_h  # width / height of the network input
    # TODO - test without roi align, crop directly
    center, scale = _box_to_center_scale(box_x, box_y, box_w, box_h, aspect)
    scale = scale * 1.0
    trans = get_affine_transform(center, scale, 0, [pose_w, pose_h])
    align_img = cv2.warpAffine(bgr_img, trans, (int(pose_w), int(pose_h)),
                               flags=cv2.INTER_LINEAR)
    align_bbox = _center_scale_to_box(center, scale)
    # TODO - get data from yolo preprocess
    # BGR -> RGB, HWC -> CHW, scale to [0, 1], subtract per-channel means.
    rgb = cv2.cvtColor(align_img, cv2.COLOR_BGR2RGB)
    chw = np.transpose(rgb, (2, 0, 1)) / 255.0
    chw[0, :, :] -= 0.406
    chw[1, :, :] -= 0.457
    chw[2, :, :] -= 0.48
    return chw, align_bbox
# postprocess function
def get_max_pred(heatmaps):
    """Pick the argmax location of every joint heatmap.

    `heatmaps` is indexed as (joint, row, col).  Returns float32 (x, y)
    coordinates of shape (J, 2) and the peak values of shape (J, 1); joints
    whose peak is not positive are zeroed out.
    """
    num_joints = heatmaps.shape[0]
    width = heatmaps.shape[2]
    flat = heatmaps.reshape((num_joints, -1))
    idx = np.argmax(flat, 1).reshape((num_joints, 1))
    maxvals = np.max(flat, 1).reshape((num_joints, 1))
    preds = np.tile(idx, (1, 2)).astype(np.float32)
    preds[:, 0] = preds[:, 0] % width           # flat index -> column (x)
    preds[:, 1] = np.floor(preds[:, 1] / width)  # flat index -> row (y)
    keep = np.tile(np.greater(maxvals, 0.0), (1, 2)).astype(np.float32)
    return preds * keep, maxvals
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix `t` to the 2D point `pt`."""
    homogeneous = np.array([pt[0], pt[1], 1.0])
    return np.dot(t, homogeneous)[:2]
def transform_preds(coords, center, scale, output_size):
    """Map heatmap-space coordinates back to the original image space."""
    mapped = np.zeros(coords.shape)
    inverse = get_affine_transform(center, scale, 0, output_size, inv=1)
    mapped[0:2] = affine_transform(coords[0:2], inverse)
    return mapped
def heatmap_to_coord_simple(hms, bbox):
    """Decode joint heatmaps into image-space keypoints for the given box."""
    coords, maxvals = get_max_pred(hms)
    heat_h = hms.shape[1]
    heat_w = hms.shape[2]
    # Quarter-pixel refinement toward the higher neighbouring activation.
    for j in range(coords.shape[0]):
        heat = hms[j]
        col = int(round(float(coords[j][0])))
        row = int(round(float(coords[j][1])))
        if 1 < col < heat_w - 1 and 1 < row < heat_h - 1:
            grad = np.array((heat[row][col + 1] - heat[row][col - 1],
                             heat[row + 1][col] - heat[row - 1][col]))
            coords[j] += np.sign(grad) * .25
    # Map from heatmap space back into the original image via the box.
    xmin, ymin, xmax, ymax = bbox
    box_w = xmax - xmin
    box_h = ymax - ymin
    center = np.array([xmin + box_w * 0.5, ymin + box_h * 0.5])
    scale = np.array([box_w, box_h])
    preds = np.zeros_like(coords)
    for j in range(coords.shape[0]):
        preds[j] = transform_preds(coords[j], center, scale,
                                   [heat_w, heat_h])
    return preds, maxvals
def postprocess(pose_preds, align_bbox_list, yolo_preds):
    """Decode raw pose heatmaps, then run pose NMS against the detections.

    align_bbox_list: [[x1 y1 x2 y2], [x1 y1 x2 y2], ...]
    yolo_preds: [[[x y w h], score, cls], ...]
    """
    eval_joints = list(range(17))
    coords_batch = []
    scores_batch = []
    for heatmap, box in zip(pose_preds, align_bbox_list):
        heatmap = np.squeeze(heatmap, axis=0)
        coord, score = heatmap_to_coord_simple(heatmap[eval_joints], box)
        coords_batch.append(np.expand_dims(coord, axis=0))
        scores_batch.append(np.expand_dims(score, axis=0))
    if not scores_batch:
        return []
    preds_img = np.squeeze(np.asarray(coords_batch), axis=1)
    preds_scores = np.squeeze(np.asarray(scores_batch), axis=1)
    return pose_nms(yolo_preds, preds_img, preds_scores)
def draw(bgr_img, pred):
    """Render predicted keypoints and limbs onto a copy of `bgr_img`.

    Each entry of `pred` is a dict with 'keypoints' and 'kp_score' arrays.
    Drawing happens on a half-resolution copy of the frame, which is scaled
    back up to the original size before returning.
    """
    # Skeleton: pairs of joint indices to connect (index 17 is a synthetic
    # neck joint, built below as the shoulder midpoint).
    l_pair = [
        (0, 1), (0, 2), (1, 3), (2, 4),  # Head
        (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
        (17, 11), (17, 12),  # Body
        (11, 13), (12, 14), (13, 15), (14, 16)
    ]
    # Per-joint BGR colors.
    p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
               (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
               (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
    # Per-limb BGR colors (parallel to l_pair).
    line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                  (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                  (77, 222, 255), (255, 156, 127),
                  (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    img = bgr_img.copy()
    height, width = img.shape[:2]
    # Work at half resolution; keypoint coordinates are halved accordingly.
    img = cv2.resize(img, (int(width / 2), int(height / 2)))
    for human in pred:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Append the synthetic neck joint (index 17) at the midpoint of the
        # two shoulder joints (indices 5 and 6); score is averaged likewise.
        kp_preds = np.concatenate((kp_preds, np.expand_dims((kp_preds[5, :] + kp_preds[6, :]) / 2, axis=0)), axis=0)
        kp_scores = np.concatenate((kp_scores, np.expand_dims((kp_scores[5, :] + kp_scores[6, :]) / 2, axis=0)), axis=0)
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            # Skip low-confidence joints.
            if kp_scores[n] <= 0.35:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (int(cor_x / 2), int(cor_y / 2))
            bg = img.copy()
            cv2.circle(bg, (int(cor_x / 2), int(cor_y / 2)), 2, p_color[n], -1)
            # Blend the dot into the frame with the joint score as opacity.
            transparency = max(0, min(1, kp_scores[n]))
            img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                bg = img.copy()
                X = (start_xy[0], end_xy[0])
                Y = (start_xy[1], end_xy[1])
                mX = np.mean(X)
                mY = np.mean(Y)
                # Limb drawn as a filled ellipse centred between the joints.
                length = ((Y[0] - Y[1]) ** 2 + (X[0] - X[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))
                # NOTE(review): stickwidth is built from array-valued scores;
                # presumably cv2 accepts it here — confirm dtype/shape.
                stickwidth = (kp_scores[start_p] + kp_scores[end_p]) + 1
                polygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(bg, polygon, line_color[i])
                # cv2.line(bg, start_xy, end_xy, line_color[i], (2 * (kp_scores[start_p] + kp_scores[end_p])) + 1)
                # Limb opacity is the mean of the two joint scores.
                transparency = max(0, min(1, 0.5 * (kp_scores[start_p] + kp_scores[end_p])))
                img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
    # Scale the annotated frame back up to the original resolution.
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
return img | [
"numpy.array",
"numpy.sin",
"numpy.mean",
"numpy.greater",
"numpy.asarray",
"numpy.max",
"cv2.addWeighted",
"numpy.dot",
"numpy.tile",
"numpy.floor",
"numpy.argmax",
"numpy.squeeze",
"numpy.cos",
"cv2.cvtColor",
"numpy.sign",
"utils.pPose_nms.pose_nms",
"math.atan2",
"cv2.resize",
... | [((706, 740), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float32'}), '([0, 0], dtype=np.float32)\n', (714, 740), True, 'import numpy as np\n'), ((1089, 1128), 'numpy.array', 'np.array', (['[0, dst_w * -0.5]', 'np.float32'], {}), '([0, dst_w * -0.5], np.float32)\n', (1097, 1128), True, 'import numpy as np\n'), ((1140, 1174), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (1148, 1174), True, 'import numpy as np\n'), ((1185, 1219), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (1193, 1219), True, 'import numpy as np\n'), ((2231, 2260), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (2239, 2260), True, 'import numpy as np\n'), ((2450, 2520), 'numpy.array', 'np.array', (['[w * 1.0 / pixel_std, h * 1.0 / pixel_std]'], {'dtype': 'np.float32'}), '([w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32)\n', (2458, 2520), True, 'import numpy as np\n'), ((3253, 3295), 'cv2.cvtColor', 'cv2.cvtColor', (['align_img', 'cv2.COLOR_BGR2RGB'], {}), '(align_img, cv2.COLOR_BGR2RGB)\n', (3265, 3295), False, 'import cv2\n'), ((3312, 3350), 'numpy.transpose', 'np.transpose', (['rgb_align_img', '(2, 0, 1)'], {}), '(rgb_align_img, (2, 0, 1))\n', (3324, 3350), True, 'import numpy as np\n'), ((3711, 3742), 'numpy.argmax', 'np.argmax', (['heatmaps_reshaped', '(1)'], {}), '(heatmaps_reshaped, 1)\n', (3720, 3742), True, 'import numpy as np\n'), ((3757, 3785), 'numpy.max', 'np.max', (['heatmaps_reshaped', '(1)'], {}), '(heatmaps_reshaped, 1)\n', (3763, 3785), True, 'import numpy as np\n'), ((3985, 4014), 'numpy.floor', 'np.floor', (['(preds[:, 1] / width)'], {}), '(preds[:, 1] / width)\n', (3993, 4014), True, 'import numpy as np\n'), ((4258, 4275), 'numpy.dot', 'np.dot', (['t', 'new_pt'], {}), '(t, new_pt)\n', (4264, 4275), True, 'import numpy as np\n'), ((4376, 4398), 'numpy.zeros', 'np.zeros', (['coords.shape'], {}), 
'(coords.shape)\n', (4384, 4398), True, 'import numpy as np\n'), ((5092, 5113), 'numpy.zeros_like', 'np.zeros_like', (['coords'], {}), '(coords)\n', (5105, 5113), True, 'import numpy as np\n'), ((5232, 5274), 'numpy.array', 'np.array', (['[xmin + w * 0.5, ymin + h * 0.5]'], {}), '([xmin + w * 0.5, ymin + h * 0.5])\n', (5240, 5274), True, 'import numpy as np\n'), ((5287, 5303), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (5295, 5303), True, 'import numpy as np\n'), ((6200, 6223), 'numpy.asarray', 'np.asarray', (['pose_coords'], {}), '(pose_coords)\n', (6210, 6223), True, 'import numpy as np\n'), ((6256, 6285), 'numpy.squeeze', 'np.squeeze', (['preds_img'], {'axis': '(1)'}), '(preds_img, axis=1)\n', (6266, 6285), True, 'import numpy as np\n'), ((6305, 6328), 'numpy.asarray', 'np.asarray', (['pose_scores'], {}), '(pose_scores)\n', (6315, 6328), True, 'import numpy as np\n'), ((6364, 6396), 'numpy.squeeze', 'np.squeeze', (['preds_scores'], {'axis': '(1)'}), '(preds_scores, axis=1)\n', (6374, 6396), True, 'import numpy as np\n'), ((6408, 6453), 'utils.pPose_nms.pose_nms', 'pose_nms', (['yolo_preds', 'preds_img', 'preds_scores'], {}), '(yolo_preds, preds_img, preds_scores)\n', (6416, 6453), False, 'from utils.pPose_nms import pose_nms\n'), ((9612, 9675), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (width, height), interpolation=cv2.INTER_CUBIC)\n', (9622, 9675), False, 'import cv2\n'), ((197, 248), 'numpy.array', 'np.array', (['[-direct[1], direct[0]]'], {'dtype': 'np.float32'}), '([-direct[1], direct[0]], dtype=np.float32)\n', (205, 248), True, 'import numpy as np\n'), ((344, 359), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (350, 359), True, 'import numpy as np\n'), ((361, 376), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (367, 376), True, 'import numpy as np\n'), ((865, 889), 'numpy.array', 'np.array', (['[scale, scale]'], {}), '([scale, scale])\n', (873, 889), True, 
'import numpy as np\n'), ((1375, 1411), 'numpy.array', 'np.array', (['[dst_w * 0.5, dst_h * 0.5]'], {}), '([dst_w * 0.5, dst_h * 0.5])\n', (1383, 1411), True, 'import numpy as np\n'), ((4042, 4066), 'numpy.greater', 'np.greater', (['maxvals', '(0.0)'], {}), '(maxvals, 0.0)\n', (4052, 4066), True, 'import numpy as np\n'), ((4214, 4243), 'numpy.array', 'np.array', (['[pt[0], pt[1], 1.0]'], {}), '([pt[0], pt[1], 1.0])\n', (4222, 4243), True, 'import numpy as np\n'), ((5901, 5925), 'numpy.squeeze', 'np.squeeze', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (5911, 5925), True, 'import numpy as np\n'), ((1581, 1596), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (1591, 1596), True, 'import numpy as np\n'), ((1598, 1613), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (1608, 1613), True, 'import numpy as np\n'), ((1664, 1679), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (1674, 1679), True, 'import numpy as np\n'), ((1681, 1696), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (1691, 1696), True, 'import numpy as np\n'), ((3886, 3906), 'numpy.tile', 'np.tile', (['idx', '(1, 2)'], {}), '(idx, (1, 2))\n', (3893, 3906), True, 'import numpy as np\n'), ((4928, 5004), 'numpy.array', 'np.array', (['(hm[py][px + 1] - hm[py][px - 1], hm[py + 1][px] - hm[py - 1][px])'], {}), '((hm[py][px + 1] - hm[py][px - 1], hm[py + 1][px] - hm[py - 1][px]))\n', (4936, 5004), True, 'import numpy as np\n'), ((6035, 6069), 'numpy.expand_dims', 'np.expand_dims', (['pose_coord'], {'axis': '(0)'}), '(pose_coord, axis=0)\n', (6049, 6069), True, 'import numpy as np\n'), ((6098, 6132), 'numpy.expand_dims', 'np.expand_dims', (['pose_score'], {'axis': '(0)'}), '(pose_score, axis=0)\n', (6112, 6132), True, 'import numpy as np\n'), ((8438, 8497), 'cv2.addWeighted', 'cv2.addWeighted', (['bg', 'transparency', 'img', '(1 - transparency)', '(0)'], {}), '(bg, transparency, img, 1 - transparency, 0)\n', (8453, 8497), False, 'import cv2\n'), ((5059, 5072), 'numpy.sign', 
'np.sign', (['diff'], {}), '(diff)\n', (5066, 5072), True, 'import numpy as np\n'), ((7730, 7791), 'numpy.expand_dims', 'np.expand_dims', (['((kp_preds[5, :] + kp_preds[6, :]) / 2)'], {'axis': '(0)'}), '((kp_preds[5, :] + kp_preds[6, :]) / 2, axis=0)\n', (7744, 7791), True, 'import numpy as np\n'), ((7849, 7912), 'numpy.expand_dims', 'np.expand_dims', (['((kp_scores[5, :] + kp_scores[6, :]) / 2)'], {'axis': '(0)'}), '((kp_scores[5, :] + kp_scores[6, :]) / 2, axis=0)\n', (7863, 7912), True, 'import numpy as np\n'), ((8866, 8876), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (8873, 8876), True, 'import numpy as np\n'), ((8898, 8908), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (8905, 8908), True, 'import numpy as np\n'), ((9264, 9310), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['bg', 'polygon', 'line_color[i]'], {}), '(bg, polygon, line_color[i])\n', (9282, 9310), False, 'import cv2\n'), ((9541, 9600), 'cv2.addWeighted', 'cv2.addWeighted', (['bg', 'transparency', 'img', '(1 - transparency)', '(0)'], {}), '(bg, transparency, img, 1 - transparency, 0)\n', (9556, 9600), False, 'import cv2\n'), ((9020, 9056), 'math.atan2', 'math.atan2', (['(Y[0] - Y[1])', '(X[0] - X[1])'], {}), '(Y[0] - Y[1], X[0] - X[1])\n', (9030, 9056), False, 'import math\n')] |
import numpy as np
from collections import OrderedDict
from alfred.utils.misc import keep_two_signif_digits, check_params_defined_twice
from alfred.utils.directory_tree import DirectoryTree
from pathlib import Path
import packageName
# (1) Algorithms to be run for each experiment
ALG_NAMES = ['simpleMLP']

# (2) Task (dataset or rl-environment) to be used for each experiment
TASK_NAMES = ['MNIST']

# (3) Seeds to be run for each experiment
N_SEEDS = 3
SEEDS = list(range(1, N_SEEDS + 1))

# (4) Number of experiments to sample
N_EXPERIMENTS = 50

# (5) Hyper-parameters. For each hyperparam, provide the function the
# random-search should sample from; one set of hyperparameters is sampled
# per experiment using these functions.
# Examples:
#   int:         np.random.randint(low=64, high=512)
#   float:       np.random.uniform(low=-3., high=1.)
#   bool:        bool(np.random.binomial(n=1, p=0.5))
#   exp_float:   10.**np.random.uniform(low=-3., high=1.)
#   fixed_value: fixed_value
def sample_experiment():
    """Sample one hyper-parameter configuration for the random search.

    Returns
    -------
    OrderedDict
        The sampled configuration: learning_rate drawn log-uniformly in
        [1e-8, 1e-3], optimizer fixed to "sgd".
    """
    sampled_config = OrderedDict({
        'learning_rate': 10. ** np.random.uniform(low=-8., high=-3.),
        'optimizer': "sgd",
    })

    # Security check to make sure seed, alg_name and task_name are not
    # defined as hyperparams
    assert "seed" not in sampled_config.keys()
    assert "alg_name" not in sampled_config.keys()
    assert "task_name" not in sampled_config.keys()

    # Simple security check to make sure every specified parameter is defined only once
    check_params_defined_twice(keys=list(sampled_config.keys()))

    # BUG FIX: the sampled config was built but never returned, so every
    # caller received None.
    return sampled_config
# (6) Function that returns the hyperparameters for the current search
def get_run_args(overwritten_cmd_line):
    """Return the hyperparameters for the current search (not implemented)."""
    raise NotImplementedError
# Setting up alfred's DirectoryTree
DirectoryTree.default_root = "./storage"
DirectoryTree.git_repos_to_track['mlProject'] = str(Path(__file__).parents[2])
DirectoryTree.git_repos_to_track['someDependency'] = str(Path(packageName.__file__).parents[1])
| [
"numpy.random.uniform",
"pathlib.Path"
] | [((1889, 1903), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1893, 1903), False, 'from pathlib import Path\n'), ((1973, 1999), 'pathlib.Path', 'Path', (['packageName.__file__'], {}), '(packageName.__file__)\n', (1977, 1999), False, 'from pathlib import Path\n'), ((1148, 1186), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-8.0)', 'high': '(-3.0)'}), '(low=-8.0, high=-3.0)\n', (1165, 1186), True, 'import numpy as np\n')] |
import numpy
import numpy.fft
import pytest
import numpy.testing
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import librosa
import librosa.display
import pandas
import emlearn
import eml_audio
FFT_SIZES = [
64,
128,
256,
512,
1024,
]
@pytest.mark.parametrize('n_fft', FFT_SIZES)
def test_rfft_simple(n_fft):
signal = numpy.arange(0, n_fft)
ref = numpy.fft.fft(signal, n=n_fft).real
out = eml_audio.rfft(signal)
diff = (out - ref)
numpy.testing.assert_allclose(out, ref, rtol=1e-4)
def test_rfft_not_power2_length():
with pytest.raises(Exception) as e:
eml_audio.rfft(numpy.array([0,1,3,4,5]))
def fft_freqs(sr, n_fft):
return numpy.linspace(0, float(sr)/2, int(1 + n_fft//2), endpoint=True)
def fft_freq(sr, n_fft, n):
end = float(sr)/2
steps = int(1 + n_fft//2) - 1
return n*end/steps
def fft_freqs2(sr, n_fft):
steps = int(1 + n_fft//2)
return numpy.array([ fft_freq(sr, n_fft, n) for n in range(steps) ])
# Based on librosa
def melfilter(frames, sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=True, norm=None):
np = numpy
if fmax is None:
fmax = float(sr) / 2
if norm is not None and norm != 1 and norm != np.inf:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)))
# Center freqs of each FFT bin
fftfreqs = fft_freqs(sr=sr, n_fft=n_fft)
fftfreqs2 = fft_freqs2(sr=sr, n_fft=n_fft)
assert fftfreqs.shape == fftfreqs2.shape
numpy.testing.assert_almost_equal(fftfreqs, fftfreqs2)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = librosa.core.time_frequency.mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
#ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
rlow = mel_f[i] - fftfreqs
rupper = mel_f[i+2] - fftfreqs
lower = -rlow / fdiff[i]
upper = rupper / fdiff[i+1]
# .. then intersect them with each other and zero
w = np.maximum(0, np.minimum(lower, upper))
if i == 4:
print('wei', i, w[10:40])
weights[i] = w
refweighs = librosa.filters.mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
numpy.testing.assert_allclose(weights, refweighs)
return numpy.dot(frames, weights.T)
#return numpy.dot(weights, frames)
# Basis for our C implementation,
# a mix of https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
# and librosa
def melfilter_ref(pow_frames, sr, n_mels, n_fft):
NFFT=n_fft
sample_rate=sr
nfilt=n_mels
low_freq_mel = 0
high_freq_mel = (2595 * numpy.log10(1 + (sample_rate / 2) / 700)) # Convert Hz to Mel
mel_points = numpy.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale
hz_points = (700 * (10 ** (mel_points / 2595) - 1)) # Convert Mel to Hz
bin = numpy.floor((NFFT + 1) * hz_points / sample_rate)
fftfreqs = fft_freqs2(sr=sr, n_fft=n_fft)
fbank = numpy.zeros((nfilt, int(numpy.floor(NFFT / 2 + 1))))
for m in range(1, nfilt + 1):
f_m_minus = int(bin[m - 1]) # left
f_m = int(bin[m]) # center
f_m_plus = int(bin[m + 1]) # right
i = m-1
fdifflow = hz_points[m] - hz_points[m - 1]
fdiffupper = hz_points[m + 1] - hz_points[m]
# TODO: fix/check divergence with librosa, who seems to compute
# the peak filter value twice and select the lowest
# sometimes the one below can give over 1.0 results at the center,
# hence the clamp to 1.0, which does not seem right
for k in range(f_m_minus, f_m+1):
ramp = hz_points[i] - fftfreqs[k]
w = -ramp / fdifflow
w = max(min(w, 1), 0)
fbank[i, k] = w
for k in range(f_m, f_m_plus):
ramp = hz_points[i+2] - fftfreqs[k+1]
w = ramp / fdiffupper
w = max(min(w, 1), 0)
fbank[i, k+1] = w
if i == 4:
print('f', i, fbank[i][10:40])
refweighs = librosa.filters.mel(sr, n_fft, n_mels, fmin=0, fmax=22050//2, htk=True, norm=None)
#numpy.testing.assert_allclose(fbank, refweighs)
filtered = numpy.dot(pow_frames, fbank.T)
return filtered
def test_melfilter_basic():
n_mels = 16
n_fft = 512
length = 1 + n_fft//2
sr = 22050
fmin = 0
fmax = sr//2
#input = numpy.ones(shape=length)
input = numpy.random.rand(length)
out = eml_audio.melfilter(input, sr, n_fft, n_mels, fmin, fmax)
ref = librosa.feature.melspectrogram(S=input, htk=True, norm=None, sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
ref2 = melfilter(input, sr, n_mels=n_mels, n_fft=n_fft)
numpy.testing.assert_allclose(ref2, ref, rtol=1e-5)
ref3 = melfilter_ref(input, sr, n_mels, n_fft)
numpy.testing.assert_allclose(ref3, ref, rtol=1e-3)
diff = out - ref
fig, (ref_ax, out_ax, diff_ax) = plt.subplots(3)
pandas.Series(out).plot(ax=out_ax)
pandas.Series(ref).plot(ax=ref_ax)
pandas.Series(diff).plot(ax=diff_ax)
fig.savefig('melfilter.basic.png')
assert ref.shape == out.shape
numpy.testing.assert_allclose(out, ref, rtol=1e-3)
def test_melfilter_librosa():
filename = librosa.util.example_audio_file()
y, sr = librosa.load(filename, offset=1.0, duration=0.3)
n_fft = 1024
hop_length = 256
fmin = 500
fmax = 5000
n_mels = 16
spec = numpy.abs(librosa.core.stft(y, n_fft=n_fft, hop_length=hop_length))**2
spec1 = spec[:,0]
ref = librosa.feature.melspectrogram(S=spec1, sr=sr, norm=None, htk=True, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
out = eml_audio.melfilter(spec1, sr, n_fft, n_mels, fmin, fmax)
fig, (ref_ax, out_ax) = plt.subplots(2)
def specshow(d, ax):
s = librosa.amplitude_to_db(d, ref=numpy.max)
librosa.display.specshow(s, ax=ax, x_axis='time')
specshow(ref.reshape(-1, 1), ax=ref_ax)
specshow(out.reshape(-1, 1), ax=out_ax)
fig.savefig('melfilter.librosa.png')
assert ref.shape == out.shape
numpy.testing.assert_allclose(ref, out, rtol=0.01)
@pytest.mark.skip('broken')
def test_melspectrogram():
filename = librosa.util.example_audio_file()
y, sr = librosa.load(filename, offset=1.0, duration=0.3)
n_mels = 64
n_fft = 1024
fmin = 500
fmax = 5000
hop_size = n_fft
# Only do one frame
y = y[0:n_fft]
ref = librosa.feature.melspectrogram(y, sr, norm=None, htk=True,
fmin=fmin, fmax=fmax, n_fft=n_fft, n_mels=n_mels, hop_length=hop_size)
out = eml_audio.melspectrogram(y, sr, n_fft, n_mels, fmin, fmax)
ref = ref[:,0:1]
out = out.reshape(-1,1)
#out = melspec(y, sr, n_fft, n_mels, fmin, fmax, hop_length=hop_size)[:,:10]
print('r', ref.shape)
assert out.shape == ref.shape
fig, (ref_ax, out_ax) = plt.subplots(2)
def specshow(d, ax):
s = librosa.amplitude_to_db(d, ref=numpy.max)
librosa.display.specshow(s, ax=ax, x_axis='time')
#librosa.display.specshow(s, ax=ax, x_axis='time', y_axis='mel', fmin=fmin, fmax=fmax)
specshow(ref, ax=ref_ax)
specshow(out, ax=out_ax)
fig.savefig('melspec.png')
print('mean', numpy.mean(ref), numpy.mean(out))
print('std', numpy.std(ref), numpy.std(out))
s = numpy.mean(ref) / numpy.mean(out)
print('scale', s)
out = out * s
#print(out-ref)
numpy.testing.assert_allclose(out, ref, rtol=1e-6);
def test_sparse_filterbank_ref():
# testcase based on working example in STM32AI Function pack, mel_filters_lut_30.c
mel = librosa.filters.mel(sr=16000, n_fft=1024, n_mels=30, htk=False)
sparse = emlearn.signal.sparse_filterbank(mel)
expected_starts = [1, 7, 13, 19, 25, 32, 38, 44, 50, 57, 63, 69, 77, 85, 93,
103, 114, 126, 139, 154, 170, 188, 208, 230, 254, 281, 311, 343, 379, 419]
expected_ends = [12, 18, 24, 31, 37, 43, 49, 56, 62, 68, 76, 84, 92, 102, 113,
125, 138, 153, 169, 187, 207, 229, 253, 280, 310, 342, 378, 418, 463, 511]
starts, ends, coeffs = sparse
assert starts == expected_starts
assert ends == expected_ends
assert len(coeffs) == 968
assert coeffs[0] == pytest.approx(1.6503363149e-03)
assert coeffs[-1] == pytest.approx(2.8125530662e-05)
def test_sparse_filterbank_apply():
n_fft = 1024
n_mels = 30
mel_basis = librosa.filters.mel(sr=16000, n_fft=n_fft, n_mels=n_mels, htk=False)
sparse = emlearn.signal.sparse_filterbank(mel_basis)
starts, ends, coeffs = sparse
assert len(starts) == n_mels
assert len(ends) == n_mels
name = 'fofofo'
c = emlearn.signal.sparse_filterbank_serialize(sparse, name=name)
assert name+'_lut' in c
assert name+'_ends' in c
assert name+'_starts' in c
assert 'static const float' in c
assert 'static const int' in c
assert str(n_mels) in c
data = numpy.ones(shape=mel_basis.shape[1]) * 100
ref = numpy.dot(mel_basis, data)
py_out = emlearn.signal.sparse_filterbank_reduce(sparse, data)
numpy.testing.assert_allclose(py_out, ref, rtol=1e-5)
c_out = eml_audio.sparse_filterbank(data, starts, ends, coeffs)
numpy.testing.assert_allclose(c_out, ref, rtol=1e-5)
| [
"numpy.log10",
"numpy.random.rand",
"librosa.util.example_audio_file",
"eml_audio.melspectrogram",
"numpy.array",
"numpy.arange",
"librosa.load",
"numpy.mean",
"numpy.testing.assert_allclose",
"numpy.fft.fft",
"eml_audio.sparse_filterbank",
"eml_audio.melfilter",
"numpy.testing.assert_almost... | [((86, 107), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (100, 107), False, 'import matplotlib\n'), ((293, 336), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_fft"""', 'FFT_SIZES'], {}), "('n_fft', FFT_SIZES)\n", (316, 336), False, 'import pytest\n'), ((6340, 6366), 'pytest.mark.skip', 'pytest.mark.skip', (['"""broken"""'], {}), "('broken')\n", (6356, 6366), False, 'import pytest\n'), ((379, 401), 'numpy.arange', 'numpy.arange', (['(0)', 'n_fft'], {}), '(0, n_fft)\n', (391, 401), False, 'import numpy\n'), ((459, 481), 'eml_audio.rfft', 'eml_audio.rfft', (['signal'], {}), '(signal)\n', (473, 481), False, 'import eml_audio\n'), ((510, 562), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['out', 'ref'], {'rtol': '(0.0001)'}), '(out, ref, rtol=0.0001)\n', (539, 562), False, 'import numpy\n'), ((1617, 1671), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['fftfreqs', 'fftfreqs2'], {}), '(fftfreqs, fftfreqs2)\n', (1650, 1671), False, 'import numpy\n'), ((1753, 1844), 'librosa.core.time_frequency.mel_frequencies', 'librosa.core.time_frequency.mel_frequencies', (['(n_mels + 2)'], {'fmin': 'fmin', 'fmax': 'fmax', 'htk': 'htk'}), '(n_mels + 2, fmin=fmin, fmax=\n fmax, htk=htk)\n', (1796, 1844), False, 'import librosa\n'), ((2343, 2413), 'librosa.filters.mel', 'librosa.filters.mel', (['sr', 'n_fft', 'n_mels', 'fmin', 'fmax'], {'htk': 'htk', 'norm': 'norm'}), '(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)\n', (2362, 2413), False, 'import librosa\n'), ((2418, 2467), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['weights', 'refweighs'], {}), '(weights, refweighs)\n', (2447, 2467), False, 'import numpy\n'), ((2480, 2508), 'numpy.dot', 'numpy.dot', (['frames', 'weights.T'], {}), '(frames, weights.T)\n', (2489, 2508), False, 'import numpy\n'), ((2920, 2974), 'numpy.linspace', 'numpy.linspace', (['low_freq_mel', 'high_freq_mel', 
'(nfilt + 2)'], {}), '(low_freq_mel, high_freq_mel, nfilt + 2)\n', (2934, 2974), False, 'import numpy\n'), ((3093, 3142), 'numpy.floor', 'numpy.floor', (['((NFFT + 1) * hz_points / sample_rate)'], {}), '((NFFT + 1) * hz_points / sample_rate)\n', (3104, 3142), False, 'import numpy\n'), ((4251, 4339), 'librosa.filters.mel', 'librosa.filters.mel', (['sr', 'n_fft', 'n_mels'], {'fmin': '(0)', 'fmax': '(22050 // 2)', 'htk': '(True)', 'norm': 'None'}), '(sr, n_fft, n_mels, fmin=0, fmax=22050 // 2, htk=True,\n norm=None)\n', (4270, 4339), False, 'import librosa\n'), ((4403, 4433), 'numpy.dot', 'numpy.dot', (['pow_frames', 'fbank.T'], {}), '(pow_frames, fbank.T)\n', (4412, 4433), False, 'import numpy\n'), ((4637, 4662), 'numpy.random.rand', 'numpy.random.rand', (['length'], {}), '(length)\n', (4654, 4662), False, 'import numpy\n'), ((4673, 4730), 'eml_audio.melfilter', 'eml_audio.melfilter', (['input', 'sr', 'n_fft', 'n_mels', 'fmin', 'fmax'], {}), '(input, sr, n_fft, n_mels, fmin, fmax)\n', (4692, 4730), False, 'import eml_audio\n'), ((4741, 4863), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'S': 'input', 'htk': '(True)', 'norm': 'None', 'sr': 'sr', 'n_fft': 'n_fft', 'n_mels': 'n_mels', 'fmin': 'fmin', 'fmax': 'fmax'}), '(S=input, htk=True, norm=None, sr=sr, n_fft=\n n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)\n', (4771, 4863), False, 'import librosa\n'), ((4924, 4976), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['ref2', 'ref'], {'rtol': '(1e-05)'}), '(ref2, ref, rtol=1e-05)\n', (4953, 4976), False, 'import numpy\n'), ((5031, 5083), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['ref3', 'ref'], {'rtol': '(0.001)'}), '(ref3, ref, rtol=0.001)\n', (5060, 5083), False, 'import numpy\n'), ((5143, 5158), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {}), '(3)\n', (5155, 5158), True, 'from matplotlib import pyplot as plt\n'), ((5356, 5407), 'numpy.testing.assert_allclose', 
'numpy.testing.assert_allclose', (['out', 'ref'], {'rtol': '(0.001)'}), '(out, ref, rtol=0.001)\n', (5385, 5407), False, 'import numpy\n'), ((5454, 5487), 'librosa.util.example_audio_file', 'librosa.util.example_audio_file', ([], {}), '()\n', (5485, 5487), False, 'import librosa\n'), ((5500, 5548), 'librosa.load', 'librosa.load', (['filename'], {'offset': '(1.0)', 'duration': '(0.3)'}), '(filename, offset=1.0, duration=0.3)\n', (5512, 5548), False, 'import librosa\n'), ((5750, 5872), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'S': 'spec1', 'sr': 'sr', 'norm': 'None', 'htk': '(True)', 'n_fft': 'n_fft', 'n_mels': 'n_mels', 'fmin': 'fmin', 'fmax': 'fmax'}), '(S=spec1, sr=sr, norm=None, htk=True, n_fft=\n n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)\n', (5780, 5872), False, 'import librosa\n'), ((5878, 5935), 'eml_audio.melfilter', 'eml_audio.melfilter', (['spec1', 'sr', 'n_fft', 'n_mels', 'fmin', 'fmax'], {}), '(spec1, sr, n_fft, n_mels, fmin, fmax)\n', (5897, 5935), False, 'import eml_audio\n'), ((5965, 5980), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (5977, 5980), True, 'from matplotlib import pyplot as plt\n'), ((6286, 6336), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['ref', 'out'], {'rtol': '(0.01)'}), '(ref, out, rtol=0.01)\n', (6315, 6336), False, 'import numpy\n'), ((6410, 6443), 'librosa.util.example_audio_file', 'librosa.util.example_audio_file', ([], {}), '()\n', (6441, 6443), False, 'import librosa\n'), ((6457, 6505), 'librosa.load', 'librosa.load', (['filename'], {'offset': '(1.0)', 'duration': '(0.3)'}), '(filename, offset=1.0, duration=0.3)\n', (6469, 6505), False, 'import librosa\n'), ((6646, 6780), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y', 'sr'], {'norm': 'None', 'htk': '(True)', 'fmin': 'fmin', 'fmax': 'fmax', 'n_fft': 'n_fft', 'n_mels': 'n_mels', 'hop_length': 'hop_size'}), '(y, sr, norm=None, htk=True, fmin=fmin, fmax=\n fmax, 
n_fft=n_fft, n_mels=n_mels, hop_length=hop_size)\n', (6676, 6780), False, 'import librosa\n'), ((6827, 6885), 'eml_audio.melspectrogram', 'eml_audio.melspectrogram', (['y', 'sr', 'n_fft', 'n_mels', 'fmin', 'fmax'], {}), '(y, sr, n_fft, n_mels, fmin, fmax)\n', (6851, 6885), False, 'import eml_audio\n'), ((7108, 7123), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (7120, 7123), True, 'from matplotlib import pyplot as plt\n'), ((7655, 7706), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['out', 'ref'], {'rtol': '(1e-06)'}), '(out, ref, rtol=1e-06)\n', (7684, 7706), False, 'import numpy\n'), ((7840, 7903), 'librosa.filters.mel', 'librosa.filters.mel', ([], {'sr': '(16000)', 'n_fft': '(1024)', 'n_mels': '(30)', 'htk': '(False)'}), '(sr=16000, n_fft=1024, n_mels=30, htk=False)\n', (7859, 7903), False, 'import librosa\n'), ((7917, 7954), 'emlearn.signal.sparse_filterbank', 'emlearn.signal.sparse_filterbank', (['mel'], {}), '(mel)\n', (7949, 7954), False, 'import emlearn\n'), ((8649, 8717), 'librosa.filters.mel', 'librosa.filters.mel', ([], {'sr': '(16000)', 'n_fft': 'n_fft', 'n_mels': 'n_mels', 'htk': '(False)'}), '(sr=16000, n_fft=n_fft, n_mels=n_mels, htk=False)\n', (8668, 8717), False, 'import librosa\n'), ((8732, 8775), 'emlearn.signal.sparse_filterbank', 'emlearn.signal.sparse_filterbank', (['mel_basis'], {}), '(mel_basis)\n', (8764, 8775), False, 'import emlearn\n'), ((8903, 8964), 'emlearn.signal.sparse_filterbank_serialize', 'emlearn.signal.sparse_filterbank_serialize', (['sparse'], {'name': 'name'}), '(sparse, name=name)\n', (8945, 8964), False, 'import emlearn\n'), ((9219, 9245), 'numpy.dot', 'numpy.dot', (['mel_basis', 'data'], {}), '(mel_basis, data)\n', (9228, 9245), False, 'import numpy\n'), ((9259, 9312), 'emlearn.signal.sparse_filterbank_reduce', 'emlearn.signal.sparse_filterbank_reduce', (['sparse', 'data'], {}), '(sparse, data)\n', (9298, 9312), False, 'import emlearn\n'), ((9317, 9371), 
'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['py_out', 'ref'], {'rtol': '(1e-05)'}), '(py_out, ref, rtol=1e-05)\n', (9346, 9371), False, 'import numpy\n'), ((9384, 9439), 'eml_audio.sparse_filterbank', 'eml_audio.sparse_filterbank', (['data', 'starts', 'ends', 'coeffs'], {}), '(data, starts, ends, coeffs)\n', (9411, 9439), False, 'import eml_audio\n'), ((9444, 9497), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['c_out', 'ref'], {'rtol': '(1e-05)'}), '(c_out, ref, rtol=1e-05)\n', (9473, 9497), False, 'import numpy\n'), ((413, 443), 'numpy.fft.fft', 'numpy.fft.fft', (['signal'], {'n': 'n_fft'}), '(signal, n=n_fft)\n', (426, 443), False, 'import numpy\n'), ((606, 630), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (619, 630), False, 'import pytest\n'), ((2840, 2878), 'numpy.log10', 'numpy.log10', (['(1 + sample_rate / 2 / 700)'], {}), '(1 + sample_rate / 2 / 700)\n', (2851, 2878), False, 'import numpy\n'), ((6018, 6059), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['d'], {'ref': 'numpy.max'}), '(d, ref=numpy.max)\n', (6041, 6059), False, 'import librosa\n'), ((6068, 6117), 'librosa.display.specshow', 'librosa.display.specshow', (['s'], {'ax': 'ax', 'x_axis': '"""time"""'}), "(s, ax=ax, x_axis='time')\n", (6092, 6117), False, 'import librosa\n'), ((7161, 7202), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['d'], {'ref': 'numpy.max'}), '(d, ref=numpy.max)\n', (7184, 7202), False, 'import librosa\n'), ((7211, 7260), 'librosa.display.specshow', 'librosa.display.specshow', (['s'], {'ax': 'ax', 'x_axis': '"""time"""'}), "(s, ax=ax, x_axis='time')\n", (7235, 7260), False, 'import librosa\n'), ((7465, 7480), 'numpy.mean', 'numpy.mean', (['ref'], {}), '(ref)\n', (7475, 7480), False, 'import numpy\n'), ((7482, 7497), 'numpy.mean', 'numpy.mean', (['out'], {}), '(out)\n', (7492, 7497), False, 'import numpy\n'), ((7516, 7530), 'numpy.std', 'numpy.std', (['ref'], {}), '(ref)\n', (7525, 
7530), False, 'import numpy\n'), ((7532, 7546), 'numpy.std', 'numpy.std', (['out'], {}), '(out)\n', (7541, 7546), False, 'import numpy\n'), ((7556, 7571), 'numpy.mean', 'numpy.mean', (['ref'], {}), '(ref)\n', (7566, 7571), False, 'import numpy\n'), ((7574, 7589), 'numpy.mean', 'numpy.mean', (['out'], {}), '(out)\n', (7584, 7589), False, 'import numpy\n'), ((8473, 8503), 'pytest.approx', 'pytest.approx', (['(0.0016503363149)'], {}), '(0.0016503363149)\n', (8486, 8503), False, 'import pytest\n'), ((8530, 8561), 'pytest.approx', 'pytest.approx', (['(2.8125530662e-05)'], {}), '(2.8125530662e-05)\n', (8543, 8561), False, 'import pytest\n'), ((9166, 9202), 'numpy.ones', 'numpy.ones', ([], {'shape': 'mel_basis.shape[1]'}), '(shape=mel_basis.shape[1])\n', (9176, 9202), False, 'import numpy\n'), ((660, 688), 'numpy.array', 'numpy.array', (['[0, 1, 3, 4, 5]'], {}), '([0, 1, 3, 4, 5])\n', (671, 688), False, 'import numpy\n'), ((5163, 5181), 'pandas.Series', 'pandas.Series', (['out'], {}), '(out)\n', (5176, 5181), False, 'import pandas\n'), ((5202, 5220), 'pandas.Series', 'pandas.Series', (['ref'], {}), '(ref)\n', (5215, 5220), False, 'import pandas\n'), ((5241, 5260), 'pandas.Series', 'pandas.Series', (['diff'], {}), '(diff)\n', (5254, 5260), False, 'import pandas\n'), ((5656, 5712), 'librosa.core.stft', 'librosa.core.stft', (['y'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(y, n_fft=n_fft, hop_length=hop_length)\n', (5673, 5712), False, 'import librosa\n'), ((3226, 3251), 'numpy.floor', 'numpy.floor', (['(NFFT / 2 + 1)'], {}), '(NFFT / 2 + 1)\n', (3237, 3251), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
#Heat equation
A1 = np.array([[4,-1,0,-1,0,0,0,0,0],
[-1,4,-1,0,-1,0,0,0,0],
[0,-1,4,0,0,-1,0,0,0],
[-1,0,0,4,-1,0,-1,0,0],
[0,-1,0,-1,4,-1,0,-1,0],
[0,0,-1,0,-1,4,0,0,-1],
[0,0,0,-1,0,0,4,-1,0],
[0,0,0,0,-1,0,-1,4,-1],
[0,0,0,0,0,-1,0,-1,4]])
b= np.array([0,0,100,0,0,100,200,200,300])
T = np.matmul(np.linalg.inv(A1),b)
print("Temprature at given points:",T)
Z = np.array([[0,0,0,0,50],
[0,T[0],T[1],T[2],100],
[0,T[3],T[4],T[5],100],
[0,T[6],T[7],T[8],100],
[100,200,200,200,150]])
plt.imshow(Z)
plt.colorbar(label="Temprature")
plt.title('Heat Map of temperatures at defined points')
plt.show()
#1-D wave equation
A=np.linalg.inv(np.array([[4,-1,0,0],
[-1,4,-1,0],
[0,-1,4,-1],
[0,0,-1,4]]))
Tj0= np.array([np.sin(0.4*np.pi),
np.sin(0.2*np.pi)+np.sin(0.6*np.pi),
np.sin(0.4*np.pi)+np.sin(0.8*np.pi),
np.sin(0.6*np.pi)])
Tu1=np.matmul(A,Tj0)
u11,u21,u31,u41=Tu1
print("\n"+"Tempratures at t= 0.04:",Tu1)
Tj1=np.array([u21,u31+u11,u41+u21,u31])
u12,u22,u32,u42=np.matmul(A,Tj1)
Tu2=np.array([u12,u22,u32,u42])
print("Tempratures at t= 0.08:",Tu2)
Tj2=np.array([u22,u12+u32,u22+u42,u32])
u13,u23,u33,u43=np.matmul(A,Tj2)
Tu3=np.array([u13,u23,u33,u43])
print("Tempratures at t= 0.12:",Tu3)
Tj3=np.array([u23,u13+u33,u23+u43,u33])
u14,u24,u34,u44=np.matmul(A,Tj3)
Tu4=np.array([u14,u24,u34,u44])
print("Tempratures at t= 0.16:",Tu4)
Tj4=np.array([u24,u14+u34,u24+u44,u34])
u15,u25,u35,u45=np.matmul(A,Tj4)
Tu5=np.array([u15,u25,u35,u45])
print("Tempratures at t= 0.20:",Tu5)
#The border points are =0 meaning that
#appending them on will give the correct
#result when plotted
u1=np.append([0],np.append(Tu1,[0]))
u2=np.append([0],np.append(Tu2,[0]))
u3=np.append([0],np.append(Tu3,[0]))
u4=np.append([0],np.append(Tu4,[0]))
u5=np.append([0],np.append(Tu5,[0]))
x=np.arange(0,1.2,0.2)
plt.plot(x,u1,label='t=0.04',c='k')
plt.scatter(x,u1,c='k')
plt.plot(x,u2,label='t=0.08',c='b')
plt.scatter(x,u2,c='b')
plt.plot(x,u3,label='t=0.12',c='g')
plt.scatter(x,u3,c='g')
plt.plot(x,u4,label='t=0.16',c='y')
plt.scatter(x,u4,c='y')
plt.plot(x,u5,label='t=0.20',c='c')
plt.scatter(x,u5,c='c')
plt.title("Temperature against distance with varying times")
plt.ylabel("Temprature")
plt.xlabel("Distance")
plt.legend(loc=0)
plt.show() | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.plot",
"numpy.append",
"numpy.array",
"numpy.linalg.inv",
"numpy.matmul",
"matplotlib.pyplot.scatter",
"numpy.sin",
"matplotlib.pyp... | [((74, 382), 'numpy.array', 'np.array', (['[[4, -1, 0, -1, 0, 0, 0, 0, 0], [-1, 4, -1, 0, -1, 0, 0, 0, 0], [0, -1, 4, \n 0, 0, -1, 0, 0, 0], [-1, 0, 0, 4, -1, 0, -1, 0, 0], [0, -1, 0, -1, 4, -\n 1, 0, -1, 0], [0, 0, -1, 0, -1, 4, 0, 0, -1], [0, 0, 0, -1, 0, 0, 4, -1,\n 0], [0, 0, 0, 0, -1, 0, -1, 4, -1], [0, 0, 0, 0, 0, -1, 0, -1, 4]]'], {}), '([[4, -1, 0, -1, 0, 0, 0, 0, 0], [-1, 4, -1, 0, -1, 0, 0, 0, 0], [0,\n -1, 4, 0, 0, -1, 0, 0, 0], [-1, 0, 0, 4, -1, 0, -1, 0, 0], [0, -1, 0, -\n 1, 4, -1, 0, -1, 0], [0, 0, -1, 0, -1, 4, 0, 0, -1], [0, 0, 0, -1, 0, 0,\n 4, -1, 0], [0, 0, 0, 0, -1, 0, -1, 4, -1], [0, 0, 0, 0, 0, -1, 0, -1, 4]])\n', (82, 382), True, 'import numpy as np\n'), ((318, 365), 'numpy.array', 'np.array', (['[0, 0, 100, 0, 0, 100, 200, 200, 300]'], {}), '([0, 0, 100, 0, 0, 100, 200, 200, 300])\n', (326, 365), True, 'import numpy as np\n'), ((439, 583), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 50], [0, T[0], T[1], T[2], 100], [0, T[3], T[4], T[5], 100],\n [0, T[6], T[7], T[8], 100], [100, 200, 200, 200, 150]]'], {}), '([[0, 0, 0, 0, 50], [0, T[0], T[1], T[2], 100], [0, T[3], T[4], T[5\n ], 100], [0, T[6], T[7], T[8], 100], [100, 200, 200, 200, 150]])\n', (447, 583), True, 'import numpy as np\n'), ((568, 581), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Z'], {}), '(Z)\n', (578, 581), True, 'import matplotlib.pyplot as plt\n'), ((583, 615), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Temprature"""'}), "(label='Temprature')\n", (595, 615), True, 'import matplotlib.pyplot as plt\n'), ((617, 672), 'matplotlib.pyplot.title', 'plt.title', (['"""Heat Map of temperatures at defined points"""'], {}), "('Heat Map of temperatures at defined points')\n", (626, 672), True, 'import matplotlib.pyplot as plt\n'), ((674, 684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (682, 684), True, 'import matplotlib.pyplot as plt\n'), ((928, 945), 'numpy.matmul', 'np.matmul', (['A', 'Tj0'], {}), '(A, Tj0)\n', (937, 945), 
True, 'import numpy as np\n'), ((1014, 1056), 'numpy.array', 'np.array', (['[u21, u31 + u11, u41 + u21, u31]'], {}), '([u21, u31 + u11, u41 + u21, u31])\n', (1022, 1056), True, 'import numpy as np\n'), ((1067, 1084), 'numpy.matmul', 'np.matmul', (['A', 'Tj1'], {}), '(A, Tj1)\n', (1076, 1084), True, 'import numpy as np\n'), ((1089, 1119), 'numpy.array', 'np.array', (['[u12, u22, u32, u42]'], {}), '([u12, u22, u32, u42])\n', (1097, 1119), True, 'import numpy as np\n'), ((1160, 1202), 'numpy.array', 'np.array', (['[u22, u12 + u32, u22 + u42, u32]'], {}), '([u22, u12 + u32, u22 + u42, u32])\n', (1168, 1202), True, 'import numpy as np\n'), ((1213, 1230), 'numpy.matmul', 'np.matmul', (['A', 'Tj2'], {}), '(A, Tj2)\n', (1222, 1230), True, 'import numpy as np\n'), ((1235, 1265), 'numpy.array', 'np.array', (['[u13, u23, u33, u43]'], {}), '([u13, u23, u33, u43])\n', (1243, 1265), True, 'import numpy as np\n'), ((1306, 1348), 'numpy.array', 'np.array', (['[u23, u13 + u33, u23 + u43, u33]'], {}), '([u23, u13 + u33, u23 + u43, u33])\n', (1314, 1348), True, 'import numpy as np\n'), ((1359, 1376), 'numpy.matmul', 'np.matmul', (['A', 'Tj3'], {}), '(A, Tj3)\n', (1368, 1376), True, 'import numpy as np\n'), ((1381, 1411), 'numpy.array', 'np.array', (['[u14, u24, u34, u44]'], {}), '([u14, u24, u34, u44])\n', (1389, 1411), True, 'import numpy as np\n'), ((1454, 1496), 'numpy.array', 'np.array', (['[u24, u14 + u34, u24 + u44, u34]'], {}), '([u24, u14 + u34, u24 + u44, u34])\n', (1462, 1496), True, 'import numpy as np\n'), ((1507, 1524), 'numpy.matmul', 'np.matmul', (['A', 'Tj4'], {}), '(A, Tj4)\n', (1516, 1524), True, 'import numpy as np\n'), ((1529, 1559), 'numpy.array', 'np.array', (['[u15, u25, u35, u45]'], {}), '([u15, u25, u35, u45])\n', (1537, 1559), True, 'import numpy as np\n'), ((1892, 1914), 'numpy.arange', 'np.arange', (['(0)', '(1.2)', '(0.2)'], {}), '(0, 1.2, 0.2)\n', (1901, 1914), True, 'import numpy as np\n'), ((1914, 1952), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 
'u1'], {'label': '"""t=0.04"""', 'c': '"""k"""'}), "(x, u1, label='t=0.04', c='k')\n", (1922, 1952), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1976), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'u1'], {'c': '"""k"""'}), "(x, u1, c='k')\n", (1962, 1976), True, 'import matplotlib.pyplot as plt\n'), ((1976, 2014), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u2'], {'label': '"""t=0.08"""', 'c': '"""b"""'}), "(x, u2, label='t=0.08', c='b')\n", (1984, 2014), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2038), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'u2'], {'c': '"""b"""'}), "(x, u2, c='b')\n", (2024, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2076), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u3'], {'label': '"""t=0.12"""', 'c': '"""g"""'}), "(x, u3, label='t=0.12', c='g')\n", (2046, 2076), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2100), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'u3'], {'c': '"""g"""'}), "(x, u3, c='g')\n", (2086, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2138), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u4'], {'label': '"""t=0.16"""', 'c': '"""y"""'}), "(x, u4, label='t=0.16', c='y')\n", (2108, 2138), True, 'import matplotlib.pyplot as plt\n'), ((2137, 2162), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'u4'], {'c': '"""y"""'}), "(x, u4, c='y')\n", (2148, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2162, 2200), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u5'], {'label': '"""t=0.20"""', 'c': '"""c"""'}), "(x, u5, label='t=0.20', c='c')\n", (2170, 2200), True, 'import matplotlib.pyplot as plt\n'), ((2199, 2224), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'u5'], {'c': '"""c"""'}), "(x, u5, c='c')\n", (2210, 2224), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2284), 'matplotlib.pyplot.title', 'plt.title', (['"""Temperature against distance with varying times"""'], {}), "('Temperature against distance with varying times')\n", (2233, 
2284), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temprature"""'], {}), "('Temprature')\n", (2296, 2310), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2334), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance"""'], {}), "('Distance')\n", (2322, 2334), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2353), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (2346, 2353), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2363, 2365), True, 'import matplotlib.pyplot as plt\n'), ((373, 390), 'numpy.linalg.inv', 'np.linalg.inv', (['A1'], {}), '(A1)\n', (386, 390), True, 'import numpy as np\n'), ((722, 794), 'numpy.array', 'np.array', (['[[4, -1, 0, 0], [-1, 4, -1, 0], [0, -1, 4, -1], [0, 0, -1, 4]]'], {}), '([[4, -1, 0, 0], [-1, 4, -1, 0], [0, -1, 4, -1], [0, 0, -1, 4]])\n', (730, 794), True, 'import numpy as np\n'), ((1717, 1736), 'numpy.append', 'np.append', (['Tu1', '[0]'], {}), '(Tu1, [0])\n', (1726, 1736), True, 'import numpy as np\n'), ((1755, 1774), 'numpy.append', 'np.append', (['Tu2', '[0]'], {}), '(Tu2, [0])\n', (1764, 1774), True, 'import numpy as np\n'), ((1793, 1812), 'numpy.append', 'np.append', (['Tu3', '[0]'], {}), '(Tu3, [0])\n', (1802, 1812), True, 'import numpy as np\n'), ((1831, 1850), 'numpy.append', 'np.append', (['Tu4', '[0]'], {}), '(Tu4, [0])\n', (1840, 1850), True, 'import numpy as np\n'), ((1869, 1888), 'numpy.append', 'np.append', (['Tu5', '[0]'], {}), '(Tu5, [0])\n', (1878, 1888), True, 'import numpy as np\n'), ((806, 825), 'numpy.sin', 'np.sin', (['(0.4 * np.pi)'], {}), '(0.4 * np.pi)\n', (812, 825), True, 'import numpy as np\n'), ((903, 922), 'numpy.sin', 'np.sin', (['(0.6 * np.pi)'], {}), '(0.6 * np.pi)\n', (909, 922), True, 'import numpy as np\n'), ((827, 846), 'numpy.sin', 'np.sin', (['(0.2 * np.pi)'], {}), '(0.2 * np.pi)\n', (833, 846), True, 'import numpy as np\n'), 
((845, 864), 'numpy.sin', 'np.sin', (['(0.6 * np.pi)'], {}), '(0.6 * np.pi)\n', (851, 864), True, 'import numpy as np\n'), ((865, 884), 'numpy.sin', 'np.sin', (['(0.4 * np.pi)'], {}), '(0.4 * np.pi)\n', (871, 884), True, 'import numpy as np\n'), ((883, 902), 'numpy.sin', 'np.sin', (['(0.8 * np.pi)'], {}), '(0.8 * np.pi)\n', (889, 902), True, 'import numpy as np\n')] |
# Batch feature extraction: run every 'wave' drawing through a headless
# ResNet50 and stream the 2048*7*7 pre-FC activations into an HDF5 file.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from pathlib import Path
from sklearn.preprocessing import LabelEncoder
from utils import HDF5DatasetWriter
import random
import progressbar
import os
import PIL
import PIL.Image
print(tf.__version__)
'''
drawings:
---------> spiral
---------------> training
-------------------> healthy
-------------------> parkinson
---------------> testing
-------------------> healthy
-------------------> parkinson
---------> wave
---------------> training
-------------------> healthy
-------------------> parkinson
---------------> testing
-------------------> healthy
-------------------> parkinson
'''
'''
Extract features using ResNet50. Apply to all test and train images. Save in hdf5
'''
# make this args
data_dir = Path(r'D:\Docs\Python_code\ParkinsonsSketch\178338_401677_bundle_archive\drawings')
feature_out_dir = r'Features\wave_features.hdf5' #args['output']
print('[INFO] loading data...')
wave_train_images = list(data_dir.glob(r'wave/training/*/*.png'))
random.shuffle(wave_train_images) # returns in place
NUM_TRAIN_IMAGES = len(wave_train_images)
print(f'[INFO] number of training images: {NUM_TRAIN_IMAGES}')
wave_test_images = list(data_dir.glob(r'wave/testing/*/*.png'))
random.shuffle(wave_test_images)
NUM_TEST_IMAGES = len(wave_test_images)
print(f'[INFO] number of test images: {NUM_TEST_IMAGES}')
imagePaths = wave_train_images + wave_test_images
print(f'[INFO] total number of images: {len(imagePaths)}')
# class label is the parent directory name (healthy / parkinson)
labels = [x.parent.stem for x in imagePaths]
le = LabelEncoder()
labels = le.fit_transform(labels)
print("[INFO] loading network...")
model = ResNet50(weights="imagenet", include_top = False)
# 2048 of 7 * 7 = 100352 filters as output of ResNet50 layer before FC
dataset = HDF5DatasetWriter((len(imagePaths), 2048 * 7 * 7),
                    feature_out_dir, dataKey="features",
                    bufSize=1000)
dataset.storeClassLabels(le.classes_)
test_image = load_img(imagePaths[0]) #PIL image instance
print(f'[INFO] Original image shape: {np.array(test_image).shape}')
# fig, axs = plt.subplots()
# plt.imshow(test_image)
# plt.title(le.inverse_transform([labels[0]]))
# plt.show()
widgets = ["Evaluating: ", progressbar.Percentage(), " ",
           progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(imagePaths),
                                widgets=widgets).start()
bs = 16
# loop in batches
for i in np.arange(0, len(imagePaths), bs):
    batchPaths = imagePaths[i:i + bs]
    batchLabels = labels[i:i + bs]
    batchImages = []
    # preprocess each image
    for j, imagePath in enumerate(batchPaths):
        image = load_img(imagePath, target_size=(224, 224), interpolation='bilinear')
        image = img_to_array(image)
        # expand dims and subtract mean RGB
        image = np.expand_dims(image, axis=0)
        image = imagenet_utils.preprocess_input(image)
        if j==0 and i==0:
            # sanity-check plot of the very first preprocessed image
            fig, axs = plt.subplots()
            plt.imshow(image.reshape((224,224,3)).astype(np.uint8),clim=(0,255), interpolation=None)
            plt.show()
        batchImages.append(image)
    batchImages = np.vstack(batchImages)
    # extract features
    features = model.predict(batchImages, batch_size=bs)
    features = features.reshape((features.shape[0], 100352))
    # then added to dataset
    dataset.add(features, batchLabels)
    pbar.update(i)
dataset.close()
pbar.finish()
| [
"tensorflow.keras.preprocessing.image.load_img",
"progressbar.Bar",
"sklearn.preprocessing.LabelEncoder",
"random.shuffle",
"matplotlib.pyplot.show",
"pathlib.Path",
"numpy.array",
"progressbar.Percentage",
"numpy.vstack",
"tensorflow.keras.applications.ResNet50",
"progressbar.ETA",
"numpy.exp... | [((1038, 1135), 'pathlib.Path', 'Path', (['"""D:\\\\Docs\\\\Python_code\\\\ParkinsonsSketch\\\\178338_401677_bundle_archive\\\\drawings"""'], {}), "(\n 'D:\\\\Docs\\\\Python_code\\\\ParkinsonsSketch\\\\178338_401677_bundle_archive\\\\drawings'\n )\n", (1042, 1135), False, 'from pathlib import Path\n'), ((1286, 1319), 'random.shuffle', 'random.shuffle', (['wave_train_images'], {}), '(wave_train_images)\n', (1300, 1319), False, 'import random\n'), ((1508, 1540), 'random.shuffle', 'random.shuffle', (['wave_test_images'], {}), '(wave_test_images)\n', (1522, 1540), False, 'import random\n'), ((1799, 1813), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1811, 1813), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1892, 1939), 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (1900, 1939), False, 'from tensorflow.keras.applications import ResNet50\n'), ((2234, 2257), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['imagePaths[0]'], {}), '(imagePaths[0])\n', (2242, 2257), False, 'from tensorflow.keras.preprocessing.image import load_img\n'), ((2487, 2511), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (2509, 2511), False, 'import progressbar\n'), ((2529, 2546), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (2544, 2546), False, 'import progressbar\n'), ((2553, 2570), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (2568, 2570), False, 'import progressbar\n'), ((3431, 3453), 'numpy.vstack', 'np.vstack', (['batchImages'], {}), '(batchImages)\n', (3440, 3453), True, 'import numpy as np\n'), ((2938, 3007), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['imagePath'], {'target_size': '(224, 224)', 'interpolation': '"""bilinear"""'}), "(imagePath, target_size=(224, 224), interpolation='bilinear')\n", (2946, 3007), False, 'from 
tensorflow.keras.preprocessing.image import load_img\n'), ((3024, 3043), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (3036, 3043), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((3105, 3134), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3119, 3134), True, 'import numpy as np\n'), ((3151, 3189), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['image'], {}), '(image)\n', (3182, 3189), False, 'from tensorflow.keras.applications import imagenet_utils\n'), ((3239, 3253), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3251, 3253), True, 'import matplotlib.pyplot as plt\n'), ((3367, 3377), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3375, 3377), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2336), 'numpy.array', 'np.array', (['test_image'], {}), '(test_image)\n', (2324, 2336), True, 'import numpy as np\n')] |
from operator import le
import os
import math
import warnings
warnings.filterwarnings('ignore', 'The iteration is not making good progress')
import numpy as np
np.set_printoptions(suppress=True)
import scipy
import scipy.stats
from scipy.stats import poisson, uniform, norm
from scipy.fftpack import fft, ifft
from scipy import optimize as opti
import scipy.special as special
from scipy.signal import convolve
from scipy.signal import savgol_filter
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
from matplotlib import colors
from mpl_axes_aligner import align
import h5py
from scipy.interpolate import interp1d
from sklearn.linear_model import orthogonal_mp
from numba import njit
import warnings
warnings.filterwarnings('ignore')
# pgf backend so figures can be embedded into LaTeX documents
matplotlib.use('pgf')
plt.style.use('default')
plt.rcParams['savefig.dpi'] = 100
plt.rcParams['figure.dpi'] = 100
plt.rcParams['font.size'] = 20
plt.rcParams['lines.markersize'] = 4.0
plt.rcParams['lines.linewidth'] = 2.0
# plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['text.usetex'] = True
plt.rcParams['pgf.texsystem'] = 'pdflatex'
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['pgf.preamble'] = r'\usepackage[detect-all,locale=DE]{siunitx}'
nshannon = 1  # presumably the Shannon-interpolation upsampling factor -- appears unused here
window = 1029  # analysis window length in samples (used as the circulant length in xiaopeip_core)
gmu = 160.  # single-PE charge mean (used by charge() and as normalisation in lucyddm)
gsigma = 40.  # single-PE charge width
std = 1.  # baseline noise sigma -- assumed in the same units as the waveform; TODO confirm
p = [8., 0.5, 24.]  # single-PE pulse-shape parameters (tau, sigma, A) passed to spe()
# per-method charge thresholds used when clipping reconstructed PE candidates
Thres = {'mcmc':std / gsigma, 'xiaopeip':0, 'lucyddm':0.2, 'fbmp':0, 'fftrans':0.1, 'findpeak':0.1, 'threshold':0, 'firstthres':0, 'omp':0}
# compound dtype for the MCMC location history recorded by flow()
d_history = [('TriggerNo', np.uint32), ('ChannelID', np.uint32), ('step', np.uint32), ('loc', np.float32)]
# proposal probabilities for MCMC steps (-1: annihilate, +1: create, +2: move)
proposal = np.array((1, 1, 2)) / 4
def xiaopeip_old(wave, spe_pre, eta=0):
    """Legacy per-bin charge fit on the above-threshold region of *wave*.

    Falls back to a single unit charge at the waveform maximum when nothing
    rises above 5 sigma of the baseline noise.
    """
    n = len(wave)
    above = np.argwhere(wave > 5 * spe_pre['std']).flatten()
    fitted = False
    if above.size:
        fit_idx = np.arange(above.min() - round(spe_pre['mar_l']),
                            above.max() + round(spe_pre['mar_r']))
        fit_idx = np.unique(np.clip(fit_idx, 0, n - 1))
        pet = np.unique(np.clip(above - spe_pre['peak_c'], 0, n - 1))
        if pet.size:
            # cha, ped = xiaopeip_core(...)  -- pedestal-fitting variant kept for reference
            cha = xiaopeip_core(wave[fit_idx], spe_pre['spe'], fit_idx, pet, eta=eta)
            fitted = True
    if not fitted:
        # nothing usable above threshold: one unit charge at the peak position
        pet = np.array([np.argmax(wave[spe_pre['peak_c']:])])
        cha = np.array([1])
    return pet, cha
def xiaopeip(wave, spe_pre, Tau, Sigma, Thres, p, eta=0):
    '''
    eta is the hyperparameter level of LASSO passed to xiaopeip_core.
    '''
    _, wave_r, tlist, _, _, _, lo, hi = initial_params(wave, spe_pre, Tau, Sigma, gmu, Thres, p, is_t0=False, is_delta=False, n=1)
    # cha, ped = xiaopeip_core(...)  -- pedestal variant kept for reference
    cha = xiaopeip_core(wave_r, spe_pre['spe'], np.arange(lo, hi), tlist.astype(int), eta=eta)
    return tlist, cha
# def xiaopeip_core(wave, spe, fitp, possible, eta=0):
# l = len(wave)
# spe = np.concatenate([spe, np.zeros(l - len(spe))])
# ans0 = np.zeros(len(possible)+1).astype(np.float64)
# ans0[-1] = wave.min()
# b = np.zeros((len(possible)+1, 2)).astype(np.float64)
# b[-1, 0] = -np.inf
# b[:, 1] = np.inf
# mne = spe[np.mod(fitp.reshape(len(fitp), 1) - possible.reshape(1, len(possible)), l)]
# ans = opti.fmin_l_bfgs_b(norm_fit, ans0, args=(mne, wave[fitp], eta), approx_grad=True, bounds=b, maxfun=500000)
# # ans = opti.fmin_slsqp(norm_fit, ans0, args=(mne, wave[fitp]), bounds=b, iprint=-1, iter=500000)
# # ans = opti.fmin_tnc(norm_fit, ans0, args=(mne, wave[fitp]), approx_grad=True, bounds=b, messages=0, maxfun=500000)
# pf = ans[0]
# return pf[:-1], pf[-1]
# def norm_fit(x, M, y, eta=0):
# return np.power(y - x[-1] - np.matmul(M, x[:-1]), 2).sum() + eta * x.sum()
def xiaopeip_core(wave_r, spe, fitp, possible, eta=0):
    # Bounded least-squares fit of non-negative per-position charges.
    # wave_r: waveform samples at the indices in `fitp`; spe: single-PE template;
    # possible: candidate PE positions; eta: L1 penalty forwarded to norm_fit.
    l = window
    spe = np.concatenate([spe, np.zeros(l - len(spe))])
    ans0 = np.ones(len(possible)).astype(np.float64)
    b = np.zeros((len(possible), 2)).astype(np.float64)
    b[:, 1] = np.inf  # charges bounded to [0, inf)
    # design matrix: SPE template circularly shifted to each candidate position
    mne = spe[np.mod(fitp.reshape(len(fitp), 1) - possible.reshape(1, len(possible)), l)]
    try:
        ans = opti.fmin_l_bfgs_b(norm_fit, ans0, args=(mne, wave_r, eta), approx_grad=True, bounds=b, maxfun=500000)
    except ValueError:
        # optimizer failed: fall back to a flat small-charge guess
        ans = [np.ones(len(possible)) * 0.2]
    # ans = opti.fmin_slsqp(norm_fit, ans0, args=(mne, wave_r), bounds=b, iprint=-1, iter=500000)
    # ans = opti.fmin_tnc(norm_fit, ans0, args=(mne, wave_r), approx_grad=True, bounds=b, messages=0, maxfun=500000)
    return ans[0]
def norm_fit(x, M, y, eta=0):
    """Sum of squared residuals of y ~ M @ x plus an L1 penalty eta * sum(x)."""
    residual = y - np.matmul(M, x)
    return np.sum(residual ** 2) + eta * np.sum(x)
def lucyddm(waveform, spe_pre):
    '''Richardson-Lucy deconvolution of *waveform* with the single-PE template.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
    .. [2] https://github.com/scikit-image/scikit-image/blob/master/skimage/restoration/deconvolution.py#L329
    '''
    # zero-pad on the left so the kernel is causal around its peak
    kernel = np.append(np.zeros(len(spe_pre) - 1), np.abs(spe_pre))
    signal = np.clip(waveform, 1e-6, np.inf)
    kernel = np.clip(kernel, 1e-6, np.inf)
    signal = signal / gmu
    estimate = signal.copy()
    mirrored = kernel[::-1]
    while True:
        ratio = signal / np.convolve(estimate, kernel, mode='same')
        refined = estimate * np.convolve(ratio, mirrored, mode='same')
        # stop once the update no longer changes the estimate appreciably
        if np.max(np.abs(estimate - refined)) < 1e-4:
            break
        estimate = refined
    return np.arange(len(signal)), estimate
def omp(wave, A, tlist, factor):
    # Orthogonal Matching Pursuit sparse fit of `wave` on dictionary `A`;
    # coefficients are rescaled by `factor` (presumably to charge units -- verify against callers).
    coef = orthogonal_mp(A, wave[:, None])
    return tlist, coef * factor
def waveformfft(wave, spe_pre):
    """Charge reconstruction by Fourier deconvolution.

    Divides the waveform spectrum by the single-PE spectrum over the smoothed
    above-threshold window; mid/high-frequency bins are zeroed first to
    suppress noise amplification.  Falls back to one unit charge at the raw
    maximum when nothing crosses the 5-sigma threshold.
    """
    smoothed = savgol_filter(wave, 11, 2)
    above = np.argwhere(smoothed > 5 * spe_pre['std']).flatten()
    if above.size == 0:
        return np.argmax(wave[spe_pre['peak_c']:]).flatten(), np.array([1])
    lo = np.clip(above.min() - round(2 * spe_pre['mar_l']), 0, len(wave) - 1)
    hi = np.clip(above.max() + round(2 * spe_pre['mar_r']), 0, len(wave) - 1)
    pet = np.arange(lo, hi)
    segment = smoothed[lo:hi]
    m = len(segment)
    spec_spe = fft(spe_pre['spe'], 2 * m)
    spec_wave = fft(segment, 2 * m)
    # hard low-pass: kill the central 80% band of the doubled spectrum
    spec_wave[(m - round(m * 0.8)):(m + round(m * 0.8))] = 0
    recon = np.real(ifft(np.true_divide(spec_wave, spec_spe), 2 * m))[:m]
    cha = np.abs(recon / np.sum(recon) * np.abs(np.sum(wave)) / np.sum(spe_pre['spe']))
    return pet, cha
def threshold(wave, spe_pre):
    """Charge estimate from all samples above the 5-sigma noise threshold.

    Falls back to a single unit charge at the maximum when nothing crosses.
    """
    tail = wave[spe_pre['peak_c']:]
    pet = np.argwhere(tail > 5 * spe_pre['std']).flatten()
    cha = tail[pet]
    # normalise so the total charge matches the waveform integral in SPE units
    cha = cha / cha.sum() * np.abs(wave.sum()) / spe_pre['spe'].sum()
    if pet.size == 0:
        pet = np.array([np.argmax(tail)])
        cha = np.array([1])
    return pet, cha
def firstthres(wave, spe_pre):
    """One unit charge at the first 5-sigma threshold crossing (or at the peak)."""
    crossings = np.argwhere(wave[spe_pre['peak_c']:] > 5 * spe_pre['std']).flatten()
    if crossings.size == 0:
        pet = np.array([np.argmax(wave[spe_pre['peak_c']:])])
    else:
        pet = crossings[:1]
    cha = np.array([1])
    return pet, cha
def findpeak(wave, spe_pre):
    """Charge estimate at local maxima of the smoothed waveform above 5 sigma."""
    smoothed = savgol_filter(wave, 11, 2)
    slope_sign = np.where(np.diff(smoothed, prepend=smoothed[0]) > 0, 1, -1)
    turning = np.diff(slope_sign, prepend=slope_sign[0])  # < 0 at rising->falling turns
    candidates = np.argwhere((smoothed > 5 * spe_pre['std']) & (turning < 0)).flatten() - spe_pre['peak_c']
    pet = candidates[candidates >= 0]
    cha = wave[pet + spe_pre['peak_c']]
    # normalise so the total charge matches the waveform integral in SPE units
    cha = cha / np.sum(cha) * np.abs(np.sum(wave)) / np.sum(spe_pre['spe'])
    if pet.size == 0:
        pet = np.array([np.argmax(wave[spe_pre['peak_c']:])])
        cha = np.array([1])
    return pet, cha
def combine(A, cx, t):
    '''
    Linearly interpolate the two dictionary columns neighbouring the
    sub-bin location t, so a PE may sit between integer bins.
    '''
    frac, base = np.modf(t - 0.5)
    base = int(base)
    weights = np.array((1 - frac, frac))
    return weights @ A[:, base:base + 2].T, weights @ cx[:, base:base + 2].T
def move(A_vec, c_vec, z, step, mus, sig2s, A):
    '''
    Rank-one update of the log-evidence and cached state when a single PE is
    created or annihilated at one location.

    A_vec: row vector (dictionary column interpolated at the location)
    c_vec: row vector (Cov^-1 * A column interpolated at the location)
    step
    ====
    1: add one PE at t
    -1: remove one PE at t

    Returns (delta nu, delta cx, delta z).
    '''
    fsig2s = step * sig2s
    # Eq. (30) sig2s = 1 sigma^2 - 0 sigma^2
    beta_under = (1 + fsig2s * np.dot(A_vec, c_vec))
    beta = fsig2s / beta_under
    # Eq. (31) # sign of mus[t] and sig2s[t] cancels
    Δν = 0.5 * (beta * (z @ c_vec + mus / sig2s) ** 2 - mus ** 2 / fsig2s)
    # sign of space factor in Eq. (31) is reversed. Because Eq. (82) is in the denominator.
    Δν -= 0.5 * np.log(beta_under) # space
    # accept, prepare for the next
    # Eq. (33) istar is now n_pre. It crosses n_pre and n, thus is in vector form.
    Δcx = -np.einsum('n,m,mp->np', beta * c_vec, c_vec, A, optimize=True)
    # Eq. (34)
    Δz = -step * A_vec * mus
    return Δν, Δcx, Δz
def flow(cx, p1, z, N, sig2s, sig2w, mus, A, p_cha, mu_t):
    '''
    flow
    ====
    Continuous-time random walk (MCMC) over PE configurations.
    cx: Cov^-1 * A, see FBMP
    s: list of PE locations
    mu_t: total PE count estimated by LucyDDM
    z: residual waveform
    '''
    # istar: uniforms in [0, 1) used to pick which PE to operate on,
    # and reused to draw birth locations.
    istar = np.random.rand(TRIALS)
    c_cha = np.cumsum(p_cha) # cha: charge; p_cha: pdf of the LucyDDM charge (charge-guided PE intensity prior)
    home_s = np.interp(istar, xp=np.insert(c_cha, 0, 0), fp=np.arange(N+1)) # PE positions sampled from p_cha via inverse-CDF MC; used later as birth proposals
    NPE0 = int(mu_t + 0.5) # mu_t: mu_total guessed by LucyDDM; NPE0 is the PE count of the initial configuration s_0
    # positions take values in [0, N)
    s = list(np.interp((np.arange(NPE0) + 0.5) / NPE0, xp=np.insert(c_cha, 0, 0), fp=np.arange(N+1))) # initial MCMC PE configuration s0
    ν = 0
    for t in s: # build nu, cx, z of s0 by adding PEs one at a time to the empty configuration
        Δν, Δcx, Δz = move(*combine(A, cx, t), z, 1, mus, sig2s, A)
        ν += Δν
        cx += Δcx
        z += Δz
    # Recording scheme for s: the fixed-length compound array es_history stores
    # positions in 'loc'; since s has variable length, all 'loc' entries sharing
    # one 'step' belong to the same s.  si is a cursor splitting the array into
    # those variable-length segments, one per configuration.
    si = 0
    es_history = np.zeros(TRIALS * (NPE0 + 5) * N, dtype=d_history)
    wander_s = np.random.normal(size=TRIALS)
    # step: +1 creates a PE, -1 annihilates one, +2 moves one left or right
    flip = np.random.choice((-1, 1, 2), TRIALS, p=proposal)
    Δν_history = np.zeros(TRIALS) # list of Δν's
    log_mu = np.log(mu_t) # guessed Poisson intensity
    T_list = []
    c_star_list = []
    for i, (t, step, home, wander, accept) in enumerate(zip(istar, flip, home_s, wander_s, np.log(np.random.rand(TRIALS)))):
        # no hard left/right boundaries
        NPE = len(s)
        if NPE == 0:
            step = 1 # creation is the only possible move
            accept += np.log(1 / proposal[1]) # proposal-asymmetry penalty
        elif NPE == 1 and step == -1:
            # 1 -> 0: escaping from 0 afterwards is likely, so encourage this move
            accept -= np.log(1 / proposal[0])
        if step == 1: # birth
            if home >= 0.5 and home <= N - 0.5:
                Δν, Δcx, Δz = move(*combine(A, cx, home), z, 1, mus, sig2s, A)
                Δν += log_mu - np.log(NPE + 1)
                if Δν >= accept:
                    s.append(home)
            else: # p(w|s) undefined outside the window
                Δν = -np.inf
        else:
            op = int(t * NPE) # index of the PE to operate on
            loc = s[op] # its current location
            Δν, Δcx, Δz = move(*combine(A, cx, loc), z, -1, mus, sig2s, A)
            if step == -1: # death
                Δν -= log_mu - np.log(NPE)
                if Δν >= accept:
                    del s[op]
            elif step == 2: # move
                nloc = loc + wander # proposed new location
                if nloc >= 0.5 and nloc <= N - 0.5: # p(w|s) undefined outside the window
                    Δν1, Δcx1, Δz1 = move(*combine(A, cx + Δcx, nloc), z + Δz, 1, mus, sig2s, A)
                    Δν += Δν1
                    Δν += np.log(p_cha[int(nloc)]) - np.log(p_cha[int(loc)])
                    if Δν >= accept:
                        s[op] = nloc
                        Δcx += Δcx1
                        Δz += Δz1
                else: # p(w|s) undefined outside the window
                    Δν = -np.inf
        if Δν >= accept:
            cx += Δcx
            z += Δz
            si1 = si + len(s)
            es_history[si:si1]['step'] = i
            es_history[si:si1]['loc'] = s
            si = si1
        else: # reject proposal
            Δν = 0
            step = 0
        T_list.append(np.sort(np.digitize(s, bins=np.arange(N)) - 1))
        t, c = np.unique(T_list[-1], return_counts=True)
        c_star = np.zeros(N, dtype=int)
        c_star[t] = c
        c_star_list.append(c_star)
        Δν_history[i] = Δν
        flip[i] = step
    return flip, [Δν_history, ν], es_history[:si1], c_star_list, T_list
def metropolis_fbmp(y, A, sig2w, sig2s, mus, p1, p_cha, mu_t):
    '''
    Metropolis sampler over sparse PE configurations (FBMP signal model).

    p1: prior probability for each bin.
    sig2w: variance of white noise.
    sig2s: variance of signal x_i.
    mus: mean of signal x_i.
    '''
    # Only for multi-gaussian with arithmetic sequence of mu and sigma
    M, N = A.shape
    # nu_root: nu for all s_n=0.
    nu_root = -0.5 * np.linalg.norm(y) ** 2 / sig2w - 0.5 * M * np.log(2 * np.pi)
    nu_root -= 0.5 * M * np.log(sig2w)
    nu_root += poisson.logpmf(0, p1).sum()
    # Eq. (29)
    cx_root = A / sig2w
    # mu = 0 => (y - A * mu -> z)
    z = y.copy()
    # Metropolis flow
    flip, Δν_history, es_history, c_star_list, T_list = flow(cx_root, p1, z, N, sig2s, sig2w, mus, A, p_cha, mu_t)
    num = len(T_list)
    c_star = np.vstack(c_star_list)
    nu_star = np.cumsum(Δν_history[0]) + nu_root + Δν_history[1]
    # discard the first 20% of the chain as burn-in
    burn = num // 5
    nu_star = nu_star[burn:]
    T_list = T_list[burn:]
    c_star = c_star[burn:, :]
    flip[np.abs(flip) == 2] = 0 # moves do not change the PE count
    NPE_evo = np.cumsum(np.insert(flip, 0, int(mu_t + 0.5)))[burn:]
    es_history = es_history[es_history['step'] >= burn]
    return nu_star, T_list, c_star, es_history, NPE_evo
def nu_direct(y, A, nx, mus, sig2s, sig2w, la):
    """Directly evaluate the log-evidence nu of PE-count vector nx.

    Gaussian marginal likelihood of the residual under covariance Phi,
    plus an independent Poisson(la) prior on each bin's count.
    """
    M = A.shape[0]
    cov = Phi(y, A, nx, mus, sig2s, sig2w)
    resid = y - np.dot(A, mus * nx)
    nu = -0.5 * np.matmul(np.matmul(resid, np.linalg.inv(cov)), resid) - 0.5 * M * np.log(2 * np.pi)
    nu -= 0.5 * np.log(np.linalg.det(cov))
    nu = nu + poisson.logpmf(nx, mu=la).sum()
    return nu
def Phi(y, A, nx, mus, sig2s, sig2w):
    """Marginal waveform covariance for PE counts nx: A diag(sig2s*nx) A^T + sig2w I."""
    M = A.shape[0]
    return A @ np.diagflat(sig2s * nx) @ A.T + sig2w * np.eye(M)
def elbo(nu_star_prior):
    """Evidence lower bound of the softmax distribution induced by nu_star_prior.

    Returns E_q[nu] + H(q) with q = softmax(nu_star_prior); the maximum is
    subtracted before exponentiation for numerical stability.
    """
    shifted = np.exp(nu_star_prior - nu_star_prior.max())
    q = shifted / np.sum(shifted)
    # e_star = special.logsumexp(nu_star_prior)  -- agrees to ~1e-4
    return np.sum(q * nu_star_prior) - np.sum(q * np.log(q))
@njit(nogil=True, cache=True)
def unique_with_indices(values):
    # Numba-compiled: unique values of `values` together with the index where
    # each run starts.  The run-scan assumes `values` is sorted so equal
    # entries are contiguous -- TODO confirm all callers guarantee this.
    unq = np.unique(values)
    idx = np.zeros_like(unq, dtype=np.int_)
    idx[0] = 0
    i = 0
    for j in range(1, len(values)):
        if values[j] != unq[i]:
            i += 1
            idx[i] = j
    return unq, idx
@njit(nogil=True, cache=True)
def group_by_sorted_count_sum(idx, a):
    # Numba-compiled segmented reduction over runs of equal values in `idx`:
    # returns (run value, run length, sum of `a` over the run) per run.
    unique_idx, idx_of_idx = unique_with_indices(idx)
    counts = np.zeros_like(unique_idx, dtype=np.int_)
    sums = np.zeros_like(unique_idx, dtype=np.float64)
    for i in range(0, len(idx_of_idx)):
        start = idx_of_idx[i]
        if i < len(idx_of_idx) - 1:
            end = idx_of_idx[i + 1]
        else:
            # last run extends to the end of the array
            end = len(idx)
        counts[i] = end - start
        sums[i] = np.sum(a[start:end])
    return unique_idx, counts, sums
@njit(nogil=True, cache=True)
def jit_logsumexp(values, b):
    # Numba-compiled weighted logsumexp: log(sum(b * exp(values))),
    # shifted by max(values) for numerical stability.
    a_max = np.max(values)
    s = np.sum(b * np.exp(values - a_max))
    return np.log(s) + a_max
@njit(nogil=True, cache=True)
def group_by_logsumexp(idx, a, b):
    # Numba-compiled segmented weighted logsumexp: for each run of equal
    # values in `idx`, reduce the matching slices of `a` (log values) and
    # `b` (weights) with jit_logsumexp.
    unique_idx, idx_of_idx = unique_with_indices(idx)
    res = np.zeros_like(unique_idx, dtype=np.float64)
    for i in range(0, len(idx_of_idx)):
        start = idx_of_idx[i]
        if i < len(idx_of_idx) - 1:
            end = idx_of_idx[i + 1]
        else:
            # last run extends to the end of the array
            end = len(idx)
        res[i] = jit_logsumexp(a[start:end], b[start:end])
    return unique_idx, res
def jit_agg_NPE(step, f, size):
    """Aggregate per-step values f over equal PE counts via weighted logsumexp.

    Each contiguous run of equal `step` values becomes one record whose weight
    is the number of MCMC steps it persisted for.
    """
    starts, counts, sums = group_by_sorted_count_sum(step, f)
    records = np.zeros(
        len(starts),
        dtype=np.dtype([("NPE", np.int_), ("f_vec", np.float64), ("repeat", np.int_)]),
    )
    records["NPE"] = counts
    records["f_vec"] = sums
    # how many steps each segment lasted before the configuration changed
    records["repeat"] = np.diff(np.append(starts, int(size)))
    records = np.sort(records, order="NPE")
    return group_by_logsumexp(records["NPE"], records["f_vec"], records["repeat"])
def rss_alpha(alpha, outputs, inputs, mnecpu):
    """Residual sum of squares of inputs ~ alpha * mnecpu @ outputs."""
    diff = alpha * np.matmul(mnecpu, outputs) - inputs
    return np.sum(diff ** 2)
def shannon_interpolation(w, n):
    """Upsample w by integer factor n via Whittaker-Shannon sinc interpolation."""
    fine_t = np.arange(0, len(w), 1 / n)
    sample_t = np.arange(len(w))
    return np.sum(np.sinc(fine_t[:, None] - sample_t) * w, axis=1)
def read_model(spe_path, n=1):
    # Load per-channel single-PE templates from an HDF5 model file and derive
    # per-channel fit parameters (peak position, left/right margins).
    # n: upsampling factor applied to the template and derived quantities.
    # Returns {channel_id: spe_pre dict} used throughout this module.
    with h5py.File(spe_path, 'r', libver='latest', swmr=True) as speFile:
        cid = speFile['SinglePE'].attrs['ChannelID']
        epulse = speFile['SinglePE'].attrs['Epulse']
        spe = speFile['SinglePE'].attrs['SpePositive']
        std = speFile['SinglePE'].attrs['Std']
        if 'parameters' in list(speFile['SinglePE'].attrs.keys()):
            p = speFile['SinglePE'].attrs['parameters']
        else:
            p = [None,] * len(spe)
        spe_pre = {}
        fig = plt.figure()
        # fig.tight_layout()
        gs = gridspec.GridSpec(1, 1, figure=fig, left=0.1, right=0.85, top=0.95, bottom=0.15, wspace=0.4, hspace=0.5)
        ax = fig.add_subplot(gs[0, 0])
        for i in range(len(spe)):
            peak_c = np.argmax(spe[i])
            # t: where the falling edge drops to 0.1, solved on the interpolated tail
            ft = interp1d(np.arange(0, len(spe[i]) - peak_c), 0.1 - spe[i][peak_c:])
            t = opti.fsolve(ft, x0=np.argwhere(spe[i][peak_c:] < 0.1).flatten()[0])[0] + peak_c
            # mar_l / mar_r: distance from the 5-sigma crossings to the peak/tail
            fl = interp1d(np.arange(0, peak_c), spe[i][:peak_c] - 5 * std[i])
            mar_l = opti.fsolve(fl, x0=np.sum(spe[i][:peak_c] < 5 * std[i]))[0]
            fr = interp1d(np.arange(0, len(spe[i]) - peak_c), 5 * std[i] - spe[i][peak_c:])
            mar_r = t - (opti.fsolve(fr, x0=np.sum(spe[i][peak_c:] > 5 * std[i]))[0] + peak_c)
            # mar_l = 0
            # mar_r = 0
            ax.plot(spe[i])
            spe_pre_i = {'spe':interp1d(np.arange(len(spe[i])), spe[i])(np.arange(0, len(spe[i]) - 1, 1 / n)), 'epulse':epulse, 'peak_c':peak_c * n, 'mar_l':mar_l * n, 'mar_r':mar_r * n, 'std':std[i], 'parameters':p[i]}
            spe_pre.update({cid[i]:spe_pre_i})
        ax.grid()
        ax.set_xlabel(r'$\mathrm{Time}/\si{ns}$')
        ax.set_ylabel(r'$\mathrm{Voltage}/\si{mV}$')
        # fig.savefig('Note/figures/pmtspe.pdf')
        plt.close()
    return spe_pre
def clip(pet, cha, thres):
    """Drop PE candidates whose charge is <= thres.

    When every candidate fails, keep only the strongest one with unit charge.
    NOTE: shadows the builtin `clip` name; kept for caller compatibility.
    """
    keep = cha > thres
    if not keep.any():
        return np.array([pet[np.argmax(cha)]]), np.array([1])
    return pet[keep], cha[keep]
def glow(n, tau):
    """Draw n scintillation delays from an exponential with mean tau."""
    return np.random.exponential(scale=tau, size=n)
def transit(n, sigma):
    """Draw n transit-time spreads from a zero-mean normal of width sigma."""
    return np.random.normal(loc=0, scale=sigma, size=n)
def time(n, tau, sigma):
    """n sorted hit times: exponential glow (tau) plus normal transit (sigma).

    Either component degenerates away when its parameter is zero.
    NOTE: shadows the stdlib `time` module name within this module.
    """
    if tau == 0:
        samples = transit(n, sigma)
    elif sigma == 0:
        samples = glow(n, tau)
    else:
        samples = glow(n, tau) + transit(n, sigma)
    return np.sort(samples)
def convolve_exp_norm(x, tau, sigma):
    """Density of Exp(tau) convolved with N(0, sigma) (ex-Gaussian).

    Degenerates to the pure normal when tau == 0 and to the pure
    exponential when sigma == 0.
    """
    if tau == 0.0:
        return norm.pdf(x, loc=0, scale=sigma)
    if sigma == 0.0:
        return np.where(x >= 0.0, 1.0 / tau * np.exp(-x / tau), 0.0)
    rate = 1 / tau
    prefactor = rate / 2.0 * np.exp(rate * rate * sigma * sigma / 2.0)
    erf_arg = (rate * sigma * sigma - x) / (np.sqrt(2.) * sigma)
    return prefactor * (1.0 - special.erf(erf_arg)) * np.exp(-rate * x)
def log_convolve_exp_norm(x, tau, sigma):
    """Log-density of Exp(tau) convolved with N(0, sigma), floored at
    log(tiny float64) so -inf never leaks into downstream sums."""
    if tau == 0.0:
        logpdf = norm.logpdf(x, loc=0, scale=sigma)
    elif sigma == 0.0:
        logpdf = np.where(x >= 0.0, -np.log(tau) - x / tau, -np.inf)
    else:
        rate = 1.0 / tau
        offset = -np.log(2.0 * tau) + rate * rate * sigma * sigma / 2.0
        erf_arg = (rate * sigma * sigma - x) / (np.sqrt(2.0) * sigma)
        logpdf = offset + np.log(1.0 - special.erf(erf_arg)) - rate * x
    floor = np.log(np.finfo(np.float64).tiny)
    return np.clip(logpdf, floor, np.inf)
def spe(t, tau, sigma, A, gmu=gmu, window=window):
    """Log-normal single-PE pulse shape of amplitude A evaluated at times t.

    gmu / window defaults come from module globals and are unused in the
    current shape; they remain for interface compatibility.
    """
    z = np.log(t / tau) / sigma
    return A * np.exp(-1 / 2 * z ** 2)
def charge(n, gmu, gsigma, thres=0):
    """Sample n single-PE charges from N(gmu, gsigma) truncated below at thres."""
    # inverse-CDF sampling restricted to the upper tail above `thres`
    tail_mass = 1 - norm.cdf(thres, loc=gmu, scale=gsigma)
    u = uniform.rvs(scale=tail_mass, size=n)
    return norm.ppf(1 - u, loc=gmu, scale=gsigma)
def probcharhitt(t0, hitt, probcharge, Tau, Sigma, npe):
    """Per-hit posterior weight for start time t0, marginalised over the
    per-hit charge probabilities in `probcharge`."""
    weighted = np.where(npe >= 0, probcharge * np.power(convolve_exp_norm(hitt - t0, Tau, Sigma), npe), 0)
    return np.sum(weighted, axis=1) / np.sum(probcharge, axis=1)
def npeprobcharge(charge, npe, gmu, gsigma, s0):
    """Probability of an observed charge given npe PEs.

    Truncated-normal density N(gmu*npe, gsigma*sqrt(npe)) renormalised to
    positive charge; the width falls back to gsigma*sqrt(s0) when npe == 0,
    and negative npe entries get probability 0.
    """
    width = np.where(npe != 0, gsigma * np.sqrt(npe), gsigma * np.sqrt(s0))
    density = norm.pdf(charge, loc=gmu * npe, scale=width)
    tail = 1 - norm.cdf(0, loc=gmu * npe, scale=width)
    return np.where(npe >= 0, density / tail, 0)
def likelihoodt0(hitt, char, gmu, Tau, Sigma, mode='charge', is_delta=False):
    # Maximum-likelihood estimate of the event start time t0 from hit times.
    # mode='charge': each hit's log-likelihood is weighted by its charge in PE
    #                units (char / gmu); mode='all': every hit counts equally.
    # is_delta: also return |t - t0| where logL rises 0.5 above its minimum,
    #           a 1-sigma-like uncertainty on t0.
    b = [0., 600.]
    tlist = np.arange(b[0], b[1] + 1e-6, 0.2)
    if mode == 'charge':
        logL = lambda t0 : -1 * np.sum(np.log(np.clip(convolve_exp_norm(hitt - t0, Tau, Sigma), np.finfo(np.float64).tiny, np.inf)) * char / gmu)
    elif mode == 'all':
        logL = lambda t0 : -1 * np.sum(np.log(np.clip(convolve_exp_norm(hitt - t0, Tau, Sigma), np.finfo(np.float64).tiny, np.inf)))
    # coarse grid scan seeds the bounded L-BFGS-B refinement
    logLv_tlist = np.vectorize(logL)(tlist)
    t0 = opti.fmin_l_bfgs_b(logL, x0=[tlist[np.argmin(logLv_tlist)]], approx_grad=True, bounds=[b], maxfun=500000)[0]
    t0delta = None
    if is_delta:
        logLvdelta = np.vectorize(lambda t : np.abs(logL(t) - logL(t0) - 0.5))
        t0delta = abs(opti.fmin_l_bfgs_b(logLvdelta, x0=[tlist[np.argmin(np.abs(logLv_tlist - logL(t0) - 0.5))]], approx_grad=True, bounds=[b], maxfun=500000)[0] - t0)
    return t0, t0delta
def initial_params(wave, spe_pre, Tau, Sigma, gmu, Thres, p, nsp=4, nstd=3, is_t0=False, is_delta=False, n=1):
    # Build the initial state for the waveform fitters: LucyDDM seeds the hit
    # times/charges, a window around the prominent region is cut out, and the
    # SPE dictionary A is evaluated on a (possibly sub-bin, factor n) time grid.
    # Returns (A, cropped wave, tlist, t0_init, t0_init_delta, npe_init,
    # left_wave, right_wave).
    hitt_r, char_r = lucyddm(wave, spe_pre['spe'])
    hitt_r, char_r = clip(hitt_r, char_r, Thres)
    # densify the hit list to every bin between the first and last seed hit
    hitt = np.arange(hitt_r.min(), hitt_r.max() + 1)
    hitt[np.isin(hitt, hitt_r)] = hitt_r
    char = np.zeros(len(hitt))
    char[np.isin(hitt, hitt_r)] = char_r
    # rescale so total charge matches the waveform integral (floored at 1e-6)
    char = char / char.sum() * np.clip(np.abs(wave.sum()), 1e-6, np.inf)
    # candidate PE times: +-nsp bins around every seed hit
    tlist = np.unique(np.clip(np.hstack(hitt[:, None] + np.arange(-nsp, nsp+1)), 0, len(wave) - 1))
    index_prom = np.hstack([np.argwhere(savgol_filter(wave, 11, 4) > nstd * spe_pre['std']).flatten(), hitt])
    left_wave = np.clip(round(min(index_prom.min(), tlist.min()) - 3 * spe_pre['mar_l']), 0, len(wave) - 1)
    right_wave = np.clip(round(max(index_prom.max(), tlist.max()) + 3 * spe_pre['mar_r']), 0, len(wave) - 1)
    wave = wave[left_wave:right_wave]
    npe_init = np.zeros(len(tlist))
    npe_init[np.isin(tlist, hitt)] = char / gmu
    # split each bin's PE guess across the n sub-bin positions
    npe_init = np.repeat(npe_init, n) / n
    tlist = np.unique(np.sort(np.hstack(tlist[:, None] + np.linspace(0, 1, n, endpoint=False) - (n // 2) / n)))
    if len(tlist) != 1:
        assert abs(np.diff(tlist).min() - 1 / n) < 1e-3, 'tlist anomalous'
    # causal SPE dictionary: (t_auto + |t_auto|) / 2 zeroes negative lags
    t_auto = np.arange(left_wave, right_wave)[:, None] - tlist
    A = spe((t_auto + np.abs(t_auto)) / 2, p[0], p[1], p[2])
    t0_init = None
    t0_init_delta = None
    if is_t0:
        t0_init, t0_init_delta = likelihoodt0(hitt=hitt, char=char, gmu=gmu, Tau=Tau, Sigma=Sigma, mode='charge', is_delta=is_delta)
    return A, wave, tlist, t0_init, t0_init_delta, npe_init, left_wave, right_wave
def stdrmoutlier(array, r):
    """Standard deviation after removing samples more than r sigma from the mean.

    Returns (std of the kept samples, number kept).
    NOTE(review): ddof=-1 (divide by N+1) is preserved from the original --
    confirm it is intentional rather than a typo for ddof=1.
    """
    spread = np.std(array, ddof=-1)
    kept = array[np.abs(array - np.mean(array)) < r * spread]
    return np.std(kept, ddof=-1), len(kept)
def demo(pet_sub, cha_sub, tth, spe_pre, window, wave, cid, p, full=False, fold='Note/figures', ext='.pgf'):
    # Diagnostic plot for one waveform: compares the reconstructed PE sequence
    # (pet_sub, cha_sub) against the truth table `tth`, prints Wasserstein and
    # charge-difference metrics, and saves the figure under `fold`.
    penum = len(tth)
    print('PEnum is {}'.format(penum))
    pan = np.arange(window)
    pet_ans_0 = tth['HitPosInWindow']
    cha_ans = tth['Charge'] / spe_pre['spe'].sum()
    # merge truth PEs that share a bin
    pet_ans = np.unique(pet_ans_0)
    cha_ans = np.array([np.sum(cha_ans[pet_ans_0 == i]) for i in pet_ans])
    ylabel = r'$\mathrm{Charge}$'
    distd = '(W/ns,C/mV*ns)'
    distl = 'cdiff'
    edist = (cha_sub.sum() - cha_ans.sum()) * spe_pre['spe'].sum()
    print('truth HitPosInWindow = {}, Weight = {}'.format(pet_ans, cha_ans))
    # rebuild the true waveform by summing shifted SPE pulses
    wav_ans = np.sum([np.where(pan > pet_ans[j], spe(pan - pet_ans[j], tau=p[0], sigma=p[1], A=p[2]) * cha_ans[j], 0) for j in range(len(pet_ans))], axis=0)
    print('truth RSS = {}'.format(np.power(wave - wav_ans, 2).sum()))
    print('HitPosInWindow = {}, Weight = {}'.format(pet_sub, cha_sub))
    wdist = scipy.stats.wasserstein_distance(pet_ans, pet_sub, u_weights=cha_ans, v_weights=cha_sub)
    print('wdist = {}, '.format(wdist) + distl + ' = {}'.format(edist))
    wav_sub = np.sum([np.where(pan > pet_sub[j], spe(pan - pet_sub[j], tau=p[0], sigma=p[1], A=p[2]) * cha_sub[j], 0) for j in range(len(pet_sub))], axis=0)
    print('RSS = {}'.format(np.power(wav_ans - wav_sub, 2).sum()))
    # four stacked axes: waveforms, true charges, reconstructed charges, residuals
    fig = plt.figure(figsize=(10, 10))
    fig.tight_layout()
    ax0 = fig.add_axes((.1, .2, .85, .3))
    ax0.plot(pan, wave, c='b', label='noisy waveform')
    ax0.plot(pan, wav_ans, c='k', label='true waveform')
    ax0.plot(pan, wav_sub, c='g', label='reconstructed waveform')
    ax0.set_ylabel('$\mathrm{Voltage}/\si{mV}$')
    ax0.hlines(5 * spe_pre['std'], 0, window, color='c', label='threshold')
    ax0.set_xticklabels([])
    ax0.set_ylim(min(wave)-5, max(wave)+5)
    ax0.legend(loc=1)
    ax0.grid()
    if full:
        ax0.set_xlim(0, window)
    else:
        ax0.set_xlim(max(pet_ans.min()-50, 0), min(pet_ans.max()+150, window))
    ax1 = fig.add_axes((.1, .5, .85, .2))
    ax1.vlines(pet_ans, 0, cha_ans, color='k', label='true charge')
    ax1.set_ylabel(ylabel)
    ax1.set_xticklabels([])
    ax1.set_xlim(ax0.get_xlim())
    ax1.set_ylim(0, max(max(cha_ans), max(cha_sub))*1.1)
    ax1.set_yticks(np.arange(0, max(max(cha_ans), max(cha_sub)), 0.2))
    ax1.legend(loc=1)
    ax1.grid()
    ax2 = fig.add_axes((.1, .7, .85, .2))
    ax2.vlines(pet_sub, 0, cha_sub, color='g', label='reconstructed charge')
    ax2.set_ylabel(ylabel)
    ax2.set_xticklabels([])
    ax2.set_xlim(ax0.get_xlim())
    ax2.set_ylim(0, max(max(cha_ans), max(cha_sub))*1.1)
    ax2.set_yticks(np.arange(0, max(max(cha_ans), max(cha_sub)), 0.2))
    ax2.legend(loc=1)
    ax2.grid()
    ax3 = fig.add_axes((.1, .1, .85, .1))
    ax3.scatter(pan, wav_sub - wave, c='k', label='residuals', marker='.')
    ax3.set_xlabel('$\mathrm{t}/\si{ns}$')
    ax3.set_ylabel('$\mathrm{Voltage}/\si{mV}$')
    ax3.set_xlim(ax0.get_xlim())
    # symmetric residual axis rounded up to the next multiple of 5
    dh = int((max(np.abs(wav_sub - wave))//5+1)*5)
    ax3.set_yticks(np.linspace(-dh, dh, int(2*dh//5+1)))
    ax3.legend(loc=1)
    ax3.grid()
    if ext != '.pgf':
        fig.suptitle('eid={},cid={},'.format(tth['TriggerNo'][0], tth['PMTId'][0])+distd+'-dist={:.02f},{:.02f}'.format(wdist, edist), y=0.95)
    fig.savefig(fold + '/demoe{}c{}'.format(tth['TriggerNo'][0], tth['PMTId'][0]) + ext)
    fig.savefig(fold + '/demoe{}c{}'.format(tth['TriggerNo'][0], tth['PMTId'][0]) + '.pdf')
    fig.clf()
    plt.close(fig)
| [
"numpy.clip",
"numpy.convolve",
"numpy.sqrt",
"numpy.random.rand",
"numpy.log",
"scipy.signal.savgol_filter",
"numpy.random.exponential",
"numpy.isin",
"numpy.array",
"numpy.einsum",
"scipy.fftpack.fft",
"scipy.stats.norm.logpdf",
"numpy.linalg.norm",
"scipy.stats.norm.cdf",
"numpy.arang... | [((62, 140), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""The iteration is not making good progress"""'], {}), "('ignore', 'The iteration is not making good progress')\n", (85, 140), False, 'import warnings\n'), ((161, 195), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (180, 195), True, 'import numpy as np\n'), ((768, 801), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (791, 801), False, 'import warnings\n'), ((803, 824), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (817, 824), False, 'import matplotlib\n'), ((825, 849), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (838, 849), True, 'import matplotlib.pyplot as plt\n'), ((14566, 14594), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '(True)'}), '(nogil=True, cache=True)\n', (14570, 14594), False, 'from numba import njit\n'), ((14857, 14885), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '(True)'}), '(nogil=True, cache=True)\n', (14861, 14885), False, 'from numba import njit\n'), ((15380, 15408), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '(True)'}), '(nogil=True, cache=True)\n', (15384, 15408), False, 'from numba import njit\n'), ((15540, 15568), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '(True)'}), '(nogil=True, cache=True)\n', (15544, 15568), False, 'from numba import njit\n'), ((1603, 1622), 'numpy.array', 'np.array', (['(1, 1, 2)'], {}), '((1, 1, 2))\n', (1611, 1622), True, 'import numpy as np\n'), ((2738, 2770), 'numpy.arange', 'np.arange', (['left_wave', 'right_wave'], {}), '(left_wave, right_wave)\n', (2747, 2770), True, 'import numpy as np\n'), ((5128, 5160), 'numpy.clip', 'np.clip', (['waveform', '(1e-06)', 'np.inf'], {}), '(waveform, 1e-06, np.inf)\n', (5135, 5160), True, 'import numpy as np\n'), ((5170, 5197), 'numpy.clip', 'np.clip', 
(['spe', '(1e-06)', 'np.inf'], {}), '(spe, 1e-06, np.inf)\n', (5177, 5197), True, 'import numpy as np\n'), ((5707, 5738), 'sklearn.linear_model.orthogonal_mp', 'orthogonal_mp', (['A', 'wave[:, None]'], {}), '(A, wave[:, None])\n', (5720, 5738), False, 'from sklearn.linear_model import orthogonal_mp\n'), ((5812, 5838), 'scipy.signal.savgol_filter', 'savgol_filter', (['wave', '(11)', '(2)'], {}), '(wave, 11, 2)\n', (5825, 5838), False, 'from scipy.signal import savgol_filter\n'), ((7247, 7260), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (7255, 7260), True, 'import numpy as np\n'), ((7319, 7345), 'scipy.signal.savgol_filter', 'savgol_filter', (['wave', '(11)', '(2)'], {}), '(wave, 11, 2)\n', (7332, 7345), False, 'from scipy.signal import savgol_filter\n'), ((7414, 7444), 'numpy.diff', 'np.diff', (['dpta'], {'prepend': 'dpta[0]'}), '(dpta, prepend=dpta[0])\n', (7421, 7444), True, 'import numpy as np\n'), ((7935, 7951), 'numpy.modf', 'np.modf', (['(t - 0.5)'], {}), '(t - 0.5)\n', (7942, 7951), True, 'import numpy as np\n'), ((7981, 8007), 'numpy.array', 'np.array', (['(1 - frac, frac)'], {}), '((1 - frac, frac))\n', (7989, 8007), True, 'import numpy as np\n'), ((9163, 9185), 'numpy.random.rand', 'np.random.rand', (['TRIALS'], {}), '(TRIALS)\n', (9177, 9185), True, 'import numpy as np\n'), ((9217, 9233), 'numpy.cumsum', 'np.cumsum', (['p_cha'], {}), '(p_cha)\n', (9226, 9233), True, 'import numpy as np\n'), ((10026, 10076), 'numpy.zeros', 'np.zeros', (['(TRIALS * (NPE0 + 5) * N)'], {'dtype': 'd_history'}), '(TRIALS * (NPE0 + 5) * N, dtype=d_history)\n', (10034, 10076), True, 'import numpy as np\n'), ((10093, 10122), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'TRIALS'}), '(size=TRIALS)\n', (10109, 10122), True, 'import numpy as np\n'), ((10182, 10230), 'numpy.random.choice', 'np.random.choice', (['(-1, 1, 2)', 'TRIALS'], {'p': 'proposal'}), '((-1, 1, 2), TRIALS, p=proposal)\n', (10198, 10230), True, 'import numpy as np\n'), ((10250, 10266), 
'numpy.zeros', 'np.zeros', (['TRIALS'], {}), '(TRIALS)\n', (10258, 10266), True, 'import numpy as np\n'), ((10293, 10305), 'numpy.log', 'np.log', (['mu_t'], {}), '(mu_t)\n', (10299, 10305), True, 'import numpy as np\n'), ((13346, 13368), 'numpy.vstack', 'np.vstack', (['c_star_list'], {}), '(c_star_list)\n', (13355, 13368), True, 'import numpy as np\n'), ((13927, 13947), 'numpy.linalg.inv', 'np.linalg.inv', (['Phi_s'], {}), '(Phi_s)\n', (13940, 13947), True, 'import numpy as np\n'), ((14638, 14655), 'numpy.unique', 'np.unique', (['values'], {}), '(values)\n', (14647, 14655), True, 'import numpy as np\n'), ((14666, 14699), 'numpy.zeros_like', 'np.zeros_like', (['unq'], {'dtype': 'np.int_'}), '(unq, dtype=np.int_)\n', (14679, 14699), True, 'import numpy as np\n'), ((14992, 15032), 'numpy.zeros_like', 'np.zeros_like', (['unique_idx'], {'dtype': 'np.int_'}), '(unique_idx, dtype=np.int_)\n', (15005, 15032), True, 'import numpy as np\n'), ((15044, 15087), 'numpy.zeros_like', 'np.zeros_like', (['unique_idx'], {'dtype': 'np.float64'}), '(unique_idx, dtype=np.float64)\n', (15057, 15087), True, 'import numpy as np\n'), ((15451, 15465), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (15457, 15465), True, 'import numpy as np\n'), ((15668, 15711), 'numpy.zeros_like', 'np.zeros_like', (['unique_idx'], {'dtype': 'np.float64'}), '(unique_idx, dtype=np.float64)\n', (15681, 15711), True, 'import numpy as np\n'), ((16364, 16398), 'numpy.sort', 'np.sort', (['f_vec_merged'], {'order': '"""NPE"""'}), "(f_vec_merged, order='NPE')\n", (16371, 16398), True, 'import numpy as np\n'), ((18976, 19010), 'numpy.random.exponential', 'np.random.exponential', (['tau'], {'size': 'n'}), '(tau, size=n)\n', (18997, 19010), True, 'import numpy as np\n'), ((19046, 19080), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {'size': 'n'}), '(0, sigma, size=n)\n', (19062, 19080), True, 'import numpy as np\n'), ((21225, 21259), 'numpy.arange', 'np.arange', (['b[0]', '(b[1] + 1e-06)', 
'(0.2)'], {}), '(b[0], b[1] + 1e-06, 0.2)\n', (21234, 21259), True, 'import numpy as np\n'), ((23836, 23867), 'numpy.std', 'np.std', (['arrayrmoutlier'], {'ddof': '(-1)'}), '(arrayrmoutlier, ddof=-1)\n', (23842, 23867), True, 'import numpy as np\n'), ((24084, 24101), 'numpy.arange', 'np.arange', (['window'], {}), '(window)\n', (24093, 24101), True, 'import numpy as np\n'), ((24205, 24225), 'numpy.unique', 'np.unique', (['pet_ans_0'], {}), '(pet_ans_0)\n', (24214, 24225), True, 'import numpy as np\n'), ((24838, 24930), 'scipy.stats.wasserstein_distance', 'scipy.stats.wasserstein_distance', (['pet_ans', 'pet_sub'], {'u_weights': 'cha_ans', 'v_weights': 'cha_sub'}), '(pet_ans, pet_sub, u_weights=cha_ans,\n v_weights=cha_sub)\n', (24870, 24930), False, 'import scipy\n'), ((25234, 25262), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (25244, 25262), True, 'import matplotlib.pyplot as plt\n'), ((27359, 27373), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (27368, 27373), True, 'import matplotlib.pyplot as plt\n'), ((2375, 2388), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2383, 2388), True, 'import numpy as np\n'), ((4261, 4368), 'scipy.optimize.fmin_l_bfgs_b', 'opti.fmin_l_bfgs_b', (['norm_fit', 'ans0'], {'args': '(mne, wave_r, eta)', 'approx_grad': '(True)', 'bounds': 'b', 'maxfun': '(500000)'}), '(norm_fit, ans0, args=(mne, wave_r, eta), approx_grad=\n True, bounds=b, maxfun=500000)\n', (4279, 4368), True, 'from scipy import optimize as opti\n'), ((5096, 5111), 'numpy.abs', 'np.abs', (['spe_pre'], {}), '(spe_pre)\n', (5102, 5111), True, 'import numpy as np\n'), ((6100, 6122), 'numpy.arange', 'np.arange', (['left', 'right'], {}), '(left, right)\n', (6109, 6122), True, 'import numpy as np\n'), ((6190, 6221), 'scipy.fftpack.fft', 'fft', (["spe_pre['spe']", '(2 * length)'], {}), "(spe_pre['spe'], 2 * length)\n", (6193, 6221), False, 'from scipy.fftpack import fft, ifft\n'), ((6236, 6254), 
'scipy.fftpack.fft', 'fft', (['w', '(2 * length)'], {}), '(w, 2 * length)\n', (6239, 6254), False, 'from scipy.fftpack import fft, ifft\n'), ((6344, 6373), 'numpy.true_divide', 'np.true_divide', (['wavef', 'spefft'], {}), '(wavef, spefft)\n', (6358, 6373), True, 'import numpy as np\n'), ((6624, 6637), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (6632, 6637), True, 'import numpy as np\n'), ((6976, 6989), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (6984, 6989), True, 'import numpy as np\n'), ((7656, 7678), 'numpy.sum', 'np.sum', (["spe_pre['spe']"], {}), "(spe_pre['spe'])\n", (7662, 7678), True, 'import numpy as np\n'), ((7777, 7790), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (7785, 7790), True, 'import numpy as np\n'), ((8619, 8637), 'numpy.log', 'np.log', (['beta_under'], {}), '(beta_under)\n', (8625, 8637), True, 'import numpy as np\n'), ((8775, 8837), 'numpy.einsum', 'np.einsum', (['"""n,m,mp->np"""', '(beta * c_vec)', 'c_vec', 'A'], {'optimize': '(True)'}), "('n,m,mp->np', beta * c_vec, c_vec, A, optimize=True)\n", (8784, 8837), True, 'import numpy as np\n'), ((12312, 12353), 'numpy.unique', 'np.unique', (['T_list[-1]'], {'return_counts': '(True)'}), '(T_list[-1], return_counts=True)\n', (12321, 12353), True, 'import numpy as np\n'), ((12371, 12393), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (12379, 12393), True, 'import numpy as np\n'), ((13025, 13038), 'numpy.log', 'np.log', (['sig2w'], {}), '(sig2w)\n', (13031, 13038), True, 'import numpy as np\n'), ((13892, 13911), 'numpy.dot', 'np.dot', (['A', '(mus * nx)'], {}), '(A, mus * nx)\n', (13898, 13911), True, 'import numpy as np\n'), ((14417, 14442), 'numpy.sum', 'np.sum', (['(q * nu_star_prior)'], {}), '(q * nu_star_prior)\n', (14423, 14442), True, 'import numpy as np\n'), ((15321, 15341), 'numpy.sum', 'np.sum', (['a[start:end]'], {}), '(a[start:end])\n', (15327, 15341), True, 'import numpy as np\n'), ((15520, 15529), 'numpy.log', 'np.log', (['s'], 
{}), '(s)\n', (15526, 15529), True, 'import numpy as np\n'), ((16887, 16939), 'h5py.File', 'h5py.File', (['spe_path', '"""r"""'], {'libver': '"""latest"""', 'swmr': '(True)'}), "(spe_path, 'r', libver='latest', swmr=True)\n", (16896, 16939), False, 'import h5py\n'), ((17367, 17379), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17377, 17379), True, 'import matplotlib.pyplot as plt\n'), ((17422, 17531), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {'figure': 'fig', 'left': '(0.1)', 'right': '(0.85)', 'top': '(0.95)', 'bottom': '(0.15)', 'wspace': '(0.4)', 'hspace': '(0.5)'}), '(1, 1, figure=fig, left=0.1, right=0.85, top=0.95, bottom=\n 0.15, wspace=0.4, hspace=0.5)\n', (17439, 17531), True, 'import matplotlib.gridspec as gridspec\n'), ((18686, 18697), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18695, 18697), True, 'import matplotlib.pyplot as plt\n'), ((18840, 18853), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (18848, 18853), True, 'import numpy as np\n'), ((19361, 19392), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': '(0)', 'scale': 'sigma'}), '(x, loc=0, scale=sigma)\n', (19369, 19392), False, 'from scipy.stats import poisson, uniform, norm\n'), ((19807, 19841), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['x'], {'loc': '(0)', 'scale': 'sigma'}), '(x, loc=0, scale=sigma)\n', (19818, 19841), False, 'from scipy.stats import poisson, uniform, norm\n'), ((20780, 20800), 'numpy.sum', 'np.sum', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (20786, 20800), True, 'import numpy as np\n'), ((20803, 20829), 'numpy.sum', 'np.sum', (['probcharge'], {'axis': '(1)'}), '(probcharge, axis=1)\n', (20809, 20829), True, 'import numpy as np\n'), ((21605, 21623), 'numpy.vectorize', 'np.vectorize', (['logL'], {}), '(logL)\n', (21617, 21623), True, 'import numpy as np\n'), ((22329, 22350), 'numpy.isin', 'np.isin', (['hitt', 'hitt_r'], {}), '(hitt, hitt_r)\n', (22336, 22350), True, 'import numpy as np\n'), ((22401, 
22422), 'numpy.isin', 'np.isin', (['hitt', 'hitt_r'], {}), '(hitt, hitt_r)\n', (22408, 22422), True, 'import numpy as np\n'), ((23022, 23042), 'numpy.isin', 'np.isin', (['tlist', 'hitt'], {}), '(tlist, hitt)\n', (23029, 23042), True, 'import numpy as np\n'), ((23072, 23094), 'numpy.repeat', 'np.repeat', (['npe_init', 'n'], {}), '(npe_init, n)\n', (23081, 23094), True, 'import numpy as np\n'), ((1710, 1748), 'numpy.argwhere', 'np.argwhere', (["(wave > 5 * spe_pre['std'])"], {}), "(wave > 5 * spe_pre['std'])\n", (1721, 1748), True, 'import numpy as np\n'), ((5339, 5381), 'numpy.convolve', 'np.convolve', (['wave_deconv', 'spe'], {'mode': '"""same"""'}), "(wave_deconv, spe, mode='same')\n", (5350, 5381), True, 'import numpy as np\n'), ((5422, 5473), 'numpy.convolve', 'np.convolve', (['relative_blur', 'spe_mirror'], {'mode': '"""same"""'}), "(relative_blur, spe_mirror, mode='same')\n", (5433, 5473), True, 'import numpy as np\n'), ((5850, 5885), 'numpy.argwhere', 'np.argwhere', (["(w > 5 * spe_pre['std'])"], {}), "(w > 5 * spe_pre['std'])\n", (5861, 5885), True, 'import numpy as np\n'), ((6398, 6423), 'scipy.fftpack.ifft', 'ifft', (['signalf', '(2 * length)'], {}), '(signalf, 2 * length)\n', (6402, 6423), False, 'from scipy.fftpack import fft, ifft\n'), ((6699, 6757), 'numpy.argwhere', 'np.argwhere', (["(wave[spe_pre['peak_c']:] > spe_pre['std'] * 5)"], {}), "(wave[spe_pre['peak_c']:] > spe_pre['std'] * 5)\n", (6710, 6757), True, 'import numpy as np\n'), ((7052, 7110), 'numpy.argwhere', 'np.argwhere', (["(wave[spe_pre['peak_c']:] > spe_pre['std'] * 5)"], {}), "(wave[spe_pre['peak_c']:] > spe_pre['std'] * 5)\n", (7063, 7110), True, 'import numpy as np\n'), ((7366, 7390), 'numpy.diff', 'np.diff', (['w'], {'prepend': 'w[0]'}), '(w, prepend=w[0])\n', (7373, 7390), True, 'import numpy as np\n'), ((8326, 8346), 'numpy.dot', 'np.dot', (['A_vec', 'c_vec'], {}), '(A_vec, c_vec)\n', (8332, 8346), True, 'import numpy as np\n'), ((9335, 9357), 'numpy.insert', 'np.insert', (['c_cha', 
'(0)', '(0)'], {}), '(c_cha, 0, 0)\n', (9344, 9357), True, 'import numpy as np\n'), ((9362, 9378), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (9371, 9378), True, 'import numpy as np\n'), ((10596, 10619), 'numpy.log', 'np.log', (['(1 / proposal[1])'], {}), '(1 / proposal[1])\n', (10602, 10619), True, 'import numpy as np\n'), ((12982, 12999), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (12988, 12999), True, 'import numpy as np\n'), ((13054, 13075), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', (['(0)', 'p1'], {}), '(0, p1)\n', (13068, 13075), False, 'from scipy.stats import poisson, uniform, norm\n'), ((13383, 13407), 'numpy.cumsum', 'np.cumsum', (['Δν_history[0]'], {}), '(Δν_history[0])\n', (13392, 13407), True, 'import numpy as np\n'), ((13550, 13562), 'numpy.abs', 'np.abs', (['flip'], {}), '(flip)\n', (13556, 13562), True, 'import numpy as np\n'), ((14011, 14028), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (14017, 14028), True, 'import numpy as np\n'), ((14052, 14072), 'numpy.linalg.det', 'np.linalg.det', (['Phi_s'], {}), '(Phi_s)\n', (14065, 14072), True, 'import numpy as np\n'), ((14259, 14268), 'numpy.eye', 'np.eye', (['M'], {}), '(M)\n', (14265, 14268), True, 'import numpy as np\n'), ((15485, 15507), 'numpy.exp', 'np.exp', (['(values - a_max)'], {}), '(values - a_max)\n', (15491, 15507), True, 'import numpy as np\n'), ((16135, 16207), 'numpy.dtype', 'np.dtype', (["[('NPE', np.int_), ('f_vec', np.float64), ('repeat', np.int_)]"], {}), "([('NPE', np.int_), ('f_vec', np.float64), ('repeat', np.int_)])\n", (16143, 16207), True, 'import numpy as np\n'), ((16796, 16819), 'numpy.sinc', 'np.sinc', (['(t[:, None] - l)'], {}), '(t[:, None] - l)\n', (16803, 16819), True, 'import numpy as np\n'), ((17621, 17638), 'numpy.argmax', 'np.argmax', (['spe[i]'], {}), '(spe[i])\n', (17630, 17638), True, 'import numpy as np\n'), ((20936, 20948), 'numpy.sqrt', 'np.sqrt', (['npe'], {}), '(npe)\n', (20943, 20948), True, 
'import numpy as np\n'), ((20959, 20970), 'numpy.sqrt', 'np.sqrt', (['s0'], {}), '(s0)\n', (20966, 20970), True, 'import numpy as np\n'), ((21002, 21046), 'scipy.stats.norm.pdf', 'norm.pdf', (['charge'], {'loc': '(gmu * npe)', 'scale': 'scale'}), '(charge, loc=gmu * npe, scale=scale)\n', (21010, 21046), False, 'from scipy.stats import poisson, uniform, norm\n'), ((23323, 23355), 'numpy.arange', 'np.arange', (['left_wave', 'right_wave'], {}), '(left_wave, right_wave)\n', (23332, 23355), True, 'import numpy as np\n'), ((24250, 24281), 'numpy.sum', 'np.sum', (['cha_ans[pet_ans_0 == i]'], {}), '(cha_ans[pet_ans_0 == i])\n', (24256, 24281), True, 'import numpy as np\n'), ((2323, 2358), 'numpy.argmax', 'np.argmax', (["wave[spe_pre['peak_c']:]"], {}), "(wave[spe_pre['peak_c']:])\n", (2332, 2358), True, 'import numpy as np\n'), ((5492, 5529), 'numpy.abs', 'np.abs', (['(wave_deconv - new_wave_deconv)'], {}), '(wave_deconv - new_wave_deconv)\n', (5498, 5529), True, 'import numpy as np\n'), ((6516, 6538), 'numpy.sum', 'np.sum', (["spe_pre['spe']"], {}), "(spe_pre['spe'])\n", (6522, 6538), True, 'import numpy as np\n'), ((6564, 6599), 'numpy.argmax', 'np.argmax', (["wave[spe_pre['peak_c']:]"], {}), "(wave[spe_pre['peak_c']:])\n", (6573, 6599), True, 'import numpy as np\n'), ((6924, 6959), 'numpy.argmax', 'np.argmax', (["wave[spe_pre['peak_c']:]"], {}), "(wave[spe_pre['peak_c']:])\n", (6933, 6959), True, 'import numpy as np\n'), ((7167, 7202), 'numpy.argmax', 'np.argmax', (["wave[spe_pre['peak_c']:]"], {}), "(wave[spe_pre['peak_c']:])\n", (7176, 7202), True, 'import numpy as np\n'), ((7456, 7506), 'numpy.argwhere', 'np.argwhere', (["((w > spe_pre['std'] * 5) & (dpta < 0))"], {}), "((w > spe_pre['std'] * 5) & (dpta < 0))\n", (7467, 7506), True, 'import numpy as np\n'), ((7619, 7630), 'numpy.sum', 'np.sum', (['cha'], {}), '(cha)\n', (7625, 7630), True, 'import numpy as np\n'), ((7640, 7652), 'numpy.sum', 'np.sum', (['wave'], {}), '(wave)\n', (7646, 7652), True, 'import numpy as 
np\n'), ((7725, 7760), 'numpy.argmax', 'np.argmax', (["wave[spe_pre['peak_c']:]"], {}), "(wave[spe_pre['peak_c']:])\n", (7734, 7760), True, 'import numpy as np\n'), ((9611, 9633), 'numpy.insert', 'np.insert', (['c_cha', '(0)', '(0)'], {}), '(c_cha, 0, 0)\n', (9620, 9633), True, 'import numpy as np\n'), ((9638, 9654), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (9647, 9654), True, 'import numpy as np\n'), ((10460, 10482), 'numpy.random.rand', 'np.random.rand', (['TRIALS'], {}), '(TRIALS)\n', (10474, 10482), True, 'import numpy as np\n'), ((10726, 10749), 'numpy.log', 'np.log', (['(1 / proposal[0])'], {}), '(1 / proposal[0])\n', (10732, 10749), True, 'import numpy as np\n'), ((13974, 13994), 'numpy.matmul', 'np.matmul', (['z', 'invPhi'], {}), '(z, invPhi)\n', (13983, 13994), True, 'import numpy as np\n'), ((14088, 14113), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', (['nx'], {'mu': 'la'}), '(nx, mu=la)\n', (14102, 14113), False, 'from scipy.stats import poisson, uniform, norm\n'), ((14226, 14249), 'numpy.diagflat', 'np.diagflat', (['(sig2s * nx)'], {}), '(sig2s * nx)\n', (14237, 14249), True, 'import numpy as np\n'), ((14456, 14465), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (14462, 14465), True, 'import numpy as np\n'), ((17846, 17866), 'numpy.arange', 'np.arange', (['(0)', 'peak_c'], {}), '(0, peak_c)\n', (17855, 17866), True, 'import numpy as np\n'), ((19543, 19586), 'numpy.exp', 'np.exp', (['(alpha * alpha * sigma * sigma / 2.0)'], {}), '(alpha * alpha * sigma * sigma / 2.0)\n', (19549, 19586), True, 'import numpy as np\n'), ((19701, 19719), 'numpy.exp', 'np.exp', (['(-alpha * x)'], {}), '(-alpha * x)\n', (19707, 19719), True, 'import numpy as np\n'), ((20195, 20215), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (20203, 20215), True, 'import numpy as np\n'), ((21054, 21093), 'scipy.stats.norm.cdf', 'norm.cdf', (['(0)'], {'loc': '(gmu * npe)', 'scale': 'scale'}), '(0, loc=gmu * npe, scale=scale)\n', (21062, 21093), 
False, 'from scipy.stats import poisson, uniform, norm\n'), ((23395, 23409), 'numpy.abs', 'np.abs', (['t_auto'], {}), '(t_auto)\n', (23401, 23409), True, 'import numpy as np\n'), ((23802, 23824), 'numpy.std', 'np.std', (['array'], {'ddof': '(-1)'}), '(array, ddof=-1)\n', (23808, 23824), True, 'import numpy as np\n'), ((9577, 9592), 'numpy.arange', 'np.arange', (['NPE0'], {}), '(NPE0)\n', (9586, 9592), True, 'import numpy as np\n'), ((10939, 10954), 'numpy.log', 'np.log', (['(NPE + 1)'], {}), '(NPE + 1)\n', (10945, 10954), True, 'import numpy as np\n'), ((11314, 11325), 'numpy.log', 'np.log', (['NPE'], {}), '(NPE)\n', (11320, 11325), True, 'import numpy as np\n'), ((12939, 12956), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (12953, 12956), True, 'import numpy as np\n'), ((18808, 18822), 'numpy.argmax', 'np.argmax', (['cha'], {}), '(cha)\n', (18817, 18822), True, 'import numpy as np\n'), ((19459, 19475), 'numpy.exp', 'np.exp', (['(-x / tau)'], {}), '(-x / tau)\n', (19465, 19475), True, 'import numpy as np\n'), ((19634, 19646), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (19641, 19646), True, 'import numpy as np\n'), ((19979, 19996), 'numpy.log', 'np.log', (['(2.0 * tau)'], {}), '(2.0 * tau)\n', (19985, 19996), True, 'import numpy as np\n'), ((20082, 20094), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (20089, 20094), True, 'import numpy as np\n'), ((22562, 22586), 'numpy.arange', 'np.arange', (['(-nsp)', '(nsp + 1)'], {}), '(-nsp, nsp + 1)\n', (22571, 22586), True, 'import numpy as np\n'), ((23780, 23794), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (23787, 23794), True, 'import numpy as np\n'), ((24719, 24746), 'numpy.power', 'np.power', (['(wave - wav_ans)', '(2)'], {}), '(wave - wav_ans, 2)\n', (24727, 24746), True, 'import numpy as np\n'), ((25184, 25214), 'numpy.power', 'np.power', (['(wav_ans - wav_sub)', '(2)'], {}), '(wav_ans - wav_sub, 2)\n', (25192, 25214), True, 'import numpy as np\n'), ((4720, 4735), 
'numpy.matmul', 'np.matmul', (['M', 'x'], {}), '(M, x)\n', (4729, 4735), True, 'import numpy as np\n'), ((6479, 6490), 'numpy.sum', 'np.sum', (['cha'], {}), '(cha)\n', (6485, 6490), True, 'import numpy as np\n'), ((6500, 6512), 'numpy.sum', 'np.sum', (['wave'], {}), '(wave)\n', (6506, 6512), True, 'import numpy as np\n'), ((16626, 16652), 'numpy.matmul', 'np.matmul', (['mnecpu', 'outputs'], {}), '(mnecpu, outputs)\n', (16635, 16652), True, 'import numpy as np\n'), ((17937, 17973), 'numpy.sum', 'np.sum', (['(spe[i][:peak_c] < 5 * std[i])'], {}), '(spe[i][:peak_c] < 5 * std[i])\n', (17943, 17973), True, 'import numpy as np\n'), ((19679, 19697), 'scipy.special.erf', 'special.erf', (['x_erf'], {}), '(x_erf)\n', (19690, 19697), True, 'import scipy.special as special\n'), ((19897, 19908), 'numpy.log', 'np.log', (['tau'], {}), '(tau)\n', (19903, 19908), True, 'import numpy as np\n'), ((20315, 20330), 'numpy.log', 'np.log', (['(t / tau)'], {}), '(t / tau)\n', (20321, 20330), True, 'import numpy as np\n'), ((20515, 20553), 'scipy.stats.norm.cdf', 'norm.cdf', (['thres'], {'loc': 'gmu', 'scale': 'gsigma'}), '(thres, loc=gmu, scale=gsigma)\n', (20523, 20553), False, 'from scipy.stats import poisson, uniform, norm\n'), ((21675, 21697), 'numpy.argmin', 'np.argmin', (['logLv_tlist'], {}), '(logLv_tlist)\n', (21684, 21697), True, 'import numpy as np\n'), ((23156, 23192), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {'endpoint': '(False)'}), '(0, 1, n, endpoint=False)\n', (23167, 23192), True, 'import numpy as np\n'), ((26868, 26890), 'numpy.abs', 'np.abs', (['(wav_sub - wave)'], {}), '(wav_sub - wave)\n', (26874, 26890), True, 'import numpy as np\n'), ((12277, 12289), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (12286, 12289), True, 'import numpy as np\n'), ((20134, 20152), 'scipy.special.erf', 'special.erf', (['x_erf'], {}), '(x_erf)\n', (20145, 20152), True, 'import scipy.special as special\n'), ((22647, 22673), 'scipy.signal.savgol_filter', 'savgol_filter', 
(['wave', '(11)', '(4)'], {}), '(wave, 11, 4)\n', (22660, 22673), False, 'from scipy.signal import savgol_filter\n'), ((23254, 23268), 'numpy.diff', 'np.diff', (['tlist'], {}), '(tlist)\n', (23261, 23268), True, 'import numpy as np\n'), ((18114, 18150), 'numpy.sum', 'np.sum', (['(spe[i][peak_c:] > 5 * std[i])'], {}), '(spe[i][peak_c:] > 5 * std[i])\n', (18120, 18150), True, 'import numpy as np\n'), ((21550, 21570), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (21558, 21570), True, 'import numpy as np\n'), ((17759, 17793), 'numpy.argwhere', 'np.argwhere', (['(spe[i][peak_c:] < 0.1)'], {}), '(spe[i][peak_c:] < 0.1)\n', (17770, 17793), True, 'import numpy as np\n'), ((21380, 21400), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (21388, 21400), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import sys
import hashlib
import io
import os
from . import glob_var
from . import structures
from . import type_conversions
def decompress_motifs_from_bitstring(bitstring):
    """Parse a concatenated binary motif stream into a list of w_motif objects.

    Each record is laid out as: 1 byte stem length, 1 byte loop length,
    ``full_length`` bytes of sequence, ``full_length`` bytes of structure,
    and a trailing 16-byte md5 checksum that is verified against the
    re-compressed motif.
    """
    motifs = []
    pos = 0
    end = len(bitstring)
    while pos < end:
        stem_len = bitstring[pos]
        loop_len = bitstring[pos + 1]
        body_len = stem_len + loop_len
        seq_start = pos + 2
        struct_start = seq_start + body_len
        md5_start = struct_start + body_len
        seq_arr = np.frombuffer(bitstring[seq_start:struct_start], dtype=np.uint8)
        struct_arr = np.frombuffer(bitstring[struct_start:md5_start], dtype=np.uint8)
        checksum = bitstring[md5_start : md5_start + 16]
        motif = structures.w_motif(stem_len, loop_len)
        motif.sequence = seq_arr
        motif.structure = struct_arr
        motif.adjust_linear_length()
        motif.compress()
        # checksum guards against a corrupted or misaligned stream
        assert checksum == motif.md5
        motifs.append(motif)
        pos = md5_start + 16
    return motifs
def read_rna_bin_file(inp_file):
    """Load a binary RNA sequence file.

    Returns a tuple of (dict mapping name -> sequence object, list of names
    in file order).
    """
    with open(inp_file, 'rb') as handle:
        raw_bytes = handle.read()
    return decompress_named_sequences(raw_bytes)
def read_motif_file(inp_file):
    """Load a binary motif file and return the list of decoded motif objects."""
    with open(inp_file, 'rb') as handle:
        raw_bytes = handle.read()
    return decompress_motifs_from_bitstring(raw_bytes)
def read_fasta(infile, do_print = False, how_often_print = 1000):
    """Parse a FASTA file.

    Returns a tuple of ({annotation: sequence object}, [annotations in file
    order]). Optionally prints progress every `how_often_print` entries.
    """
    sequences = {}
    order = []
    with open(infile, 'r') as handle:
        entries = handle.read().split('>')
    for idx, chunk in enumerate(entries):
        if chunk == '':
            # artifact of splitting on the leading '>'
            continue
        newline_pos = chunk.find('\n')
        header = chunk[:newline_pos]
        seq_str = chunk[newline_pos + 1:].replace('\n', '')
        seq_obj = structures.w_sequence(len(seq_str))
        seq_obj.from_sequence(seq_str)
        sequences[header] = seq_obj
        order.append(header)
        if idx % how_often_print == 0 and do_print:
            print("Read sequence number ", idx)
    return sequences, order
def read_bin_sequences(rna_bin_filename):
    """Read a binary sequence file and convert its contents to n-type sequences.

    Sequences are returned in the order recorded in the file.
    """
    seqs_dict, seqs_order = read_rna_bin_file(rna_bin_filename)
    ordered_w_seqs = [seqs_dict[seq_name] for seq_name in seqs_order]
    return type_conversions.w_to_n_sequences_list(ordered_w_seqs)
def compress_named_sequences(seq_objects_dict, seqs_order,
                             do_print=False, how_often_print=1000):
    """Serialize named sequences into a single byte string.

    Each record is the name right-padded to MAX_SEQ_NAME_LENGTH and encoded
    as utf-8, followed by the sequence object's own compressed bytestring.
    """
    chunks = []
    for idx, seq_name in enumerate(seqs_order):
        padded_name = seq_name.ljust(glob_var.MAX_SEQ_NAME_LENGTH).encode('utf-8')
        # a multi-byte utf-8 name would break the fixed-width record layout
        assert len(padded_name) == glob_var.MAX_SEQ_NAME_LENGTH
        seq_obj = seq_objects_dict[seq_name]
        seq_obj.compress()
        chunks.append(padded_name + seq_obj.bytestring)
        if idx % how_often_print == 0 and do_print:
            print("Compressed sequence number ", idx)
    return b''.join(chunks)
def decompress_named_sequences(bitstring,
                               do_print=False, how_often_print=10000):
    """Inverse of compress_named_sequences.

    Record layout per sequence: MAX_SEQ_NAME_LENGTH bytes of right-padded
    name, 4 bytes (uint32) sequence length, `seq_length` bytes of
    nucleotides, 16 bytes of md5 checksum (verified after re-compression).

    Returns ({name: w_sequence}, [names in file order]).
    """
    seq_objects_dict = {}
    seq_objects_order = []
    total_length = len(bitstring)
    current_spot = 0
    counter = 0
    while current_spot < total_length:
        name_in_bytes = bitstring[current_spot : current_spot + glob_var.MAX_SEQ_NAME_LENGTH]
        length_bitstring = bitstring[current_spot + glob_var.MAX_SEQ_NAME_LENGTH :
                           current_spot + glob_var.MAX_SEQ_NAME_LENGTH + 4]
        seq_length_np = np.frombuffer(length_bitstring, dtype=np.uint32)
        seq_length = seq_length_np[0]
        sequence_bitstring = bitstring[current_spot + glob_var.MAX_SEQ_NAME_LENGTH + 4 :
                             current_spot + glob_var.MAX_SEQ_NAME_LENGTH + 4 + seq_length]
        md5_bitstring = bitstring[current_spot + glob_var.MAX_SEQ_NAME_LENGTH + 4 + seq_length :
                        current_spot + glob_var.MAX_SEQ_NAME_LENGTH + 4 + seq_length + 16]
        current_spot += glob_var.MAX_SEQ_NAME_LENGTH + 4 + seq_length + 16
        full_length_name = name_in_bytes.decode('utf-8')
        name = full_length_name.rstrip()
        current_sequence = structures.w_sequence(seq_length)
        current_sequence.nts = np.frombuffer(sequence_bitstring, dtype = np.uint8)
        current_sequence.compress()
        # checksum guards against a corrupted or misaligned stream
        assert (md5_bitstring == current_sequence.md5)
        seq_objects_dict[name] = current_sequence
        seq_objects_order.append(name)
        counter += 1
        if counter % how_often_print == 0:
            if do_print:
                # bug fix: this routine decompresses; the message previously
                # said "Compressed sequence number"
                print("Decompressed sequence number ", counter)
    return seq_objects_dict, seq_objects_order
def write_named_seq_to_fasta(seq_objects_dict, seq_objects_order):
    """Render named sequences as one FASTA-formatted string.

    Each entry contributes ">name\\n<sequence>\\n", in the order given by
    `seq_objects_order`.
    """
    pieces = []
    for seq_name in seq_objects_order:
        seq_text = seq_objects_dict[seq_name].print_sequence(return_string = True)
        pieces.append(">%s\n%s\n" % (seq_name, seq_text))
    return ''.join(pieces)
def decompress_profiles(bitstring,
                        do_print=False, how_often_print=10000):
    """Decompress bit-packed boolean occurrence profiles.

    Record layout per profile: 4 bytes (uint32) profile length in positions,
    ceil(length / 8) bytes of bit-packed values, 16 bytes of md5 checksum
    (verified after re-compression).

    Returns a 2D numpy bool array with one row per profile.
    """
    profiles_list = []
    total_length = len(bitstring)
    current_spot = 0
    counter = 0
    while current_spot < total_length:
        # get the length of the profile
        length_bitstring = bitstring[current_spot : current_spot + 4]
        profile_length_np = np.frombuffer(length_bitstring, dtype=np.uint32)
        profile_length = profile_length_np[0]
        # figure out how long is the profile packed into bits
        # if profile length // 8 > 0, it will take one additional byte
        if profile_length % 8 != 0:
            length_packed = (profile_length // 8) + 1
        else:
            length_packed = profile_length // 8
        values_bitstring = bitstring[current_spot + 4 : current_spot + 4 + length_packed]
        md5_bitstring = bitstring[current_spot + 4 + length_packed :
                        current_spot + 4 + length_packed + 16]
        current_spot += 4 + length_packed + 16
        values_packed_bits = np.frombuffer(values_bitstring, dtype=np.uint8)
        values = np.unpackbits(values_packed_bits)
        # unpackbits yields whole bytes; drop the padding bits at the tail
        values = values[0 : profile_length]
        current_profile = structures.w_profile(profile_length)
        current_profile.values = values
        current_profile.compress()
        assert (md5_bitstring == current_profile.md5)
        profiles_list.append(current_profile.values)
        counter += 1
        if counter % how_often_print == 0:
            if do_print:
                print("Decompressed profile number ", counter)
    # bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool names the same dtype and works on all versions
    profiles_array = np.array(profiles_list, dtype=bool)
    return profiles_array
def decompress_profiles_indices(bitstring,
                                do_print=False, how_often_print=10000):
    """Decompress profiles stored as packed indices of True positions.

    Record layout per profile: 4 bytes (uint32) total profile length,
    4 bytes (uint32) number of True indices, 4 bytes (uint32) bit width
    used per index, the indices packed at that width, then a 16-byte md5
    checksum (verified after re-compression).

    Returns a 2D numpy bool array with one row per profile.
    """
    profiles_list = []
    total_length = len(bitstring)
    current_spot = 0
    counter = 0
    while current_spot < total_length:
        # get the length of the profile
        length_bitstring = bitstring[current_spot : current_spot + 4]
        profile_length_np = np.frombuffer(length_bitstring, dtype=np.uint32)
        length = profile_length_np[0]
        # get the number of indices (of True) of the profile
        N_indices_bitstring = bitstring[current_spot + 4 : current_spot + 8]
        N_indices_np = np.frombuffer(N_indices_bitstring, dtype=np.uint32)
        N_indices = N_indices_np[0]
        # get the number of bits used per index (compression width)
        width_bitstring = bitstring[current_spot + 8 : current_spot + 12]
        width_np = np.frombuffer(width_bitstring, dtype=np.uint32)
        width = width_np[0]
        # figure out how many bytes do we need to read out
        length_packed = N_indices * width
        if length_packed % 8 != 0:
            length_packed = (length_packed // 8) + 1
        else:
            length_packed = length_packed // 8
        # read out bitstring of the proper size
        values_bitstring = bitstring[current_spot + 12 : current_spot + 12 + length_packed]
        md5_bitstring = bitstring[current_spot + 12 + length_packed :
                        current_spot + 12 + length_packed + 16]
        current_spot += 12 + length_packed + 16
        # convert bitsting to 32-bit arrays representing indices
        indices_packed_uint8 = np.frombuffer(values_bitstring, dtype=np.uint8)
        binary_bytes_array = np.unpackbits(indices_packed_uint8)
        binary_bytes_array = binary_bytes_array[0 : N_indices * width]
        reshaped_binary_array = binary_bytes_array.reshape(N_indices, width)
        # widen each index to 32 bits so it can be reinterpreted as uint32;
        # bug fix: np.bool was removed in NumPy 1.24, builtin bool is equivalent
        full_binary_array = np.zeros((N_indices, 32), dtype=bool)
        full_binary_array[:, 0:width] = reshaped_binary_array
        # convert 32-bit arrays into a uint32 indices
        reshaped_full_binary_array = full_binary_array.flatten()
        reshaped_full_binary_string = np.packbits(reshaped_full_binary_array)
        true_indices = np.frombuffer(reshaped_full_binary_string, dtype=np.uint32)
        # create a new profile
        curr_profile = structures.w_profile(length)
        curr_profile.values[true_indices] = True
        curr_profile.compress_indices()
        assert (md5_bitstring == curr_profile.md5_indices)
        profiles_list.append(curr_profile.values)
        counter += 1
        if counter % how_often_print == 0:
            if do_print:
                print("Decompressed profile number ", counter)
    # bug fix: np.bool -> bool (alias removed in NumPy 1.24)
    profiles_array = np.array(profiles_list, dtype=bool)
    return profiles_array
def decompress_exp_mask_file(bitstring):
    """Parse an expression-mask byte string.

    Layout: 4 bytes (uint32) mask length N, N bytes of boolean index flags,
    then N float32 expression values (4 bytes each).

    Returns (index_array: bool array of length N,
             values_array: float32 array of length N).
    """
    length_bitstring = bitstring[0 : 4]
    mask_length_np = np.frombuffer(length_bitstring, dtype=np.uint32)
    mask_length = mask_length_np[0]
    index_bitstring = bitstring[4 : 4 + mask_length]  # one byte per flag
    values_bitstring = bitstring[4 + mask_length : 4 + mask_length + mask_length*4]  # dtype=np.float32
    # bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # builtin bool names the same one-byte boolean dtype
    index_array = np.frombuffer(index_bitstring, dtype=bool)
    values_array = np.frombuffer(values_bitstring, dtype=np.float32)
    return index_array, values_array
def unpack_mask_file(exp_mask_file, do_print=False):
    """Read an expression-mask file from disk and decode it.

    Returns the boolean index array and the float32 values array."""
    with open(exp_mask_file, 'rb') as fh:
        raw_bytes = fh.read()
    index_array, values_array = decompress_exp_mask_file(raw_bytes)
    if do_print:
        message = ("Expression values are provided for %d out of %d transcripts in the reference transcriptome" %
                   (index_array.sum(), index_array.shape[0]))
        print(message)
    return index_array, values_array
def unpack_profiles_file(profiles_bin_file,
                         indices_mode = False,
                         do_print=False):
    """Load compressed occurrence profiles from a binary file.

    Dispatches to the indices-based decoder when indices_mode is set,
    otherwise to the dense decoder."""
    with open(profiles_bin_file, 'rb') as fh:
        raw_bytes = fh.read()
    decoder = decompress_profiles_indices if indices_mode else decompress_profiles
    decompressed_profiles_array = decoder(raw_bytes)
    if do_print:
        print("%d profiles have been loaded" % len(decompressed_profiles_array))
    return decompressed_profiles_array
def unpack_profiles_and_mask(profiles_bin_file, exp_mask_file, indices_mode = False, do_print=False):
    """Load profiles and the matching expression mask, checking consistency.

    Exits the process when the profile length does not match the mask length
    (i.e. the profiles were built against a different transcriptome)."""
    decompressed_profiles_array = unpack_profiles_file(profiles_bin_file, indices_mode, do_print)
    index_array, values_array = unpack_mask_file(exp_mask_file, do_print)
    profile_length = decompressed_profiles_array[0].shape[0]
    mask_length = index_array.shape[0]
    try:
        assert (profile_length == mask_length)
    except AssertionError:
        print("Error: occurence profiles were calculated for some other reference transcriptome. The length of the "
              "profiles is %d and the length of the transcriptome provided is %d" %
              (profile_length, mask_length))
        sys.exit(1)
    return decompressed_profiles_array, index_array, values_array
def write_MI_values(MI_values_array, nbins, MI_values_file):
    """Serialize MI values to a file: uint32 count, uint32 nbins, raw values."""
    header = (np.array([MI_values_array.shape[0]], dtype=np.uint32).tobytes() +
              np.array([nbins], dtype=np.uint32).tobytes())
    payload = MI_values_array.tobytes()
    with open(MI_values_file, 'wb') as out_handle:
        out_handle.write(header + payload)
def decompres_MI_values(bitstring):
    """Decode an MI bytestring: two uint32 values (count, nbins), then the
    float64 MI array. Inverse of write_MI_values."""
    header = np.frombuffer(bitstring[0: 8], dtype=np.uint32)
    MI_array_length, nbins = header[0], header[1]
    # each np.float64 entry occupies 8 bytes
    # (check with print(np.dtype(np.float64).itemsize))
    MI_array = np.frombuffer(bitstring[8 : 8 + MI_array_length * 8], dtype=np.float64)
    return MI_array, nbins
def read_MI_values(MI_values_file):
    """Read an MI-values file from disk and decode its contents."""
    with open(MI_values_file, 'rb') as fh:
        raw_bytes = fh.read()
    return decompres_MI_values(raw_bytes)
# def write_seed_significancy_threshold(last_positive_seed, threshold_file):
# threshold_bytes = np.uint32(last_positive_seed).tobytes()
# md5 = hashlib.md5()
# md5.update(threshold_bytes)
# md5_checksum = md5.digest()
# byte_string = threshold_bytes + md5_checksum
# with open(threshold_file, 'wb') as wf:
# wf.write(byte_string)
#
#
# def read_seed_significancy_threshold(threshold_file):
# with open(threshold_file, 'rb') as rf:
# bitstring = rf.read()
# threshold_bytes = bitstring[0: 4]
# md5_checksum_saved = bitstring[4:]
# threshold_value = np.frombuffer(threshold_bytes, dtype=np.uint32)
# threshold_bytes_recode = np.uint32(threshold_value).tobytes()
# md5 = hashlib.md5()
# md5.update(threshold_bytes_recode)
# md5_checksum = md5.digest()
# assert(md5_checksum == md5_checksum_saved)
# return threshold_value[0]
#
#
# def decompres_seed_threshold(bitstring):
# length_nbins_bitstring = bitstring[0: 8]
# MI_array_length_nbins_np = np.frombuffer(length_nbins_bitstring, dtype=np.uint32)
# MI_array_length = MI_array_length_nbins_np[0]
# nbins = MI_array_length_nbins_np[1]
# MI_array_bitstring = bitstring[8 : 8 + MI_array_length * 8] # np.float64 takes 8 bytes
# # to check it, you could run print(np.dtype(np.float32).itemsize)
# MI_array = np.frombuffer(MI_array_bitstring, dtype=np.float64)
# return MI_array, nbins
def read_seed_pass_individual_file(inp_filename):
    """Read one seed-pass file; returns [] when it holds zero motifs.

    Layout: uint32 motif count, then the compressed motif bytestrings."""
    with open(inp_filename, 'rb') as fh:
        raw_bytes = fh.read()
    expected_count = np.frombuffer(raw_bytes[0: 4], dtype=np.uint32)
    if expected_count == 0:
        return []
    motifs_list = decompress_motifs_from_bitstring(raw_bytes[4: ])
    assert(len(motifs_list) == expected_count)
    return motifs_list
def read_profile_pass_individual_file(inp_filename,
                                      indices_mode = False):
    """Read one profile-pass file; returns [] when it holds zero profiles.

    Layout: uint32 profile count, then the compressed profiles (sparse
    indices encoding when indices_mode is set, dense otherwise)."""
    with open(inp_filename, 'rb') as fh:
        raw_bytes = fh.read()
    expected_count = np.frombuffer(raw_bytes[0: 4], dtype=np.uint32)
    if expected_count == 0:
        return []
    payload = raw_bytes[4: ]
    if indices_mode:
        profiles_array = decompress_profiles_indices(payload)
    else:
        profiles_array = decompress_profiles(payload)
    assert(len(profiles_array) == expected_count)
    return profiles_array
def write_list_of_seeds(seeds_passed_list, combined_seeds_filename):
    """Compress each motif and write all bytestrings back-to-back."""
    chunks = []
    for motif in seeds_passed_list:
        motif.compress()
        chunks.append(motif.bytestring)
    with open(combined_seeds_filename, 'wb') as out_handle:
        out_handle.write(b''.join(chunks))
def write_array_of_profiles(profiles_passed_array, combined_profiles_filename,
                            indices_mode = False, index_bit_width = 24):
    """Compress each profile row and append it to one output file.

    Uses the sparse indices encoding (with the given bit width) when
    indices_mode is set, otherwise the dense encoding."""
    with open(combined_profiles_filename, 'wb') as out_handle:
        for row in range(profiles_passed_array.shape[0]):
            profile = structures.w_profile(profiles_passed_array[row].shape[0])
            profile.values = profiles_passed_array[row]
            if indices_mode:
                profile.compress_indices(width = index_bit_width)
                out_handle.write(profile.bytestring_indices)
            else:
                profile.compress()
                out_handle.write(profile.bytestring)
def write_classification_array(classification_array, classification_filename):
    """Write uint32 length + array bytes, followed by an md5 checksum."""
    payload = (np.array([classification_array.shape[0]], dtype=np.uint32).tobytes() +
               classification_array.tobytes())
    digest = hashlib.md5(payload).digest()
    # md5 checksum is always 16 bytes long, see wiki:
    # https://en.wikipedia.org/wiki/MD5 -- the reader relies on this
    # when splitting the file back apart
    assert (len(digest) == 16)
    with open(classification_filename, 'wb') as out_handle:
        out_handle.write(payload + digest)
def read_classification_array(classification_filename):
    """Read a classification file and verify its md5 checksum and length.

    Inverse of write_classification_array."""
    with open(classification_filename, 'rb') as fh:
        raw_bytes = fh.read()
    expected_count = np.frombuffer(raw_bytes[0: 4], dtype=np.uint32)[0]
    array_bytes = raw_bytes[4 : 4 + 4*expected_count]
    stored_digest = raw_bytes[4 + 4*expected_count : ]
    classification_array = np.frombuffer(array_bytes, dtype=np.uint32)
    # re-serialize and hash to confirm the file was not truncated/corrupted
    recoded = (np.array([classification_array.shape[0]], dtype=np.uint32).tobytes() +
               classification_array.tobytes())
    assert(hashlib.md5(recoded).digest() == stored_digest)
    assert(expected_count == classification_array.shape[0])
    return classification_array
def write_np_array(inp_array, out_filename, return_bytestring = False):
    """Serialize an array: uint8 ndim, uint32 shape entries, raw data,
    then md5(raw data).

    Returns the bytestring instead of writing when return_bytestring is set.
    """
    ndim_bytes = np.uint8(len(inp_array.shape)).tobytes()
    shape_bytes = np.array(inp_array.shape, dtype=np.uint32).tobytes()
    data_bytes = inp_array.tobytes()
    checksum = hashlib.md5(data_bytes).digest()
    full_bytestring = ndim_bytes + shape_bytes + data_bytes + checksum
    if return_bytestring:
        return full_bytestring
    with open(out_filename, 'wb') as out_handle:
        out_handle.write(full_bytestring)
def read_np_array_from_bitstring(bitstring, dtype, return_length = False):
    """Inverse of write_np_array: decode ndim, shape, data and verify md5.

    When return_length is set, also returns how many bytes were consumed,
    so several arrays can be unpacked from one concatenated bitstring."""
    n_dimentions = np.frombuffer(bitstring[0 : 1], dtype=np.uint8)[0]
    shape_array = np.frombuffer(bitstring[1 : 1 + 4 * n_dimentions], dtype=np.uint32)
    flatten_length = int(np.prod(shape_array))
    data_start = 1 + 4 * n_dimentions
    data_end = data_start + dtype.itemsize * flatten_length
    output_array = np.frombuffer(bitstring[data_start : data_end], dtype=dtype)
    reshaped_array = np.reshape(output_array, shape_array, order='C')
    # verify integrity against the stored 16-byte md5 digest
    md5_checksum = bitstring[data_end : data_end + 16]
    assert(md5_checksum == hashlib.md5(reshaped_array.tobytes()).digest())
    if not return_length:
        return reshaped_array
    return reshaped_array, data_end + 16
def read_np_array(inp_filename, dtype):
    """Read a single serialized numpy array from a file."""
    with open(inp_filename, 'rb') as fh:
        raw_bytes = fh.read()
    return read_np_array_from_bitstring(raw_bytes, dtype)
def read_multiple_np_arrays(inp_filename, dtype):
    """Read back-to-back serialized numpy arrays from one file."""
    with open(inp_filename, 'rb') as fh:
        remaining = fh.read()
    arrays_list = []
    while len(remaining) > 0:
        curr_array, consumed = read_np_array_from_bitstring(remaining, dtype,
                                                     return_length = True)
        arrays_list.append(curr_array)
        remaining = remaining[consumed:]
    return arrays_list
def read_shape_file_for_many_sequences(infile):
    """Parse a FASTA-like shape file into {annotation: DataFrame}.

    Each '>' record holds an annotation line followed by a tab-separated
    table of shape values (no header row).

    Fix: drop the unused enumerate index from the original loop.
    """
    shape_profiles_dict = {}
    with open(infile, 'r') as f:
        split_string = f.read().split('>')
    for entry in split_string:
        # split('>') yields an empty leading entry when the file starts with '>'
        if entry == '':
            continue
        seq_start = entry.find('\n')
        annotation = entry[:seq_start]
        shape_table_string = entry[seq_start + 1:]
        shape_df = pd.read_csv(io.StringIO(shape_table_string), sep='\t', header = None)
        shape_profiles_dict[annotation] = shape_df
    return shape_profiles_dict
def write_individual_shape_file(shape_dataframe,
                                outfile):
    """Dump one shape table as tab-separated values, no header or index."""
    shape_dataframe.to_csv(outfile, index=False, header=False, sep='\t')
def create_folder(folder):
    """Create *folder* (and any missing parents) if it does not exist.

    Uses exist_ok=True so concurrent callers cannot race between the
    existence check and the creation: the original check-then-create
    pattern could raise FileExistsError when another process created
    the directory in between.
    """
    os.makedirs(folder, exist_ok=True)
def read_fasta_no_compression(infile):
    """Parse a plain-text FASTA file.

    Returns:
        (tr_dict_loc, seqs_order): mapping annotation -> sequence, and the
        annotations in file order."""
    tr_dict_loc = {}
    seqs_order = []
    with open(infile, 'r') as f:
        entries = f.read().split('>')
    for entry in entries:
        # the split yields an empty leading entry before the first '>'
        if entry == '':
            continue
        newline_pos = entry.find('\n')
        annotation = entry[:newline_pos]
        sequence = entry[newline_pos + 1:].replace('\n', '')
        tr_dict_loc[annotation] = sequence
        seqs_order.append(annotation)
    return tr_dict_loc, seqs_order
def write_fasta_no_compression(inp_dict, filename):
with open(filename, 'w') as wf:
for i in sorted(list(inp_dict.keys())):
string_to_write = ">%s\n%s\n" % (i, inp_dict[i])
wf.write(string_to_write) | [
"numpy.uint8",
"numpy.prod",
"numpy.packbits",
"os.path.exists",
"numpy.reshape",
"hashlib.md5",
"os.makedirs",
"numpy.unpackbits",
"numpy.array",
"numpy.zeros",
"sys.exit",
"numpy.frombuffer",
"io.StringIO"
] | [((7489, 7527), 'numpy.array', 'np.array', (['profiles_list'], {'dtype': 'np.bool'}), '(profiles_list, dtype=np.bool)\n', (7497, 7527), True, 'import numpy as np\n'), ((10325, 10363), 'numpy.array', 'np.array', (['profiles_list'], {'dtype': 'np.bool'}), '(profiles_list, dtype=np.bool)\n', (10333, 10363), True, 'import numpy as np\n'), ((10495, 10543), 'numpy.frombuffer', 'np.frombuffer', (['length_bitstring'], {'dtype': 'np.uint32'}), '(length_bitstring, dtype=np.uint32)\n', (10508, 10543), True, 'import numpy as np\n'), ((10774, 10819), 'numpy.frombuffer', 'np.frombuffer', (['index_bitstring'], {'dtype': 'np.bool'}), '(index_bitstring, dtype=np.bool)\n', (10787, 10819), True, 'import numpy as np\n'), ((10839, 10888), 'numpy.frombuffer', 'np.frombuffer', (['values_bitstring'], {'dtype': 'np.float32'}), '(values_bitstring, dtype=np.float32)\n', (10852, 10888), True, 'import numpy as np\n'), ((12752, 12805), 'numpy.array', 'np.array', (['[MI_values_array.shape[0]]'], {'dtype': 'np.uint32'}), '([MI_values_array.shape[0]], dtype=np.uint32)\n', (12760, 12805), True, 'import numpy as np\n'), ((12872, 12906), 'numpy.array', 'np.array', (['[nbins]'], {'dtype': 'np.uint32'}), '([nbins], dtype=np.uint32)\n', (12880, 12906), True, 'import numpy as np\n'), ((13271, 13325), 'numpy.frombuffer', 'np.frombuffer', (['length_nbins_bitstring'], {'dtype': 'np.uint32'}), '(length_nbins_bitstring, dtype=np.uint32)\n', (13284, 13325), True, 'import numpy as np\n'), ((13592, 13643), 'numpy.frombuffer', 'np.frombuffer', (['MI_array_bitstring'], {'dtype': 'np.float64'}), '(MI_array_bitstring, dtype=np.float64)\n', (13605, 13643), True, 'import numpy as np\n'), ((15496, 15540), 'numpy.frombuffer', 'np.frombuffer', (['length_bytes'], {'dtype': 'np.uint32'}), '(length_bytes, dtype=np.uint32)\n', (15509, 15540), True, 'import numpy as np\n'), ((16001, 16045), 'numpy.frombuffer', 'np.frombuffer', (['length_bytes'], {'dtype': 'np.uint32'}), '(length_bytes, dtype=np.uint32)\n', (16014, 16045), 
True, 'import numpy as np\n'), ((17567, 17625), 'numpy.array', 'np.array', (['[classification_array.shape[0]]'], {'dtype': 'np.uint32'}), '([classification_array.shape[0]], dtype=np.uint32)\n', (17575, 17625), True, 'import numpy as np\n'), ((17756, 17769), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (17767, 17769), False, 'import hashlib\n'), ((18481, 18537), 'numpy.frombuffer', 'np.frombuffer', (['classification_bitstring'], {'dtype': 'np.uint32'}), '(classification_bitstring, dtype=np.uint32)\n', (18494, 18537), True, 'import numpy as np\n'), ((18559, 18617), 'numpy.array', 'np.array', (['[classification_array.shape[0]]'], {'dtype': 'np.uint32'}), '([classification_array.shape[0]], dtype=np.uint32)\n', (18567, 18617), True, 'import numpy as np\n'), ((18748, 18761), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (18759, 18761), False, 'import hashlib\n'), ((19271, 19284), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (19282, 19284), False, 'import hashlib\n'), ((19883, 19936), 'numpy.frombuffer', 'np.frombuffer', (['shape_array_bitstring'], {'dtype': 'np.uint32'}), '(shape_array_bitstring, dtype=np.uint32)\n', (19896, 19936), True, 'import numpy as np\n'), ((20323, 20367), 'numpy.frombuffer', 'np.frombuffer', (['output_bitstring'], {'dtype': 'dtype'}), '(output_bitstring, dtype=dtype)\n', (20336, 20367), True, 'import numpy as np\n'), ((20389, 20437), 'numpy.reshape', 'np.reshape', (['output_array', 'shape_array'], {'order': '"""C"""'}), "(output_array, shape_array, order='C')\n", (20399, 20437), True, 'import numpy as np\n'), ((20590, 20603), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (20601, 20603), False, 'import hashlib\n'), ((503, 596), 'numpy.frombuffer', 'np.frombuffer', (['bitstring[current_spot + 2:current_spot + 2 + full_length]'], {'dtype': 'np.uint8'}), '(bitstring[current_spot + 2:current_spot + 2 + full_length],\n dtype=np.uint8)\n', (516, 596), True, 'import numpy as np\n'), ((620, 731), 'numpy.frombuffer', 'np.frombuffer', 
(['bitstring[current_spot + 2 + full_length:current_spot + 2 + 2 * full_length]'], {'dtype': 'np.uint8'}), '(bitstring[current_spot + 2 + full_length:current_spot + 2 + 2 *\n full_length], dtype=np.uint8)\n', (633, 731), True, 'import numpy as np\n'), ((4322, 4370), 'numpy.frombuffer', 'np.frombuffer', (['length_bitstring'], {'dtype': 'np.uint32'}), '(length_bitstring, dtype=np.uint32)\n', (4335, 4370), True, 'import numpy as np\n'), ((5073, 5122), 'numpy.frombuffer', 'np.frombuffer', (['sequence_bitstring'], {'dtype': 'np.uint8'}), '(sequence_bitstring, dtype=np.uint8)\n', (5086, 5122), True, 'import numpy as np\n'), ((6229, 6277), 'numpy.frombuffer', 'np.frombuffer', (['length_bitstring'], {'dtype': 'np.uint32'}), '(length_bitstring, dtype=np.uint32)\n', (6242, 6277), True, 'import numpy as np\n'), ((6923, 6970), 'numpy.frombuffer', 'np.frombuffer', (['values_bitstring'], {'dtype': 'np.uint8'}), '(values_bitstring, dtype=np.uint8)\n', (6936, 6970), True, 'import numpy as np\n'), ((6988, 7021), 'numpy.unpackbits', 'np.unpackbits', (['values_packed_bits'], {}), '(values_packed_bits)\n', (7001, 7021), True, 'import numpy as np\n'), ((7936, 7984), 'numpy.frombuffer', 'np.frombuffer', (['length_bitstring'], {'dtype': 'np.uint32'}), '(length_bitstring, dtype=np.uint32)\n', (7949, 7984), True, 'import numpy as np\n'), ((8185, 8236), 'numpy.frombuffer', 'np.frombuffer', (['N_indices_bitstring'], {'dtype': 'np.uint32'}), '(N_indices_bitstring, dtype=np.uint32)\n', (8198, 8236), True, 'import numpy as np\n'), ((8435, 8482), 'numpy.frombuffer', 'np.frombuffer', (['width_bitstring'], {'dtype': 'np.uint32'}), '(width_bitstring, dtype=np.uint32)\n', (8448, 8482), True, 'import numpy as np\n'), ((9195, 9242), 'numpy.frombuffer', 'np.frombuffer', (['values_bitstring'], {'dtype': 'np.uint8'}), '(values_bitstring, dtype=np.uint8)\n', (9208, 9242), True, 'import numpy as np\n'), ((9272, 9307), 'numpy.unpackbits', 'np.unpackbits', (['indices_packed_uint8'], {}), 
'(indices_packed_uint8)\n', (9285, 9307), True, 'import numpy as np\n'), ((9484, 9524), 'numpy.zeros', 'np.zeros', (['(N_indices, 32)'], {'dtype': 'np.bool'}), '((N_indices, 32), dtype=np.bool)\n', (9492, 9524), True, 'import numpy as np\n'), ((9745, 9784), 'numpy.packbits', 'np.packbits', (['reshaped_full_binary_array'], {}), '(reshaped_full_binary_array)\n', (9756, 9784), True, 'import numpy as np\n'), ((9808, 9867), 'numpy.frombuffer', 'np.frombuffer', (['reshaped_full_binary_string'], {'dtype': 'np.uint32'}), '(reshaped_full_binary_string, dtype=np.uint32)\n', (9821, 9867), True, 'import numpy as np\n'), ((18289, 18333), 'numpy.frombuffer', 'np.frombuffer', (['length_bytes'], {'dtype': 'np.uint32'}), '(length_bytes, dtype=np.uint32)\n', (18302, 18333), True, 'import numpy as np\n'), ((19744, 19797), 'numpy.frombuffer', 'np.frombuffer', (['n_dimentions_bitstring'], {'dtype': 'np.uint8'}), '(n_dimentions_bitstring, dtype=np.uint8)\n', (19757, 19797), True, 'import numpy as np\n'), ((19962, 19982), 'numpy.prod', 'np.prod', (['shape_array'], {}), '(shape_array)\n', (19969, 19982), True, 'import numpy as np\n'), ((22365, 22387), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (22379, 22387), False, 'import os\n'), ((22397, 22416), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (22408, 22416), False, 'import os\n'), ((12590, 12601), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12598, 12601), False, 'import sys\n'), ((19098, 19126), 'numpy.uint8', 'np.uint8', (['shape_n_dimensions'], {}), '(shape_n_dimensions)\n', (19106, 19126), True, 'import numpy as np\n'), ((19165, 19207), 'numpy.array', 'np.array', (['inp_array.shape'], {'dtype': 'np.uint32'}), '(inp_array.shape, dtype=np.uint32)\n', (19173, 19207), True, 'import numpy as np\n'), ((22015, 22046), 'io.StringIO', 'io.StringIO', (['shape_table_string'], {}), '(shape_table_string)\n', (22026, 22046), False, 'import io\n')] |
import numpy as np
class StateAggregation:
    """Group consecutive states together and expose linear feature vectors
    for value-function approximation over the aggregated states."""

    def __init__(self, N_states, group_size, N_actions=1):
        """
        Args:
            N_states: Total number of states
            group_size: Combine this many states into group
            N_actions: Number of actions
        """
        self.N_states = N_states
        self.N_actions = N_actions
        self.group_size = group_size
        groups_per_action = int(self.N_states / self.group_size)
        self.size = groups_per_action * N_actions
        # Map every (state, action) pair to its feature/weight index:
        # state groups first, then one copy of the groups per action.
        self.index = {
            (s, a): int(s / self.group_size) + a * groups_per_action
            for s in range(self.N_states)
            for a in range(self.N_actions)
        }

    def q(self, state, action, w):
        """Action-value estimate for (state, action) under weights w."""
        return w[self.index[(state, action)]]

    def q_deriv(self, state, action, w):
        """Gradient of q(state, action) w.r.t. the weight vector:
        a one-hot vector at the pair's feature index."""
        grad = np.zeros(self.size)
        grad[self.index[(state, action)]] = 1
        return grad

    def v(self, state, w):
        """State-value estimate, defined as q with the first action."""
        return self.q(state, 0, w)

    def v_deriv(self, state, w):
        """Gradient of v(state) w.r.t. the weight vector."""
        return self.q_deriv(state, 0, w)

    def generate_weights(self):
        """Fresh all-zero weight vector of the right length."""
        return np.zeros(self.size)
| [
"numpy.zeros"
] | [((1302, 1321), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (1310, 1321), True, 'import numpy as np\n'), ((1937, 1956), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (1945, 1956), True, 'import numpy as np\n')] |
from collections import namedtuple
import numpy as np
from untwist import data, utilities, transforms
# The four anchor signals produced for a MUSHRA source-separation test.
Anchors = namedtuple(
    'Anchors',
    ['Distortion', 'Artefacts', 'Interferer', 'Quality'],
)
class Anchor:
    '''
    Anchor signals for a MUSHRA test assessing source separation
    techniques. Four different anchors are provided for assessing the
    perception regarding interference, distortion, artefacts, and
    overall sound quality. The first three were designed after [1], the
    quality anchor after [2].
    [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2011).
    Subjective and Objective Quality Assessment of Audio Source
    Separation. IEEE TASLP, 19(7), 2046–2057.
    http://doi.org/10.1109/TASL.2011.2109381
    [2] <NAME>., <NAME>., & <NAME>. (2016).
    Evaluation of Quality of Sound Source Separation Algorithms:
    Human Perception vs Quantitative Metrics. In EUSIPCO
    (pp. 1758–1762).
    http://doi.org/10.1109/EUSIPCO.2016.7760550
    '''
    def __init__(self,
                 target,
                 others,
                 trim_factor_distorted=0.2,
                 trim_factor_artefacts=0.99,
                 low_pass_artefacts=False,
                 low_pass_cutoff=3500,
                 include_background_in_quality_anchor=True,
                 loudness_normalise_interferer=True,
                 ):
        '''
        target:
            The target audio, e.g. vocals
        others:
            Can be a list of everthing else, or just the accompaniment (Wave).
        trim_factor_distorted:
            Proportion of spectral frames to remove randomly in time.
        trim_factor_artefacts:
            Proportion of time-frequency bins to randomly remove.
        low_pass_artefacts:
            If True, also low-pass the artefacts signal at low_pass_cutoff.
        low_pass_cutoff:
            Cutoff frequency in Hz used for the low-pass operations.
        include_background_in_quality_anchor:
            If True, the background is mixed into the quality anchor.
        loudness_normalise_interferer:
            If True, match the interferer's loudness to the target.
        '''
        from scipy import signal
        # We need a single background
        if isinstance(others, list):
            self.background = sum(other for other in others)
        else:
            self.background = others
        if not isinstance(target, data.audio.Wave):
            raise ValueError('target must be of type Wave.')
        self.target = target
        # 2048-point Hann window with 50% overlap (hop = points // 2)
        # for the STFT/ISTFT analysis-resynthesis pair
        points = 2048
        window = signal.get_window('hann', points, True)
        self.stft = transforms.STFT(window, points, points // 2)
        self.istft = transforms.ISTFT(window, points, points // 2)
        # STFT bin index nearest to the low-pass cutoff frequency
        self.cut_off = utilities.conversion.nearest_bin(low_pass_cutoff,
                                                          points,
                                                          target.sample_rate)
        self.trim_factor_distorted = trim_factor_distorted
        self.trim_factor_artefacts = trim_factor_artefacts
        self.low_pass_artefacts = low_pass_artefacts
        self.include_background_in_quality_anchor = include_background_in_quality_anchor
        self.loudness_normalise_interferer = loudness_normalise_interferer
    def distorted_anchor(self):
        '''
        Returns the distortion signal created by low-pass filtering the
        target source signal to a 3.5 kHz cutoff frequency and by randomly
        setting 20% of the remaining time-frequency coefficients to zero,
        see [1].
        WARNING: this code can't reproduce the distortion from [1]
        exactly!
        '''
        x_fft = self.stft.process(self.target)
        # low-pass: zero every bin from the cutoff upwards
        x_fft[self.cut_off:] = 0
        # randomly silence whole STFT frames (entire time slices)
        num_frames_to_remove = int(x_fft.shape[1] * self.trim_factor_distorted)
        idx = np.random.choice(x_fft.shape[1],
                               num_frames_to_remove,
                               replace=False)
        x_fft[:, idx] = 0
        distortion = self.istft.process(x_fft)
        # trim the resynthesis padding back to the original length
        return distortion[:self.target.num_frames]
    def inteference_anchor(self):
        '''
        Interference anchor for a MUSHRA listening test.
        The anchor is created by the sum of target signal and the
        interferer. The interferer is formed by summing all interfering
        sources and then setting the overall loudness to that of the target,
        see [1].
        '''
        interferer = self.background.copy()
        if self.loudness_normalise_interferer:
            # match the interferer's loudness to the target before mixing
            interferer.loudness = self.target.loudness
        interferer += self.target
        return interferer
    def artefacts(self):
        '''
        Returns the artefacts signal (musical noise) generated by randomly
        zeroing 99% of the time-frequency bins, see [1].
        '''
        x_fft = self.stft.process(self.target)
        # zero a random subset of individual time-frequency bins
        # (flat indices converted back to (row, col) coordinates)
        idx = np.random.choice(
            x_fft.size,
            size=int(x_fft.size * self.trim_factor_artefacts),
            replace=False)
        row, col = np.unravel_index(idx, x_fft.shape)
        x_fft[row, col] = 0
        if self.low_pass_artefacts:
            x_fft[self.cut_off:] = 0
        artefacts = self.istft.process(x_fft)
        return artefacts[:self.target.num_frames]
    def artefacts_anchor(self):
        '''
        Artefacts anchor for a MUSHRA listening test.
        The anchor is defined as the sum of the target with musical
        noise; both equally loud. Musical noise is created by randomly
        zeroing 99% of the time-frequency bins, see [1].
        '''
        artefacts = self.artefacts()
        artefacts.loudness = self.target.loudness
        anchor = artefacts + self.target
        return anchor
    def quality_anchor(self):
        '''
        Quality anchor for a MUSHRA listening test.
        The anchor is defined as the sum of the distortion anchor,
        artefacts only and interferer only; all equally loud, see [2].
        '''
        # -23 is presumably LUFS (EBU R128 reference level) as interpreted
        # by untwist's Wave.loudness setter -- TODO confirm the scale
        target_loudness = -23
        signals = []
        signals_to_sum = [self.distorted_anchor(), self.artefacts()]
        if self.include_background_in_quality_anchor:
            signals_to_sum.append(self.background)
        for signal in signals_to_sum:
            signal.loudness = target_loudness
            signals.append(signal)
        anchor = sum(signals)
        anchor = anchor[:self.target.num_frames]
        return anchor
    def create(self):
        # Assemble all four anchors into the Anchors namedtuple.
        return Anchors(self.distorted_anchor(),
                       self.artefacts_anchor(),
                       self.inteference_anchor(),
                       self.quality_anchor())
class RemixAnchor():
    # Anchors for a remixing-style MUSHRA test: the same distortion and
    # artefacts degradations as Anchor, but applied to the full mix
    # (target + background) rather than to the isolated target.
    def __init__(self,
                 target,
                 others,
                 trim_factor_distorted=0.2,
                 trim_factor_artefacts=0.99,
                 target_level_offset=-14,
                 quality_anchor_loudness_balance=[0, 0],
                 low_pass_cutoff=3500):
        '''
        target:
            The target audio, e.g. vocals
        others:
            Can be a list of everthing else, or just the accompaniment (Wave).
        trim_factor_distorted:
            Proportion of spectral frames to remove randomly in time.
        trim_factor_artefacts:
            Proportion of time-frequency bins to randomly remove.
        target_level_offset:
            The level adjustment applied to the target for the balance anchor.
        quality_anchor_loudness_balance:
            The desired loudness balance of [distorted_audio, artefacts], e.g.
            setting [10, 0] would set the distorted audio to be 10 LU above
            the artefacts. Default is [0, 0] = equal loudness.
        '''
        # We need a single background
        if isinstance(others, list):
            self.background = sum(other for other in others)
        else:
            self.background = others
        self.target = target
        self.mix = self.target + self.background
        # Delegate the spectral degradations to Anchor, operating on the
        # full mix; artefacts are additionally low-passed at the cutoff.
        self.anchor_gen = Anchor(self.mix,
                                 None,
                                 trim_factor_distorted,
                                 trim_factor_artefacts,
                                 low_pass_artefacts=True,
                                 low_pass_cutoff=low_pass_cutoff)
        self.target_level_offset = target_level_offset
        self.quality_anchor_loudness_balance = np.array(
            quality_anchor_loudness_balance)
    def distorted_anchor(self):
        '''
        Returns the distortion mix created by low-pass filtering the
        target source signal to a 3.5 kHz cutoff frequency and by randomly
        setting 20% of the remaining time-frequency coefficients to zero,
        see [1].
        '''
        return self.anchor_gen.distorted_anchor()
    def artefacts_anchor(self):
        '''
        Returns the artefacts mix (musical noise) generated by randomly
        zeroing 99% of the time-frequency bins, see [1].
        '''
        return self.anchor_gen.artefacts_anchor()
    def interferer_anchor(self):
        '''
        Mixes the background with the target offset by 'target_level_offset'.
        '''
        # target_level_offset is in dB, converted to a linear gain
        mix = (self.target *
               utilities.conversion.db_to_amp(self.target_level_offset) +
               self.background)
        return mix
    def interferer_anchor_both_sources(self):
        '''
        Returns the target and background as used to create the interferer
        anchor (but not normalised).
        '''
        return (self.target *
                utilities.conversion.db_to_amp(self.target_level_offset),
                self.background)
    def quality_anchor(self):
        '''
        Sum of the distorted mix and artefacts of the mix, at equal loudness.
        You can adjust the loudness balance by setting the attribute
        'quality_anchor_loudness_balance' (default is an array of zeros).
        '''
        # centre the requested balance around -23 so its mean contribution
        # is removed and only the relative offset between the two remains
        target_loudness = (np.array([-23.0, -23.0]) +
                           (self.quality_anchor_loudness_balance -
                            self.quality_anchor_loudness_balance.mean())
                           )
        signals = []
        for signal, loudness in zip([self.distorted_anchor(),
                                     self.anchor_gen.artefacts()],
                                    target_loudness):
            signal.loudness = loudness
            signals.append(signal)
        anchor = sum(signals)
        anchor = anchor[:self.target.num_frames]
        return anchor
    def create(self):
        # Assemble all four anchors into the Anchors namedtuple.
        return Anchors(self.distorted_anchor(),
                       self.artefacts_anchor(),
                       self.interferer_anchor(),
                       self.quality_anchor())
| [
"collections.namedtuple",
"numpy.random.choice",
"untwist.transforms.ISTFT",
"numpy.array",
"untwist.transforms.STFT",
"untwist.utilities.conversion.nearest_bin",
"numpy.unravel_index",
"scipy.signal.get_window",
"untwist.utilities.conversion.db_to_amp"
] | [((114, 189), 'collections.namedtuple', 'namedtuple', (['"""Anchors"""', "['Distortion', 'Artefacts', 'Interferer', 'Quality']"], {}), "('Anchors', ['Distortion', 'Artefacts', 'Interferer', 'Quality'])\n", (124, 189), False, 'from collections import namedtuple\n'), ((2279, 2318), 'scipy.signal.get_window', 'signal.get_window', (['"""hann"""', 'points', '(True)'], {}), "('hann', points, True)\n", (2296, 2318), False, 'from scipy import signal\n'), ((2339, 2383), 'untwist.transforms.STFT', 'transforms.STFT', (['window', 'points', '(points // 2)'], {}), '(window, points, points // 2)\n', (2354, 2383), False, 'from untwist import data, utilities, transforms\n'), ((2405, 2450), 'untwist.transforms.ISTFT', 'transforms.ISTFT', (['window', 'points', '(points // 2)'], {}), '(window, points, points // 2)\n', (2421, 2450), False, 'from untwist import data, utilities, transforms\n'), ((2475, 2552), 'untwist.utilities.conversion.nearest_bin', 'utilities.conversion.nearest_bin', (['low_pass_cutoff', 'points', 'target.sample_rate'], {}), '(low_pass_cutoff, points, target.sample_rate)\n', (2507, 2552), False, 'from untwist import data, utilities, transforms\n'), ((3557, 3626), 'numpy.random.choice', 'np.random.choice', (['x_fft.shape[1]', 'num_frames_to_remove'], {'replace': '(False)'}), '(x_fft.shape[1], num_frames_to_remove, replace=False)\n', (3573, 3626), True, 'import numpy as np\n'), ((4770, 4804), 'numpy.unravel_index', 'np.unravel_index', (['idx', 'x_fft.shape'], {}), '(idx, x_fft.shape)\n', (4786, 4804), True, 'import numpy as np\n'), ((8137, 8178), 'numpy.array', 'np.array', (['quality_anchor_loudness_balance'], {}), '(quality_anchor_loudness_balance)\n', (8145, 8178), True, 'import numpy as np\n'), ((9688, 9712), 'numpy.array', 'np.array', (['[-23.0, -23.0]'], {}), '([-23.0, -23.0])\n', (9696, 9712), True, 'import numpy as np\n'), ((8952, 9008), 'untwist.utilities.conversion.db_to_amp', 'utilities.conversion.db_to_amp', (['self.target_level_offset'], {}), 
'(self.target_level_offset)\n', (8982, 9008), False, 'from untwist import data, utilities, transforms\n'), ((9293, 9349), 'untwist.utilities.conversion.db_to_amp', 'utilities.conversion.db_to_amp', (['self.target_level_offset'], {}), '(self.target_level_offset)\n', (9323, 9349), False, 'from untwist import data, utilities, transforms\n')] |
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from tf_metric_learning.utils.index import AnnoyDataIndex
class AnnoyEvaluatorCallback(AnnoyDataIndex):
    """
    Callback that extracts embeddings with the base model, adds the store
    split to an Annoy index and evaluates the search split as recall@k.

    ``data_store`` and ``data_search`` are expected to be dicts with
    "images" and "labels" entries (see compute_data/evaluate below).
    """

    def __init__(
        self,
        model,
        data_store,
        data_search,
        save_dir=None,
        eb_size=256,
        metric="euclidean",
        freq=1,
        batch_size=None,
        normalize_eb=True,
        normalize_fn=None,
        progress=True,
        **kwargs
    ):
        super().__init__(eb_size, data_store["labels"], metric=metric, save_dir=save_dir, progress=progress)
        self.base_model = model
        self.data_store = data_store
        self.data_search = data_search
        self.batch_size = batch_size
        self.freq = int(freq)  # re-evaluate every `freq` epochs
        self.normalize_eb = normalize_eb  # L2-normalize embeddings before index/search
        self.normalize_fn = normalize_fn  # optional preprocessing applied to image batches
        self.results = {}

    def on_epoch_begin(self, epoch, logs=None):
        # Re-index and evaluate on the configured schedule (epoch 0 included).
        if self.freq and epoch % self.freq == 0:
            self.compute_data()

    def batch(self, iterable, n=1):
        """Yield successive slices of at most ``n`` items from ``iterable``."""
        total = len(iterable)
        for start in range(0, total, n):
            yield iterable[start:min(start + n, total)]

    def compute_data(self):
        """Embed the store images, (re)build the index and run evaluation."""
        self.create_index()
        i = 0
        with tqdm(total=len(self.data_store["images"]), desc="Indexing ... ") as pbar:
            for batch in self.batch(self.data_store["images"], n=self.batch_size*10):
                store_images = self.normalize_fn(batch) if self.normalize_fn is not None else batch
                embeddings_store = self.base_model.predict(store_images, batch_size=self.batch_size)
                if self.normalize_eb:
                    embeddings_store = tf.nn.l2_normalize(embeddings_store, axis=1).numpy()
                for embedding in embeddings_store:
                    self.add_to_index(i, embedding)
                    i += 1
                pbar.update(len(batch))
        # k presumably sets the number of Annoy trees -- see AnnoyDataIndex
        self.build(k=5)
        self.evaluate(self.data_search["images"])

    def evaluate(self, images):
        """Search the index with each search embedding and record recall@k."""
        self.results = {"default": []}
        i = 0
        with tqdm(total=len(images), desc="Evaluating ... ") as pbar:
            for batch in self.batch(images, n=self.batch_size*10):
                search_images = self.normalize_fn(batch) if self.normalize_fn is not None else batch
                embeddings_search = self.base_model.predict(search_images, batch_size=self.batch_size)
                if self.normalize_eb:
                    embeddings_search = tf.nn.l2_normalize(embeddings_search, axis=1).numpy()
                for embedding in embeddings_search:
                    annoy_results = self.search(embedding, n=20, include_distances=False)
                    annoy_results = [self.get_label(result) for result in annoy_results]
                    recalls = self.eval_recall(annoy_results, self.data_search["labels"][i], [1, 5, 10, 20])
                    self.results["default"].append(recalls)
                    i += 1
                pbar.update(len(batch))
        # BUGFIX: the label previously claimed "[1, 3, 5, 10, 20]" although
        # eval_recall above is called with [1, 5, 10, 20] (four values).
        print("\nRecall@[1, 5, 10, 20] Computed:", np.mean(np.asarray(self.results["default"]), axis=0), "\n")

    def eval_recall(self, annoy_results, label, recalls):
        """Return a 0/1 hit list: is ``label`` among the top-``recall_n`` results."""
        return [1 if label in annoy_results[:recall_n] else 0 for recall_n in recalls]
| [
"tensorflow.nn.l2_normalize",
"numpy.asarray"
] | [((3161, 3196), 'numpy.asarray', 'np.asarray', (["self.results['default']"], {}), "(self.results['default'])\n", (3171, 3196), True, 'import numpy as np\n'), ((1770, 1814), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['embeddings_store'], {'axis': '(1)'}), '(embeddings_store, axis=1)\n', (1788, 1814), True, 'import tensorflow as tf\n'), ((2573, 2618), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['embeddings_search'], {'axis': '(1)'}), '(embeddings_search, axis=1)\n', (2591, 2618), True, 'import tensorflow as tf\n')] |
import os
import numpy as np
import pandas as pd
from PIL import Image
import torch
from torchvision import transforms
from tqdm import tqdm
from torchvision import models
from numpy.testing import assert_almost_equal
from typing import List
from constants import PATH_IMAGES_CNN, PATH_IMAGES_RAW
class Img2VecResnet18():
    """
    Class responsible for image recognition.

    Wraps a pretrained ResNet-18: images are embedded via the avgpool layer
    and compared with cosine similarity to recommend visually similar images.
    """
    def __init__(self, reload=False):
        """
        Initialize class.
        Args:
            reload (bool): recompressed raw images for recognition.
        """
        #: Torch device to run neural network
        self.device = torch.device("cpu")
        #: Number of features to extract from images
        self.numberFeatures = 512
        #: Model to use for similarity
        self.modelName = "resnet-18"
        self.model, self.featureLayer = self.getFeatureLayer()
        self.model = self.model.to(self.device)
        # Inference mode only -- disables dropout/batch-norm updates.
        self.model.eval()
        self.toTensor = transforms.ToTensor()
        # Standard ImageNet normalisation constants documented for
        # torchvision's pretrained models.
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.allVectors = {}
        #: Input Directory for building simularity matrix
        self.inputDir = PATH_IMAGES_CNN
        if reload:
            transformImages()
        self.updateSimilarityMatrix()

    def getFeatureLayer(self):
        """
        Gets avgpool layer from `resnet18 <https://pytorch.org/hub/pytorch_vision_resnet/>`_ .
        """
        cnnModel = models.resnet18(pretrained=True)
        # The avgpool output (512 values) is used as the image embedding.
        layer = cnnModel._modules.get('avgpool')
        self.layer_output_size = 512
        return cnnModel, layer

    def getVec(self, img: Image):
        """
        Converts passed image into a numpy vector
        Args:
            img (Image): pillow image to convert
        Returns:
            Tensor as Numpy array
        """
        image = self.normalize(self.toTensor(img)).unsqueeze(0).to(self.device)
        embedding = torch.zeros(1, self.numberFeatures, 1, 1)
        # Forward hook copies the avgpool layer's output into `embedding`
        # during the forward pass; the hook is removed right after.
        def copyData(m, i, o): embedding.copy_(o.data)
        h = self.featureLayer.register_forward_hook(copyData)
        self.model(image)
        h.remove()
        # Squeeze the (1, 512, 1, 1) tensor down to a flat 512-vector.
        return embedding.numpy()[0, :, 0, 0]

    def getSimilarityMatrix(self, vectors):
        """
        Create pandas DataFrame of simularities using passed vectors
        Args:
            vectors (Numpy.Array): Vectors to parse simularities
        Returns:
            Pandas.DataFrame
        """
        v = np.array(list(vectors.values())).T
        # Cosine similarity: inner products divided by the outer product of
        # the per-vector norms.
        sim = np.inner(v.T, v.T) / ((np.linalg.norm(v, axis=0).reshape(-1,1)) * ((np.linalg.norm(v, axis=0).reshape(-1,1)).T))
        keys = list(vectors.keys())
        matrix = pd.DataFrame(sim, columns = keys, index = keys)
        return matrix

    def updateSimilarityMatrix(self, k: int = 10):
        """
        Updates self.SimilarityMatrix, self.similarNames, self.similarValues and self.k using parameter k.
        Args:
            k (int): Number of recommendations to present when querrying for simularities
        """
        self.k = k
        # Embed every image found in the input directory.
        for image in tqdm(os.listdir(self.inputDir)):
            I = Image.open(os.path.join(self.inputDir, image))
            vec = self.getVec(I)
            self.allVectors[image] = vec
            I.close()
        self.similarityMatrix = self.getSimilarityMatrix(self.allVectors)
        self.similarNames = pd.DataFrame(index = self.similarityMatrix.index, columns = range(self.k))
        self.similarValues = pd.DataFrame(index = self.similarityMatrix.index, columns = range(self.k))
        # For each image keep the k highest-similarity neighbours (this
        # includes the image itself, which getSimilarImages filters out).
        for j in tqdm(range(self.similarityMatrix.shape[0])):
            kSimilar = self.similarityMatrix.iloc[j, :].sort_values(ascending = False).head(self.k)
            self.similarNames.iloc[j, :] = list(kSimilar.index)
            self.similarValues.iloc[j, :] = kSimilar.values

    def getSimilarImages(self, image: str):
        """
        Gets self.k most similar images from self.similarNames.
        Args:
            image (str): filename of image for which recommendations are desired
        Returns:
            (imgs, vals) lists, or None (with a message) for an unknown image.
        """
        if image in set(self.similarNames.index):
            imgs = list(self.similarNames.loc[image, :])
            vals = list(self.similarValues.loc[image, :])
            # Don't recommend passed image
            if image in imgs:
                # The query's self-similarity should be ~1.0 (cosine).
                assert_almost_equal(max(vals), 1, decimal = 5)
                imgs.remove(image)
                vals.remove(max(vals))
            return imgs, vals
        else:
            print("'{}' Unknown image".format(image))
def transformImages(inputDir = PATH_IMAGES_RAW, outputDir = PATH_IMAGES_CNN, filenames: List[str] = None):
    """
    Process Images inside inputDir for use with neural network.
    Resized (224x224) images are written to outputDir; images that already
    exist in outputDir are skipped and EXIF metadata is preserved.
    *Paths are absolute

    Args:
        inputDir (str): directory containing the raw images.
        outputDir (str): directory where resized images are written.
        filenames (List[str]): optional subset of filenames to convert;
            ``None`` converts every file found in ``inputDir``.
    """
    transformationForCNNInput = transforms.Compose([transforms.Resize((224,224))])
    # BUG FIX: comparison against None must use identity (`is`), not `==`.
    if filenames is None:
        filenames = os.listdir(inputDir)
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    for imageName in filenames:
        imageOutputPath = os.path.join(outputDir, imageName)
        imagePath = os.path.join(inputDir, imageName)
        # Skip files that were already converted in a previous run.
        if not os.path.isfile(imageOutputPath):
            I = Image.open(imagePath)
            newI = transformationForCNNInput(I)
            if "exif" in I.info:
                # Carry over camera metadata (e.g. orientation) when present.
                exif = I.info['exif']
                newI.save(imageOutputPath, exif=exif)
            else:
                newI.save(imageOutputPath)
if __name__ == '__main__':
transformImages() | [
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"os.makedirs",
"torchvision.transforms.Resize",
"os.path.join",
"torchvision.models.resnet18",
"numpy.inner",
"os.path.isfile",
"torchvision.transforms.Normalize",
"numpy.linalg.norm",
"pandas.DataFrame",
"torchvision.transforms.ToTensor",
... | [((655, 674), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (667, 674), False, 'import torch\n'), ((1013, 1034), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1032, 1034), False, 'from torchvision import transforms\n'), ((1061, 1136), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1081, 1136), False, 'from torchvision import transforms\n'), ((1549, 1581), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1564, 1581), False, 'from torchvision import models\n'), ((2062, 2103), 'torch.zeros', 'torch.zeros', (['(1)', 'self.numberFeatures', '(1)', '(1)'], {}), '(1, self.numberFeatures, 1, 1)\n', (2073, 2103), False, 'import torch\n'), ((2823, 2866), 'pandas.DataFrame', 'pd.DataFrame', (['sim'], {'columns': 'keys', 'index': 'keys'}), '(sim, columns=keys, index=keys)\n', (2835, 2866), True, 'import pandas as pd\n'), ((5190, 5210), 'os.listdir', 'os.listdir', (['inputDir'], {}), '(inputDir)\n', (5200, 5210), False, 'import os\n'), ((5225, 5250), 'os.path.exists', 'os.path.exists', (['outputDir'], {}), '(outputDir)\n', (5239, 5250), False, 'import os\n'), ((5261, 5283), 'os.makedirs', 'os.makedirs', (['outputDir'], {}), '(outputDir)\n', (5272, 5283), False, 'import os\n'), ((5348, 5382), 'os.path.join', 'os.path.join', (['outputDir', 'imageName'], {}), '(outputDir, imageName)\n', (5360, 5382), False, 'import os\n'), ((5404, 5437), 'os.path.join', 'os.path.join', (['inputDir', 'imageName'], {}), '(inputDir, imageName)\n', (5416, 5437), False, 'import os\n'), ((2655, 2673), 'numpy.inner', 'np.inner', (['v.T', 'v.T'], {}), '(v.T, v.T)\n', (2663, 2673), True, 'import numpy as np\n'), ((3263, 3288), 'os.listdir', 'os.listdir', (['self.inputDir'], {}), '(self.inputDir)\n', (3273, 3288), False, 'import os\n'), ((5109, 5138), 
'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (5126, 5138), False, 'from torchvision import transforms\n'), ((5454, 5485), 'os.path.isfile', 'os.path.isfile', (['imageOutputPath'], {}), '(imageOutputPath)\n', (5468, 5485), False, 'import os\n'), ((5504, 5525), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (5514, 5525), False, 'from PIL import Image\n'), ((3319, 3353), 'os.path.join', 'os.path.join', (['self.inputDir', 'image'], {}), '(self.inputDir, image)\n', (3331, 3353), False, 'import os\n'), ((2678, 2703), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (2692, 2703), True, 'import numpy as np\n'), ((2723, 2748), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (2737, 2748), True, 'import numpy as np\n')] |
import tempfile
import unittest
import numpy as np
import pystan
from pystan.tests.helper import get_model
def validate_data(fit):
    """Sanity-check posterior draws from the eight-schools fit."""
    draws = fit.extract(permuted=True)  # dictionary of parameter arrays
    mu = draws['mu']
    tau = draws['tau']
    eta = draws['eta']
    theta = draws['theta']

    # Shapes: 4 chains x 500 post-warmup iterations = 2000 draws, J = 8 schools.
    expected_shapes = ((mu, (2000,)), (tau, (2000,)), (eta, (2000, 8)), (theta, (2000, 8)))
    for arr, shape in expected_shapes:
        np.testing.assert_equal(arr.shape, shape)

    # Posterior means must land in loose, model-plausible ranges.
    assert -1 < np.mean(mu) < 17
    assert 0 < np.mean(tau) < 17
    eta_means = np.mean(eta, axis=0)
    assert all(-3 < eta_means) and all(eta_means < 3)
    theta_means = np.mean(theta, axis=0)
    assert all(-15 < theta_means) and all(theta_means < 30)

    # Unpermuted extraction: (iterations, chains, parameters).
    unpermuted = fit.extract(permuted=False)
    np.testing.assert_equal(unpermuted.shape, (500, 4, 19))
class TestRStanGettingStarted(unittest.TestCase):
    """Port of the RStan "Getting Started" eight-schools example to PyStan."""

    @classmethod
    def setUpClass(cls):
        # The model is compiled and sampled once for the whole class because
        # Stan compilation is expensive; individual tests reuse cls.fit.
        cls.schools_code = schools_code = """
        data {
            int<lower=0> J; // number of schools
            real y[J]; // estimated treatment effects
            real<lower=0> sigma[J]; // s.e. of effect estimates
        }
        parameters {
            real mu;
            real<lower=0> tau;
            real eta[J];
        }
        transformed parameters {
            real theta[J];
            for (j in 1:J)
                theta[j] = mu + tau * eta[j];
        }
        model {
            eta ~ normal(0, 1);
            y ~ normal(theta, sigma);
        }
        """
        # Classic eight-schools data: J schools, effect estimates and s.e.s.
        cls.schools_dat = schools_dat = {
            'J': 8,
            'y': [28, 8, -3, 7, -1, 1, 18, 12],
            'sigma': [15, 10, 16, 11, 9, 11, 10, 18]
        }
        cls.sm = sm = get_model("schools_model", schools_code)
        #cls.sm = sm = pystan.StanModel(model_code=schools_code)
        cls.fit = sm.sampling(data=schools_dat, iter=1000, chains=4)

    def test_stan(self):
        # Shared fit from setUpClass should produce valid posterior draws.
        fit = self.fit
        validate_data(fit)

    def test_stan_file(self):
        # Same model, but compiled from a file path instead of a code string.
        schools_code = self.schools_code
        schools_dat = self.schools_dat
        with tempfile.NamedTemporaryFile(delete=False) as f:
            f.write(schools_code.encode('utf-8'))
        fit = pystan.stan(file=f.name, data=schools_dat, iter=1000, chains=4)
        validate_data(fit)

    def test_stan_reuse_fit(self):
        # Re-sampling from an existing fit must skip recompilation.
        fit1 = self.fit
        schools_dat = self.schools_dat
        fit = pystan.stan(fit=fit1, data=schools_dat, iter=1000, chains=4)
        validate_data(fit)

    def test_sampling_parallel(self):
        # n_jobs=-1 (all cores) and an explicit worker count must both work.
        sm = self.sm
        schools_dat = self.schools_dat
        fit = sm.sampling(data=schools_dat, iter=1000, chains=4, n_jobs=-1)
        validate_data(fit)
        # n_jobs specified explicitly
        fit = sm.sampling(data=schools_dat, iter=1000, chains=4, n_jobs=4)
        validate_data(fit)
| [
"numpy.mean",
"numpy.testing.assert_equal",
"pystan.stan",
"tempfile.NamedTemporaryFile",
"pystan.tests.helper.get_model"
] | [((278, 320), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['mu.shape', '(2000,)'], {}), '(mu.shape, (2000,))\n', (301, 320), True, 'import numpy as np\n'), ((325, 368), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['tau.shape', '(2000,)'], {}), '(tau.shape, (2000,))\n', (348, 368), True, 'import numpy as np\n'), ((373, 418), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['eta.shape', '(2000, 8)'], {}), '(eta.shape, (2000, 8))\n', (396, 418), True, 'import numpy as np\n'), ((423, 470), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['theta.shape', '(2000, 8)'], {}), '(theta.shape, (2000, 8))\n', (446, 470), True, 'import numpy as np\n'), ((824, 870), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['a.shape', '(500, 4, 19)'], {}), '(a.shape, (500, 4, 19))\n', (847, 870), True, 'import numpy as np\n'), ((487, 498), 'numpy.mean', 'np.mean', (['mu'], {}), '(mu)\n', (494, 498), True, 'import numpy as np\n'), ((519, 531), 'numpy.mean', 'np.mean', (['tau'], {}), '(tau)\n', (526, 531), True, 'import numpy as np\n'), ((1760, 1800), 'pystan.tests.helper.get_model', 'get_model', (['"""schools_model"""', 'schools_code'], {}), "('schools_model', schools_code)\n", (1769, 1800), False, 'from pystan.tests.helper import get_model\n'), ((2247, 2310), 'pystan.stan', 'pystan.stan', ([], {'file': 'f.name', 'data': 'schools_dat', 'iter': '(1000)', 'chains': '(4)'}), '(file=f.name, data=schools_dat, iter=1000, chains=4)\n', (2258, 2310), False, 'import pystan\n'), ((2451, 2511), 'pystan.stan', 'pystan.stan', ([], {'fit': 'fit1', 'data': 'schools_dat', 'iter': '(1000)', 'chains': '(4)'}), '(fit=fit1, data=schools_dat, iter=1000, chains=4)\n', (2462, 2511), False, 'import pystan\n'), ((557, 577), 'numpy.mean', 'np.mean', (['eta'], {'axis': '(0)'}), '(eta, axis=0)\n', (564, 577), True, 'import numpy as np\n'), ((594, 614), 'numpy.mean', 'np.mean', (['eta'], {'axis': '(0)'}), '(eta, axis=0)\n', (601, 614), True, 'import numpy as 
np\n'), ((641, 663), 'numpy.mean', 'np.mean', (['theta'], {'axis': '(0)'}), '(theta, axis=0)\n', (648, 663), True, 'import numpy as np\n'), ((680, 702), 'numpy.mean', 'np.mean', (['theta'], {'axis': '(0)'}), '(theta, axis=0)\n', (687, 702), True, 'import numpy as np\n'), ((2135, 2176), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2162, 2176), False, 'import tempfile\n')] |
import random
import unittest
import numpy as np
import torch
from elasticai.creator.brevitas.brevitas_model_comparison import (
BrevitasModelComparisonTestCase,
)
from elasticai.creator.brevitas.brevitas_representation import BrevitasRepresentation
from elasticai.creator.systemTests.brevitas_representation.models_definition import (
create_brevitas_model,
create_qtorch_model,
)
class ModelSystemTest(BrevitasModelComparisonTestCase):
    """System-level comparison of a large QTorch model against its Brevitas twin."""

    def setUp(self) -> None:
        self.ensure_reproducibility()

    @staticmethod
    def ensure_reproducibility():
        """Reset every RNG the libraries under test draw from."""
        torch.manual_seed(0)
        random.seed(0)
        np.random.seed(0)

    def test_complete_models_with_weights(self) -> None:
        # Brevitas appears to advance global RNG state, so the seeds are
        # reset before each model is materialised to keep weights identical.
        self.qtorch_model = create_qtorch_model()
        self.ensure_reproducibility()
        self.brevitas_model = create_brevitas_model()
        self.ensure_reproducibility()
        translated = BrevitasRepresentation.from_pytorch(self.qtorch_model).translated_model
        self.assertModelEqual(translated, self.brevitas_model)
if __name__ == "__main__":
unittest.main()
| [
"torch.manual_seed",
"elasticai.creator.brevitas.brevitas_representation.BrevitasRepresentation.from_pytorch",
"random.seed",
"elasticai.creator.systemTests.brevitas_representation.models_definition.create_qtorch_model",
"numpy.random.seed",
"unittest.main",
"elasticai.creator.systemTests.brevitas_repre... | [((1380, 1395), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1393, 1395), False, 'import unittest\n'), ((663, 683), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (680, 683), False, 'import torch\n'), ((692, 706), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (703, 706), False, 'import random\n'), ((715, 732), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (729, 732), True, 'import numpy as np\n'), ((819, 840), 'elasticai.creator.systemTests.brevitas_representation.models_definition.create_qtorch_model', 'create_qtorch_model', ([], {}), '()\n', (838, 840), False, 'from elasticai.creator.systemTests.brevitas_representation.models_definition import create_brevitas_model, create_qtorch_model\n'), ((1001, 1024), 'elasticai.creator.systemTests.brevitas_representation.models_definition.create_brevitas_model', 'create_brevitas_model', ([], {}), '()\n', (1022, 1024), False, 'from elasticai.creator.systemTests.brevitas_representation.models_definition import create_brevitas_model, create_qtorch_model\n'), ((1183, 1237), 'elasticai.creator.brevitas.brevitas_representation.BrevitasRepresentation.from_pytorch', 'BrevitasRepresentation.from_pytorch', (['self.qtorch_model'], {}), '(self.qtorch_model)\n', (1218, 1237), False, 'from elasticai.creator.brevitas.brevitas_representation import BrevitasRepresentation\n')] |
"""
get_offset determines the optimal p-site offset for each read-length on the top 10 most abundant ORFs in the bam-file
usage:
python get_offset.py --bam <bam-file> --orfs <ribofy orfs-file> --output <output-file>
By default, get_offset analyses reads between 25 and 35 nt,
but this is customizable with the --min_read_length and --max_read_length options
And change number of ORFs used in offset calculation by the --norfs option
"""
import pysam
import pandas as pd
import numpy as np
from collections import Counter
from .argparse2 import argparse2
from .get_phasing import get_phasing_stats
from .stats import get_2D_matrix
from .bam_utils import get_tid_info
# def agg_percentile(n, postfix = ""):
# def percentile_(x):
# return x.quantile(n)
# percentile_.__name__ = ('percentile_' + postfix).strip ("_")
# return percentile_
# def agg_table(ret = "key"): #key/value/pct
# def table_(x):
# ctr = Counter(x).most_common()
# if ret == "key":
# return ([k for (k,v) in ctr][0])
# elif ret == "value":
# return ([v for (k,v) in ctr][0])
# elif ret == "pct":
# return ([v/len(x) for (k,v) in ctr][0])
# table_.__name__ = f"table_{ret}"
# return table_
def agg_f(x, p_methods, percentile):
    """Aggregate per-ORF offset rows (one read-length group) into summary stats.

    Args:
        x: group DataFrame with one column per entry in ``p_methods`` plus
            'offset', 'frame0', 'frame1' and 'frame2' columns.
        p_methods: names of the p-value columns to summarise.
        percentile: quantile (0-1) reported for each p-value column.

    Returns:
        pd.Series with the percentile p-values, the modal offset (key, count
        and fraction of rows) and the summed per-frame read counts.
    """
    d = {}
    for s in p_methods:
        # High quantile so a few outlier ORFs cannot dominate the call.
        d["p_" + s] = x[s].quantile(percentile)

    # Most common offset across the analysed ORFs.
    # FIX: unpack most_common(1) directly instead of building three full
    # throwaway lists just to take their first element.
    top_offset, top_count = Counter(x['offset']).most_common(1)[0]
    d['offset_key'] = top_offset
    d['offset_value'] = top_count
    d['offset_pct'] = top_count / len(x)

    for c in ('frame0', 'frame1', 'frame2'):
        d[c] = np.sum(x[c])

    return pd.Series(d, index=list(d))
def get_offset (bamfiles, orfs, output="", norfs=20, min_read_length=25, max_read_length=35, percentile = .9, p_methods = ['glm'], full_output = ""):
    """
    Main function: based on bamfiles and pre-established ORFs, the most likely offsets from 25-35 read lengths are inferred based on the top expressed genes.

    Parameters
    -------
    bamfiles: list (of str)
        List of bamfiles to analyse
    orfs: str
        path/to/orfs (generated by ribofy orfs); an already-loaded DataFrame is also accepted
    output: str
        path/to/output file
    norfs: int, default=20
        Number of orfs to use in the offset analysis
    min_read_length: int, default=25
        Minimum read length
    max_read_length: int, default=35
        Maximum read length
    percentile: From the norfs analysed use the percentile best to establish offset (this ensures that single outliers are not invalidating the offset results)
    p_methods: statistics used in evaluating offsets
    full_output: str
        optional path for the per-ORF (non-aggregated) statistics

    Returns
    -------
    pandas.DataFrame with one row per (bam, read_length) holding the inferred
    offset, its support and the aggregated phasing statistics.
    """

    print ("### getting p-site offsets ###")

    # Accept either a path to the ribofy orfs file or an already-loaded DataFrame.
    pd_orfs = pd.read_csv (orfs, sep="\t") if isinstance (orfs, str) else orfs

    # One representative annotated ORF (>= 500 nt) per orf_group.
    pd_annot = pd_orfs[(pd_orfs.orf_type == "annotated") & (pd_orfs.orf_length >= 500)] \
        .groupby ("orf_group") \
        .head (1)

    pd_output = pd.DataFrame ()
    pd_full = pd.DataFrame ()

    for bamfile in bamfiles:

        # get transcripts with most counts
        print (f"infering offsets for bam ({bamfile})...")

        # load bam
        bam = pysam.Samfile (bamfile)
        dtid2count, dtid2ref = get_tid_info (bamfile)

        # add read counts to dataframe
        pd_annot['total_reads'] = pd_annot['tid'].transform (lambda x: dtid2count[x] if x in dtid2count else 0)
        pd_annot = pd_annot.sort_values ('total_reads', ascending=False)

        # initialize count_offsets
        length_range = range (min_read_length, max_read_length+1)

        count_offsets = {}
        for x in length_range:
            count_offsets[x] = [0,0,0]

        # maps the frame of the naive 12 nt offset to the extra shift that
        # places the p-site in frame 0 (frame 1 -> +2, frame 2 -> +1)
        off_conv = {0:0, 1:2, 2:1}

        offset_stats = []

        # analyse only the norfs most highly expressed ORFs
        for i, row in pd_annot.head (norfs).iterrows ():

            # stop + 3 includes the stop codon in the analysed window
            tid, start, end = dtid2ref[row['tid']], int(row['start']), int(row['stop'])+3
            orf_id = row['orf_id']

            # per-read-length coverage of naive p-site positions along the ORF
            dcds = {}
            for lr in length_range:
                dcds[lr] = [0] * (end-start)

            for read in bam.fetch (tid, start, end):

                # only reads on the transcript strand are informative
                if read.is_reverse:
                    continue

                # naive p-site: 12 nt downstream of the read 5' end
                init_offset_pos = read.pos + 12
                read_length = read.infer_read_length ()

                if read_length < min_read_length or read_length > max_read_length:
                    continue

                if init_offset_pos >= start and init_offset_pos < end:

                    init_rel_pos = init_offset_pos - start
                    offset = off_conv[init_rel_pos % 3]
                    rel_pos = (12 + offset + read.pos) - start

                    # sanity check: the corrected p-site must land in frame 0
                    if rel_pos % 3 != 0:
                        print ("something wrong with offset")

                    count_offsets[read_length][offset] += 1

                    if init_rel_pos >= 0 and init_rel_pos < len (dcds[read_length]):
                        dcds[read_length][init_rel_pos] += 1

            # check phasing for each read length individually
            for lr in length_range:

                mat = get_2D_matrix (dcds[lr])
                # frames ordered by descending read support
                frame_sort = np.argsort(mat.sum(axis=0))[::-1]

                # set the frame with most count as onframe (index 0)
                mat = mat[:,frame_sort]

                output_stats = get_phasing_stats (mat, p_methods)
                output_stats['read_length'] = lr
                output_stats['tid'] = tid
                output_stats['orf_id'] = orf_id
                output_stats['offset'] = 12 + off_conv[frame_sort[0]] # best offset
                output_stats['frame0'] = np.sum (mat[:,0]) # sum of reads with onframe psites
                output_stats['frame1'] = np.sum (mat[:,1])
                output_stats['frame2'] = np.sum (mat[:,2])

                offset_stats.append(output_stats)

        pd_stats = pd.DataFrame (offset_stats)

        # optionally keep the per-ORF statistics before aggregation
        if full_output != "":
            pd_stats['bam'] = bamfile
            pd_full = pd.concat ([pd_full, pd_stats])

        # for each read-length aggregate the results for the analysed transcripts
        pd_stats = pd_stats \
            .dropna() \
            .groupby ('read_length') \
            .apply (agg_f, p_methods=p_methods, percentile=percentile)

        pd_stats['bam'] = bamfile

        pd_output = pd.concat ([pd_output, pd_stats])

    print ("extracted offsets:")
    print (pd_output[['bam', 'offset_key', 'offset_pct']])

    # save to output
    if output != "":
        pd_output.to_csv (output, sep="\t")

    if full_output != "":
        pd_full.to_csv (full_output, sep="\t")

    # expose the groupby index as an explicit column before returning
    pd_output['read_length'] = pd_output.index

    return (pd_output)
def ribofy_offset ():
    """Command-line entry point: parse arguments and run get_offset."""
    parser = argparse2 (
        description="",
        usage="",
        help=""
    )
    # positional placeholder so "ribofy offset ..." style invocation works
    parser.add_argument('detect', nargs='?', help='') # dummy argument
    parser._action_groups.pop()

    # required
    parser.add_argument("--bam", dest='bam', nargs="+", required=True, help="bam file - sorted and indexed")
    parser.add_argument("--orfs", dest='orfs', required=True, help="orfs - generated by get_ORFs.py")
    parser.add_argument("--output", dest='output', default = "ribofy_offsets.txt", help="output")

    #optional
    parser.add_argument('--norfs', dest='norfs', default = 20, type = int, help="number of distinct orfs to build offsets")
    parser.add_argument('--min_read_length', dest='min_read_length', default = 25, type = int, help="minimum read length used in analysis")
    parser.add_argument('--max_read_length', dest='max_read_length', default = 35, type = int, help="maximum read length used in analysis")
    parser.add_argument("--percentile", dest='percentile', default = 0.9, help="Percentile of consistent offset-determinants")
    parser.add_argument("--p_methods", dest='p_methods', nargs="*", default = ["binom", "wilcox", "glm"], help="Statistics: possibilities: binom, wilcox, glm, and taper")
    parser.add_argument("--full_output", dest='full_output', default = "", help="output")

    args = parser.parse_args()

    # forward the parsed options straight to the worker function
    get_offset (args.bam, args.orfs,
                output=args.output,
                norfs=args.norfs,
                min_read_length = args.min_read_length,
                max_read_length = args.max_read_length,
                percentile=args.percentile,
                p_methods=args.p_methods,
                full_output= args.full_output)
if __name__ == "__main__":
ribofy_offset () | [
"pandas.Series",
"pandas.DataFrame",
"pandas.read_csv",
"collections.Counter",
"numpy.sum",
"pysam.Samfile",
"pandas.concat"
] | [((1757, 1791), 'pandas.Series', 'pd.Series', (['d'], {'index': '[i for i in d]'}), '(d, index=[i for i in d])\n', (1766, 1791), True, 'import pandas as pd\n'), ((3114, 3128), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3126, 3128), True, 'import pandas as pd\n'), ((3144, 3158), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3156, 3158), True, 'import pandas as pd\n'), ((1727, 1739), 'numpy.sum', 'np.sum', (['x[c]'], {}), '(x[c])\n', (1733, 1739), True, 'import numpy as np\n'), ((2889, 2916), 'pandas.read_csv', 'pd.read_csv', (['orfs'], {'sep': '"""\t"""'}), "(orfs, sep='\\t')\n", (2900, 2916), True, 'import pandas as pd\n'), ((3331, 3353), 'pysam.Samfile', 'pysam.Samfile', (['bamfile'], {}), '(bamfile)\n', (3344, 3353), False, 'import pysam\n'), ((6150, 6176), 'pandas.DataFrame', 'pd.DataFrame', (['offset_stats'], {}), '(offset_stats)\n', (6162, 6176), True, 'import pandas as pd\n'), ((6632, 6664), 'pandas.concat', 'pd.concat', (['[pd_output, pd_stats]'], {}), '([pd_output, pd_stats])\n', (6641, 6664), True, 'import pandas as pd\n'), ((1479, 1499), 'collections.Counter', 'Counter', (["x['offset']"], {}), "(x['offset'])\n", (1486, 1499), False, 'from collections import Counter\n'), ((6269, 6299), 'pandas.concat', 'pd.concat', (['[pd_full, pd_stats]'], {}), '([pd_full, pd_stats])\n', (6278, 6299), True, 'import pandas as pd\n'), ((5905, 5922), 'numpy.sum', 'np.sum', (['mat[:, 0]'], {}), '(mat[:, 0])\n', (5911, 5922), True, 'import numpy as np\n'), ((6000, 6017), 'numpy.sum', 'np.sum', (['mat[:, 1]'], {}), '(mat[:, 1])\n', (6006, 6017), True, 'import numpy as np\n'), ((6059, 6076), 'numpy.sum', 'np.sum', (['mat[:, 2]'], {}), '(mat[:, 2])\n', (6065, 6076), True, 'import numpy as np\n')] |
from pyimagesearch import datasets
from pyimagesearch import models
from sklearn.model_selection import train_test_split
from keras.layers.core import Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import concatenate
import tensorflow as tf
from tensorflow import feature_column
import numpy as np
import argparse
import locale
import os
import cv2
#%%
# Extract wrist ROIs from the raw images; the crops are later fed to the model.
import glob
import os
from os import walk

# Expected output names: wavelength (770/850/940 nm) x hand (L/R) x shot (01/02).
names = ["770L01.png","770L02.png","770R01.png","770R02.png","850L01.png","850L02.png","850R01.png","850R02.png","940L01.png","940L02.png","940R01.png","940R02.png"]
# Spreadsheet cursor (first free cell) for logging mean ROI intensities.
col = 2
r = 2
for sub in os.listdir(r"demo\data"):
    path = r"demo\data"
    save_path = r"demo\wrist"  # a path for saving image
    path = os.path.join(path, sub)
    save_path = os.path.join(save_path, sub)
    # BUG FIX: the original guarded makedirs with `not os.path.isfile(save_path)`,
    # which raises FileExistsError when the directory already exists.
    os.makedirs(save_path, exist_ok=True)
    a = 0  # index into `names` for the next saved crop
    for img in os.listdir(path):
        if os.path.join(path, img).endswith(".png"):
            img = cv2.imread(os.path.join(path, img))
            # Histogram equalization on the grayscale image.
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # turn RGB into GRAY
            hist, bins = np.histogram(gray.flatten(), 256, [0, 256])
            cdf = hist.cumsum()
            cdf_m = np.ma.masked_equal(cdf, 0)  # mask out the zero bins of the histogram
            cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
            cdf = np.ma.filled(cdf_m, 0).astype('uint8')  # fill masked entries with 0
            img2 = cdf[gray.astype(np.uint8)]
            # Binarise bright regions, then take the union of all contour boxes.
            ret, thresh1 = cv2.threshold(img2, 200, 255, cv2.THRESH_BINARY)
            _, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # hierarchy is None when no contours were found.
            try:
                hierarchy = hierarchy[0]
            except (TypeError, IndexError):
                hierarchy = []
            height, width = thresh1.shape
            min_x, min_y = width, height
            max_x = max_y = 0
            # Union of every contour's bounding box.
            for contour, hier in zip(contours, hierarchy):
                (x, y, w, h) = cv2.boundingRect(contour)
                min_x, max_x = min(x, min_x), max(x + w, max_x)
                min_y, max_y = min(y, min_y), max(y + h, max_y)
            if max_x - min_x > 0 and max_y - min_y > 0:
                # Draw the shrunken ROI for visual inspection.
                cv2.rectangle(img, (int(min_x*1.1), int(min_y*1.0)), (int(max_x*0.95), int(max_y*0.9)), (255, 0, 0), 2)
                x_range = int(max_x*0.95) - int(min_x*1.1)
                # Keep the crop roughly square; shrink the top edge if needed.
                if int(max_y*0.9) - (int(min_y) + x_range) < abs(int(min_x*1.1) - int(max_x*0.95))/5:
                    add = int(max_y*0.9) - int(min_y) - abs(int(min_x*1.1) - int(max_x*0.95))/3
                    rect = img2[(int(min_y) + int(add)):int(max_y*0.9), int(min_x*1.1):int(max_x*0.95)]
                else:
                    rect = img2[(int(min_y) + x_range):int(max_y*0.9), int(min_x*1.1):int(max_x*0.95)]
                cv2.imwrite(os.path.join(save_path, "{}".format(names[a])), rect)
                a += 1
                if a == 12:
                    a = 0
                # NOTE(review): `ws` (a spreadsheet worksheet) is never defined
                # in this script, so the cell writes below raise NameError at
                # runtime. TODO: create an openpyxl worksheet before this loop.
                if col <= 7:
                    ws.cell(row=r, column=col).value = rect.mean()
                    col += 1
                else:
                    col = 2
                    r += 1
                    ws.cell(row=r, column=col).value = rect.mean()
                    col += 1
#%%
# Load the tabular patient data, drop rows without a name, then remove the
# identifying column before feature engineering.
df = datasets.load_data(r'C:\Users\User\Desktop\Peter\Bone_density\demo\demo.xlsx')
df.dropna(subset = ["Name"], inplace=True)
df = df.reset_index(drop=True)
df.pop("Name")
# Load the matching left-wrist ROI images for the CNN branch.
images_Left = datasets.load_wrist_images(df,r'C:\Users\User\Desktop\Peter\Bone_density\demo\wrist',left_right = "Left")
#%%
# Build tf.feature_column encoders for the categorical/numeric inputs.
feature_columns = []
feature_layer_inputs = {}

# Sex: two-value vocabulary, one-hot encoded.
sex = feature_column.categorical_column_with_vocabulary_list(
    'Sex', ['male', 'female'])
sex_one_hot = feature_column.indicator_column(sex)
feature_columns.append(sex_one_hot)
feature_layer_inputs['Sex'] = tf.keras.Input(shape=(1,), name='Sex', dtype=tf.string)

# Age: bucketised into decade bins.
age = feature_column.numeric_column("Age")
age_buckets = feature_column.bucketized_column(age, boundaries=[20, 30, 40, 50, 60, 70])
feature_columns.append(age_buckets)
# demo(age_buckets)

# Menopause status: three-value vocabulary, learned 6-dim embedding.
Menopause = feature_column.categorical_column_with_vocabulary_list(
    'Menopause', ['not suit', 'yes', 'no'])
Menopause_embedding = feature_column.embedding_column(Menopause, dimension=6)
feature_columns.append(Menopause_embedding)
feature_layer_inputs['Menopause'] = tf.keras.Input(shape=(1,), name='Menopause', dtype=tf.string)
# demo(Menopause_embedding)

# Bone-injury history: yes/no, one-hot encoded.
Bone_injured = feature_column.categorical_column_with_vocabulary_list(
    'Bone_injured', ['yes', 'no'])
Bone_injured_one_hot = feature_column.indicator_column(Bone_injured)
feature_columns.append(Bone_injured_one_hot)
# Bone_injured_embedding = feature_column.embedding_column(Bone_injured, dimension=8)
feature_layer_inputs['Bone_injured'] = tf.keras.Input(shape=(1,), name='Bone_injured', dtype=tf.string)
# demo(Bone_injured_one_hot)
#%%
# Densify each feature column over the whole dataframe and concatenate the
# arrays column-wise into one numeric matrix used as the MLP input.
test_data = []
first = True
for feature in feature_columns:
    feature_layer = tf.keras.layers.DenseFeatures(feature)
    feature_array = feature_layer(dict(df)).numpy()
    if first:
        test_data=feature_array
        first = False
        continue
    test_data = np.concatenate((test_data, feature_array), axis=1)
    print(feature_layer(dict(df)).numpy())
#%%
import keras
from keras.layers import LeakyReLU
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

# Two-branch model: an MLP over the tabular features and a CNN over the
# left-wrist image; their outputs feed a small regression head.
mlp = models.create_mlp(np.asarray(test_data).shape[1], regress=True)
cnn_left = models.create_cnn(256, 128, 6, regress=False)
# cnn_right = models.create_cnn(256, 128, 6, regress=False)
# create the input to our final set of layers as the *output* of both
# the MLP and CNN
# combinedInput = concatenate([mlp.output, cnn_left.output, cnn_right.output])
combinedInput = concatenate([mlp.output, cnn_left.output])
# our final FC layer head will have two dense layers, the final one
# being our regression head
# NOTE(review): a LeakyReLU layer instance is passed as `activation` --
# Keras accepts a callable here, but an explicit activation layer is cleaner.
x = Dense(8, activation=LeakyReLU(alpha=0.2))(combinedInput)
x = Dense(4, activation=LeakyReLU(alpha=0.2))(x)
x = Dense(1)(x)
# our final model will accept categorical/numerical data on the MLP
# model = Model(inputs=[mlp.input, cnn_left.input, cnn_right.input], outputs=x)
my_model = Model(inputs=[mlp.input, cnn_left.input], outputs=x)
# Load pretrained weights for the distal-radius (UD, left) model.
my_model.load_weights('Radius_UD_L.h5')
my_model.summary()
#%%
# Run inference: tabular features + left-wrist images -> predicted bone density.
predictions = my_model.predict([test_data, images_Left])
print(predictions)
| [
"numpy.ma.masked_equal",
"tensorflow.feature_column.indicator_column",
"pyimagesearch.datasets.load_data",
"os.listdir",
"tensorflow.keras.layers.DenseFeatures",
"cv2.threshold",
"pyimagesearch.datasets.load_wrist_images",
"numpy.asarray",
"tensorflow.feature_column.numeric_column",
"numpy.ma.fill... | [((670, 694), 'os.listdir', 'os.listdir', (['"""demo\\\\data"""'], {}), "('demo\\\\data')\n", (680, 694), False, 'import os\n'), ((3732, 3821), 'pyimagesearch.datasets.load_data', 'datasets.load_data', (['"""C:\\\\Users\\\\User\\\\Desktop\\\\Peter\\\\Bone_density\\\\demo\\\\demo.xlsx"""'], {}), "(\n 'C:\\\\Users\\\\User\\\\Desktop\\\\Peter\\\\Bone_density\\\\demo\\\\demo.xlsx')\n", (3750, 3821), False, 'from pyimagesearch import datasets\n'), ((3915, 4034), 'pyimagesearch.datasets.load_wrist_images', 'datasets.load_wrist_images', (['df', '"""C:\\\\Users\\\\User\\\\Desktop\\\\Peter\\\\Bone_density\\\\demo\\\\wrist"""'], {'left_right': '"""Left"""'}), "(df,\n 'C:\\\\Users\\\\User\\\\Desktop\\\\Peter\\\\Bone_density\\\\demo\\\\wrist',\n left_right='Left')\n", (3941, 4034), False, 'from pyimagesearch import datasets\n'), ((4082, 4167), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'feature_column.categorical_column_with_vocabulary_list', (['"""Sex"""', "['male', 'female']"], {}), "('Sex', ['male',\n 'female'])\n", (4136, 4167), False, 'from tensorflow import feature_column\n'), ((4185, 4221), 'tensorflow.feature_column.indicator_column', 'feature_column.indicator_column', (['sex'], {}), '(sex)\n', (4216, 4221), False, 'from tensorflow import feature_column\n'), ((4288, 4343), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(1,)', 'name': '"""Sex"""', 'dtype': 'tf.string'}), "(shape=(1,), name='Sex', dtype=tf.string)\n", (4302, 4343), True, 'import tensorflow as tf\n'), ((4354, 4390), 'tensorflow.feature_column.numeric_column', 'feature_column.numeric_column', (['"""Age"""'], {}), "('Age')\n", (4383, 4390), False, 'from tensorflow import feature_column\n'), ((4405, 4479), 'tensorflow.feature_column.bucketized_column', 'feature_column.bucketized_column', (['age'], {'boundaries': '[20, 30, 40, 50, 60, 70]'}), '(age, boundaries=[20, 30, 40, 50, 60, 70])\n', (4437, 4479), False, 'from tensorflow import 
feature_column\n'), ((4549, 4648), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'feature_column.categorical_column_with_vocabulary_list', (['"""Menopause"""', "['not suit', 'yes', 'no']"], {}), "('Menopause', [\n 'not suit', 'yes', 'no'])\n", (4603, 4648), False, 'from tensorflow import feature_column\n'), ((4673, 4728), 'tensorflow.feature_column.embedding_column', 'feature_column.embedding_column', (['Menopause'], {'dimension': '(6)'}), '(Menopause, dimension=6)\n', (4704, 4728), False, 'from tensorflow import feature_column\n'), ((4809, 4870), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(1,)', 'name': '"""Menopause"""', 'dtype': 'tf.string'}), "(shape=(1,), name='Menopause', dtype=tf.string)\n", (4823, 4870), True, 'import tensorflow as tf\n'), ((4916, 5006), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'feature_column.categorical_column_with_vocabulary_list', (['"""Bone_injured"""', "['yes', 'no']"], {}), "('Bone_injured', [\n 'yes', 'no'])\n", (4970, 5006), False, 'from tensorflow import feature_column\n'), ((5032, 5077), 'tensorflow.feature_column.indicator_column', 'feature_column.indicator_column', (['Bone_injured'], {}), '(Bone_injured)\n', (5063, 5077), False, 'from tensorflow import feature_column\n'), ((5248, 5312), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(1,)', 'name': '"""Bone_injured"""', 'dtype': 'tf.string'}), "(shape=(1,), name='Bone_injured', dtype=tf.string)\n", (5262, 5312), True, 'import tensorflow as tf\n'), ((5913, 5958), 'pyimagesearch.models.create_cnn', 'models.create_cnn', (['(256)', '(128)', '(6)'], {'regress': '(False)'}), '(256, 128, 6, regress=False)\n', (5930, 5958), False, 'from pyimagesearch import models\n'), ((6205, 6247), 'keras.layers.concatenate', 'concatenate', (['[mlp.output, cnn_left.output]'], {}), '([mlp.output, cnn_left.output])\n', (6216, 6247), False, 'from keras.layers import concatenate\n'), ((6633, 6685), 'keras.models.Model', 
'Model', ([], {'inputs': '[mlp.input, cnn_left.input]', 'outputs': 'x'}), '(inputs=[mlp.input, cnn_left.input], outputs=x)\n', (6638, 6685), False, 'from keras.models import Model\n'), ((787, 810), 'os.path.join', 'os.path.join', (['path', 'sub'], {}), '(path, sub)\n', (799, 810), False, 'import os\n'), ((827, 855), 'os.path.join', 'os.path.join', (['save_path', 'sub'], {}), '(save_path, sub)\n', (839, 855), False, 'import os\n'), ((995, 1011), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1005, 1011), False, 'import os\n'), ((5427, 5465), 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', (['feature'], {}), '(feature)\n', (5456, 5465), True, 'import tensorflow as tf\n'), ((5619, 5669), 'numpy.concatenate', 'np.concatenate', (['(test_data, feature_array)'], {'axis': '(1)'}), '((test_data, feature_array), axis=1)\n', (5633, 5669), True, 'import numpy as np\n'), ((6460, 6468), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (6465, 6468), False, 'from keras.layers.core import Dense\n'), ((867, 892), 'os.path.isfile', 'os.path.isfile', (['save_path'], {}), '(save_path)\n', (881, 892), False, 'import os\n'), ((902, 924), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (913, 924), False, 'import os\n'), ((1257, 1294), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1269, 1294), False, 'import cv2\n'), ((1490, 1516), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['cdf', '(0)'], {}), '(cdf, 0)\n', (1508, 1516), True, 'import numpy as np\n'), ((1866, 1914), 'cv2.threshold', 'cv2.threshold', (['img2', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img2, 200, 255, cv2.THRESH_BINARY)\n', (1879, 1914), False, 'import cv2\n'), ((1958, 2027), 'cv2.findContours', 'cv2.findContours', (['thresh1', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1974, 2027), False, 'import cv2\n'), ((5856, 5877), 
'numpy.asarray', 'np.asarray', (['test_data'], {}), '(test_data)\n', (5866, 5877), True, 'import numpy as np\n'), ((6370, 6390), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6379, 6390), False, 'from keras.layers import LeakyReLU\n'), ((6431, 6451), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6440, 6451), False, 'from keras.layers import LeakyReLU\n'), ((1024, 1047), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1036, 1047), False, 'import os\n'), ((1095, 1118), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1107, 1118), False, 'import os\n'), ((2414, 2439), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (2430, 2439), False, 'import cv2\n'), ((1617, 1639), 'numpy.ma.filled', 'np.ma.filled', (['cdf_m', '(0)'], {}), '(cdf_m, 0)\n', (1629, 1639), True, 'import numpy as np\n')] |
import numpy as np
import os
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Has to change whenever noise_width and noise_height change in the
# PerlinNoise.hpp file.
DIMENSION1 = 200
DIMENSION2 = 200
# Resolve the input file relative to this script's own directory so the
# script works regardless of the current working directory.  The original
# `path + "\input0.txt"` relied on a literal backslash (an invalid string
# escape that only happened to work on Windows); os.path.join is portable.
path = os.path.dirname(os.path.realpath(__file__))
FILENAME = os.path.join(path, "input0.txt")
if __name__ == '__main__':
    # Read the whitespace-separated height values and shape them into a
    # DIMENSION2 x DIMENSION1 grid.  `with` guarantees the file handle is
    # closed (the original leaked it), and the deprecated np.fromstring
    # text mode is replaced by an explicit split + array construction.
    with open(FILENAME, 'r') as fh:
        noise = np.array(fh.read().split(), dtype=float).reshape(DIMENSION2, DIMENSION1)
    # Build a grid spanning the two dimensions.
    Xr = np.arange(DIMENSION1)
    Yr = np.arange(DIMENSION2)
    X, Y = np.meshgrid(Xr, Yr)
    # Build a figure with 2 subplots: a 3D surface on top, a 2D heatmap below.
    fig = plt.figure()
    fig.suptitle("3D and 2D heighmap")
    colormap = 'coolwarm'
    ax = fig.add_subplot(2, 1, 1, projection='3d')
    surf = ax.plot_surface(X, Y, noise, rstride=1, cstride=1, cmap=colormap, linewidth=0, antialiased=False)
    ax2 = fig.add_subplot(2, 1, 2)
    im = ax2.imshow(noise, cmap=colormap, interpolation='nearest')
    # Swap the Y axis so it aligns with the 3D plot.
    ax2.invert_yaxis()
    # Add an explanatory colour bar.
    plt.colorbar(im, orientation='horizontal')
    # Show the image.
    plt.show()
| [
"matplotlib.pyplot.colorbar",
"os.path.realpath",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((294, 320), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (310, 320), False, 'import os\n'), ((560, 581), 'numpy.arange', 'np.arange', (['DIMENSION1'], {}), '(DIMENSION1)\n', (569, 581), True, 'import numpy as np\n'), ((591, 612), 'numpy.arange', 'np.arange', (['DIMENSION2'], {}), '(DIMENSION2)\n', (600, 612), True, 'import numpy as np\n'), ((624, 643), 'numpy.meshgrid', 'np.meshgrid', (['Xr', 'Yr'], {}), '(Xr, Yr)\n', (635, 643), True, 'import numpy as np\n'), ((709, 721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (719, 721), True, 'from matplotlib import pyplot as plt\n'), ((1166, 1208), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'orientation': '"""horizontal"""'}), "(im, orientation='horizontal')\n", (1178, 1208), True, 'from matplotlib import pyplot as plt\n'), ((1235, 1245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1243, 1245), True, 'from matplotlib import pyplot as plt\n')] |
# import numpy as np
# import os
# import skimage.io as io
# import skimage.transform as trans
# import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
import tensorflow as tf
import numpy as np
from skimage.morphology import label
from tensorflow.keras import backend as k
from tensorflow import keras
def iou_metric(y_true_in, y_pred_in, print_table=False):
    """Mean average precision of predicted vs. ground-truth objects.

    Both inputs are binarized at 0.5 and split into connected components;
    precision is computed at IoU thresholds 0.5, 0.55, ..., 0.95 and the
    mean over thresholds is returned.  Set print_table=True for a
    per-threshold breakdown.
    """
    gt_components = label(y_true_in > 0.5)
    pred_components = label(y_pred_in > 0.5)
    n_true = len(np.unique(gt_components))
    n_pred = len(np.unique(pred_components))

    # Pairwise overlap (in voxels/pixels) between every gt/pred component.
    overlap = np.histogram2d(gt_components.flatten(), pred_components.flatten(),
                   bins=(n_true, n_pred))[0]

    # Component sizes, broadcast against each other to form the union grid.
    size_true = np.expand_dims(np.histogram(gt_components, bins=n_true)[0], -1)
    size_pred = np.expand_dims(np.histogram(pred_components, bins=n_pred)[0], 0)
    denom = size_true + size_pred - overlap

    # Drop the background component (label 0) and guard against /0.
    overlap = overlap[1:, 1:]
    denom = denom[1:, 1:]
    denom[denom == 0] = 1e-9
    iou = overlap / denom

    def counts_at(threshold):
        # tp: gt objects matched by exactly one prediction;
        # fp: predictions matching no gt object;
        # fn: gt objects matched by no prediction.
        hits = iou > threshold
        tp = np.sum(np.sum(hits, axis=1) == 1)
        fp = np.sum(np.sum(hits, axis=0) == 0)
        fn = np.sum(np.sum(hits, axis=1) == 0)
        return tp, fp, fn

    if print_table:
        print("Thresh\tTP\tFP\tFN\tPrec.")
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = counts_at(t)
        p = tp / (tp + fp + fn) if (tp + fp + fn) > 0 else 0
        if print_table:
            print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)
    if print_table:
        print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return np.mean(prec)
def iou_metric_batch(y_true_in, y_pred_in):
    """Average iou_metric over the first (batch) axis of the two arrays."""
    scores = [iou_metric(y_true_in[i], y_pred_in[i])
              for i in range(y_true_in.shape[0])]
    return np.array(np.mean(scores), dtype=np.float32)
def my_iou_metric(label, pred):
    """Keras-compatible metric: wraps the numpy IoU computation in a TF op."""
    return tf.py_func(iou_metric_batch, [label, pred], tf.float32)
def unet(pretrained_weights = False, input_size = (224,224,3,)):
    """Build a small U-Net (8->128 filters) with a 1-channel sigmoid head.

    If pretrained_weights is truthy, weights are loaded from
    'log1/only_weights.h5'.  The compiled model uses Adam(1e-4),
    binary cross-entropy, and accuracy + my_iou_metric as metrics.
    """

    def double_conv(tensor, n_filters):
        # Two stacked 3x3 ReLU convolutions — the standard U-Net block.
        tensor = Conv2D(n_filters, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(tensor)
        return Conv2D(n_filters, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(tensor)

    def upsample(tensor, n_filters):
        # Learned 2x upsampling via transposed convolution.
        return Conv2DTranspose(n_filters, 2, strides=(2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(tensor)

    inputs = Input(input_size)

    # Encoder: filters double (8 -> 128) while spatial resolution halves.
    enc1 = double_conv(inputs, 8)
    enc2 = double_conv(MaxPooling2D(pool_size=(2, 2))(enc1), 16)
    enc3 = double_conv(MaxPooling2D(pool_size=(2, 2))(enc2), 32)
    enc4 = double_conv(MaxPooling2D(pool_size=(2, 2))(enc3), 64)
    bottom = double_conv(MaxPooling2D(pool_size=(2, 2))(enc4), 128)

    # Decoder with skip connections.  The concatenation order mirrors the
    # original implementation exactly (upsampled tensor first only at the
    # deepest level), as does the 16-filter transpose conv at the top.
    dec4 = double_conv(concatenate([upsample(bottom, 64), enc4], axis = 3), 64)
    dec3 = double_conv(concatenate([enc3, upsample(dec4, 32)], axis = 3), 32)
    dec2 = double_conv(concatenate([enc2, upsample(dec3, 16)], axis = 3), 16)
    dec1 = double_conv(concatenate([enc1, upsample(dec2, 16)], axis = 3), 8)

    # Single-channel sigmoid head for binary segmentation masks.
    outputs = Conv2D(1, 1, activation = 'sigmoid')(dec1)

    model = Model(inputs, outputs)
    model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy',my_iou_metric])
    if(pretrained_weights):
        model.load_weights('log1/only_weights.h5')
    return model
| [
"numpy.mean",
"numpy.histogram",
"numpy.unique",
"skimage.morphology.label",
"numpy.sum",
"numpy.expand_dims",
"tensorflow.py_func",
"numpy.arange"
] | [((458, 480), 'skimage.morphology.label', 'label', (['(y_true_in > 0.5)'], {}), '(y_true_in > 0.5)\n', (463, 480), False, 'from skimage.morphology import label\n'), ((494, 516), 'skimage.morphology.label', 'label', (['(y_pred_in > 0.5)'], {}), '(y_pred_in > 0.5)\n', (499, 516), False, 'from skimage.morphology import label\n'), ((917, 946), 'numpy.expand_dims', 'np.expand_dims', (['area_true', '(-1)'], {}), '(area_true, -1)\n', (931, 946), True, 'import numpy as np\n'), ((963, 991), 'numpy.expand_dims', 'np.expand_dims', (['area_pred', '(0)'], {}), '(area_pred, 0)\n', (977, 991), True, 'import numpy as np\n'), ((1840, 1865), 'numpy.arange', 'np.arange', (['(0.5)', '(1.0)', '(0.05)'], {}), '(0.5, 1.0, 0.05)\n', (1849, 1865), True, 'import numpy as np\n'), ((2222, 2235), 'numpy.mean', 'np.mean', (['prec'], {}), '(prec)\n', (2229, 2235), True, 'import numpy as np\n'), ((2570, 2625), 'tensorflow.py_func', 'tf.py_func', (['iou_metric_batch', '[label, pred]', 'tf.float32'], {}), '(iou_metric_batch, [label, pred], tf.float32)\n', (2580, 2625), True, 'import tensorflow as tf\n'), ((541, 558), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (550, 558), True, 'import numpy as np\n'), ((583, 600), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (592, 600), True, 'import numpy as np\n'), ((799, 838), 'numpy.histogram', 'np.histogram', (['labels'], {'bins': 'true_objects'}), '(labels, bins=true_objects)\n', (811, 838), True, 'import numpy as np\n'), ((858, 897), 'numpy.histogram', 'np.histogram', (['y_pred'], {'bins': 'pred_objects'}), '(y_pred, bins=pred_objects)\n', (870, 897), True, 'import numpy as np\n'), ((2482, 2497), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (2489, 2497), True, 'import numpy as np\n'), ((1405, 1428), 'numpy.sum', 'np.sum', (['matches'], {'axis': '(1)'}), '(matches, axis=1)\n', (1411, 1428), True, 'import numpy as np\n'), ((1479, 1502), 'numpy.sum', 'np.sum', (['matches'], {'axis': '(0)'}), '(matches, 
axis=0)\n', (1485, 1502), True, 'import numpy as np\n'), ((1552, 1575), 'numpy.sum', 'np.sum', (['matches'], {'axis': '(1)'}), '(matches, axis=1)\n', (1558, 1575), True, 'import numpy as np\n'), ((1619, 1641), 'numpy.sum', 'np.sum', (['true_positives'], {}), '(true_positives)\n', (1625, 1641), True, 'import numpy as np\n'), ((1643, 1666), 'numpy.sum', 'np.sum', (['false_positives'], {}), '(false_positives)\n', (1649, 1666), True, 'import numpy as np\n'), ((1668, 1691), 'numpy.sum', 'np.sum', (['false_negatives'], {}), '(false_negatives)\n', (1674, 1691), True, 'import numpy as np\n'), ((2195, 2208), 'numpy.mean', 'np.mean', (['prec'], {}), '(prec)\n', (2202, 2208), True, 'import numpy as np\n')] |
from random import randint as rand
import matplotlib.pyplot as plt
import numpy as np
from math import factorial
import pandas as pd
# Number of coins flipped per trial and number of trials, read from the user.
# (The module-level `global` statements in the original were no-ops and were
# removed; `global` only has meaning inside a function body.)
coins = int(input("enter number of coins:"))
trial = int(input("enter the number of trials:"))
val = []        # outcomes (0/1 per coin) of the trial currently in progress
counts = {}     # observed histogram: number of heads -> frequency
def coin_toss():
    """Flip one fair coin and record the outcome (1 = heads)."""
    val.append(rand(0, 1))
def tosser():
    """Flip all `coins` coins once."""
    for _ in range(coins):
        coin_toss()
def counter():
    """Tally the number of heads of the current trial, then reset it."""
    global val
    heads = int(np.array(val).sum())
    counts[heads] = counts.get(heads, 0) + 1
    val = []
theor_freq =[]
def theorotical(N,n):
    """Append the expected frequency of r heads over N trials of n fair
    flips, for every r = 0..n: N * C(n, r) / 2**n.

    Bug fix: the original looped over range(n) and therefore never
    produced the all-heads outcome (r == n).
    """
    for r in range(n + 1):
        theor_freq.append( (N* factorial(n)) / ( (factorial(n-r) * factorial(r) ) * (2**n) ))
def start():
    """Run every trial, then compute the theoretical distribution."""
    for _ in range(trial):
        tosser()
        counter()
    theorotical(trial,coins)
start()
# Sort the observed histogram by head count for a tidy table and plot.
counts = dict(sorted(counts.items()))
data = {"Number of Heads":counts.keys(),
        "freaquency": counts.values()}
df = pd.DataFrame(data)
print(df)
#plotting graph
x = counts.keys()
y = counts.values()
# Theoretical curve covers every possible head count 0..coins (the original
# range(coins) dropped the last point, mismatching the fixed theorotical()).
x_thear = [i for i in range(coins + 1)]
y_thear = theor_freq
data_th = {"Theoretical Number of Heads":x_thear,"Theoretical freaquency": y_thear}
df_th = pd.DataFrame(data_th)
print("Theoretical Random Distribution")
print(df_th)
plt.xlabel("values")
plt.ylabel("freaquency")
plt.plot(x,y)
plt.plot(x_thear,y_thear)
plt.legend(['Generated Random distribution','Theoretical Random distribution'], loc = 'lower right')
plt.show()
| [
"matplotlib.pyplot.ylabel",
"math.factorial",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"pandas.DataFrame",
"random.randint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1161, 1179), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1173, 1179), True, 'import pandas as pd\n'), ((1509, 1530), 'pandas.DataFrame', 'pd.DataFrame', (['data_th'], {}), '(data_th)\n', (1521, 1530), True, 'import pandas as pd\n'), ((1590, 1610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""values"""'], {}), "('values')\n", (1600, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1612, 1636), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""freaquency"""'], {}), "('freaquency')\n", (1622, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1654), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1648, 1654), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1683), 'matplotlib.pyplot.plot', 'plt.plot', (['x_thear', 'y_thear'], {}), '(x_thear, y_thear)\n', (1665, 1683), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1789), 'matplotlib.pyplot.legend', 'plt.legend', (["['Generated Random distribution', 'Theoretical Random distribution']"], {'loc': '"""lower right"""'}), "(['Generated Random distribution',\n 'Theoretical Random distribution'], loc='lower right')\n", (1696, 1789), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1800), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1798, 1800), True, 'import matplotlib.pyplot as plt\n'), ((402, 412), 'random.randint', 'rand', (['(0)', '(1)'], {}), '(0, 1)\n', (406, 412), True, 'from random import randint as rand\n'), ((545, 558), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (553, 558), True, 'import numpy as np\n'), ((809, 821), 'math.factorial', 'factorial', (['n'], {}), '(n)\n', (818, 821), False, 'from math import factorial\n'), ((829, 845), 'math.factorial', 'factorial', (['(n - r)'], {}), '(n - r)\n', (838, 845), False, 'from math import factorial\n'), ((846, 858), 'math.factorial', 'factorial', (['r'], {}), '(r)\n', (855, 858), False, 'from math import factorial\n')] |
import numpy as np
from sklearn.cluster import DBSCAN
from faster_particles.ppn_utils import crop as crop_util
from faster_particles.display_utils import extract_voxels
class CroppingAlgorithm(object):
    """
    Base class for any cropping algorithm, they should inherit from it
    and implement crop method (see below)
    """

    def __init__(self, cfg, debug=False):
        self.cfg = cfg
        self.d = cfg.SLICE_SIZE  # Patch or box/crop size
        self.a = cfg.CORE_SIZE  # Core size
        self.N = cfg.IMAGE_SIZE
        self._debug = debug

    def crop(self, coords):
        """
        coords is expected to be dimensions (None, 3) = list of non-zero voxels
        Returns a list of patches centers and sizes (of cubes centered at the
        patch centers)
        """
        pass

    def process(self, original_blob):
        """Crop original_blob into patches and extract one blob per patch."""
        # FIXME cfg.SLICE_SIZE vs patch_size
        patch_centers, patch_sizes = self.crop(original_blob['voxels'])
        return self.extract(patch_centers, patch_sizes, original_blob)

    def extract(self, patch_centers, patch_sizes, original_blob):
        """Build one cropped blob per patch center.

        Copies data/labels/weight/gt_pixels/voxels restricted to each patch
        and shifts coordinates into the patch frame.  Returns
        (batch_blobs, patch_centers, patch_sizes).
        """
        batch_blobs = []
        for i in range(len(patch_centers)):
            patch_center, patch_size = patch_centers[i], patch_sizes[i]
            blob = {}
            # Flip patch_center coordinates
            # because gt_pixels coordinates are reversed
            # FIXME here or before blob['data'] ??
            patch_center = np.flipud(patch_center)
            blob['data'], _ = crop_util(np.array([patch_center]),
                                        self.cfg.SLICE_SIZE,
                                        original_blob['data'], return_labels=False)
            patch_center = patch_center.astype(int)
            if 'labels' in original_blob:
                blob['labels'], _ = crop_util(np.array([patch_center]),
                                              self.cfg.SLICE_SIZE,
                                              original_blob['labels'][..., np.newaxis], return_labels=False)
                blob['labels'] = blob['labels'][..., 0]
            if 'weight' in original_blob:
                blob['weight'], _ = crop_util(np.array([patch_center]),
                                              self.cfg.SLICE_SIZE,
                                              original_blob['weight'][..., np.newaxis], return_labels=False)
                # Zero weights would silence those voxels entirely; floor at 0.1.
                blob['weight'][blob['weight'] == 0.0] = 0.1
                blob['weight'] = blob['weight'][..., 0]
            # Select gt pixels falling inside the patch cube and shift them
            # into patch-local coordinates.
            if 'gt_pixels' in original_blob:
                indices = np.where(np.all(np.logical_and(
                    original_blob['gt_pixels'][:, :-1] >= patch_center - patch_size/2.0,
                    original_blob['gt_pixels'][:, :-1] < patch_center + patch_size/2.0), axis=1))
                blob['gt_pixels'] = original_blob['gt_pixels'][indices]
                blob['gt_pixels'][:, :-1] = blob['gt_pixels'][:, :-1] - (patch_center - patch_size / 2.0)
                # Add artificial gt pixels where tracks are cut by the crop.
                artificial_gt_pixels = self.add_gt_pixels(original_blob, blob, patch_center, self.cfg.SLICE_SIZE)
                if artificial_gt_pixels.shape[0]:
                    blob['gt_pixels'] = np.concatenate([blob['gt_pixels'], artificial_gt_pixels], axis=0)
            # Select voxels
            # Flip patch_center coordinates back to normal
            patch_center = np.flipud(patch_center)
            if 'voxels' in original_blob:
                voxels = original_blob['voxels']
                blob['voxels'] = voxels[np.all(np.logical_and(
                    voxels >= patch_center - patch_size / 2.0,
                    voxels < patch_center + patch_size / 2.0), axis=1)]
                blob['voxels'] = blob['voxels'] - (patch_center - patch_size / 2.0)
            blob['entries'] = original_blob['entries']
            # Crops for small UResNet
            # NOTE(review): this branch reads blob['gt_pixels'] and will raise
            # KeyError when original_blob has no 'gt_pixels' — confirm callers.
            if self.cfg.NET == 'small_uresnet':
                blob['crops'], blob['crops_labels'] = crop_util(
                    blob['gt_pixels'][:, :-1],
                    self.cfg.CROP_SIZE, blob['data'])
            # FIXME FIXME FIXME
            # Make sure there is at least one ground truth pixel in the patch (for training)
            if self.cfg.NET not in ['ppn', 'ppn_ext', 'full'] or len(blob['gt_pixels']) > 0:
                batch_blobs.append(blob)
        return batch_blobs, patch_centers, patch_sizes

    def compute_overlap(self, coords, patch_centers, sizes=None):
        """
        Compute overlap dict: dict[x] gives the number of voxels which belong
        to x patches.
        """
        if sizes is None:
            sizes = self.d/2.0
        overlap = []
        for voxel in coords:
            overlap.append(np.sum(np.all(np.logical_and(
                patch_centers-sizes <= voxel,
                patch_centers + sizes >= voxel
            ), axis=1)))
        return dict(zip(*np.unique(overlap, return_counts=True)))

    def add_gt_pixels(self, original_blob, blob, patch_center, patch_size):
        """
        Add artificial gt pixels after cropping: cluster the non-zero voxels
        touching the crop boundary (or the padded region) with DBSCAN and keep
        the highest-intensity voxel of each cluster as a new track endpoint.
        """
        # Case 1: crop boundary is intersecting with data
        nonzero_idx = np.array(np.where(blob['data'][0, ..., 0] > 0.0)).T  # N x 3
        border_idx = nonzero_idx[np.any(np.logical_or(nonzero_idx == 0, nonzero_idx == self.cfg.IMAGE_SIZE - 1), axis=1)]
        # Case 2: crop is partially outside of original data (thus padded)
        # if patch_center is within patch_size of boundaries of original blob
        # boundary intersecting with data
        padded_idx = nonzero_idx[np.any(np.logical_or(nonzero_idx + patch_center - patch_size / 2.0 >= self.cfg.IMAGE_SIZE - 2, nonzero_idx + patch_center - patch_size / 2.0 <= 1), axis=1)]
        # dbscan on all found voxels from case 1 and 2
        coords = np.concatenate([border_idx, padded_idx], axis=0)
        artificial_gt_pixels = []
        if coords.shape[0]:
            db = DBSCAN(eps=10, min_samples=3).fit_predict(coords)
            for v in np.unique(db):
                cluster = coords[db == v]
                artificial_gt_pixels.append(cluster[np.argmax(blob['data'][0, ..., 0][cluster.T[0], cluster.T[1], cluster.T[2]]), :])
            # Append a class column of ones to match gt_pixels layout.
            artificial_gt_pixels = np.concatenate([artificial_gt_pixels, np.ones((len(artificial_gt_pixels), 1))], axis=1)
        return np.array(artificial_gt_pixels)

    def reconcile(self, batch_results, patch_centers, patch_sizes):
        """
        Reconcile per-patch network results back into the full-image frame.
        Averages softmax scores over voxels covered by several patches and
        shifts PPN proposals/ROIs into original coordinates.
        """
        final_results = {}
        if len(batch_results) == 0:  # Empty batch
            return final_results
        # UResNet predictions
        # Bug fix: the original condition `'predictions' and 'scores' and
        # 'softmax' in batch_results[0]` only tested 'softmax' because
        # non-empty string literals are truthy; test every key actually read.
        if all(key in batch_results[0] for key in ('predictions', 'softmax')):
            final_voxels = np.array([], dtype=np.int32).reshape(0, 3)  # Shape N_voxels x dim
            final_scores = np.array([], dtype=np.float32).reshape(0, self.cfg.NUM_CLASSES)  # Shape N_voxels x num_classes
            final_counts = np.array([], dtype=np.int32).reshape(0,)  # Shape N_voxels x 1
            for i, result in enumerate(batch_results):
                # Extract voxel and voxel values
                # Shape N_voxels x dim
                v, values = extract_voxels(result['predictions'])
                # Extract corresponding softmax scores
                # Shape N_voxels x num_classes
                scores = result['softmax'][v[:, 0], v[:, 1], v[:, 2], :]
                # Restore original blob coordinates
                v = (v + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0).astype(np.int64)
                v = np.clip(v, 0, self.cfg.IMAGE_SIZE-1)
                # indices are indices of the *first* occurrences of the unique values
                # hence for doublons they are indices in final_voxels
                # We assume the only overlap that can occur is between
                # final_voxels and v, not inside these arrays themselves
                n = final_voxels.shape[0]
                final_voxels, indices, counts = np.unique(np.concatenate([final_voxels, v], axis=0), axis=0, return_index=True, return_counts=True)
                final_scores = np.concatenate([final_scores, scores], axis=0)[indices]
                lower_indices = indices[indices < n]
                upper_indices = indices[indices >= n]
                final_counts[lower_indices] += counts[lower_indices] - 1
                final_counts = np.concatenate([final_counts, np.ones((upper_indices.shape[0],))], axis=0)
            final_scores = final_scores / final_counts[:, np.newaxis]  # Compute average
            final_predictions = np.argmax(final_scores, axis=1)
            final_results['predictions'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)
            final_results['predictions'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_predictions
            final_results['scores'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)
            final_results['scores'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_scores[np.arange(final_scores.shape[0]), final_predictions]
            final_results['softmax'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3 + (self.cfg.NUM_CLASSES,))
            final_results['softmax'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2], :] = final_scores
            final_results['predictions'] = final_results['predictions'][np.newaxis, ...]
        # PPN
        # Same truthy-string bug fixed here: check every key that is read.
        if all(key in batch_results[0] for key in ('im_proposals', 'im_scores', 'im_labels', 'rois')):
            final_im_proposals = np.array([], dtype=np.float32).reshape(0, 3)
            final_im_scores = np.array([], dtype=np.float32).reshape(0,)
            final_im_labels = np.array([], dtype=np.int32).reshape(0,)
            final_rois = np.array([], dtype=np.float32).reshape(0, 3)
            for i, result in enumerate(batch_results):
                im_proposals = result['im_proposals'] + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0
                im_proposals = np.clip(im_proposals, 0, self.cfg.IMAGE_SIZE-1)
                final_im_proposals = np.concatenate([final_im_proposals, im_proposals], axis=0)
                final_im_scores = np.concatenate([final_im_scores, result['im_scores']], axis=0)
                final_im_labels = np.concatenate([final_im_labels, result['im_labels']], axis=0)
                rois = result['rois'] + (np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0) / (self.cfg.dim1 * self.cfg.dim2)
                rois = np.clip(rois, 0, self.cfg.IMAGE_SIZE-1)
                final_rois = np.concatenate([final_rois, rois], axis=0)
            final_results['im_proposals'] = np.array(final_im_proposals)
            final_results['im_scores'] = np.array(final_im_scores)
            final_results['im_labels'] = np.array(final_im_labels)
            final_results['rois'] = np.array(final_rois)
            # Try thresholding
            # index = np.where(final_results['im_scores'] > 1e-3)
            # final_results['im_proposals'] = final_results['im_proposals'][index, :]
            # final_results['im_scores'] = final_results['im_scores'][index]
            # final_results['im_labels'] = final_results['im_labels'][index]
        return final_results
| [
"numpy.clip",
"numpy.unique",
"faster_particles.ppn_utils.crop",
"numpy.flipud",
"numpy.where",
"numpy.arange",
"numpy.logical_and",
"numpy.ones",
"numpy.argmax",
"numpy.logical_or",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"faster_particles.display_utils.extract_voxels",
"skle... | [((6234, 6282), 'numpy.concatenate', 'np.concatenate', (['[border_idx, padded_idx]'], {'axis': '(0)'}), '([border_idx, padded_idx], axis=0)\n', (6248, 6282), True, 'import numpy as np\n'), ((6764, 6794), 'numpy.array', 'np.array', (['artificial_gt_pixels'], {}), '(artificial_gt_pixels)\n', (6772, 6794), True, 'import numpy as np\n'), ((1445, 1468), 'numpy.flipud', 'np.flipud', (['patch_center'], {}), '(patch_center)\n', (1454, 1468), True, 'import numpy as np\n'), ((3799, 3822), 'numpy.flipud', 'np.flipud', (['patch_center'], {}), '(patch_center)\n', (3808, 3822), True, 'import numpy as np\n'), ((6433, 6446), 'numpy.unique', 'np.unique', (['db'], {}), '(db)\n', (6442, 6446), True, 'import numpy as np\n'), ((9096, 9127), 'numpy.argmax', 'np.argmax', (['final_scores'], {'axis': '(1)'}), '(final_scores, axis=1)\n', (9105, 9127), True, 'import numpy as np\n'), ((9171, 9207), 'numpy.zeros', 'np.zeros', (['((self.cfg.IMAGE_SIZE,) * 3)'], {}), '((self.cfg.IMAGE_SIZE,) * 3)\n', (9179, 9207), True, 'import numpy as np\n'), ((9364, 9400), 'numpy.zeros', 'np.zeros', (['((self.cfg.IMAGE_SIZE,) * 3)'], {}), '((self.cfg.IMAGE_SIZE,) * 3)\n', (9372, 9400), True, 'import numpy as np\n'), ((9601, 9663), 'numpy.zeros', 'np.zeros', (['((self.cfg.IMAGE_SIZE,) * 3 + (self.cfg.NUM_CLASSES,))'], {}), '((self.cfg.IMAGE_SIZE,) * 3 + (self.cfg.NUM_CLASSES,))\n', (9609, 9663), True, 'import numpy as np\n'), ((11323, 11351), 'numpy.array', 'np.array', (['final_im_proposals'], {}), '(final_im_proposals)\n', (11331, 11351), True, 'import numpy as np\n'), ((11393, 11418), 'numpy.array', 'np.array', (['final_im_scores'], {}), '(final_im_scores)\n', (11401, 11418), True, 'import numpy as np\n'), ((11460, 11485), 'numpy.array', 'np.array', (['final_im_labels'], {}), '(final_im_labels)\n', (11468, 11485), True, 'import numpy as np\n'), ((11522, 11542), 'numpy.array', 'np.array', (['final_rois'], {}), '(final_rois)\n', (11530, 11542), True, 'import numpy as np\n'), ((1510, 1534), 
'numpy.array', 'np.array', (['[patch_center]'], {}), '([patch_center])\n', (1518, 1534), True, 'import numpy as np\n'), ((4396, 4466), 'faster_particles.ppn_utils.crop', 'crop_util', (["blob['gt_pixels'][:, :-1]", 'self.cfg.CROP_SIZE', "blob['data']"], {}), "(blob['gt_pixels'][:, :-1], self.cfg.CROP_SIZE, blob['data'])\n", (4405, 4466), True, 'from faster_particles.ppn_utils import crop as crop_util\n'), ((5603, 5642), 'numpy.where', 'np.where', (["(blob['data'][0, ..., 0] > 0.0)"], {}), "(blob['data'][0, ..., 0] > 0.0)\n", (5611, 5642), True, 'import numpy as np\n'), ((5695, 5766), 'numpy.logical_or', 'np.logical_or', (['(nonzero_idx == 0)', '(nonzero_idx == self.cfg.IMAGE_SIZE - 1)'], {}), '(nonzero_idx == 0, nonzero_idx == self.cfg.IMAGE_SIZE - 1)\n', (5708, 5766), True, 'import numpy as np\n'), ((6012, 6156), 'numpy.logical_or', 'np.logical_or', (['(nonzero_idx + patch_center - patch_size / 2.0 >= self.cfg.IMAGE_SIZE - 2)', '(nonzero_idx + patch_center - patch_size / 2.0 <= 1)'], {}), '(nonzero_idx + patch_center - patch_size / 2.0 >= self.cfg.\n IMAGE_SIZE - 2, nonzero_idx + patch_center - patch_size / 2.0 <= 1)\n', (6025, 6156), True, 'import numpy as np\n'), ((7694, 7731), 'faster_particles.display_utils.extract_voxels', 'extract_voxels', (["result['predictions']"], {}), "(result['predictions'])\n", (7708, 7731), False, 'from faster_particles.display_utils import extract_voxels\n'), ((8073, 8111), 'numpy.clip', 'np.clip', (['v', '(0)', '(self.cfg.IMAGE_SIZE - 1)'], {}), '(v, 0, self.cfg.IMAGE_SIZE - 1)\n', (8080, 8111), True, 'import numpy as np\n'), ((10621, 10670), 'numpy.clip', 'np.clip', (['im_proposals', '(0)', '(self.cfg.IMAGE_SIZE - 1)'], {}), '(im_proposals, 0, self.cfg.IMAGE_SIZE - 1)\n', (10628, 10670), True, 'import numpy as np\n'), ((10764, 10822), 'numpy.concatenate', 'np.concatenate', (['[final_im_proposals, im_proposals]'], {'axis': '(0)'}), '([final_im_proposals, im_proposals], axis=0)\n', (10778, 10822), True, 'import numpy as np\n'), 
((10857, 10919), 'numpy.concatenate', 'np.concatenate', (["[final_im_scores, result['im_scores']]"], {'axis': '(0)'}), "([final_im_scores, result['im_scores']], axis=0)\n", (10871, 10919), True, 'import numpy as np\n'), ((10954, 11016), 'numpy.concatenate', 'np.concatenate', (["[final_im_labels, result['im_labels']]"], {'axis': '(0)'}), "([final_im_labels, result['im_labels']], axis=0)\n", (10968, 11016), True, 'import numpy as np\n'), ((11167, 11208), 'numpy.clip', 'np.clip', (['rois', '(0)', '(self.cfg.IMAGE_SIZE - 1)'], {}), '(rois, 0, self.cfg.IMAGE_SIZE - 1)\n', (11174, 11208), True, 'import numpy as np\n'), ((11236, 11278), 'numpy.concatenate', 'np.concatenate', (['[final_rois, rois]'], {'axis': '(0)'}), '([final_rois, rois], axis=0)\n', (11250, 11278), True, 'import numpy as np\n'), ((2023, 2047), 'numpy.array', 'np.array', (['[patch_center]'], {}), '([patch_center])\n', (2031, 2047), True, 'import numpy as np\n'), ((2553, 2577), 'numpy.array', 'np.array', (['[patch_center]'], {}), '([patch_center])\n', (2561, 2577), True, 'import numpy as np\n'), ((3619, 3684), 'numpy.concatenate', 'np.concatenate', (["[blob['gt_pixels'], artificial_gt_pixels]"], {'axis': '(0)'}), "([blob['gt_pixels'], artificial_gt_pixels], axis=0)\n", (3633, 3684), True, 'import numpy as np\n'), ((5325, 5363), 'numpy.unique', 'np.unique', (['overlap'], {'return_counts': '(True)'}), '(overlap, return_counts=True)\n', (5334, 5363), True, 'import numpy as np\n'), ((6362, 6391), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(10)', 'min_samples': '(3)'}), '(eps=10, min_samples=3)\n', (6368, 6391), False, 'from sklearn.cluster import DBSCAN\n'), ((7243, 7271), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (7251, 7271), True, 'import numpy as np\n'), ((7337, 7367), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (7345, 7367), True, 'import numpy as np\n'), ((7460, 7488), 'numpy.array', 'np.array', (['[]'], 
{'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (7468, 7488), True, 'import numpy as np\n'), ((8511, 8552), 'numpy.concatenate', 'np.concatenate', (['[final_voxels, v]'], {'axis': '(0)'}), '([final_voxels, v], axis=0)\n', (8525, 8552), True, 'import numpy as np\n'), ((8632, 8678), 'numpy.concatenate', 'np.concatenate', (['[final_scores, scores]'], {'axis': '(0)'}), '([final_scores, scores], axis=0)\n', (8646, 8678), True, 'import numpy as np\n'), ((9509, 9541), 'numpy.arange', 'np.arange', (['final_scores.shape[0]'], {}), '(final_scores.shape[0])\n', (9518, 9541), True, 'import numpy as np\n'), ((10169, 10199), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (10177, 10199), True, 'import numpy as np\n'), ((10244, 10274), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (10252, 10274), True, 'import numpy as np\n'), ((10317, 10345), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (10325, 10345), True, 'import numpy as np\n'), ((10383, 10413), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (10391, 10413), True, 'import numpy as np\n'), ((2991, 3156), 'numpy.logical_and', 'np.logical_and', (["(original_blob['gt_pixels'][:, :-1] >= patch_center - patch_size / 2.0)", "(original_blob['gt_pixels'][:, :-1] < patch_center + patch_size / 2.0)"], {}), "(original_blob['gt_pixels'][:, :-1] >= patch_center - \n patch_size / 2.0, original_blob['gt_pixels'][:, :-1] < patch_center + \n patch_size / 2.0)\n", (3005, 3156), True, 'import numpy as np\n'), ((3961, 4065), 'numpy.logical_and', 'np.logical_and', (['(voxels >= patch_center - patch_size / 2.0)', '(voxels < patch_center + patch_size / 2.0)'], {}), '(voxels >= patch_center - patch_size / 2.0, voxels < \n patch_center + patch_size / 2.0)\n', (3975, 4065), True, 'import numpy as np\n'), ((5162, 5240), 'numpy.logical_and', 'np.logical_and', (['(patch_centers - sizes 
<= voxel)', '(patch_centers + sizes >= voxel)'], {}), '(patch_centers - sizes <= voxel, patch_centers + sizes >= voxel)\n', (5176, 5240), True, 'import numpy as np\n'), ((8929, 8963), 'numpy.ones', 'np.ones', (['(upper_indices.shape[0],)'], {}), '((upper_indices.shape[0],))\n', (8936, 8963), True, 'import numpy as np\n'), ((10539, 10566), 'numpy.flipud', 'np.flipud', (['patch_centers[i]'], {}), '(patch_centers[i])\n', (10548, 10566), True, 'import numpy as np\n'), ((6542, 6618), 'numpy.argmax', 'np.argmax', (["blob['data'][0, ..., 0][cluster.T[0], cluster.T[1], cluster.T[2]]"], {}), "(blob['data'][0, ..., 0][cluster.T[0], cluster.T[1], cluster.T[2]])\n", (6551, 6618), True, 'import numpy as np\n'), ((11058, 11085), 'numpy.flipud', 'np.flipud', (['patch_centers[i]'], {}), '(patch_centers[i])\n', (11067, 11085), True, 'import numpy as np\n'), ((7984, 8011), 'numpy.flipud', 'np.flipud', (['patch_centers[i]'], {}), '(patch_centers[i])\n', (7993, 8011), True, 'import numpy as np\n')] |
import jax.numpy as jnp
import numpy as np
import matplotlib.pyplot as plt
import jax
from jax.experimental.optimizers import adam
from dataclasses import dataclass
@dataclass
class InterceptSettings:
    """Configuration for the gradient-based intercept-course optimizer."""
    min_duration: float = 0.02        # soft lower bound on each leg's duration (sigmoid penalty in loss_func)
    min_duration_final: float = 0.05  # soft lower bound on the final leg's duration
    duration: float = 0               # desired total route duration (softabs penalty, weighted 0.05)
    distance: float = 1.0             # desired final distance between interceptor and target
    fix_initial_angle: bool = True    # keep the first leg's heading fixed to the initial angle
    n_segments: int = 2               # number of straight legs per route
    n_steps: int = 20_000             # optimizer iterations
    n_runs: int = 50                  # random restarts optimized in parallel
    lr: float = 5e-3                  # Adam base learning rate (decayed over steps)
    attack_bearing: float = 0         # desired bearing of target relative to final interceptor heading
    attack_position_angle: float = np.pi/2  # desired approach angle relative to the target's heading
    fix_attack_side: bool = False     # True: approach side fixed (signed loss); False: either side (squared loss)
def get_rot_matrix(phi):
    """Return the 2x2 anticlockwise rotation matrix for angle *phi* (radians)."""
    cos_phi = jnp.cos(phi)
    sin_phi = jnp.sin(phi)
    return jnp.array([[cos_phi, -sin_phi],
                      [sin_phi, cos_phi]])
def get_duration(duration_param):
    """Map an unconstrained optimizer parameter to a positive leg duration."""
    # exp keeps the duration strictly positive for any raw parameter value.
    return jnp.exp(duration_param)
def move(params, state, speed):
    """Advance *state* = (x, y, heading, t) along one straight leg.

    params: (heading, raw_duration); raw_duration is mapped through exp
    to a positive leg duration.  Returns the new state as a jnp array.
    """
    heading = params[0]
    leg_time = jnp.exp(params[1])  # same mapping as get_duration
    x, y, _, t = state
    step = speed * leg_time
    return jnp.array([x + jnp.cos(heading) * step,
                      y + jnp.sin(heading) * step,
                      heading,
                      t + leg_time])
def calc_route(params, initial_state, speed):
    """Integrate per-leg parameters into a route of waypoint states.

    Returns a [len(params) + 1, 4] array of (x, y, heading, t) rows,
    starting with *initial_state*.
    """
    states = [initial_state]
    for leg_params in params:
        states.append(move(leg_params, states[-1], speed))
    return jnp.stack(states)
def softabs(x, scale=0.1):
    """Smooth, differentiable approximation of abs(x); equals *scale* at x == 0."""
    return jnp.sqrt(scale * scale + x * x)
def loss_func(route, target_pos, target_angle, target_speed, cfg: InterceptSettings):
    """Scalar loss of one candidate route against a constant-velocity target.

    route: [n_segments + 1, 4] waypoints (x, y, heading, t) from calc_route.
    Combines final-distance, approach-geometry, total-duration and
    minimum-leg-duration terms.
    """
    final_waypoint = route[-1]
    pos = final_waypoint[:2]
    angle = final_waypoint[2]
    durations = jnp.diff(route[:, 3])  # per-leg durations
    total_duration = route[-1, 3] - route[0, 3]
    e_target = jnp.array([jnp.cos(target_angle), jnp.sin(target_angle)])
    # Target extrapolated to the route's end time (constant-velocity model).
    final_target_pos = target_pos + e_target * target_speed * total_duration
    delta_r = final_target_pos - pos
    distance = jnp.linalg.norm(delta_r)
    e_interceptor = jnp.array([jnp.cos(angle), jnp.sin(angle)])
    e_delta_r = delta_r / (distance + 1e-6)  # epsilon avoids division by zero
    loss_dist = softabs(distance - cfg.distance)
    if cfg.fix_attack_side:
        # Signed dot product: the approach side matters.
        loss_attack_position = 1 - jnp.dot(e_delta_r, get_rot_matrix(cfg.attack_position_angle) @ e_target)
    else:
        # Squared dot product: both approach sides are equivalent.
        loss_attack_position = 1 - jnp.dot(e_delta_r, get_rot_matrix(cfg.attack_position_angle) @ e_target)**2
    loss_attack_bearing = 1 - jnp.dot(e_delta_r, get_rot_matrix(-cfg.attack_bearing) @ e_interceptor)
    loss_total_duration = softabs(total_duration - cfg.duration, 0.5)
    # Sigmoid penalties push each leg duration above its configured minimum.
    loss_final_run_duration = jax.nn.sigmoid(-(durations[-1] / (0.25 * cfg.min_duration_final)))
    loss_durations = jnp.sum(jax.nn.sigmoid(-(durations / (0.25 * cfg.min_duration))))
    return (loss_dist +
            loss_attack_position +
            loss_attack_bearing +
            0.05 * loss_total_duration +
            loss_final_run_duration +
            loss_durations
            )
# Batched variants: vmap over the leading (per-run) axis of the parameters/routes.
calc_route_batch = jax.vmap(calc_route, in_axes=[0, None, None], out_axes=0)
loss_batch = jax.vmap(loss_func, in_axes=[0, None, None, None, None], out_axes=0)
def build_value_and_grad(initial_state, speed, target_pos, target_angle, target_speed, cfg):
    """Build a batched function mapping route parameters -> (loss, gradient).

    The returned function is vmapped over the leading axis, so it accepts a
    batch of parameter sets (one per optimization run) and returns per-run
    losses and gradients for the fixed problem instance captured here.
    """
    def loss_from_param(param):
        # Loss of a single parameter set.
        route = calc_route(param, initial_state, speed)
        return loss_func(route, target_pos, target_angle, target_speed, cfg)
    return jax.vmap(jax.value_and_grad(loss_from_param))
def plot_routes(routes, losses, initial_target_pos, target_angle, target_speed, show_legend=False, axis=None):
    """Plot candidate interceptor routes and the target's track.

    Each route is drawn in its own color, with an arrow marking the target
    position at that route's end time; a black arrow marks the target's
    initial position.  Draws on *axis* (or the current pyplot axes).
    """
    axis = axis or plt.gca()
    e_target = np.array([np.cos(target_angle), np.sin(target_angle)])
    arrow_size = 0.2
    colors = [f'C{i}' for i in range(10)]
    for i, (route, l) in enumerate(zip(routes, losses)):
        color = colors[i % len(colors)]
        time = route[-1][3]
        # Target position when this route ends (constant-velocity model).
        target_pos = initial_target_pos + e_target * time * target_speed
        axis.plot(route[:, 0], route[:, 1], label=f"t={time:.2f}, loss={l:.5f}", color=color, marker='o', ms=2)
        axis.arrow(*(target_pos - e_target * 0.5 * arrow_size), *(e_target * arrow_size), head_width=0.2, color=color)
    axis.arrow(*(initial_target_pos - e_target * 0.5 * arrow_size), *(e_target * arrow_size), head_width=0.2, color='k')
    if show_legend:
        axis.legend()
    axis.grid(alpha=0.5)
    axis.set_aspect('equal', adjustable='box')
def route_to_list_of_dicts(route, speed, target_pos, target_angle, target_speed):
    """Convert a route array into JSON-friendly waypoint dicts (plain floats)."""
    base_keys = ('x', 'y', 'angle', 't')
    waypoints = [dict(zip(base_keys, (float(v) for v in row))) for row in route]
    t0 = route[0][3]
    count = len(waypoints)
    for idx, wp in enumerate(waypoints):
        # Heading to turn to at this waypoint; the last waypoint keeps its own.
        nxt = waypoints[idx + 1] if idx + 1 < count else wp
        wp['new_angle'] = nxt['angle']
        elapsed = wp['t'] - t0
        wp['duration'] = elapsed
        # Target position extrapolated to this waypoint's time.
        wp['target_x'] = target_pos[0] + np.cos(target_angle) * target_speed * elapsed
        wp['target_y'] = target_pos[1] + np.sin(target_angle) * target_speed * elapsed
        off_x = wp['target_x'] - wp['x']
        off_y = wp['target_y'] - wp['y']
        wp['target_dist'] = np.sqrt(off_x ** 2 + off_y ** 2)
        wp['target_bearing'] = (np.arctan2(off_y, off_x) - wp['angle']) % (2 * np.pi)
        wp['speed'] = speed
        waypoints[idx] = {k: float(v) for k, v in wp.items()}
    return waypoints
def calculate_intercept(initial_pos, initial_angle, initial_speed, target_pos, target_angle, target_speed, initial_time, cfg: InterceptSettings):
    """Optimize piecewise-straight intercept routes and return the best one.

    Runs cfg.n_runs random restarts in parallel with Adam for cfg.n_steps
    iterations.  Returns (result, (routes, losses, all_losses)) where result
    has the best route as a list of waypoint dicts plus its final loss.
    """
    initial_pos = np.array(initial_pos)
    target_pos = np.array(target_pos)
    initial_state = jnp.array([initial_pos[0], initial_pos[1], initial_angle, initial_time])
    target_params = (np.array(target_pos), target_angle, target_speed)
    # One (heading, raw-duration) pair per segment, per run.
    initial_params = np.stack([np.random.uniform(0, 2 * np.pi, [cfg.n_runs, cfg.n_segments]),
                               np.random.uniform(-1, 1, [cfg.n_runs, cfg.n_segments])], axis=-1)
    grad_mask = np.ones_like(initial_params)
    # Learning rate decays over optimization steps.
    opt_init, opt_update, opt_get_params = adam(lambda t: cfg.lr / (1+t/5000))
    val_grad_func = build_value_and_grad(initial_state, initial_speed, *target_params, cfg)
    if cfg.fix_initial_angle:
        # Pin the first leg's heading and zero its gradient so it stays fixed.
        initial_params[..., 0, 0] = initial_angle
        grad_mask[..., 0, 0] = 0
    @jax.jit
    def opt_step(step_nr, _opt_state):
        # One Adam step over all runs at once; masked gradients stay frozen.
        p = opt_get_params(_opt_state)
        _losses, g = val_grad_func(p)
        g = g * grad_mask
        return opt_update(step_nr, g, _opt_state), _losses
    opt_state = opt_init(initial_params)
    all_losses = []
    for n in range(cfg.n_steps):
        opt_state, epoch_losses = opt_step(n, opt_state)
        all_losses.append(epoch_losses)
    params = opt_get_params(opt_state)
    routes = calc_route_batch(params, initial_state, initial_speed)
    losses = loss_batch(routes, *target_params, cfg)
    # Pick the best run; nanargmin ignores runs that diverged to NaN.
    ind_best = np.nanargmin(losses)
    route = routes[ind_best]
    loss = losses[ind_best]
    route_list = route_to_list_of_dicts(route, initial_speed, *target_params)
    return dict(route=route_list,
                loss=float(loss)), (routes, losses, all_losses)
if __name__ == '__main__':
    # Demo: random target state, fixed interceptor start; plot routes and losses.
    # initial_target_pos = [6, 2]
    initial_target_pos = np.random.uniform(-5,5,2)
    target_angle = np.random.uniform(0, 2 * np.pi)
    # target_angle = 3.0
    target_speed = 1.0
    cfg = InterceptSettings(fix_initial_angle=False, n_runs=6, n_segments=2,
                            attack_position_angle=-np.pi/2)
    intercept, (routes, losses, all_losses) = calculate_intercept(initial_pos=[0, 0],
                                                                  initial_angle=0,
                                                                  initial_speed=1.5,
                                                                  target_pos=initial_target_pos,
                                                                  target_angle=target_angle,
                                                                  target_speed=target_speed,
                                                                  initial_time=0,
                                                                  cfg=cfg)
    plt.close("all")
    fig, (ax_map, ax_loss) = plt.subplots(1,2)
    plot_routes(routes, losses, initial_target_pos, target_angle, target_speed, False, axis=ax_map)
    # all_losses: [n_steps, n_runs] loss history for the convergence plot.
    all_losses = np.array(all_losses)
    ax_loss.plot(all_losses)
    ax_loss.set_ylim([0,1])
| [
"numpy.sqrt",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.nanargmin",
"jax.numpy.sin",
"jax.experimental.optimizers.adam",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"jax.value_and_grad",
"jax.numpy.cos",
"matplotlib.pyplot.gca",
"jax.numpy.linalg.norm",
"numpy.cos",
... | [((2796, 2853), 'jax.vmap', 'jax.vmap', (['calc_route'], {'in_axes': '[0, None, None]', 'out_axes': '(0)'}), '(calc_route, in_axes=[0, None, None], out_axes=0)\n', (2804, 2853), False, 'import jax\n'), ((2867, 2935), 'jax.vmap', 'jax.vmap', (['loss_func'], {'in_axes': '[0, None, None, None, None]', 'out_axes': '(0)'}), '(loss_func, in_axes=[0, None, None, None, None], out_axes=0)\n', (2875, 2935), False, 'import jax\n'), ((629, 657), 'jax.numpy.array', 'jnp.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (638, 657), True, 'import jax.numpy as jnp\n'), ((702, 725), 'jax.numpy.exp', 'jnp.exp', (['duration_param'], {}), '(duration_param)\n', (709, 725), True, 'import jax.numpy as jnp\n'), ((951, 1002), 'jax.numpy.array', 'jnp.array', (['[x + dx, y + dy, angle, time + duration]'], {}), '([x + dx, y + dy, angle, time + duration])\n', (960, 1002), True, 'import jax.numpy as jnp\n'), ((1211, 1231), 'jax.numpy.stack', 'jnp.stack', (['waypoints'], {}), '(waypoints)\n', (1220, 1231), True, 'import jax.numpy as jnp\n'), ((1272, 1301), 'jax.numpy.sqrt', 'jnp.sqrt', (['(x ** 2 + scale ** 2)'], {}), '(x ** 2 + scale ** 2)\n', (1280, 1301), True, 'import jax.numpy as jnp\n'), ((1492, 1513), 'jax.numpy.diff', 'jnp.diff', (['route[:, 3]'], {}), '(route[:, 3])\n', (1500, 1513), True, 'import jax.numpy as jnp\n'), ((1766, 1790), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['delta_r'], {}), '(delta_r)\n', (1781, 1790), True, 'import jax.numpy as jnp\n'), ((2408, 2474), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['(-(durations[-1] / (0.25 * cfg.min_duration_final)))'], {}), '(-(durations[-1] / (0.25 * cfg.min_duration_final)))\n', (2422, 2474), False, 'import jax\n'), ((5346, 5367), 'numpy.array', 'np.array', (['initial_pos'], {}), '(initial_pos)\n', (5354, 5367), True, 'import numpy as np\n'), ((5385, 5405), 'numpy.array', 'np.array', (['target_pos'], {}), '(target_pos)\n', (5393, 5405), True, 'import numpy as np\n'), ((5426, 5498), 'jax.numpy.array', 'jnp.array', 
(['[initial_pos[0], initial_pos[1], initial_angle, initial_time]'], {}), '([initial_pos[0], initial_pos[1], initial_angle, initial_time])\n', (5435, 5498), True, 'import jax.numpy as jnp\n'), ((5778, 5806), 'numpy.ones_like', 'np.ones_like', (['initial_params'], {}), '(initial_params)\n', (5790, 5806), True, 'import numpy as np\n'), ((5851, 5890), 'jax.experimental.optimizers.adam', 'adam', (['(lambda t: cfg.lr / (1 + t / 5000))'], {}), '(lambda t: cfg.lr / (1 + t / 5000))\n', (5855, 5890), False, 'from jax.experimental.optimizers import adam\n'), ((6677, 6697), 'numpy.nanargmin', 'np.nanargmin', (['losses'], {}), '(losses)\n', (6689, 6697), True, 'import numpy as np\n'), ((7020, 7047), 'numpy.random.uniform', 'np.random.uniform', (['(-5)', '(5)', '(2)'], {}), '(-5, 5, 2)\n', (7037, 7047), True, 'import numpy as np\n'), ((7065, 7096), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (7082, 7096), True, 'import numpy as np\n'), ((7967, 7983), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7976, 7983), True, 'import matplotlib.pyplot as plt\n'), ((8013, 8031), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (8025, 8031), True, 'import matplotlib.pyplot as plt\n'), ((8148, 8168), 'numpy.array', 'np.array', (['all_losses'], {}), '(all_losses)\n', (8156, 8168), True, 'import numpy as np\n'), ((591, 603), 'jax.numpy.sin', 'jnp.sin', (['phi'], {}), '(phi)\n', (598, 603), True, 'import jax.numpy as jnp\n'), ((605, 617), 'jax.numpy.cos', 'jnp.cos', (['phi'], {}), '(phi)\n', (612, 617), True, 'import jax.numpy as jnp\n'), ((2504, 2560), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['(-(durations / (0.25 * cfg.min_duration)))'], {}), '(-(durations / (0.25 * cfg.min_duration)))\n', (2518, 2560), False, 'import jax\n'), ((3217, 3252), 'jax.value_and_grad', 'jax.value_and_grad', (['loss_from_param'], {}), '(loss_from_param)\n', (3235, 3252), False, 'import jax\n'), ((3386, 3395), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3393, 3395), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4929), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (4910, 4929), True, 'import numpy as np\n'), ((5520, 5540), 'numpy.array', 'np.array', (['target_pos'], {}), '(target_pos)\n', (5528, 5540), True, 'import numpy as np\n'), ((863, 877), 'jax.numpy.cos', 'jnp.cos', (['angle'], {}), '(angle)\n', (870, 877), True, 'import jax.numpy as jnp\n'), ((906, 920), 'jax.numpy.sin', 'jnp.sin', (['angle'], {}), '(angle)\n', (913, 920), True, 'import jax.numpy as jnp\n'), ((1589, 1610), 'jax.numpy.cos', 'jnp.cos', (['target_angle'], {}), '(target_angle)\n', (1596, 1610), True, 'import jax.numpy as jnp\n'), ((1612, 1633), 'jax.numpy.sin', 'jnp.sin', (['target_angle'], {}), '(target_angle)\n', (1619, 1633), True, 'import jax.numpy as jnp\n'), ((1822, 1836), 'jax.numpy.cos', 'jnp.cos', (['angle'], {}), '(angle)\n', (1829, 1836), True, 'import jax.numpy as jnp\n'), ((1838, 1852), 'jax.numpy.sin', 'jnp.sin', (['angle'], {}), '(angle)\n', (1845, 1852), True, 'import jax.numpy as jnp\n'), ((3421, 3441), 'numpy.cos', 'np.cos', (['target_angle'], {}), '(target_angle)\n', (3427, 3441), True, 'import numpy as np\n'), ((3443, 3463), 'numpy.sin', 'np.sin', (['target_angle'], {}), '(target_angle)\n', (3449, 3463), True, 'import numpy as np\n'), ((4946, 5008), 'numpy.arctan2', 'np.arctan2', (["(wp['target_y'] - wp['y'])", "(wp['target_x'] - wp['x'])"], {}), "(wp['target_y'] - wp['y'], wp['target_x'] - wp['x'])\n", (4956, 5008), True, 'import numpy as np\n'), ((5602, 5663), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '[cfg.n_runs, cfg.n_segments]'], {}), '(0, 2 * np.pi, [cfg.n_runs, cfg.n_segments])\n', (5619, 5663), True, 'import numpy as np\n'), ((5696, 5750), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[cfg.n_runs, cfg.n_segments]'], {}), '(-1, 1, [cfg.n_runs, cfg.n_segments])\n', (5713, 5750), 
True, 'import numpy as np\n'), ((4660, 4680), 'numpy.cos', 'np.cos', (['target_angle'], {}), '(target_angle)\n', (4666, 4680), True, 'import numpy as np\n'), ((4754, 4774), 'numpy.sin', 'np.sin', (['target_angle'], {}), '(target_angle)\n', (4760, 4774), True, 'import numpy as np\n')] |
# May 2018 xyz
import numpy as np
import numba
def Rx( x ):
    # Anticlockwise rotation about the x axis; x in radians.
    c, s = np.cos(x), np.sin(x)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0,   c,   s],
                     [0.0,  -s,   c]])
def Ry( y ):
    # Anticlockwise rotation about the y axis; y in radians.
    c, s = np.cos(y), np.sin(y)
    return np.array([[  c, 0.0,  -s],
                     [0.0, 1.0, 0.0],
                     [  s, 0.0,   c]])
@numba.jit(nopython=True)
def Rz( z ):
    # Anticlockwise rotation about the z axis; z in radians.
    # NOTE(review): compiled with numba nopython mode (unlike Rx/Ry above) —
    # keep the body restricted to numba-supported NumPy operations.
    Rz = np.zeros((3,3))
    Rz[0,0] = np.cos(z)
    Rz[0,1] = np.sin(z)
    Rz[1,0] = -np.sin(z)
    Rz[1,1] = np.cos(z)
    Rz[2,2] = 1
    return Rz
def R1D( angle, axis ):
    """Single-axis rotation matrix; *axis* is one of 'x', 'y', 'z'."""
    builders = {'x': Rx, 'y': Ry, 'z': Rz}
    try:
        return builders[axis](angle)
    except KeyError:
        raise NotImplementedError
def EulerRotate( angles, order ='zxy' ):
    """Compose three single-axis rotations; order[i] is applied i-th (left-multiplied)."""
    R = np.eye(3)
    for ang, ax in zip(angles, order):
        R = np.matmul(R1D(ang, ax), R)
    return R
def point_rotation_randomly( points, rxyz_max=np.pi*np.array([0.1,0.1,0.1]) ):
    """Rotate each batch item of *points* [B, N, 3] by a random Euler rotation.

    Each of the three angles is drawn uniformly from [-r_max, r_max].
    Modifies *points* in place and also returns it.
    """
    for b in range(points.shape[0]):
        angles = [np.random.uniform(-m, m) for m in rxyz_max]
        rot = EulerRotate(angles, 'xyz')
        points[b] = np.matmul(points[b], rot.T)
    return points
def angle_with_x(direc, scope_id=0):
    """Angle between each direction vector in *direc* ([n,2] or [n,3]) and the x axis.

    Bug fix: the original dispatched on ``direc.ndim``, but *direc* is always
    a 2-D array here, so the 3-D-vector branch was unreachable and [n,3]
    input later failed a shape assertion in angle_of_2lines.  Dispatch on
    the vector length (shape[1]) instead; unsupported lengths raise
    NotImplementedError instead of leaving ``x`` unbound.
    """
    dim = direc.shape[1]
    if dim == 2:
        x = np.array([[1.0, 0]])
    elif dim == 3:
        x = np.array([[1.0, 0, 0]])
    else:
        raise NotImplementedError
    x = np.tile(x, [direc.shape[0], 1])
    return angle_of_2lines(direc, x, scope_id)
def angle_of_2lines(line0, line1, scope_id=0):
    '''
    Angle between corresponding direction vectors of two sets of lines.

    line0, line1: [n, 2/3]; one of them may have a single row (broadcast).
    scope_id 0 -> angle in [0, pi]
             1 -> angle in (-pi/2, pi/2]
    returns: [n]  (NaN where a direction has zero length)
    '''
    assert line0.ndim == line1.ndim == 2
    assert (line0.shape[0] == line1.shape[0]) or line0.shape[0]==1 or line1.shape[0]==1
    assert line0.shape[1] == line1.shape[1]  # 2 or 3
    unit0 = line0 / np.linalg.norm(line0, axis=1, keepdims=True)
    unit1 = line1 / np.linalg.norm(line1, axis=1, keepdims=True)
    angle = np.arccos(np.sum(unit0 * unit1, axis=1))
    if scope_id == 1:
        # fold into (-pi/2, pi/2]: offset=0.5, period=pi
        angle = limit_period(angle, 0.5, np.pi)
    elif scope_id != 0:
        raise NotImplementedError
    return angle
def limit_period(val, offset, period):
    '''
    Wrap *val* into an interval of width *period* selected by *offset*:
      offset=0   -> [0, period)
      offset=0.5 -> [-period/2, period/2)
      offset=1   -> [-period, 0)
    e.g. period=pi, offset=0.5 maps angles into [-pi/2, pi/2).
    '''
    n_periods = np.floor(val / period + offset)
    return val - n_periods * period
def ave_angles(angles0, angles1, scope_id=0):
    '''
    Average two angle arrays (period pi) so the result is within half a
    period of both inputs.

    angles0, angles1: same shape
    scope_id = 0: result folded into [-pi/2, pi/2]
    scope_id = 1: result folded into [0, pi]

    Bug fix: the original shifted *angles1* in place (``angles1 += ...``),
    silently corrupting the caller's array; the shift is now applied to a
    new array.
    '''
    assert angles0.shape == angles1.shape
    period = np.pi
    dif = angles1 - angles0
    mask = np.abs(dif) > period * 0.5
    # Where the inputs disagree by more than half a period, shift angles1 by
    # one period towards angles0 — without mutating the caller's array.
    angles1 = angles1 - period * mask * np.sign(dif)
    ave = (angles0 + angles1) / 2.0
    if scope_id == 0:
        ave = limit_period(ave, 0.5, period)
    elif scope_id == 1:
        ave = limit_period(ave, 0, period)
    else:
        raise NotImplementedError
    return ave
def vertical_dis_points_lines(points, lines):
    '''
    Perpendicular distance from every point to every line.

    points: [n,3]
    lines:  [m,2,3]
    returns dis: [n,m]

    Fix: an empty *points* array previously crashed in np.concatenate
    ("need at least one array to concatenate"); it now returns an empty
    [0, m] array.
    '''
    if points.shape[0] == 0:
        return np.zeros((0, lines.shape[0]))
    rows = [vertical_dis_1point_lines(p, lines).reshape([1, -1]) for p in points]
    return np.concatenate(rows, 0)
def vertical_dis_1point_lines(point, lines):
    '''
    Perpendicular distance from one point to each line's supporting line.

    point: [3] (or [2])
    lines: [m,2,3] (or [m,2,2])
    returns dis: [m]
    '''
    assert point.ndim == 1
    assert lines.ndim == 3
    assert lines.shape[1:] == (2,3) or lines.shape[1:] == (2,2)
    # Use each line's first endpoint as the local origin.
    ref = lines[:, 0, :]
    to_point = point.reshape([1, -1]) - ref
    along_line = lines[:, 1, :] - ref
    theta = angle_of_2lines(to_point, along_line, scope_id=0)
    dis = np.sin(theta) * np.linalg.norm(to_point, axis=1)
    # A point coinciding with the reference endpoint yields NaN (0/0);
    # its distance to the line is zero.
    dis[np.isnan(dis)] = 0
    return dis
def cam2world_pcl(points):
    """Rotate points from camera to world frame: (x, y, z) -> (x, -z, y)."""
    dim = points.shape[-1]
    rot = np.eye(dim)
    rot[1, 1] = rot[2, 2] = 0
    rot[1, 2] = 1
    rot[2, 1] = -1
    return np.matmul(points, rot)
def cam2world_box(box):
    """Convert boxes [n, 7] from camera to world frame.

    Columns 0-2 follow the point transform (x, y, z) -> (x, -z, y);
    columns 4 and 5 are swapped; columns 3 and 6 pass through unchanged.
    """
    assert box.shape[1] == 7
    rot = np.eye(7)
    # position part: same rotation as cam2world_pcl
    rot[1, 1] = rot[2, 2] = 0
    rot[1, 2] = 1
    rot[2, 1] = -1
    # size part: swap columns 4 and 5
    rot[4, 4] = rot[5, 5] = 0
    rot[4, 5] = rot[5, 4] = 1
    rot[6, 6] = 1
    return np.matmul(box, rot)
def lines_intersection_2d(line0s, line1s, must_on0=False, must_on1=False,
                          min_angle=0):
    '''
    Pairwise intersection points of two sets of 2-D lines.

    line0s: [n,2,2], line1s: [m,2,2]
    returns [n,m,2]; entries are NaN where a pair does not intersect under
    the given constraints.

    Fixes: the docstring claimed a [n,m,2,2] result, and the empty-input
    early return actually produced a 4-D [n,m,2,2] array while the normal
    path returns [n,m,2] — the empty result now has the consistent shape.
    '''
    n = line0s.shape[0]
    m = line1s.shape[0]
    assert line0s.ndim == line1s.ndim == 3
    if n * m == 0:
        return np.empty([n, m, 2])
    assert line0s.shape[1:] == line1s.shape[1:] == (2, 2)
    rows = []
    for l0 in line0s:
        row = [line_intersection_2d(l0, l1, must_on0, must_on1, min_angle).reshape(1, 1, 2)
               for l1 in line1s]
        rows.append(np.concatenate(row, 1))
    return np.concatenate(rows, 0)
def line_intersection_2d(line0, line1, must_on0=False, must_on1=False,
                         min_angle=0):
    '''
    Intersection point of two 2-D lines, each given by two points [2,2].

    Solves p0 + k0*(p1-p0) == p2 + k1*(p3-p2) for (k0, k1) and returns the
    intersection [2], or [nan, nan] when:
      * the lines are parallel/degenerate (singular system),
      * must_on0 / must_on1 is set and the intersection lies outside the
        corresponding segment (k outside [0, 1]),
      * the angle between the lines is not greater than *min_angle*.

    Fixes: removed an unreachable debug branch that referenced the
    undefined names DEBUG and Bbox3D, and an unused local.
    '''
    assert (line0.shape == (2,2) and line1.shape == (2,2))
    nan_point = np.array([np.nan] * 2)
    p0, p1 = line0
    p2, p3 = line1
    v01 = p1 - p0
    v23 = p3 - p2
    # Columns are the two direction vectors: [v01, -v23] @ [k0; k1] = p2 - p0
    v01v23 = np.concatenate([v01.reshape([2, 1]), (-1) * v23.reshape([2, 1])], 1)
    p2sp0 = (p2 - p0).reshape([2, 1])
    try:
        K = np.matmul(np.linalg.inv(v01v23), p2sp0)
    except np.linalg.LinAlgError:
        return nan_point  # parallel or degenerate lines
    if must_on0 and (K[0] > 1 or K[0] < 0):
        return nan_point
    if must_on1 and (K[1] > 1 or K[1] < 0):
        return nan_point
    intersec = p0 + v01 * K[0]
    intersec_ = p2 + v23 * K[1]
    # Both parameterizations must agree on the intersection point.
    assert np.linalg.norm(intersec - intersec_) < 1e-5, f'{intersec} \n{intersec_}'
    direc0 = (line0[1] - line0[0]).reshape([1, 2])
    direc1 = (line1[1] - line1[0]).reshape([1, 2])
    angle = np.abs(angle_of_2lines(direc0, direc1, scope_id=1)[0])
    if angle > min_angle:
        return intersec
    return nan_point
def points_in_lines(points, lines, threshold_dis=0.03):
    '''
    Boolean mask of which points lie on which line segments.

    points: [n,3], lines: [m,2,3] -> mask [n,m]
    A point is "in" a segment when its perpendicular distance to the
    supporting line is below the threshold AND it is either between the two
    endpoints (the directions towards the endpoints subtend an angle > pi/2)
    or within the threshold of an endpoint.
    '''
    n = points.shape[0]
    m = lines.shape[0]
    pts = points.reshape([n, 1, 1, 3])
    segs = lines.reshape([1, m, 2, 3])
    # Distance from each point to the nearest of the two segment endpoints.
    corner_dis = np.linalg.norm(pts - segs, axis=-1).min(2)
    # Perpendicular distance to each supporting line.
    perp_dis = vertical_dis_points_lines(points, lines)
    # Directions from each point towards both endpoints of each segment.
    endpoint_dirs = (np.tile(segs, (n, 1, 1, 1)) - np.tile(pts, (1, m, 1, 1))).reshape([n * m, 2, 3])
    angles = angle_of_2lines(endpoint_dirs[:, 0, :], endpoint_dirs[:, 1, :]).reshape([n, m])
    between = angles > np.pi / 2
    near_corner = corner_dis < threshold_dis
    near_line = perp_dis < threshold_dis
    return (between + near_corner) * near_line
def is_extend_lines(lines0, lines1, threshold_dis=0.03):
    '''
    For each pair, True when both endpoints of a line in *lines0* lie
    (within threshold) on the supporting line of a line in *lines1*.

    lines0: [n,2,3], lines1: [m,2,3] -> [n,m]
    '''
    n = lines0.shape[0]
    m = lines1.shape[0]
    endpoint_dis = vertical_dis_points_lines(lines0.reshape([-1, 3]), lines1)
    endpoint_dis = endpoint_dis.reshape([n, 2, m])
    # both endpoints of lines0[i] must be close to the line of lines1[j]
    return (endpoint_dis < threshold_dis).all(1)
class OBJ_DEF():
    """Helpers for the canonical yaw range of object bounding boxes."""
    @staticmethod
    def limit_yaw(yaws, yx_zb):
        '''
        Fold yaws into the canonical range.
        standard convention: [0, pi]
        yx_zb convention:    [-pi/2, pi/2]
        '''
        offset = 0.5 if yx_zb else 0
        return limit_period(yaws, offset, np.pi)
    @staticmethod
    def check_bboxes(bboxes, yx_zb):
        '''
        Sanity-check that yaw values (last column) are inside the canonical range.
        '''
        eps = 1e-6
        if bboxes.shape[0] == 0:
            return
        yaws = bboxes[:, -1]
        if yx_zb:
            # x_size <= y_size is not asserted: predictions may violate it
            assert np.max(np.abs(yaws)) <= np.pi * 0.5 + eps
        else:
            # x_size >= y_size is not asserted for the same reason
            assert np.max(yaws) <= np.pi + eps
            assert np.min(yaws) >= 0 - eps
| [
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.max",
"numpy.matmul",
"numpy.empty",
"numpy.concatenate",
"numpy.min",
"numpy.tile",
"numpy.eye",
"numpy.abs",
"numpy.floor",
"numba.jit",
"numpy.isnan",
"numpy.cos",
"numpy.sign",
"numpy.transpose",
"numpy.sum",
"numpy.zero... | [((477, 501), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (486, 501), False, 'import numba\n'), ((135, 151), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (143, 151), True, 'import numpy as np\n'), ((181, 190), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (187, 190), True, 'import numpy as np\n'), ((205, 214), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (211, 214), True, 'import numpy as np\n'), ((254, 263), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (260, 263), True, 'import numpy as np\n'), ((332, 348), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (340, 348), True, 'import numpy as np\n'), ((362, 371), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (368, 371), True, 'import numpy as np\n'), ((427, 436), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (433, 436), True, 'import numpy as np\n'), ((451, 460), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (457, 460), True, 'import numpy as np\n'), ((555, 571), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (563, 571), True, 'import numpy as np\n'), ((586, 595), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (592, 595), True, 'import numpy as np\n'), ((610, 619), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (616, 619), True, 'import numpy as np\n'), ((659, 668), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (665, 668), True, 'import numpy as np\n'), ((957, 966), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (963, 966), True, 'import numpy as np\n'), ((1670, 1701), 'numpy.tile', 'np.tile', (['x', '[direc.shape[0], 1]'], {}), '(x, [direc.shape[0], 1])\n', (1677, 1701), True, 'import numpy as np\n'), ((2114, 2158), 'numpy.linalg.norm', 'np.linalg.norm', (['line0'], {'axis': '(1)', 'keepdims': '(True)'}), '(line0, axis=1, keepdims=True)\n', (2128, 2158), True, 'import numpy as np\n'), ((2169, 2213), 'numpy.linalg.norm', 'np.linalg.norm', (['line1'], {'axis': '(1)', 'keepdims': '(True)'}), '(line1, axis=1, keepdims=True)\n', (2183, 2213), 
True, 'import numpy as np\n'), ((3858, 3880), 'numpy.concatenate', 'np.concatenate', (['dis', '(0)'], {}), '(dis, 0)\n', (3872, 3880), True, 'import numpy as np\n'), ((4381, 4394), 'numpy.isnan', 'np.isnan', (['dis'], {}), '(dis)\n', (4389, 4394), True, 'import numpy as np\n'), ((4458, 4482), 'numpy.eye', 'np.eye', (['points.shape[-1]'], {}), '(points.shape[-1])\n', (4464, 4482), True, 'import numpy as np\n'), ((4543, 4563), 'numpy.matmul', 'np.matmul', (['points', 'R'], {}), '(points, R)\n', (4552, 4563), True, 'import numpy as np\n'), ((4638, 4647), 'numpy.eye', 'np.eye', (['(7)'], {}), '(7)\n', (4644, 4647), True, 'import numpy as np\n'), ((4766, 4783), 'numpy.matmul', 'np.matmul', (['box', 'R'], {}), '(box, R)\n', (4775, 4783), True, 'import numpy as np\n'), ((5500, 5527), 'numpy.concatenate', 'np.concatenate', (['ints_all', '(0)'], {}), '(ints_all, 0)\n', (5514, 5527), True, 'import numpy as np\n'), ((230, 239), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (236, 239), True, 'import numpy as np\n'), ((387, 396), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (393, 396), True, 'import numpy as np\n'), ((635, 644), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (641, 644), True, 'import numpy as np\n'), ((1041, 1058), 'numpy.matmul', 'np.matmul', (['R_i', 'R'], {}), '(R_i, R)\n', (1050, 1058), True, 'import numpy as np\n'), ((1127, 1152), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (1135, 1152), True, 'import numpy as np\n'), ((1592, 1612), 'numpy.array', 'np.array', (['[[1.0, 0]]'], {}), '([[1.0, 0]])\n', (1600, 1612), True, 'import numpy as np\n'), ((1642, 1665), 'numpy.array', 'np.array', (['[[1.0, 0, 0]]'], {}), '([[1.0, 0, 0]])\n', (1650, 1665), True, 'import numpy as np\n'), ((2348, 2377), 'numpy.sum', 'np.sum', (['(line0 * line1)'], {'axis': '(1)'}), '(line0 * line1, axis=1)\n', (2354, 2377), True, 'import numpy as np\n'), ((3339, 3350), 'numpy.abs', 'np.abs', (['dif'], {}), '(dif)\n', (3345, 3350), True, 'import numpy as 
np\n'), ((3397, 3409), 'numpy.sign', 'np.sign', (['dif'], {}), '(dif)\n', (3404, 3409), True, 'import numpy as np\n'), ((4323, 4337), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (4329, 4337), True, 'import numpy as np\n'), ((4340, 4371), 'numpy.linalg.norm', 'np.linalg.norm', (['direc_p'], {'axis': '(1)'}), '(direc_p, axis=1)\n', (4354, 4371), True, 'import numpy as np\n'), ((5075, 5113), 'numpy.empty', 'np.empty', (['[shape0[0], shape1[0], 2, 2]'], {}), '([shape0[0], shape1[0], 2, 2])\n', (5083, 5113), True, 'import numpy as np\n'), ((5429, 5454), 'numpy.concatenate', 'np.concatenate', (['ints_0', '(1)'], {}), '(ints_0, 1)\n', (5443, 5454), True, 'import numpy as np\n'), ((6414, 6435), 'numpy.linalg.inv', 'np.linalg.inv', (['v01v23'], {}), '(v01v23)\n', (6427, 6435), True, 'import numpy as np\n'), ((6446, 6472), 'numpy.matmul', 'np.matmul', (['inv_vov1', 'p2sp0'], {}), '(inv_vov1, p2sp0)\n', (6455, 6472), True, 'import numpy as np\n'), ((6964, 6977), 'numpy.abs', 'np.abs', (['angle'], {}), '(angle)\n', (6970, 6977), True, 'import numpy as np\n'), ((1342, 1374), 'numpy.random.uniform', 'np.random.uniform', (['(-r_max)', 'r_max'], {}), '(-r_max, r_max)\n', (1359, 1374), True, 'import numpy as np\n'), ((1488, 1503), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (1500, 1503), True, 'import numpy as np\n'), ((2866, 2897), 'numpy.floor', 'np.floor', (['(val / period + offset)'], {}), '(val / period + offset)\n', (2874, 2897), True, 'import numpy as np\n'), ((6531, 6553), 'numpy.array', 'np.array', (['([np.nan] * 2)'], {}), '([np.nan] * 2)\n', (6539, 6553), True, 'import numpy as np\n'), ((6609, 6631), 'numpy.array', 'np.array', (['([np.nan] * 2)'], {}), '([np.nan] * 2)\n', (6617, 6631), True, 'import numpy as np\n'), ((6711, 6747), 'numpy.linalg.norm', 'np.linalg.norm', (['(intersec - intersec_)'], {}), '(intersec - intersec_)\n', (6725, 6747), True, 'import numpy as np\n'), ((7174, 7215), 'numpy.array', 'np.array', (['[[intersec[0], intersec[1], 
0]]'], {}), '([[intersec[0], intersec[1], 0]])\n', (7182, 7215), True, 'import numpy as np\n'), ((7354, 7376), 'numpy.array', 'np.array', (['([np.nan] * 2)'], {}), '([np.nan] * 2)\n', (7362, 7376), True, 'import numpy as np\n'), ((7422, 7444), 'numpy.array', 'np.array', (['([np.nan] * 2)'], {}), '([np.nan] * 2)\n', (7430, 7444), True, 'import numpy as np\n'), ((7768, 7806), 'numpy.linalg.norm', 'np.linalg.norm', (['pc_distances0'], {'axis': '(-1)'}), '(pc_distances0, axis=-1)\n', (7782, 7806), True, 'import numpy as np\n'), ((9283, 9304), 'numpy.max', 'np.max', (['bboxes[:, -1]'], {}), '(bboxes[:, -1])\n', (9289, 9304), True, 'import numpy as np\n'), ((9332, 9353), 'numpy.min', 'np.min', (['bboxes[:, -1]'], {}), '(bboxes[:, -1])\n', (9338, 9353), True, 'import numpy as np\n'), ((7099, 7123), 'numpy.expand_dims', 'np.expand_dims', (['line0', '(0)'], {}), '(line0, 0)\n', (7113, 7123), True, 'import numpy as np\n'), ((7124, 7148), 'numpy.expand_dims', 'np.expand_dims', (['line1', '(0)'], {}), '(line1, 0)\n', (7138, 7148), True, 'import numpy as np\n'), ((9172, 9193), 'numpy.abs', 'np.abs', (['bboxes[:, -1]'], {}), '(bboxes[:, -1])\n', (9178, 9193), True, 'import numpy as np\n')] |
import copy

import numpy as np
import scipy.integrate
from scipy.interpolate import interp1d, interp2d, RectBivariateSpline,UnivariateSpline,griddata
from scipy.interpolate import splprep,splev
from scipy.optimize import fmin
from scipy.spatial import Delaunay, ConvexHull
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.path import Path

from pleiades.analysis.math import get_gpsi
#import analysis.datahelpers as dh
def scale_patches(patches, scale):
    """Scale every patch in *patches* in place by *scale* and return them.

    The scaling is appended to each patch's existing transform.
    """
    extra = mpl.transforms.Affine2D().scale(scale)
    for patch in patches:
        patch.set_transform(patch.get_transform() + extra)
    return patches
def get_mirror_patches(patches, axis=0, scale=1):
    """Return mirror-image copies of *patches*.

    axis=0 reflects across the x axis, axis=1 across the y axis.
    The optional *scale* factor is applied on top of the reflection.
    The input patches are left untouched; shallow copies are returned.
    """
    if axis == 0:
        reflection = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]])
    else:
        reflection = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
    extra = mpl.transforms.Affine2D(matrix=reflection).scale(scale)
    mirrored = []
    for patch in patches:
        twin = copy.copy(patch)
        twin.set_transform(twin.get_transform() + extra)
        mirrored.append(twin)
    return mirrored
def transpose_patches(patches):
    """Transpose patches in place (reflect across the line y = x).

    Applies the axis-swapping permutation matrix to each patch's transform.
    Returns the (mutated) patches for convenience, mirroring the style of
    ``scale_patches``.
    """
    # Fix: the original built an unused `mirror_patches` list and left a
    # commented-out return; the transforms were always applied in place.
    transpose = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    t = mpl.transforms.Affine2D(matrix=transpose)
    for p in patches:
        p.set_transform(p.get_transform() + t)
    return patches
class Boundary(object):
    """A parametric spline curve fitted through a set of (r, z) vertices.

    The curve is stored both as a spline representation (``tck``) and as a
    dense resampling of ``npts`` points (``verts``).
    """

    def __init__(self, vertices):
        # vertices: Nx2 array of (r, z) points defining the boundary.
        self._interpolate_verts(vertices)

    def _interpolate_verts(self, vertices, u=None, s=0.0, npts=200):
        """Fit the spline and resample it on a uniform parameter grid."""
        tck, param = splprep(vertices.T, u=u, s=s)
        dense = np.linspace(param.min(), param.max(), npts)
        self.tck = tck
        self.u = dense
        self.verts = np.column_stack(splev(dense, tck, der=0))

    def interpolate(self, u):
        """Evaluate the boundary spline at parameter value(s) ``u``."""
        return splev(u, self.tck, der=0)
class FieldLine(object):
    """A single field line (one constant-psi contour segment) in the (r, z) plane.

    Wraps an Nx2 vertex array in a parametric spline so the line can be
    resampled, measured (arc length), and used to evaluate gridded field
    quantities along the line.
    """
    def __init__(self,psi,verts):
        # psi: flux value labeling this contour; verts: Nx2 array of (r, z).
        self.psi = psi
        self._verts = verts
        self._interpolate_verts()
        self.reorder_verts()
    def is_closed(self):
        """Return True when the raw contour repeats its first vertex at the end."""
        return np.all(self._verts[0,:] == self._verts[-1,:])
    def _interpolate_verts(self,u=None,s=0.0,k=2,npts=1000):
        """Fit a degree-k parametric spline through the raw vertices and
        resample it on npts uniformly spaced parameter values.

        Closed contours use a periodic spline (per=1) so the resampled
        curve stays smooth across the seam.
        """
        if self.is_closed():
            per = 1
        else:
            per = 0
        tck,u = splprep(self._verts.T,u=u,k=k,s=s,per=per)
        u_new = np.linspace(u.min(),u.max(),npts)
        self.tck = tck
        self.u = u_new
        r_new,z_new = splev(u_new,tck,der=0)
        self.verts = np.vstack((r_new,z_new)).T
    def interpolate(self,u):
        """Evaluate the field-line spline at parameter value(s) u; returns (r, z)."""
        return splev(u,self.tck,der=0)
    def reorder_verts(self,steptol=0.1):
        """For open lines, rotate the vertex ordering so the curve starts at its
        minimum-r point.

        If the first step after the roll is longer than steptol the roll landed
        on the wrong side of the seam, so the ordering is rolled back by one and
        reversed. Closed lines are left untouched.
        """
        if not self.is_closed():
            istart = np.argmin(self.verts[:,0])
            tmpvert = np.roll(self.verts,-istart,axis=0)
            if (tmpvert[1,1]-tmpvert[0,1])**2 + (tmpvert[1,0]-tmpvert[0,0])**2 > steptol**2:
                tmpvert = np.roll(tmpvert,-1,axis=0)[::-1,:]
            self.verts = tmpvert
    def get_svec(self):
        """Return the cumulative arc-length vector along the resampled vertices
        (s[0] == 0)."""
        s = np.zeros(self.verts.shape[0])
        r,z = self.verts[:,0], self.verts[:,1]
        s[1:] = np.cumsum(np.sqrt((r[1:]-r[0:-1])**2+(z[1:]-z[0:-1])**2))
        return s
    def get_length(self):
        """Return the total arc length of the field line."""
        return self.get_svec()[-1]
    def get_ds(self):
        """Return the length of the first segment only.

        NOTE(review): used as a uniform spacing in d_ds — assumes the
        resampled vertices are (approximately) equally spaced; confirm.
        """
        r,z = self.verts[:,0], self.verts[:,1]
        return np.sqrt((r[1]-r[0])**2+(z[1]-z[0])**2)
    def interpolate_onto(self,R,Z,Q,method="cubic"):
        """Interpolate gridded quantity Q (defined on R, Z) onto this line's
        vertices via scattered-data interpolation."""
        return griddata((R.ravel(),Z.ravel()),Q.ravel(),xi=(self.verts[:,0],self.verts[:,1]),method=method)
    def get_kappa_n(self,R,Z,BR,BZ,method="cubic"):
        """Return the (r, z) components of the field-line curvature.

        Builds the unit field vector bhat = (BR, BZ)/|B| on the line, then
        differentiates it along arc length; signb flips the sign so the
        derivative is taken along the field direction at the first vertex.
        """
        modB = np.sqrt(BR**2+BZ**2)
        bhatr, bhatz = BR/modB, BZ/modB
        bhatr_terp = self.interpolate_onto(R,Z,bhatr)
        bhatz_terp = self.interpolate_onto(R,Z,bhatz)
        signb = np.sign(self.verts[0,0]*bhatr_terp[0] + self.verts[0,1]*bhatz_terp[0])
        kap_r, kap_z = signb*self.d_ds(bhatr_terp), signb*self.d_ds(bhatz_terp)
        return kap_r, kap_z
    def d_ds(self,Q):
        """Differentiate the sampled quantity Q with respect to arc length.

        Second-order central differences in the interior and one-sided
        3-point stencils at the endpoints, using the first-segment spacing
        from get_ds() as a uniform step.
        """
        ds = self.get_ds()
        res = np.zeros_like(Q)
        res[1:-1] = (Q[2:] - Q[:-2]) / (2*ds)
        res[0] = (-3.0/2.0*Q[0] + 2*Q[1] - 1.0/2.0*Q[2]) / ds
        res[-1] = (1.0/2.0*Q[-3] - 2*Q[-2] + 3.0/2.0*Q[-1]) / ds
        return res
    def get_gradpsi(self,R,Z,BR,BZ,method="cubic"):
        """Return (d psi/dr, d psi/dz) on the line, computed as
        (R*BZ, -R*BR) per the axisymmetric flux-function relations."""
        gradpsi_r = self.interpolate_onto(R,Z,R*BZ)
        gradpsi_z = -self.interpolate_onto(R,Z,R*BR)
        return gradpsi_r,gradpsi_z
    def intersection(self,boundary):
        """Return the parameter value on this line closest to *boundary*.

        Minimizes the squared distance between a point on this line
        (parameter s1) and a point on the boundary (parameter s2) with a
        Nelder-Mead search started at (0.5, 0.5); returns the line's s1.
        """
        def _distfunc(self,boundary,s1,s2):
            rfl,zfl = self.interpolate(s1)
            rb,zb = boundary.interpolate(s2)
            return (rfl-rb)**2 + (zfl-zb)**2
        distfunc = lambda x0: _distfunc(self,boundary,x0[0],x0[1])
        res = fmin(distfunc,[.5,.5],disp=0)
        return res[0]
    def apply_boundary(self,b1,b2):
        """Store the line parameters where this line meets boundaries b1 and b2."""
        self.ubound0 = self.intersection(b1)
        self.ubound1 = self.intersection(b2)
    def get_bounded_fl(self,npts=1000):
        """Return npts (r, z) samples between the stored boundary intersections
        (requires apply_boundary to have been called)."""
        return self.interpolate(np.linspace(self.ubound0,self.ubound1,npts))
def contour_points(contourset):
    """Build a dict mapping each contour level to its FieldLine objects.

    Each segment of a level in *contourset* (a matplotlib QuadContourSet)
    becomes one FieldLine keyed by that level's value.
    """
    return {
        lev: [FieldLine(lev, seg) for seg in contourset.allsegs[ilev]]
        for ilev, lev in enumerate(contourset.levels)
    }
def regular_grid(xx, yy, *args, **kwargs):
    """Interpolate irregularly gridded data onto a regular grid.

    Keyword args: nx, ny (output resolution, default 200 each), xi, yi
    (pre-built target grids; when both are given nx/ny are ignored), and
    method (griddata method, default "linear").

    Returns (xi, yi, q0, q1, ...) with one interpolated array per input in
    *args*.
    """
    nx = kwargs.pop("nx", 200)
    ny = kwargs.pop("ny", 200)
    xi = kwargs.pop("xi", None)
    yi = kwargs.pop("yi", None)
    method = kwargs.pop("method", "linear")
    if xi is None or yi is None:
        # Build the target grid spanning the input's bounding box.
        xi, yi = np.meshgrid(
            np.linspace(xx.min(), xx.max(), nx),
            np.linspace(yy.min(), yy.max(), ny),
            indexing="ij",
        )
    scattered = np.vstack((xx.flatten(), yy.flatten())).T
    interped = tuple(
        griddata(scattered, q.flatten(), (xi, yi), method=method) for q in args
    )
    return (xi, yi) + interped
def get_deltapsi(data, Req, Zeq):
    """Return the contribution to psi from fast-ion currents on the eqdsk grid.

    Args:
        data: netCDF4 Dataset holding the "curr_diamcurv_phi" current density
            and the "nreqadim"/"nzeqadim" grid dimensions.
        Req: 2D R grid from the eqdsk file.
        Zeq: 2D Z grid from the eqdsk file.

    Returns:
        deltapsi: psi produced by the fast-ion currents, interpolated onto
        the eqdsk (Req, Zeq) grid.
    """
    variables = data.variables
    nr = data.dimensions["nreqadim"].size
    nz = data.dimensions["nzeqadim"].size
    # Native current-density grid spanning the eqdsk bounding box.
    r1d = np.linspace(np.min(Req), np.max(Req), nr)
    z1d = np.linspace(np.min(Zeq), np.max(Zeq), nz)
    dr, dz = r1d[1] - r1d[0], z1d[1] - z1d[0]
    rr, zz = np.meshgrid(r1d, z1d)
    greens = get_gpsi(rr, zz)
    jphi = variables["curr_diamcurv_phi"][:]
    if len(jphi.shape) > 2:
        # Collapse any leading (e.g. species/time) axis.
        jphi = np.sum(jphi, axis=0)
    jphi = jphi * 1e4  # A/cm^2 to A/m^2
    Iphi = jphi * dr * dz  # current per cell
    deltapsi = greens.dot(Iphi.flatten()).reshape(rr.shape)
    _, _, deltapsi = regular_grid(rr, zz, deltapsi, xi=Req, yi=Zeq)
    return deltapsi
def poly_fit(x, y, order=3):
    """Least-squares polynomial fit computed through an SVD pseudo-inverse.

    Returns (basis_fns, c): basis_fns[i](z) == z**i, and c holds the fitted
    coefficients, lowest order first, so y ~= sum(c[i] * x**i).
    """
    n = order + 1
    m = len(y)
    basis_fns = [(lambda z, i=i: z**i) for i in range(n)]
    # Vandermonde design matrix: A[i, j] = x[i]**j.
    A = np.asarray(x, dtype=float).reshape(m, 1) ** np.arange(n)
    u, s, vt = np.linalg.svd(A)
    # Reciprocal singular values, zeroing anything below the 1e-10 cutoff.
    recip = np.zeros_like(s)
    keep = s >= 1.0e-10
    recip[keep] = 1.0 / s[keep]
    Sinv = np.zeros((n, m))
    Sinv[:n, :n] = np.diag(recip)
    # c = V * S^+ * U^T * y  (the minimum-norm least-squares solution).
    c = np.dot(vt.T, np.dot(Sinv, np.dot(u.T, y)))
    return basis_fns, c
def reflect_and_hstack(Rho, Z, *args):
    """Mirror grids and quantities across rho = 0 and concatenate the halves.

    Produces arrays covering both half planes (rho <= 0 and rho >= 0): the
    rho coordinate is negated in the mirrored half, while Z and every array
    in *args* are reflected unchanged.

    Parameters
    ----------
    Rho : np.array
        2D array of R coordinates of the grid.
    Z : np.array
        2D array of Z coordinates of the grid.
    args : tuple
        2D arrays (same shape as Rho) to mirror alongside the grid.

    Returns
    -------
    tuple
        (Rho_full, Z_full, *mirrored_args)
    """
    out = [
        np.hstack((-Rho[:, -1:0:-1], Rho)),
        np.hstack((Z[:, -1:0:-1], Z)),
    ]
    for quantity in args:
        assert quantity.shape == Rho.shape
        out.append(np.hstack((quantity[:, -1:0:-1], quantity)))
    return tuple(out)
def get_concave_hull(Rho, Z, Q):
    """Triangulate the non-NaN support of Q over (Rho, Z) and plot the edges.

    Collects every grid point where Q is finite, Delaunay-triangulates the
    point cloud, and displays all triangulation edges as a LineCollection.
    Purely a visualization helper; returns nothing.
    """
    points = np.array([[rho0, z0]
                       for rho0, z0, q in zip(Rho.flatten(), Z.flatten(), Q.flatten())
                       if ~np.isnan(q)])
    tri = Delaunay(points)
    # Make a list of line segments:
    # edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
    #                 ((x1_2, y1_2), (x2_2, y2_2)),
    #                 ... ]
    edge_points = []
    edges = set()
    def add_edge(i, j):
        """Add a line between the i-th and j-th points, if not in the list already"""
        if (i, j) in edges or (j, i) in edges:
            # already added
            return
        edges.add((i, j))
        edge_points.append(points[[i, j]])
    # loop over triangles: ia, ib, ic = indices of corner points.
    # Fix: use `simplices` — the old `vertices` alias was deprecated and
    # later removed from scipy.spatial.Delaunay.
    for ia, ib, ic in tri.simplices:
        add_edge(ia, ib)
        add_edge(ib, ic)
        add_edge(ic, ia)
    # LineCollection is just a (maybe) faster way to plot lots of lines at once
    lines = LineCollection(edge_points)
    plt.figure()
    plt.title('Delaunay triangulation')
    plt.gca().add_collection(lines)
    plt.xlim(-20, 20)
    plt.ylim(-10, 10)
    plt.show()
def transform_to_rtheta(Rho, Z, rho_component, z_component):
    """Convert a cylindrical (rho, z) grid and vector field to polar form.

    Theta is measured from the +z axis (pi/2 is added to atan2), so a point
    on the +rho axis sits at theta = pi/2.

    Returns (R, Theta, r_component, theta_component).
    """
    R = np.sqrt(Rho**2 + Z**2)
    Theta = np.pi / 2 + np.arctan2(Z, Rho)
    st, ct = np.sin(Theta), np.cos(Theta)
    # Project the (rho, z) components onto the local (r-hat, theta-hat) basis.
    radial = rho_component * st + z_component * ct
    poloidal = rho_component * ct - z_component * st
    return R, Theta, radial, poloidal
def transform_to_rhoz(R, Theta, r_component, theta_component):
    """Convert a polar (R, Theta) grid and vector field back to cylindrical.

    Inverse of ``transform_to_rtheta``: Theta is measured from the +z axis.
    Returns (Rho, Z, rho_component, z_component).
    """
    Rho = R * np.sin(Theta)
    Z = R * np.cos(Theta)
    # Rotate the (r, theta) components back into the (rho, z) basis.
    rho_part = (r_component * Rho + theta_component * Z) / R
    z_part = (r_component * Z - theta_component * Rho) / R
    return Rho, Z, rho_part, z_part
def locs_to_vals(X, Y, Q, coord_list):
    """Pick values of field Q at the grid points nearest to given coordinates.

    Args:
        X (2D array): grid of column coordinates of Q (rows identical).
        Y (2D array): grid of row coordinates of Q (columns identical).
        Q (2D array): value of Q on the grid.
        coord_list (list): tuples (x, y) of the desired coordinates.

    Returns:
        list of Q values, one per coordinate, by nearest-neighbor lookup.
    """
    xs = X[0, :]
    ys = Y[:, 0]
    picked = []
    for x, y in coord_list:
        col = np.abs(xs - x).argmin()
        row = np.abs(ys - y).argmin()
        picked.append(Q[row, col])
    return picked
def locs_to_vals_griddata(X, Y, Q, coord_list):
    """Interpolate field Q at the desired coordinates via scattered-data lookup.

    Args:
        X (2D array): grid of column coordinates of Q.
        Y (2D array): grid of row coordinates of Q.
        Q (2D array): value of Q on the grid.
        coord_list (list): tuples (x, y) of the desired coordinates.

    Returns:
        array of interpolated Q values (griddata's default linear method).
    """
    xi, yi = zip(*coord_list)
    scattered = (X.flatten(), Y.flatten())
    return griddata(scattered, Q.flatten(), (xi, yi))
def locs_to_vals1D(X, Y, Q, coord_list):
    """Pick values of Q at the nearest points of a flattened coordinate set.

    Args:
        X (1D array): x coordinates of each sample of Q.
        Y (1D array): y coordinates of each sample of Q.
        Q (1D array): sampled values.
        coord_list (list): tuples (x, y) of the desired coordinates.

    Returns:
        list of Q values, one per coordinate (nearest sample in Euclidean
        distance).
    """
    return [Q[((X - x)**2 + (Y - y)**2).argmin()] for x, y in coord_list]
def get_fieldlines(contourset,level,start_coord=None,end_coord=None,clockwise=True,idx_check=[]):
    """Return the coordinates of a flux surface as an Nx2 array.

    Looks up *level* in *contourset* (a matplotlib QuadContourSet), takes the
    longest segment at that level, and orders/trims it with parse_segment.

    Args:
        contourset: result of an ``ax.contour`` call.
        level (float): desired contour level (must be in contourset.levels).
        start_coord (tuple): optional (x, y) where the field line should start.
        end_coord (tuple): optional (x, y) where the field line should end.
        clockwise (bool): ordering direction for the returned points.
        idx_check: unused; kept for backward compatibility.
    """
    assert level in list(contourset.levels), "level: {0} not found in contourset".format(level)
    idx = list(contourset.levels).index(level)
    segs = contourset.allsegs[idx]
    # Keep only the longest segment at this level (first one on ties).
    longest = max(segs, key=lambda seg: seg.shape[0])
    return parse_segment(longest, start_coord=start_coord,
                         end_coord=end_coord, clockwise=clockwise)
def parse_segment(flpoints, start_coord=None, end_coord=None, clockwise=True):
    """Reorder and trim a contour segment (Nx2 array of (x, y) points).

    Steps:
      1. If start_coord is given, roll the array so the point closest to it
         comes first.
      2. Detect the input orientation with a shoelace-style signed sum and
         flip the ordering if it does not match *clockwise*.
      3. If end_coord is given, truncate one past the point closest to it;
         otherwise drop only the final point (which duplicates the first on
         closed contours).

    Fixes: ``!= None`` comparisons replaced with ``is not None`` (PEP 8
    E711; the old form also breaks if a coordinate is an ndarray), and the
    per-point Python list comprehensions replaced with vectorized numpy
    distance computations.
    """
    if start_coord is not None:
        d2 = (flpoints[:, 0] - start_coord[0])**2 + (flpoints[:, 1] - start_coord[1])**2
        flpoints = np.roll(flpoints, -np.argmin(d2), axis=0)
    x = flpoints[:, 0]
    y = flpoints[:, 1]
    # Positive shoelace sum (including the wrap-around term) => clockwise.
    iscw = np.sum((x[1:] - x[0:-1]) * (y[1:] + y[0:-1])) + (x[0] - x[-1]) * (y[0] + y[-1]) > 0
    if clockwise != iscw:
        # Reverse direction while keeping the same starting point.
        flpoints = np.roll(flpoints[::-1, :], 1, axis=0)
    i_end = len(x) - 1
    if end_coord is not None:
        d2 = (flpoints[:, 0] - end_coord[0])**2 + (flpoints[:, 1] - end_coord[1])**2
        i_end = np.argmin(d2)
        if i_end < len(x) - 1:
            i_end += 1
    return flpoints[0:i_end, :]
def get_fieldline_distance(flpoints):
    """Return the cumulative arc-length vector along *flpoints* (Nx2 array).

    The first entry is 0; each subsequent entry adds the Euclidean length
    of the segment from the previous point.
    """
    steps = np.diff(flpoints, axis=0)
    seg_len = np.hypot(steps[:, 0], steps[:, 1])
    s = np.zeros(flpoints.shape[0])
    s[1:] = np.cumsum(seg_len)
    return s
def interp(Rho, Z, Q, flpoints):
    """Interpolate a gridded quantity Q onto field-line points.

    Rho and Z are 2D meshgrid-style arrays (Q is indexed [z, rho]); flpoints
    is an Nx2 array of (rho, z) pairs. Returns a 1D array of Q values.
    """
    rho_axis = Rho[0, :].squeeze()
    z_axis = Z[:, 0].squeeze()
    spline = RectBivariateSpline(z_axis, rho_axis, Q)
    # Evaluate point by point; note the (z, rho) argument order of the spline.
    return np.array([float(spline(zv, rv)[0, 0])
                     for rv, zv in zip(flpoints[:, 0], flpoints[:, 1])])
def flux_surface_avg(Rho, Z, B, flpoints, Q=None):
    """Compute the flux-surface average of Q, or dV/dpsi when Q is None.

    Interpolates B (and optionally Q) onto the field line *flpoints*, then
    integrates along arc length: returns integral(dl/B) when Q is None,
    otherwise integral(Q dl/B) / integral(dl/B).
    """
    # SciPy renamed `trapz` to `trapezoid` (and removed `trapz` in 1.14).
    try:
        from scipy.integrate import trapezoid as _trapz
    except ImportError:  # older scipy
        from scipy.integrate import trapz as _trapz
    ## Interpolate B and quantity Q onto flux line
    B_interp = interp(Rho, Z, B, flpoints)
    s = get_fieldline_distance(flpoints)
    dl_B = _trapz(y=1.0 / B_interp, x=s)
    # Fix: `Q != None` is an elementwise comparison when Q is an ndarray and
    # raises "truth value of an array is ambiguous"; `is not None` is correct.
    if Q is not None:
        Q_interp = interp(Rho, Z, Q, flpoints)
        fsa = 1 / dl_B * _trapz(y=Q_interp / B_interp, x=s)
        return fsa
    else:
        return dl_B
def diff_central(x, y):
    """Central-difference derivative dy/dx on a (possibly non-uniform) grid.

    Returns an array two elements shorter than the input, holding the
    derivative at the interior points x[1:-1]. The two one-sided slopes are
    blended with the weight f = (x2 - x1)/(x2 - x0), which reduces to the
    standard central difference on a uniform grid.
    """
    x0, x1, x2 = x[:-2], x[1:-1], x[2:]
    y0, y1, y2 = y[:-2], y[1:-1], y[2:]
    f = (x2 - x1) / (x2 - x0)
    right = (y2 - y1) / (x2 - x1)
    left = (y1 - y0) / (x1 - x0)
    return (1 - f) * right + f * left
# need to remove datahelpers dependency from this before using
#def get_F(Rho,Z,psi,B,min_rho,max_rho,start_coord=None,end_coord=None,clockwise=True,plotit=False,dfl_tol=.1):
# gamma = 5.0/3.0
# figf,axf = plt.subplots()
# psi_min,psi_edge = locs_to_vals(Rho,Z,psi,[(min_rho,0),(max_rho,0)])
# psi_levels = np.linspace(psi_min,psi_edge,500)
# cff = axf.contour(Rho,Z,psi,tuple(psi_levels))
# dl_B_list = []
# for psi0 in psi_levels:
# if psi0 == 0.0:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=False)
# else:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=clockwise)
# s = get_fieldline_distance(flpoints)
# if np.max(s[1:]-s[0:-1]) > dfl_tol:
# raise ValueError("fieldline distance between successive points greater than dfl_tol: {0}m".format(dfl_tol))
# if plotit:
# x,y = flpoints[:,0],flpoints[:,1]
# axf.plot(x,y,'bo')
# axf.plot(x[0],y[0],'go')
# axf.plot(x[-1],y[-1],'ro')
# dl_B_list.append(flux_surface_avg(Rho,Z,B,flpoints))
# psi_new = psi_levels
# dl_B_new = np.array(dl_B_list)
# dl_B_new = dh.smooth(dl_B_new,5,mode="valid")
# psi_new = dh.smooth(psi_new,5,mode="valid")
# U = UnivariateSpline(psi_new,dl_B_new,k=4,s=0)
# dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=1,s=0,ext="const")
# d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=1,s=0,ext="const")
# #dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=3,s=0,ext="const")
# #d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=3,s=0,ext="const")
# term1 = lambda x: gamma*x/U(x)*(dUdpsi(x))**2
# term2 = lambda x: dUdpsi(x) + x*d2Udpsi2(x)
# F = lambda x: term1(x) - term2(x)
# if plotit:
# z0_idx = np.abs(Z[:,0]).argmin()
# end_idx = np.abs(Rho[z0_idx,:]-max_rho).argmin()
# R_of_psi = UnivariateSpline(psi[z0_idx,0:end_idx],Rho[z0_idx,0:end_idx],s=0)
# psi_test = np.linspace(psi_min,psi_edge,1000)
# psi_norm = psi_test/psi_edge
# fig9,(ax9a,ax9b,ax9c) = plt.subplots(3,1,sharex="col")
# ax9a.plot(psi_new,dl_B_new,"o")
# ax9a.plot(psi_test,U(psi_test),"r")
# ax9b.plot(psi_new[1:-1],dUdpsi(psi_new[1:-1]),"o")
# ax9b.plot(psi_test,dUdpsi(psi_test),"r")
# ax9c.plot(psi_new[2:-2],d2Udpsi2(psi_new[2:-2]),"o")
# ax9c.plot(psi_test,d2Udpsi2(psi_test),"r")
# fig0,((ax0,ax1),(ax2,ax3)) = plt.subplots(2,2,sharex="all",figsize=(18,9))
# ax0.plot(psi_new/psi_edge,dl_B_new,"o")
# ax0.plot(psi_norm,U(psi_test),lw=2)
# ax0.set_ylabel("U")
# ax0top = ax0.twiny()
# new_labels = ["{0:1.2f}".format(r) for r in R_of_psi(psi_edge*np.array(ax0.get_xticks()))]
# ax0top.set_xticklabels(new_labels)
# ax0top.set_xlabel("R (m)")
# ax1.plot(psi_norm,dUdpsi(psi_test),'o')
# ax1.set_ylabel("U'")
# ax1top = ax1.twiny()
# ax1top.set_xticklabels(new_labels)
# ax1top.set_xlabel("R (m)")
# ax2.plot(psi_norm,term1(psi_test),'o')
# ax2.plot(psi_norm,term2(psi_test),'o')
# ax2.set_xlabel("$\\psi/\\psi_{lim}$")
# ax2.set_ylabel("Term1 and term2")
# F_clean = dh.smooth(F(psi_test),20)
# ax3.plot(psi_norm,F_clean,lw=2)
# ax3.set_xlabel("$\\psi/\\psi_{lim}$")
# ax3.set_ylabel("F($\\psi$)")
# #ax3.set_ylim(-1.2,1.2)
# plt.tight_layout()
# return F
# Added by Roger some simple check for field lines looping back on them selves
# def get_F_v2(Rho,Z,psi,B,min_rho,max_rho,start_coord=None,end_coord=None,clockwise=True,plotit=False,plotdots=True,thresh=.2,num_psi=500):
# gamma = 5.0/3.0
# figf,axf = plt.subplots()
# psi_min,psi_edge = locs_to_vals(Rho,Z,psi,[(min_rho,0),(max_rho,0)])
# psi_levels = np.linspace(psi_min,psi_edge,num_psi)
# cff = axf.contour(Rho,Z,psi,tuple(psi_levels))
# dl_B_list = []
# for psi0 in psi_levels:
# if psi0 == 0.0:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=False)
# else:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=clockwise)
# if np.abs(flpoints[0][0]-start_coord[0]) > thresh:
# raise ValueError("I think some of these contours start after the separatrix.")
# if plotdots:
# x,y = flpoints[:,0],flpoints[:,1]
# axf.plot(x,y,'bo')
# axf.plot(x[0],y[0],'go')
# axf.plot(x[-1],y[-1],'ro')
# else:
# plt.close()
# dl_B_list.append(flux_surface_avg(Rho,Z,B,flpoints))
# psi_new = psi_levels
# dl_B_new = np.array(dl_B_list)
# dl_B_new = dh.smooth(dl_B_new,5,mode="valid")
# psi_new = dh.smooth(psi_new,5,mode="valid")
# U = UnivariateSpline(psi_new,dl_B_new,k=4,s=0)
# dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=1,s=0,ext="const")
# d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=1,s=0,ext="const")
# #dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=3,s=0,ext="const")
# #d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=3,s=0,ext="const")
# term1 = lambda x: gamma*x/U(x)*(dUdpsi(x))**2
# term2 = lambda x: dUdpsi(x) + x*d2Udpsi2(x)
# F = lambda x: term1(x) - term2(x)
# if plotit:
# z0_idx = np.abs(Z[:,0]).argmin()
# end_idx = np.abs(Rho[z0_idx,:]-max_rho).argmin()
# R_of_psi = UnivariateSpline(psi[z0_idx,0:end_idx],Rho[z0_idx,0:end_idx],s=0)
# psi_test = np.linspace(psi_min,psi_edge,1000)
# psi_norm = psi_test/psi_edge
# fig9,(ax9a,ax9b,ax9c) = plt.subplots(3,1,sharex="col")
# ax9a.plot(psi_new,dl_B_new,"o")
# ax9a.plot(psi_test,U(psi_test),"r")
# ax9b.plot(psi_new[1:-1],dUdpsi(psi_new[1:-1]),"o")
# ax9b.plot(psi_test,dUdpsi(psi_test),"r")
# ax9c.plot(psi_new[2:-2],d2Udpsi2(psi_new[2:-2]),"o")
# ax9c.plot(psi_test,d2Udpsi2(psi_test),"r")
# fig0,((ax0,ax1),(ax2,ax3)) = plt.subplots(2,2,sharex="all",figsize=(18,9))
# ax0.plot(psi_new/psi_edge,dl_B_new,"o")
# ax0.plot(psi_norm,U(psi_test),lw=2)
# ax0.set_ylabel("U")
# ax0top = ax0.twiny()
# new_labels = ["{0:1.2f}".format(r) for r in R_of_psi(psi_edge*np.array(ax0.get_xticks()))]
# ax0top.set_xticklabels(new_labels)
# ax0top.set_xlabel("R (m)")
# ax1.plot(psi_norm,dUdpsi(psi_test),'o')
# ax1.set_ylabel("U'")
# ax1top = ax1.twiny()
# ax1top.set_xticklabels(new_labels)
# ax1top.set_xlabel("R (m)")
# ax2.plot(psi_norm,term1(psi_test),'o')
# ax2.plot(psi_norm,term2(psi_test),'o')
# ax2.set_xlabel("$\\psi/\\psi_{lim}$")
# ax2.set_ylabel("Term1 and term2")
# F_clean = dh.smooth(F(psi_test),20)
# ax3.plot(psi_norm,F_clean,lw=2)
# ax3.set_xlabel("$\\psi/\\psi_{lim}$")
# ax3.set_ylabel("F($\\psi$)")
# #ax3.set_ylim(-1.2,1.2)
# plt.tight_layout()
# return F
| [
"numpy.sqrt",
"numpy.hstack",
"matplotlib.collections.LineCollection",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"scipy.optimize.fmin",
"scipy.interpolate.RectBivariateSpline",
"numpy.max",
"numpy.dot",
"numpy.linspace",
"scipy.interpolate.splev",
"numpy.vstack",
"numpy.min",
"numpy.a... | [((885, 929), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, -1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, -1, 0], [0, 0, 1]])\n', (893, 929), True, 'import numpy as np\n'), ((934, 978), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (942, 978), True, 'import numpy as np\n'), ((1408, 1451), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 1]])\n', (1416, 1451), True, 'import numpy as np\n'), ((6714, 6735), 'numpy.meshgrid', 'np.meshgrid', (['req', 'zeq'], {}), '(req, zeq)\n', (6725, 6735), True, 'import numpy as np\n'), ((6751, 6767), 'pleiades.analysis.math.get_gpsi', 'get_gpsi', (['rr', 'zz'], {}), '(rr, zz)\n', (6759, 6767), False, 'from pleiades.analysis.math import get_gpsi\n'), ((7194, 7210), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (7202, 7210), True, 'import numpy as np\n'), ((7315, 7331), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (7328, 7331), True, 'import numpy as np\n'), ((7343, 7359), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (7351, 7359), True, 'import numpy as np\n'), ((7443, 7453), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (7450, 7453), True, 'import numpy as np\n'), ((7462, 7479), 'numpy.dot', 'np.dot', (['Sinv', 'u.T'], {}), '(Sinv, u.T)\n', (7468, 7479), True, 'import numpy as np\n'), ((7487, 7502), 'numpy.dot', 'np.dot', (['vt.T', 'c'], {}), '(vt.T, c)\n', (7493, 7502), True, 'import numpy as np\n'), ((7510, 7522), 'numpy.dot', 'np.dot', (['c', 'y'], {}), '(c, y)\n', (7516, 7522), True, 'import numpy as np\n'), ((8127, 8161), 'numpy.hstack', 'np.hstack', (['(-Rho[:, -1:0:-1], Rho)'], {}), '((-Rho[:, -1:0:-1], Rho))\n', (8136, 8161), True, 'import numpy as np\n'), ((8174, 8203), 'numpy.hstack', 'np.hstack', (['(Z[:, -1:0:-1], Z)'], {}), '((Z[:, -1:0:-1], Z))\n', (8183, 8203), True, 'import numpy as np\n'), ((8533, 8549), 'scipy.spatial.Delaunay', 'Delaunay', 
(['points'], {}), '(points)\n', (8541, 8549), False, 'from scipy.spatial import Delaunay, ConvexHull\n'), ((9346, 9373), 'matplotlib.collections.LineCollection', 'LineCollection', (['edge_points'], {}), '(edge_points)\n', (9360, 9373), False, 'from matplotlib.collections import LineCollection\n'), ((9735, 9761), 'numpy.sqrt', 'np.sqrt', (['(Rho ** 2 + Z ** 2)'], {}), '(Rho ** 2 + Z ** 2)\n', (9742, 9761), True, 'import numpy as np\n'), ((9806, 9819), 'numpy.sin', 'np.sin', (['Theta'], {}), '(Theta)\n', (9812, 9819), True, 'import numpy as np\n'), ((9832, 9845), 'numpy.cos', 'np.cos', (['Theta'], {}), '(Theta)\n', (9838, 9845), True, 'import numpy as np\n'), ((14500, 14527), 'numpy.zeros', 'np.zeros', (['flpoints.shape[0]'], {}), '(flpoints.shape[0])\n', (14508, 14527), True, 'import numpy as np\n'), ((14841, 14871), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['y0', 'x0', 'Q'], {}), '(y0, x0, Q)\n', (14860, 14871), False, 'from scipy.interpolate import interp1d, interp2d, RectBivariateSpline, UnivariateSpline, griddata\n'), ((1788, 1817), 'scipy.interpolate.splprep', 'splprep', (['vertices.T'], {'u': 'u', 's': 's'}), '(vertices.T, u=u, s=s)\n', (1795, 1817), False, 'from scipy.interpolate import splprep, splev\n'), ((1934, 1958), 'scipy.interpolate.splev', 'splev', (['u_new', 'tck'], {'der': '(0)'}), '(u_new, tck, der=0)\n', (1939, 1958), False, 'from scipy.interpolate import splprep, splev\n'), ((2050, 2075), 'scipy.interpolate.splev', 'splev', (['u', 'self.tck'], {'der': '(0)'}), '(u, self.tck, der=0)\n', (2055, 2075), False, 'from scipy.interpolate import splprep, splev\n'), ((2289, 2336), 'numpy.all', 'np.all', (['(self._verts[0, :] == self._verts[-1, :])'], {}), '(self._verts[0, :] == self._verts[-1, :])\n', (2295, 2336), True, 'import numpy as np\n'), ((2496, 2542), 'scipy.interpolate.splprep', 'splprep', (['self._verts.T'], {'u': 'u', 'k': 'k', 's': 's', 'per': 'per'}), '(self._verts.T, u=u, k=k, s=s, per=per)\n', (2503, 2542), False, 
'from scipy.interpolate import splprep, splev\n'), ((2657, 2681), 'scipy.interpolate.splev', 'splev', (['u_new', 'tck'], {'der': '(0)'}), '(u_new, tck, der=0)\n', (2662, 2681), False, 'from scipy.interpolate import splprep, splev\n'), ((2773, 2798), 'scipy.interpolate.splev', 'splev', (['u', 'self.tck'], {'der': '(0)'}), '(u, self.tck, der=0)\n', (2778, 2798), False, 'from scipy.interpolate import splprep, splev\n'), ((3201, 3230), 'numpy.zeros', 'np.zeros', (['self.verts.shape[0]'], {}), '(self.verts.shape[0])\n', (3209, 3230), True, 'import numpy as np\n'), ((3516, 3564), 'numpy.sqrt', 'np.sqrt', (['((r[1] - r[0]) ** 2 + (z[1] - z[0]) ** 2)'], {}), '((r[1] - r[0]) ** 2 + (z[1] - z[0]) ** 2)\n', (3523, 3564), True, 'import numpy as np\n'), ((3785, 3811), 'numpy.sqrt', 'np.sqrt', (['(BR ** 2 + BZ ** 2)'], {}), '(BR ** 2 + BZ ** 2)\n', (3792, 3811), True, 'import numpy as np\n'), ((3970, 4046), 'numpy.sign', 'np.sign', (['(self.verts[0, 0] * bhatr_terp[0] + self.verts[0, 1] * bhatz_terp[0])'], {}), '(self.verts[0, 0] * bhatr_terp[0] + self.verts[0, 1] * bhatz_terp[0])\n', (3977, 4046), True, 'import numpy as np\n'), ((4213, 4229), 'numpy.zeros_like', 'np.zeros_like', (['Q'], {}), '(Q)\n', (4226, 4229), True, 'import numpy as np\n'), ((4913, 4947), 'scipy.optimize.fmin', 'fmin', (['distfunc', '[0.5, 0.5]'], {'disp': '(0)'}), '(distfunc, [0.5, 0.5], disp=0)\n', (4917, 4947), False, 'from scipy.optimize import fmin\n'), ((5861, 5893), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (5872, 5893), True, 'import numpy as np\n'), ((6581, 6592), 'numpy.min', 'np.min', (['Req'], {}), '(Req)\n', (6587, 6592), True, 'import numpy as np\n'), ((6593, 6604), 'numpy.max', 'np.max', (['Req'], {}), '(Req)\n', (6599, 6604), True, 'import numpy as np\n'), ((6632, 6643), 'numpy.min', 'np.min', (['Zeq'], {}), '(Zeq)\n', (6638, 6643), True, 'import numpy as np\n'), ((6644, 6655), 'numpy.max', 'np.max', (['Zeq'], {}), '(Zeq)\n', (6650, 
6655), True, 'import numpy as np\n'), ((6849, 6869), 'numpy.sum', 'np.sum', (['jphi'], {'axis': '(0)'}), '(jphi, axis=0)\n', (6855, 6869), True, 'import numpy as np\n'), ((9776, 9794), 'numpy.arctan2', 'np.arctan2', (['Z', 'Rho'], {}), '(Z, Rho)\n', (9786, 9794), True, 'import numpy as np\n'), ((10186, 10199), 'numpy.sin', 'np.sin', (['Theta'], {}), '(Theta)\n', (10192, 10199), True, 'import numpy as np\n'), ((10210, 10223), 'numpy.cos', 'np.cos', (['Theta'], {}), '(Theta)\n', (10216, 10223), True, 'import numpy as np\n'), ((13839, 13874), 'numpy.roll', 'np.roll', (['flpoints', '(-i_start)'], {'axis': '(0)'}), '(flpoints, -i_start, axis=0)\n', (13846, 13874), True, 'import numpy as np\n'), ((14081, 14118), 'numpy.roll', 'np.roll', (['flpoints[::-1, :]', '(1)'], {'axis': '(0)'}), '(flpoints[::-1, :], 1, axis=0)\n', (14088, 14118), True, 'import numpy as np\n'), ((14594, 14650), 'numpy.sqrt', 'np.sqrt', (['((x[1:] - x[0:-1]) ** 2 + (y[1:] - y[0:-1]) ** 2)'], {}), '((x[1:] - x[0:-1]) ** 2 + (y[1:] - y[0:-1]) ** 2)\n', (14601, 14650), True, 'import numpy as np\n'), ((1978, 2003), 'numpy.vstack', 'np.vstack', (['(r_new, z_new)'], {}), '((r_new, z_new))\n', (1987, 2003), True, 'import numpy as np\n'), ((2701, 2726), 'numpy.vstack', 'np.vstack', (['(r_new, z_new)'], {}), '((r_new, z_new))\n', (2710, 2726), True, 'import numpy as np\n'), ((2893, 2920), 'numpy.argmin', 'np.argmin', (['self.verts[:, 0]'], {}), '(self.verts[:, 0])\n', (2902, 2920), True, 'import numpy as np\n'), ((2942, 2978), 'numpy.roll', 'np.roll', (['self.verts', '(-istart)'], {'axis': '(0)'}), '(self.verts, -istart, axis=0)\n', (2949, 2978), True, 'import numpy as np\n'), ((3304, 3360), 'numpy.sqrt', 'np.sqrt', (['((r[1:] - r[0:-1]) ** 2 + (z[1:] - z[0:-1]) ** 2)'], {}), '((r[1:] - r[0:-1]) ** 2 + (z[1:] - z[0:-1]) ** 2)\n', (3311, 3360), True, 'import numpy as np\n'), ((5165, 5210), 'numpy.linspace', 'np.linspace', (['self.ubound0', 'self.ubound1', 'npts'], {}), '(self.ubound0, self.ubound1, npts)\n', 
(5176, 5210), True, 'import numpy as np\n'), ((8301, 8334), 'numpy.hstack', 'np.hstack', (['(arg[:, -1:0:-1], arg)'], {}), '((arg[:, -1:0:-1], arg))\n', (8310, 8334), True, 'import numpy as np\n'), ((13966, 14011), 'numpy.sum', 'np.sum', (['((x[1:] - x[0:-1]) * (y[1:] + y[0:-1]))'], {}), '((x[1:] - x[0:-1]) * (y[1:] + y[0:-1]))\n', (13972, 14011), True, 'import numpy as np\n'), ((10811, 10830), 'numpy.abs', 'np.abs', (['(X[0, :] - x)'], {}), '(X[0, :] - x)\n', (10817, 10830), True, 'import numpy as np\n'), ((10839, 10858), 'numpy.abs', 'np.abs', (['(Y[:, 0] - y)'], {}), '(Y[:, 0] - y)\n', (10845, 10858), True, 'import numpy as np\n'), ((3096, 3124), 'numpy.roll', 'np.roll', (['tmpvert', '(-1)'], {'axis': '(0)'}), '(tmpvert, -1, axis=0)\n', (3103, 3124), True, 'import numpy as np\n'), ((8509, 8520), 'numpy.isnan', 'np.isnan', (['q'], {}), '(q)\n', (8517, 8520), True, 'import numpy as np\n')] |
import numpy as np
from .utils import gaussian_pdf, mutation_kernel, resize_to_exp_limits_det
from .utils import prob_low_det_high_measurement
from .model_parameters import low_en_exp_cutoff, high_en_exp_cutoff, low_en_threshold
class det_pop:
    '''
    Deterministic population function class. This class implements the
    deterministic evolution of a population of cells, as defined in our model.
    A population is represented as a continuous distribution in the binding
    energy space. The class features methods to perform cell duplication and
    mutation, selection and differentiation.
    The class displays the following methods:
    - __init__: default class constructor
    - create_empty: initializes an empty population
    - create_with_explicit_attributes: creates an object having specified
        population size and distribution
    - create_copy_without_kernel: creates a copy of an object, copying every
        attribute but the mutation kernel.
    - merge_with: modifies the population by merging it with the one passed as
        argument
    - select_with_psurv: given a survival probability it models the effect of
        selection on the population according to this survival probability.
    - differentiate: implements cell differentiation and returns the
        differentiated MC/PC populations
    - carrying_cap: implements a finite carrying capacity
    - expand: implements the combination of duplications and mutations that
        occur during a single evolution round.
    - bareps: returns the current value of bar-epsilon for the population
    - N_cells: returns the current population size
    - energies: returns the energy domain of the distribution (by reference!)
    - mean_en: returns the mean energy of the population
    '''

    def __init__(self, par, mc_seed=None):
        '''
        Initializes the population using the parameter set specified.

        Args:
        - par: the model parameters dictionary
        - mc_seed (stoch_pop object, optional): MC seed population. If
            specified then the population is seeded by a weighted mixture of
            reactivated memory and naive cells, according to the weight
            specified in the parameters.
        '''
        xlim_m, xlim_p, dx = par['xlim_minus'], par['xlim_plus'], par['dx']
        Ni, mu_i, sigma_i = par['N_i'], par['mu_i'], par['sigma_i']
        # distribution domain and discretization step
        self.x = np.arange(xlim_m, xlim_p, dx)
        self.dx = dx
        # number of cells in the population
        self.N = Ni
        # naive cells normalized distribution
        self.varphi = gaussian_pdf(x=self.x, mu=mu_i, sigma=sigma_i)
        # if mc_seed specified then initialize distribution with a mixture of
        # naive and memory cells
        if mc_seed is not None:
            # the weight is specified in the parameters dictionary
            if par['f_mem_reinit'] == 'pop':
                # it either depends on the amount of MCs collected so far
                w = mc_seed.N / (self.N + mc_seed.N)
            else:
                # or it is a constant fraction
                w = par['f_mem_reinit']
            self.varphi = self.varphi * (1. - w) + mc_seed.varphi * w
        # build mutation kernel
        _, self.ker = mutation_kernel(par)

    @classmethod
    def create_empty(cls, par):
        '''
        Initialize an empty population. Both the distribution and the
        population size are set to zero.

        Args:
        - par: model parameters dictionary.
        '''
        pop = cls.__new__(cls)
        pop.N = 0  # zero population size
        # create distribution domain according to model parameters.
        pop.x = np.arange(par['xlim_minus'], par['xlim_plus'], par['dx'])
        pop.dx = par['dx']
        pop.varphi = np.zeros_like(pop.x)  # null distribution
        return pop

    @classmethod
    def create_with_explicit_attributes(cls, N, x, dx, varphi):
        '''
        Creates a new object having the attributes passed as argument. Arrays
        are copied in the process.

        Args:
        - N (float): population size
        - x (float array): distribution energy domain
        - dx (float): discretization interval of the energy domain
        - varphi (float array): values of the normalized distribution
        '''
        pop = cls.__new__(cls)
        # initialize parameters with the arguments specified
        pop.N = N
        pop.x = np.copy(x)  # creates a copy
        pop.dx = dx
        pop.varphi = np.copy(varphi)  # creates a copy
        return pop

    def create_copy_without_kernel(self):
        '''
        Creates a copy of the caller. It copies every attribute except the
        mutation kernel, which is usually not needed in the copy.
        '''
        pop = det_pop.create_with_explicit_attributes(
            self.N, self.x, self.dx, self.varphi)
        return pop

    def merge_with(self, pop_add):
        '''
        Function that merges the current population with the population passed
        as argument.

        Args:
        - pop_add (det_pop object): population to be merged with the caller.
        '''
        if self.N > 0:
            # weight of the normalized distribution sum
            w = self.N / (self.N + pop_add.N)
            # merge distributions and renormalize
            self.varphi = self.varphi * w + pop_add.varphi * (1. - w)
            # add up sizes
            self.N += pop_add.N
        else:
            # if the caller population is empty then the result is simply the
            # added population
            self.N = pop_add.N
            self.varphi = pop_add.varphi

    def __renormalize_varphi(self):
        '''
        Renormalize the distribution after an operation that changes its size,
        and report the modification to the population size. This method should
        remain private.
        '''
        # evaluate the current normalization of the distribution
        N_factor = np.sum(self.varphi) * self.dx
        # update population size with the resulting factor
        self.N *= N_factor
        # renormalize the distribution
        self.varphi /= N_factor

    def select_with_psurv(self, psurv_x):
        '''
        Given a probability of survival, this method applies it to the
        population.

        Args:
        - psurv_x (float array): this array should contain the survival
            probability as a function of the energy domain of the distribution.
        '''
        # multiply the distribution by the probability of survival
        self.varphi *= psurv_x
        # renormalize the distribution and update population size
        self.__renormalize_varphi()

    def differentiate(self, prob_mc, prob_pc):
        '''
        This function implements differentiation. It returns the resulting
        populations of MCs and PCs.

        Args:
        - prob_mc, prob_pc (float): probabilities of respectively MC and PC
            differentiation.

        Returns:
        - MC_pop, PC_pop (det_pop objects): populations of differentiated
            MCs and PCs
        '''
        # create differentiated MC population from a copy of the current pop
        MC_pop = self.create_copy_without_kernel()
        # multiplied by the probability of differentiation
        MC_pop.N *= prob_mc
        # same for the plasma cell population
        PC_pop = self.create_copy_without_kernel()
        PC_pop.N *= prob_pc
        # remove the differentiated cells from the population size
        self.N *= (1 - prob_mc - prob_pc)
        return MC_pop, PC_pop

    def carrying_cap(self, par):
        '''
        This function implements a finite carrying capacity.

        Args:
        - par: model parameters dictionary
        '''
        # if population size exceeds the carrying capacity remove the excess
        self.N = np.min([self.N, par['GC_carrying_capacity']])

    def expand(self, *args):
        '''
        This function implements population expansion and mutation according
        to the model parameters.
        '''
        # perform convolution (amplification + mutation multiple times)
        self.varphi = np.convolve(self.ker, self.varphi,
                                  'same') * self.dx
        # renormalize the distribution and update population size
        self.__renormalize_varphi()

    def bareps(self):
        '''
        This function evaluates and returns the current value of bar-epsilon
        for the population.
        '''
        beps = -np.log(np.dot(self.varphi, np.exp(-self.x)) * self.dx)
        return beps

    def N_cells(self):
        '''
        This function returns the current population size.
        '''
        return self.N

    def energies(self):
        '''
        returns the distribution domain. NB: it is returned by reference.
        Therefore one must be careful not to modify them!
        '''
        return self.x

    def mean_en(self):
        '''
        returns the mean binding energy of the population. It returns None if
        the population is empty.
        '''
        norm = np.sum(self.varphi) * self.dx
        if norm > 0:
            return np.dot(self.x, self.varphi) * self.dx / norm
        else:
            return None

    def mean_en_exp(self):
        '''
        returns the mean binding energy of the population evaluated taking into
        account the experimental sensitivity range. Returns None if the
        population distribution is null in the detectable + below detectable
        range.
        '''
        # evaluate the probability of a measurement being below, in, or above
        # the instrumental detection range
        p_low, p_det, p_high = prob_low_det_high_measurement(self)
        # if no measurement in or below range then return None.
        # NB: probabilities are non-negative, so the check must be <= 0.
        # (the original "< 0" could never trigger, allowing a division by
        # zero in rel_p_low below when p_low + p_det == 0)
        if p_low + p_det <= 0:
            return None
        # otherwise evaluate the relative contribution of below and in range
        # measurements
        rel_p_low = p_low / (p_low + p_det)
        if rel_p_low == 1:
            # if no measurement in the detection range, then the mean is simply
            # the low detection limit
            return low_en_exp_cutoff
        else:
            # otherwise evaluate the average in the detection range.
            # restrict to experimental sensitivity range and renormalize
            x, vp = resize_to_exp_limits_det(self)
            # mean of measurements in detection range
            mean_det = np.dot(x, vp) * self.dx
            # correct with below-range measurements
            mean = mean_det * (1. - rel_p_low) + rel_p_low * low_en_exp_cutoff
            return mean

    def r_haff_exp(self):
        '''
        returns the high affinity fraction for the population evaluated taking
        into account the experimental sensitivity range. Returns None if the
        population distribution is null in the detectable + below detectable
        range.
        '''
        # evaluate the probability of a measurement being below, in, or above
        # the instrumental detection range
        p_low, p_det, p_high = prob_low_det_high_measurement(self)
        # if no measurement in or below range then return None.
        # NB: same fix as in mean_en_exp — "< 0" could never trigger and
        # allowed a division by zero below.
        if p_low + p_det <= 0:
            return None
        # otherwise evaluate the relative contribution of below and in range
        # measurements
        rel_p_low = p_low / (p_low + p_det)
        if rel_p_low == 1:
            # if only measurements below range then return one
            return 1.
        else:
            # otherwise evaluate the in-range high-affinity fraction
            # restrict to experimental sensitivity range and renormalize
            x, vp = resize_to_exp_limits_det(self)
            # evaluate high affinity fraction in detectable range
            mask = x <= low_en_threshold
            h_aff_det = np.sum(vp[mask]) * self.dx
            # correct for measurements below low-detection limit
            h_aff = h_aff_det * (1. - rel_p_low) + rel_p_low * 1.
            return h_aff
| [
"numpy.copy",
"numpy.convolve",
"numpy.exp",
"numpy.sum",
"numpy.dot",
"numpy.min",
"numpy.zeros_like",
"numpy.arange"
] | [((2458, 2487), 'numpy.arange', 'np.arange', (['xlim_m', 'xlim_p', 'dx'], {}), '(xlim_m, xlim_p, dx)\n', (2467, 2487), True, 'import numpy as np\n'), ((3722, 3779), 'numpy.arange', 'np.arange', (["par['xlim_minus']", "par['xlim_plus']", "par['dx']"], {}), "(par['xlim_minus'], par['xlim_plus'], par['dx'])\n", (3731, 3779), True, 'import numpy as np\n'), ((3828, 3848), 'numpy.zeros_like', 'np.zeros_like', (['pop.x'], {}), '(pop.x)\n', (3841, 3848), True, 'import numpy as np\n'), ((4478, 4488), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4485, 4488), True, 'import numpy as np\n'), ((4548, 4563), 'numpy.copy', 'np.copy', (['varphi'], {}), '(varphi)\n', (4555, 4563), True, 'import numpy as np\n'), ((7895, 7940), 'numpy.min', 'np.min', (["[self.N, par['GC_carrying_capacity']]"], {}), "([self.N, par['GC_carrying_capacity']])\n", (7901, 7940), True, 'import numpy as np\n'), ((6016, 6035), 'numpy.sum', 'np.sum', (['self.varphi'], {}), '(self.varphi)\n', (6022, 6035), True, 'import numpy as np\n'), ((8202, 8244), 'numpy.convolve', 'np.convolve', (['self.ker', 'self.varphi', '"""same"""'], {}), "(self.ker, self.varphi, 'same')\n", (8213, 8244), True, 'import numpy as np\n'), ((9139, 9158), 'numpy.sum', 'np.sum', (['self.varphi'], {}), '(self.varphi)\n', (9145, 9158), True, 'import numpy as np\n'), ((10497, 10510), 'numpy.dot', 'np.dot', (['x', 'vp'], {}), '(x, vp)\n', (10503, 10510), True, 'import numpy as np\n'), ((11869, 11885), 'numpy.sum', 'np.sum', (['vp[mask]'], {}), '(vp[mask])\n', (11875, 11885), True, 'import numpy as np\n'), ((9209, 9236), 'numpy.dot', 'np.dot', (['self.x', 'self.varphi'], {}), '(self.x, self.varphi)\n', (9215, 9236), True, 'import numpy as np\n'), ((8585, 8600), 'numpy.exp', 'np.exp', (['(-self.x)'], {}), '(-self.x)\n', (8591, 8600), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief:
"""
from __future__ import print_function
import os
import re
import sys
import shutil
import tempfile
import subprocess
import numpy as np
import scipy.sparse as sps
from pylightgbm.utils import io_utils
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
class GenericGMB(BaseEstimator):
    """Scikit-learn compatible wrapper around the LightGBM command-line binary.

    Training and prediction work by dumping the data into a temporary
    directory, writing a LightGBM configuration file and spawning the
    ``lightgbm`` executable as a subprocess. After fitting, the model is kept
    in memory as the raw text of the LightGBM model file (``self.model``).
    """

    def __init__(self, exec_path="LighGBM/lightgbm",
                 config="",
                 application="regression",
                 num_iterations=10,
                 learning_rate=0.1,
                 num_leaves=127,
                 tree_learner="serial",
                 num_threads=1,
                 min_data_in_leaf=100,
                 metric='l2,',
                 is_training_metric=False,
                 feature_fraction=1.,
                 feature_fraction_seed=2,
                 bagging_fraction=1.,
                 bagging_freq=0,
                 bagging_seed=3,
                 metric_freq=1,
                 early_stopping_round=0,
                 max_bin=255,
                 is_unbalance=False,
                 num_class=1,
                 boosting_type='gbdt',
                 min_sum_hessian_in_leaf=10,
                 drop_rate=0.01,
                 drop_seed=4,
                 max_depth=-1,
                 lambda_l1=0.,
                 lambda_l2=0.,
                 min_gain_to_split=0.,
                 verbose=True,
                 model=None):
        # Resolve the executable location: the LIGHTGBM_EXEC environment
        # variable takes precedence; the exec_path argument is the
        # (deprecated) fallback. '~/path/to/lightgbm' is expanded to an
        # absolute path.
        try:
            self.exec_path = os.environ['LIGHTGBM_EXEC']
        except KeyError:
            print("pyLightGBM is looking for 'LIGHTGBM_EXEC' environment variable, cannot be found.")
            print("exec_path will be deprecated in favor of environment variable")
            self.exec_path = os.path.expanduser(exec_path)

        self.config = config
        self.model = model
        self.verbose = verbose
        # LightGBM configuration key/value pairs, written verbatim to the
        # config file at fit/predict time.
        self.param = {
            'application': application,
            'num_iterations': num_iterations,
            'learning_rate': learning_rate,
            'num_leaves': num_leaves,
            'tree_learner': tree_learner,
            'num_threads': num_threads,
            'min_data_in_leaf': min_data_in_leaf,
            'metric': metric,
            'is_training_metric': is_training_metric,
            'feature_fraction': feature_fraction,
            'feature_fraction_seed': feature_fraction_seed,
            'bagging_fraction': bagging_fraction,
            'bagging_freq': bagging_freq,
            'bagging_seed': bagging_seed,
            'metric_freq': metric_freq,
            'early_stopping_round': early_stopping_round,
            'max_bin': max_bin,
            'is_unbalance': is_unbalance,
            'num_class': num_class,
            'boosting_type': boosting_type,
            'min_sum_hessian_in_leaf': min_sum_hessian_in_leaf,
            'drop_rate': drop_rate,
            'drop_seed': drop_seed,
            'max_depth': max_depth,
            'lambda_l1': lambda_l1,
            'lambda_l2': lambda_l2,
            'min_gain_to_split': min_gain_to_split,
        }

    def fit(self, X, y, test_data=None, init_scores=None):
        """Train a LightGBM model on (X, y) by invoking the binary.

        Args:
            X: training features (dense array or scipy sparse matrix).
            y: training targets.
            test_data: optional list of (x_test, y_test) tuples used as
                validation sets (required for early stopping).
            init_scores: optional per-sample initial scores; must have the
                same length as X.

        Raises:
            ValueError: if init_scores length does not match X.
        """
        # NOTE(review): init_scores used to be a mutable default argument
        # ([]); None keeps the interface backward-compatible.
        if init_scores is None:
            init_scores = []
        # create tmp dir to hold data and model (especially the latter)
        tmp_dir = tempfile.mkdtemp()
        issparse = sps.issparse(X)
        f_format = "svm" if issparse else "csv"
        train_filepath = os.path.abspath("{}/X.{}".format(tmp_dir, f_format))
        init_filepath = train_filepath + ".init"
        io_utils.dump_data(X, y, train_filepath, issparse)
        if len(init_scores) > 0:
            # explicit exception instead of a bare assert (asserts are
            # stripped under python -O)
            if len(init_scores) != X.shape[0]:
                raise ValueError("init_scores must have one entry per training sample")
            np.savetxt(init_filepath, X=init_scores, delimiter=',', newline=os.linesep)
        # else:
        #     if self.param['application'] in ['binary', 'multiclass']:
        #         np.savetxt(init_filepath, X=0.5 * np.ones(X.shape[0]),
        #                    delimiter=',', newline=os.linesep)
        if test_data:
            # dump every validation set and register it in the config
            valid = []
            for i, (x_test, y_test) in enumerate(test_data):
                test_filepath = os.path.abspath("{}/X{}_test.{}".format(tmp_dir, i, f_format))
                valid.append(test_filepath)
                io_utils.dump_data(x_test, y_test, test_filepath, issparse)
            self.param['valid'] = ",".join(valid)
        self.param['task'] = 'train'
        self.param['data'] = train_filepath
        self.param['output_model'] = os.path.join(tmp_dir, "LightGBM_model.txt")
        calls = ["{}={}\n".format(k, self.param[k]) for k in self.param]

        # use a generated config file unless the user supplied one
        if self.config == "":
            conf_filepath = os.path.join(tmp_dir, "train.conf")
            with open(conf_filepath, 'w') as f:
                f.writelines(calls)
            process = subprocess.Popen([self.exec_path, "config={}".format(conf_filepath)],
                                       stdout=subprocess.PIPE, bufsize=1)
        else:
            process = subprocess.Popen([self.exec_path, "config={}".format(self.config)],
                                       stdout=subprocess.PIPE, bufsize=1)

        # stream the LightGBM output while it runs
        with process.stdout:
            for line in iter(process.stdout.readline, b''):
                if self.verbose:
                    print(line.strip().decode('utf-8'))
        # wait for the subprocess to exit
        process.wait()

        # keep the fitted model as raw text so the estimator is picklable
        with open(self.param['output_model'], mode='r') as file:
            self.model = file.read()

        shutil.rmtree(tmp_dir)

        if test_data and self.param['early_stopping_round'] > 0:
            # raw string avoids the invalid "\d" escape sequence warning
            self.best_round = max(map(int, re.findall(r"Tree=(\d+)", self.model))) + 1

    def predict(self, X):
        """Return raw predictions for X using the fitted model text."""
        tmp_dir = tempfile.mkdtemp()
        issparse = sps.issparse(X)
        f_format = "svm" if issparse else "csv"
        predict_filepath = os.path.abspath(os.path.join(tmp_dir, "X_to_pred.{}".format(f_format)))
        output_model = os.path.abspath(os.path.join(tmp_dir, "model"))
        output_results = os.path.abspath(os.path.join(tmp_dir, "LightGBM_predict_result.txt"))
        conf_filepath = os.path.join(tmp_dir, "predict.conf")

        # write the in-memory model text back to disk for the binary
        with open(output_model, mode="w") as file:
            file.write(self.model)

        io_utils.dump_data(X, np.zeros(X.shape[0]), predict_filepath, issparse)

        calls = ["task = predict\n",
                 "data = {}\n".format(predict_filepath),
                 "input_model = {}\n".format(output_model),
                 "output_result={}\n".format(output_results)]

        with open(conf_filepath, 'w') as f:
            f.writelines(calls)

        process = subprocess.Popen([self.exec_path, "config={}".format(conf_filepath)],
                                   stdout=subprocess.PIPE, bufsize=1)

        with process.stdout:
            for line in iter(process.stdout.readline, b''):
                if self.verbose:
                    print(line.strip().decode('utf-8'))
        # wait for the subprocess to exit
        process.wait()

        y_pred = np.loadtxt(output_results, dtype=float)
        shutil.rmtree(tmp_dir)

        return y_pred

    def get_params(self, deep=True):
        """Return estimator parameters (sklearn contract).

        NOTE(review): fit() adds 'task'/'data'/'valid' keys to self.param;
        only 'output_model' is stripped here — presumably intentional,
        verify against sklearn clone() usage.
        """
        params = dict(self.param)
        params['exec_path'] = self.exec_path
        params['config'] = self.config
        params['model'] = self.model
        params['verbose'] = self.verbose
        if 'output_model' in params:
            del params['output_model']
        return params

    def set_params(self, **kwargs):
        """Update parameters and re-run __init__ (sklearn contract)."""
        params = self.get_params()
        params.update(kwargs)
        self.__init__(**params)
        return self

    def feature_importance(self, feature_names=None, importance_type='weight'):
        """Get feature importance of each feature.

        Importance type can be defined as:
            'weight' - the number of times a feature is used to split the data across all trees.
            'gain' - the average gain of the feature when it is used in trees
            'cover' - the average coverage of the feature when it is used in trees

        Parameters
        ----------
        feature_names: list (optional)
            List of feature names; if given, importances are keyed by name
            instead of column index.
        importance_type: string
            The type of feature importance

        Returns
        -------
        dict mapping feature index (or name) to split count.
        """
        # NOTE(review): feature_names used to be a mutable default ([]).
        if feature_names is None:
            feature_names = []
        assert importance_type in ['weight'], 'For now, only weighted feature importance is implemented'
        # parse "Column_<idx>=<count>" entries out of the model text
        # (raw string avoids the invalid "\d" escape sequence warning)
        match = re.findall(r"Column_(\d+)=(\d+)", self.model)

        if importance_type == 'weight':
            if len(match) > 0:
                dic_fi = {int(k): int(value) for k, value in match}
                if len(feature_names) > 0:
                    dic_fi = {feature_names[key]: dic_fi[key] for key in dic_fi}
            else:
                dic_fi = {}

        return dic_fi
class GBMClassifier(GenericGMB, ClassifierMixin):
    """LightGBM classifier wrapper.

    Configures GenericGMB with classification defaults
    (application='binary', metric='binary_logloss,') and adds probability
    prediction on top of the raw subprocess output.
    """

    def __init__(self, exec_path="LighGBM/lightgbm",
                 config="",
                 application='binary',
                 num_iterations=10,
                 learning_rate=0.1,
                 num_leaves=127,
                 tree_learner="serial",
                 num_threads=1,
                 min_data_in_leaf=100,
                 metric='binary_logloss,',
                 is_training_metric='False',
                 feature_fraction=1.,
                 feature_fraction_seed=2,
                 bagging_fraction=1.,
                 bagging_freq=0,
                 bagging_seed=3,
                 metric_freq=1,
                 early_stopping_round=0,
                 max_bin=255,
                 is_unbalance=False,
                 num_class=1,
                 boosting_type='gbdt',
                 min_sum_hessian_in_leaf=10,
                 drop_rate=0.01,
                 drop_seed=4,
                 max_depth=-1,
                 lambda_l1=0.,
                 lambda_l2=0.,
                 min_gain_to_split=0.,
                 verbose=True,
                 model=None):
        # forward everything verbatim to the generic wrapper
        super(GBMClassifier, self).__init__(exec_path=exec_path,
                                            config=config,
                                            application=application,
                                            num_iterations=num_iterations,
                                            learning_rate=learning_rate,
                                            num_leaves=num_leaves,
                                            tree_learner=tree_learner,
                                            num_threads=num_threads,
                                            min_data_in_leaf=min_data_in_leaf,
                                            metric=metric,
                                            is_training_metric=is_training_metric,
                                            feature_fraction=feature_fraction,
                                            feature_fraction_seed=feature_fraction_seed,
                                            bagging_fraction=bagging_fraction,
                                            bagging_freq=bagging_freq,
                                            bagging_seed=bagging_seed,
                                            metric_freq=metric_freq,
                                            early_stopping_round=early_stopping_round,
                                            max_bin=max_bin,
                                            is_unbalance=is_unbalance,
                                            num_class=num_class,
                                            boosting_type=boosting_type,
                                            min_sum_hessian_in_leaf=min_sum_hessian_in_leaf,
                                            drop_rate=drop_rate,
                                            drop_seed=drop_seed,
                                            max_depth=max_depth,
                                            lambda_l1=lambda_l1,
                                            lambda_l2=lambda_l2,
                                            min_gain_to_split=min_gain_to_split,
                                            verbose=verbose,
                                            model=model)

    def predict_proba(self, X):
        """Return class probabilities for X.

        For 'multiclass' the raw LightGBM output is returned as-is; for
        'binary' the single probability column is expanded to the
        two-column (P(class 0), P(class 1)) sklearn convention.

        Raises:
            ValueError: if the configured application is neither 'binary'
                nor 'multiclass'.
        """
        tmp_dir = tempfile.mkdtemp()
        issparse = sps.issparse(X)
        f_format = "svm" if issparse else "csv"
        predict_filepath = os.path.abspath(os.path.join(tmp_dir, "X_to_pred.{}".format(f_format)))
        output_model = os.path.abspath(os.path.join(tmp_dir, "model"))
        conf_filepath = os.path.join(tmp_dir, "predict.conf")
        output_results = os.path.abspath(os.path.join(tmp_dir, "LightGBM_predict_result.txt"))

        # write the in-memory model text back to disk for the binary
        with open(output_model, mode="w") as file:
            file.write(self.model)

        io_utils.dump_data(X, np.zeros(X.shape[0]), predict_filepath, issparse)

        calls = [
            "task = predict\n",
            "data = {}\n".format(predict_filepath),
            "input_model = {}\n".format(output_model),
            "output_result={}\n".format(output_results)
        ]

        with open(conf_filepath, 'w') as f:
            f.writelines(calls)

        process = subprocess.Popen([self.exec_path, "config={}".format(conf_filepath)],
                                   stdout=subprocess.PIPE, bufsize=1)

        with process.stdout:
            for line in iter(process.stdout.readline, b''):
                if self.verbose:
                    print(line.strip().decode('utf-8'))
        # wait for the subprocess to exit
        process.wait()

        raw_probabilities = np.loadtxt(output_results, dtype=float)

        if self.param['application'] == 'multiclass':
            y_prob = raw_probabilities
        elif self.param['application'] == 'binary':
            probability_of_one = raw_probabilities
            probability_of_zero = 1 - probability_of_one
            y_prob = np.transpose(np.vstack((probability_of_zero, probability_of_one)))
        else:
            # was a bare `raise` with no active exception (a confusing
            # RuntimeError) that also leaked the temp directory
            shutil.rmtree(tmp_dir)
            raise ValueError(
                "predict_proba supports only 'binary' and 'multiclass' "
                "applications, got '{}'".format(self.param['application']))

        shutil.rmtree(tmp_dir)

        return y_prob

    def predict(self, X):
        """Return the most probable class index for each sample in X."""
        y_prob = self.predict_proba(X)
        return y_prob.argmax(-1)
class GBMRegressor(GenericGMB, RegressorMixin):
    """LightGBM regressor: a thin configuration shell over GenericGMB.

    The only difference from the generic wrapper is the set of
    regression-oriented defaults (application='regression', metric='l2,');
    every constructor argument is forwarded to GenericGMB unchanged.
    """

    def __init__(self, exec_path="LighGBM/lightgbm",
                 config="",
                 application='regression',
                 num_iterations=10,
                 learning_rate=0.1,
                 num_leaves=127,
                 tree_learner="serial",
                 num_threads=1,
                 min_data_in_leaf=100,
                 metric='l2,',
                 is_training_metric=False,
                 feature_fraction=1.,
                 feature_fraction_seed=2,
                 bagging_fraction=1.,
                 bagging_freq=0,
                 bagging_seed=3,
                 metric_freq=1,
                 early_stopping_round=0,
                 max_bin=255,
                 is_unbalance=False,
                 num_class=1,
                 boosting_type='gbdt',
                 min_sum_hessian_in_leaf=10,
                 drop_rate=0.01,
                 drop_seed=4,
                 max_depth=-1,
                 lambda_l1=0.,
                 lambda_l2=0.,
                 min_gain_to_split=0.,
                 verbose=True,
                 model=None):
        # Snapshot the argument bindings before any locals are created and
        # forward them verbatim: parameter names match GenericGMB exactly,
        # so this is equivalent to spelling out every keyword argument.
        forwarded = dict(locals())
        forwarded.pop('self')
        super(GBMRegressor, self).__init__(**forwarded)
| [
"os.path.join",
"scipy.sparse.issparse",
"re.findall",
"pylightgbm.utils.io_utils.dump_data",
"numpy.zeros",
"tempfile.mkdtemp",
"numpy.vstack",
"numpy.savetxt",
"shutil.rmtree",
"numpy.loadtxt",
"os.path.expanduser"
] | [((3334, 3352), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3350, 3352), False, 'import tempfile\n'), ((3372, 3387), 'scipy.sparse.issparse', 'sps.issparse', (['X'], {}), '(X)\n', (3384, 3387), True, 'import scipy.sparse as sps\n'), ((3572, 3622), 'pylightgbm.utils.io_utils.dump_data', 'io_utils.dump_data', (['X', 'y', 'train_filepath', 'issparse'], {}), '(X, y, train_filepath, issparse)\n', (3590, 3622), False, 'from pylightgbm.utils import io_utils\n'), ((4511, 4554), 'os.path.join', 'os.path.join', (['tmp_dir', '"""LightGBM_model.txt"""'], {}), "(tmp_dir, 'LightGBM_model.txt')\n", (4523, 4554), False, 'import os\n'), ((5498, 5520), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (5511, 5520), False, 'import shutil\n'), ((5718, 5736), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5734, 5736), False, 'import tempfile\n'), ((5756, 5771), 'scipy.sparse.issparse', 'sps.issparse', (['X'], {}), '(X)\n', (5768, 5771), True, 'import scipy.sparse as sps\n'), ((6110, 6147), 'os.path.join', 'os.path.join', (['tmp_dir', '"""predict.conf"""'], {}), "(tmp_dir, 'predict.conf')\n", (6122, 6147), False, 'import os\n'), ((7020, 7059), 'numpy.loadtxt', 'np.loadtxt', (['output_results'], {'dtype': 'float'}), '(output_results, dtype=float)\n', (7030, 7059), True, 'import numpy as np\n'), ((7068, 7090), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (7081, 7090), False, 'import shutil\n'), ((8352, 8398), 're.findall', 're.findall', (['"""Column_(\\\\d+)=(\\\\d+)"""', 'self.model'], {}), "('Column_(\\\\d+)=(\\\\d+)', self.model)\n", (8362, 8398), False, 'import re\n'), ((12152, 12170), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12168, 12170), False, 'import tempfile\n'), ((12190, 12205), 'scipy.sparse.issparse', 'sps.issparse', (['X'], {}), '(X)\n', (12202, 12205), True, 'import scipy.sparse as sps\n'), ((12449, 12486), 'os.path.join', 'os.path.join', (['tmp_dir', '"""predict.conf"""'], {}), 
"(tmp_dir, 'predict.conf')\n", (12461, 12486), False, 'import os\n'), ((13472, 13511), 'numpy.loadtxt', 'np.loadtxt', (['output_results'], {'dtype': 'float'}), '(output_results, dtype=float)\n', (13482, 13511), True, 'import numpy as np\n'), ((13896, 13918), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (13909, 13918), False, 'import shutil\n'), ((3719, 3794), 'numpy.savetxt', 'np.savetxt', (['init_filepath'], {'X': 'init_scores', 'delimiter': '""","""', 'newline': 'os.linesep'}), "(init_filepath, X=init_scores, delimiter=',', newline=os.linesep)\n", (3729, 3794), True, 'import numpy as np\n'), ((4688, 4723), 'os.path.join', 'os.path.join', (['tmp_dir', '"""train.conf"""'], {}), "(tmp_dir, 'train.conf')\n", (4700, 4723), False, 'import os\n'), ((5959, 5989), 'os.path.join', 'os.path.join', (['tmp_dir', '"""model"""'], {}), "(tmp_dir, 'model')\n", (5971, 5989), False, 'import os\n'), ((6032, 6084), 'os.path.join', 'os.path.join', (['tmp_dir', '"""LightGBM_predict_result.txt"""'], {}), "(tmp_dir, 'LightGBM_predict_result.txt')\n", (6044, 6084), False, 'import os\n'), ((6266, 6286), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (6274, 6286), True, 'import numpy as np\n'), ((12393, 12423), 'os.path.join', 'os.path.join', (['tmp_dir', '"""model"""'], {}), "(tmp_dir, 'model')\n", (12405, 12423), False, 'import os\n'), ((12528, 12580), 'os.path.join', 'os.path.join', (['tmp_dir', '"""LightGBM_predict_result.txt"""'], {}), "(tmp_dir, 'LightGBM_predict_result.txt')\n", (12540, 12580), False, 'import os\n'), ((12700, 12720), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (12708, 12720), True, 'import numpy as np\n'), ((1859, 1888), 'os.path.expanduser', 'os.path.expanduser', (['exec_path'], {}), '(exec_path)\n', (1877, 1888), False, 'import os\n'), ((4282, 4341), 'pylightgbm.utils.io_utils.dump_data', 'io_utils.dump_data', (['x_test', 'y_test', 'test_filepath', 'issparse'], {}), '(x_test, y_test, test_filepath, 
issparse)\n', (4300, 4341), False, 'from pylightgbm.utils import io_utils\n'), ((13801, 13853), 'numpy.vstack', 'np.vstack', (['(probability_of_zero, probability_of_one)'], {}), '((probability_of_zero, probability_of_one))\n', (13810, 13853), True, 'import numpy as np\n'), ((5630, 5667), 're.findall', 're.findall', (['"""Tree=(\\\\d+)"""', 'self.model'], {}), "('Tree=(\\\\d+)', self.model)\n", (5640, 5667), False, 'import re\n')] |
# Copyright 2021 Southwest Research Institute
# Licensed under the Apache License, Version 2.0
# Imports for ros
from inspect import EndOfBlock
from operator import truediv
import rospy
# import tf
import numpy as np
import matplotlib.pyplot as plt
from colorama import Fore, Back, Style
from rospkg import RosPack
from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform
from std_msgs.msg import String
from rospy.core import configure_logging
from sensor_msgs.msg import JointState
# from assembly_ros.srv import ExecuteStart, ExecuteRestart, ExecuteStop
from controller_manager_msgs.srv import SwitchController, LoadController, ListControllers
import tf2_ros
# import tf2
import tf2_geometry_msgs
import tf.transformations as trfm
from threading import Lock
from modern_robotics import Adjoint as homogeneous_to_adjoint, RpToTrans
class AssemblyTools():
    def __init__(self, ROS_rate, start_time):
        """Set up ROS communication and the runtime state of the assembly task.

        Creates the publishers/subscriber used by the compliance controller,
        the tf2 buffer/listener/broadcaster pair, and initializes the loop
        state (poses, wrenches, timing, filters). Calls self.readYAML() to
        load the task configuration, which in turn publishes the reference
        TFs.

        Args:
        - ROS_rate: loop frequency in Hz, used to build the rospy.Rate.
        - start_time: rospy.Time taken as t=0 for the spiral-search timing.

        NOTE(review): requires a running ROS master and an initialized node;
        callback_update_wrench, readYAML, get_command_wrench, AssemblyFilters
        etc. are defined elsewhere in this file/package.
        """
        # Command outputs consumed by the cartesian compliance controller.
        self._wrench_pub = rospy.Publisher('/cartesian_compliance_controller/target_wrench', WrenchStamped, queue_size=10)
        self._pose_pub = rospy.Publisher('cartesian_compliance_controller/target_frame', PoseStamped , queue_size=2)
        self._adj_wrench_pub = rospy.Publisher('adjusted_wrench_force', WrenchStamped, queue_size=2)

        #for plotting node
        self.avg_wrench_pub = rospy.Publisher("/assembly_tools/avg_wrench", Wrench, queue_size=5)
        self.avg_speed_pub = rospy.Publisher("/assembly_tools/avg_speed", Point, queue_size=5)
        self.rel_position_pub = rospy.Publisher("/assembly_tools/rel_position", Point, queue_size=5)

        self.status_pub = rospy.Publisher("/assembly_tools/status", String, queue_size=5)

        # Force/torque sensor input; callback_update_wrench runs per message.
        self._ft_sensor_sub = rospy.Subscriber("/cartesian_compliance_controller/ft_sensor_wrench/", WrenchStamped, self.callback_update_wrench, queue_size=2)
        # self._tcp_pub   = rospy.Publisher('target_hole_position', PoseStamped, queue_size=2, latch=True)

        #Needed to get current pose of the robot
        self.tf_buffer = tf2_ros.Buffer(rospy.Duration(1200.0)) #tf buffer length
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.broadcaster = tf2_ros.StaticTransformBroadcaster()

        #Instantiate the dictionary of frames which are published to tf2. They have to be published in a single Broadcaster call to both be accessible.
        self.reference_frames = {"tcp": TransformStamped(), "target_hole_position": TransformStamped()}

        # Timing state: loop rate plus elapsed time since start_time, also
        # mirrored as a numpy double for numeric use.
        self._rate_selected = ROS_rate
        self._rate = rospy.Rate(self._rate_selected) #setup for sleeping in hz
        self._start_time = start_time #for _spiral_search_basic_force_control and spiral_search_motion
        self.curr_time = rospy.get_rostime() - self._start_time
        self.curr_time_numpy = np.double(self.curr_time.to_sec())
        self.highForceWarning = False
        self.collision_confidence = 0

        # Monotonic sequence number for outgoing stamped messages.
        self._seq = 0

        # Initialize filtering class
        self.filters = AssemblyFilters(5, self._rate_selected)

        self.tool_data = dict()
        """ Dictionary of transform/ matrix transformation dictionary which contains each TCP configuration loaded from the YAML. It is automatically populated in readYAML(). Access info by invoking:

        self.tool_data[*tool name*]["transform"] = (geometry_msgs.TransformStamped) Transform from tool0 (robot wrist flange) to tcp location.
        self.tool_data[*tool name*]["matrix"] = (np.array()) 4x4 homogeneous transformation matrix of same transform.
        """
        # Load task configuration (also populates tool_data and publishes TFs).
        self.readYAML()

        #loop parameters
        self.wrench_vec  = self.get_command_wrench([0,0,0])
        self.next_trigger = '' #Empty to start. Each callback should decide what next trigger to implement in the main loop
        self.switch_state = False

        # initialize loop parameters
        self.current_pose = self.get_current_pos()
        self.pose_vec = self.full_compliance_position()
        self.current_wrench = self.create_wrench([0,0,0], [0,0,0])
        self._average_wrench_gripper = self.create_wrench([0,0,0], [0,0,0]).wrench
        self._average_wrench_world = Wrench()
        self._bias_wrench = self.create_wrench([0,0,0], [0,0,0]).wrench #Calculated to remove the steady-state error from wrench readings.
        self.average_speed = np.array([0.0,0.0,0.0])

        rospy.loginfo_once(Fore.CYAN + Back.RED + "Controllers list:\n" + str(ListControllers()) + Style.RESET_ALL);
    def readYAML(self):
        """Read data from job config YAML and make certain calculations for later use. Stores peg frames in dictionary tool_data

        Reads task/algorithm/object parameters from the ROS parameter
        server, then, for every grasping location of the target peg,
        publishes the peg-corner TF and looks it back up from tool0 to
        cache both the TransformStamped and its 4x4 homogeneous matrix in
        self.tool_data. Finally selects the starting TCP.

        NOTE(review): requires the parameter server to be populated (e.g.
        from peg_in_hole_params.yaml) and depends on read_board_positions,
        read_peg_hole_dimensions, to_homogeneous, get_tf_from_YAML,
        send_reference_TFs and select_tool defined elsewhere in this class.
        """
        #job parameters moved in from the peg_in_hole_params.yaml file
        #'peg_4mm' 'peg_8mm' 'peg_10mm' 'peg_16mm'
        #'hole_4mm' 'hole_8mm' 'hole_10mm' 'hole_16mm'
        self.target_peg = rospy.get_param('/task/target_peg')
        self.target_hole = rospy.get_param('/task/target_hole')
        self.activeTCP = rospy.get_param('/task/starting_tcp')

        self.read_board_positions()

        self.read_peg_hole_dimensions()

        #Spiral parameters
        self._spiral_params = rospy.get_param('/algorithm/spiral_params')

        #Calculate transform from TCP to peg corner
        self.peg_locations = rospy.get_param('/objects/'+self.target_peg+'/grasping/pinch_grasping/locations')

        # Setup default zero-transform in case it needs to be referenced for consistency.
        self.tool_data['gripper_tip'] = dict()
        a = TransformStamped()
        a.header.frame_id = "tool0"
        a.child_frame_id = 'gripper_tip'
        a.transform.rotation.w = 1  # identity rotation, zero translation
        self.tool_data['gripper_tip']['transform'] = a
        self.tool_data['gripper_tip']['matrix'] = AssemblyTools.to_homogeneous(a.transform.rotation, a.transform.translation)
        self.reference_frames['tcp'] = a

        for key in list(self.peg_locations):
            #Read in each listed tool position; measure their TF and store in dictionary.
            #Write the position of the peg's corner wrt the gripper tip as a reference-ready TF.
            pegTransform = AssemblyTools.get_tf_from_YAML(self.peg_locations[str(key)]['pose'], self.peg_locations[str(key)]['orientation'],
            "tool0_to_gripper_tip_link", "peg_"+str(key)+"_position")
            # NOTE(review): 'tcp' is overwritten each iteration so only the
            # last peg remains referenced there after the loop — confirm
            # this is intended.
            self.reference_frames['tcp'] = pegTransform
            self.send_reference_TFs()
            self._rate.sleep()
            # Look the just-published frame back up relative to tool0
            # (1 s timeout) and cache it in both TF and matrix form.
            a = self.tf_buffer.lookup_transform("tool0", "peg_"+str(key)+"_position", rospy.Time(0), rospy.Duration(1.0))
            self.tool_data[str(key)]=dict()
            self.tool_data[str(key)]['transform'] = a
            self.tool_data[str(key)]['matrix'] = AssemblyTools.to_homogeneous(a.transform.rotation, a.transform.translation)
            # NOTE(review): logerr is used here for informational output —
            # loginfo would be the conventional level.
            rospy.logerr("Added TCP entry for " + str(key))

        rospy.logerr("TCP position dictionary now contains: " + str(list(self.tool_data))+ ", selected tool publishing now: ")
        self.select_tool(self.activeTCP)

        self.surface_height = rospy.get_param('/task/assumed_starting_height') #Starting height assumption
        self.restart_height = rospy.get_param('/task/restart_height') #Height to restart
        # quit()
    def read_board_positions(self):
        """ Calculates pose of target hole relative to robot base frame.
        Stores the result as a reference TF ('target_hole_position') and caches
        the hole's x/y as self.x_pos_offset / self.y_pos_offset for the search motions.
        """
        temp_z_position_offset = 207 #Our robot is reading Z positions wrong on the pendant for some reason.
        # NOTE(review): the offset is added directly to the YAML Z value, so it is in
        # the same (mm) units as the YAML positions — confirm against the config.
        taskPos = list(np.array(rospy.get_param('/environment_state/task_frame/position')))
        taskPos[2] = taskPos[2] + temp_z_position_offset
        taskOri = rospy.get_param('/environment_state/task_frame/orientation')
        holePos = list(np.array(rospy.get_param('/objects/'+self.target_hole+'/local_position')))
        holePos[2] = holePos[2] + temp_z_position_offset
        holeOri = rospy.get_param('/objects/'+self.target_hole+'/local_orientation')
        #Set up target hole pose
        tf_robot_to_task_board = AssemblyTools.get_tf_from_YAML(taskPos, taskOri, "base_link", "task_board")
        pose_task_board_to_hole = AssemblyTools.get_pose_from_YAML(holePos, holeOri, "base_link")
        # Chain base->board and board->hole to get the hole pose in the robot base frame.
        target_hole_pose = tf2_geometry_msgs.do_transform_pose(pose_task_board_to_hole, tf_robot_to_task_board)
        # self.target_broadcaster = tf2_geometry_msgs.do_transform_pose(self.pose_task_board_to_hole, self.tf_robot_to_task_board)
        targetHoleTF = AssemblyTools.swap_pose_tf(target_hole_pose, "target_hole_position")
        self.reference_frames['target_hole_position'] = targetHoleTF
        self.send_reference_TFs()
        self._rate.sleep()
        # self._target_pub.publish(self.target_hole_pose)
        self.x_pos_offset = target_hole_pose.pose.position.x
        self.y_pos_offset = target_hole_pose.pose.position.y
def read_peg_hole_dimensions(self):
"""Read peg and hole data from YAML configuration file.
"""
peg_diameter = rospy.get_param('/objects/'+self.target_peg+'/dimensions/diameter')/1000 #mm
peg_tol_plus = rospy.get_param('/objects/'+self.target_peg+'/tolerance/upper_tolerance')/1000
peg_tol_minus = rospy.get_param('/objects/'+self.target_peg+'/tolerance/lower_tolerance')/1000
hole_diameter = rospy.get_param('/objects/'+self.target_hole+'/dimensions/diameter')/1000 #mm
hole_tol_plus = rospy.get_param('/objects/'+self.target_hole+'/tolerance/upper_tolerance')/1000
hole_tol_minus = rospy.get_param('/objects/'+self.target_hole+'/tolerance/lower_tolerance')/1000
self.hole_depth = rospy.get_param('/objects/'+self.target_peg+'/dimensions/min_insertion_depth')/1000
#setup, run to calculate useful values based on params:
self.clearance_max = hole_tol_plus - peg_tol_minus #calculate the total error zone;
self.clearance_min = hole_tol_minus + peg_tol_plus #calculate minimum clearance; =0
self.clearance_avg = .5 * (self.clearance_max- self.clearance_min) #provisional calculation of "wiggle room"
self.safe_clearance = (hole_diameter-peg_diameter + self.clearance_min)/2; # = .2 *radial* clearance i.e. on each side.
# rospy.logerr("Peg is " + str(self.target_peg) + " and hole is " + str(self.target_hole))
# rospy.logerr("Spiral pitch is gonna be " + str(self.safe_clearance) + "because that's min tolerance " + str(self.clearance_min) + " plus gap of " + str(hole_diameter-peg_diameter))
    def send_reference_TFs(self):
        """Broadcast every frame in self.reference_frames via the static TF broadcaster.
        Refuses to publish if the 'tcp' entry has no parent frame set, since a
        headless TF would pollute the TF tree.
        """
        if(self.reference_frames['tcp'].header.frame_id != ''):
            # print("Broadcasting tfs: " + str(self.reference_frames))
            self._rate.sleep()
            self.broadcaster.sendTransform(list(self.reference_frames.values()))
        else:
            rospy.logerr("Trying to publish headless TF!")
@staticmethod
def get_tf_from_YAML(pos, ori, base_frame, child_frame): #Returns the transform from base_frame to child_frame based on vector inputs
"""Reads a TF from config YAML.
:param pos: (string) Param key for desired position parameter.
:param ori: (string) Param key for desired orientation parameter.
:param base_frame: (string) Base frame for output TF.
:param child_frame: (string) Child frame for output TF.
:return: Geometry_Msgs.TransformStamped with linked parameters.
"""
output_pose = AssemblyTools.get_pose_from_YAML(pos, ori, base_frame) #tf_task_board_to_hole
output_tf = TransformStamped()
output_tf.header = output_pose.header
#output_tf.transform.translation = output_pose.pose.position
[output_tf.transform.translation.x, output_tf.transform.translation.y, output_tf.transform.translation.z] = [output_pose.pose.position.x, output_pose.pose.position.y, output_pose.pose.position.z]
output_tf.transform.rotation = output_pose.pose.orientation
output_tf.child_frame_id = child_frame
return output_tf
@staticmethod
def get_pose_from_YAML(pos, ori, base_frame): #Returns the pose wrt base_frame based on vector inputs.
"""Reads a Pose from config YAML.
:param pos: (string) Param key for desired position parameter.
:param ori: (string) Param key for desired orientation parameter.
:param base_frame: (string) Base frame for output pose.
:param child_frame: (string) Child frame for output pose.
:return: Geometry_Msgs.PoseStamped with linked parameters.
"""
#Inputs are in mm XYZ and degrees RPY
#move to utils
output_pose = PoseStamped() #tf_task_board_to_hole
output_pose.header.stamp = rospy.get_rostime()
output_pose.header.frame_id = base_frame
tempQ = list(trfm.quaternion_from_euler(ori[0]*np.pi/180, ori[1]*np.pi/180, ori[2]*np.pi/180))
output_pose.pose = Pose(Point(pos[0]/1000,pos[1]/1000,pos[2]/1000) , Quaternion(tempQ[0], tempQ[1], tempQ[2], tempQ[3]))
return output_pose
def select_tool(self, tool_name):
"""Sets activeTCP frame according to title of desired peg frame (tip, middle, etc.). This frame must be included in the YAML.
:param tool_name: (string) Key in tool_data dictionary for desired frame.
"""
# TODO: Make this a loop-run state to slowly slerp from one TCP to another using https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Slerp.html
if(tool_name in list(self.tool_data)):
self.activeTCP = tool_name
self.reference_frames['tcp'] = self.tool_data[self.activeTCP]['transform']
self.send_reference_TFs()
else:
rospy.logerr_throttle(2, "Tool selection key error! No key '" + tool_name + "' in tool dictionary.")
def spiral_search_motion(self, frequency = .15, min_amplitude = .002, max_cycles = 62.83185):
"""Generates position, orientation offset vectors which describe a plane spiral about z;
Adds this offset to the current approach vector to create a searching pattern. Constants come from Init;
x,y vector currently comes from x_ and y_pos_offset variables.
"""
curr_time = rospy.get_rostime() - self._start_time
curr_time_numpy = np.double(curr_time.to_sec())
curr_amp = min_amplitude + self.safe_clearance * np.mod(2.0 * np.pi * frequency *curr_time_numpy, max_cycles);
x_pos = curr_amp * np.cos(2.0 * np.pi * frequency * curr_time_numpy)
y_pos = curr_amp * np.sin(2.0 * np.pi * frequency * curr_time_numpy)
x_pos = x_pos + self.x_pos_offset
y_pos = y_pos + self.y_pos_offset
z_pos = self.current_pose.transform.translation.z #0.104 is the approximate height of the hole itself. TODO:Assume the part needs to be inserted here. Update once I know the real value
pose_position = [x_pos, y_pos, z_pos]
pose_orientation = [0, 1, 0, 0] # w, x, y, z
return [pose_position, pose_orientation]
    def linear_search_position(self, direction_vector = [0,0,0], desired_orientation = [0, 1, 0, 0]):
        """Generates a command pose vector which causes the robot to hold a certain orientation
        and comply in z while maintaining the approach vector along x_ and y_pos_offset.
        :param direction_vector: (list of floats) vector directional offset from normal position. Causes constant motion in z.
        :param desired_orientation: (list of floats) quaternion parameters for orientation (w, x, y, z).
        :return: (list) [position xyz, orientation] command vectors.
        """
        # NOTE(review): pose_position aliases self.current_pose.transform.translation,
        # so the assignments below mutate the cached current pose in place. Harmless
        # if current_pose is refreshed every loop — confirm before refactoring.
        pose_position = self.current_pose.transform.translation
        pose_position.x = self.x_pos_offset + direction_vector[0]
        pose_position.y = self.y_pos_offset + direction_vector[1]
        pose_position.z = pose_position.z + direction_vector[2]
        pose_orientation = desired_orientation
        return [[pose_position.x, pose_position.y, pose_position.z], pose_orientation]
    def full_compliance_position(self, direction_vector = [0,0,0], desired_orientation = [0, 1, 0, 0]):
        """Generates a command pose vector which causes the robot to hold a certain orientation
        and comply translationally in all directions.
        :param direction_vector: (list of floats) vector directional offset from normal position. Causes constant motion.
        :param desired_orientation: (list of floats) quaternion parameters for orientation (w, x, y, z).
        :return: (list) [position xyz, orientation] command vectors.
        """
        # NOTE(review): as in linear_search_position, pose_position aliases the cached
        # current pose, so these assignments mutate self.current_pose in place.
        pose_position = self.current_pose.transform.translation
        pose_position.x = pose_position.x + direction_vector[0]
        pose_position.y = pose_position.y + direction_vector[1]
        pose_position.z = pose_position.z + direction_vector[2]
        pose_orientation = desired_orientation
        return [[pose_position.x, pose_position.y, pose_position.z], pose_orientation]
    #Load cell current data
    def callback_update_wrench(self, data: WrenchStamped):
        """Callback to update current wrench data whenever new data becomes available.
        :param data: (geometry_msgs.WrenchStamped) Latest force/torque reading; stored raw, filtering happens in update_average_wrench.
        """
        self.current_wrench = data
        # rospy.loginfo_once("Callback working! " + str(data))
    # def subtract_vector3s(self, vec1, vec2):
    #     newVector3 = Vector3(vec1.x - vec2.x, vec1.y - vec2.y, vec1.z - vec2.z)
    #     return newVector3
def get_current_pos(self):
"""Read in current pose from robot base to activeTCP.
"""
transform = TransformStamped() #TODO: Check that this worked.
# if(type(offset) == str):
# transform = self.tf_buffer.lookup_transform("base_link", self.activeTCP, rospy.Time(0), rospy.Duration(100.0))
# else:
if(self.activeTCP == "tool0"):
transform = self.tf_buffer.lookup_transform("base_link", "tool0",
rospy.Time(0), rospy.Duration(10.0))
else:
transform = self.tf_buffer.lookup_transform("base_link", self.tool_data[self.activeTCP]['transform'].child_frame_id,
rospy.Time(0), rospy.Duration(10.0))
return transform
def get_command_wrench(self, vec = [0,0,0], ori = [0,0,0]):
"""Output ROS wrench parameters from human-readable vector inputs.
:param vec: (list of floats) Vector of desired force in each direction (in Newtons).
:param ori: (list of floats) Vector of desired torque about each axis (in N*m)
"""
return [vec[0], vec[1], vec[2], ori[0], ori[1], ori[2]]
    def publish_wrench(self, input_vec):
        """Publish the commanded wrench to the command topic.
        :param input_vec: (list of floats) [Fx, Fy, Fz, Tx, Ty, Tz] command, as from get_command_wrench().
        """
        # self.check_controller(self.force_controller)
        # forces, torques = self.com_to_tcp(result[:3], result[3:], transform)
        # result_wrench = self.create_wrench(result[:3], result[3:])
        # result_wrench = self.create_wrench([7,0,0], [0,0,0])
        result_wrench = self.create_wrench(input_vec[:3], input_vec[3:])
        transform_world_to_gripper:TransformStamped = self.tf_buffer.lookup_transform('target_hole_position', 'tool0', rospy.Time(0), rospy.Duration(1.25))
        # Replace the translation with the negated TCP offset so the wrench is
        # re-expressed about the TCP. NOTE(review): the .05 term appears to undo the
        # load cell's built-in 5 cm offset to tool0 (see update_average_wrench) — confirm.
        offset =Point( -1*self.tool_data[self.activeTCP]["transform"].transform.translation.x, -1*self.tool_data[self.activeTCP]["transform"].transform.translation.y, -1*(self.tool_data[self.activeTCP]["transform"].transform.translation.z - .05))
        transform_world_to_gripper.transform.translation = offset
        #Execute reinterpret-to-tcp and rotate-to-world simultaneously:
        result_wrench.wrench = AssemblyTools.transform_wrench(transform_world_to_gripper, result_wrench.wrench) #This works
        self._wrench_pub.publish(result_wrench)
@staticmethod
def list_from_quat(quat):
return [quat.x, quat.y, quat.z, quat.w]
@staticmethod
def list_from_point(point):
return [point.x, point.y, point.z]
    # def _publish_pose(self, position, orientation):
    def publish_pose(self, pose_stamped_vec):
        """Takes in vector representations of position and publishes the goal pose for the compliance controller.
        :param pose_stamped_vec: (list) [position xyz, orientation] where orientation is ordered (w, x, y, z).
        """
        # Ensure controller is loaded
        # self.check_controller(self.controller_name)
        # Create poseStamped msg
        goal_pose = PoseStamped()
        # Set the position and orientation
        point = Point()
        quaternion = Quaternion()
        # point.x, point.y, point.z = position
        point.x, point.y, point.z = pose_stamped_vec[0][:]
        goal_pose.pose.position = point
        # Note the (w, x, y, z) ordering of the incoming quaternion vector.
        quaternion.w, quaternion.x, quaternion.y, quaternion.z = pose_stamped_vec[1][:]
        goal_pose.pose.orientation = quaternion
        # Set header values
        goal_pose.header.stamp = rospy.get_rostime()
        goal_pose.header.frame_id = "base_link"
        if(self.activeTCP != "tool0"):
            #Convert pose in TCP coordinates to assign wrist "tool0" position for controller
            b_link = goal_pose.header.frame_id
            goal_matrix = AssemblyTools.to_homogeneous(goal_pose.pose.orientation, goal_pose.pose.position) #tf from base_link to tcp_goal = bTg
            backing_mx = trfm.inverse_matrix(self.tool_data[self.activeTCP]['matrix']) #tf from tcp_goal to wrist = gTw
            goal_matrix = np.dot(goal_matrix, backing_mx) #bTg * gTw = bTw
            goal_pose = AssemblyTools.matrix_to_pose(goal_matrix, b_link)
        # self._tool_offset_pub.publish(goal_pose)
        self._pose_pub.publish(goal_pose)
@staticmethod
def to_homogeneous(quat, point):
"""Takes a quaternion and msg.Point and outputs a homog. tf matrix.
:param quat: (geometry_msgs.Quaternion) Orientation information.
:param point: (geometry.msgs.Point) Position information.
:return: (np.Array()) 4x4 Homogeneous transform matrix.
"""
#TODO candidate for Utils
output = trfm.quaternion_matrix(np.array([quat.x, quat.y, quat.z, quat.w]))
output[0][3] = point.x
output[1][3] = point.y
output[2][3] = point.z
return output
@staticmethod
def matrix_to_pose(input, base_frame):
"""Converts matrix into a pose.
:param input: (np.Array) 4x4 homogeneous transformation matrix
:param base_frame: (string) base frame for new pose.
:return: (geometry_msgs.PoseStamped) Pose based on input.
"""
output = PoseStamped()
output.header.stamp = rospy.get_rostime()
output.header.frame_id = base_frame
quat = trfm.quaternion_from_matrix(input)
output.pose.orientation.x = quat[0]
output.pose.orientation.y = quat[1]
output.pose.orientation.z = quat[2]
output.pose.orientation.w = quat[3]
output.pose.position.x = input[0][3]
output.pose.position.y = input[1][3]
output.pose.position.z = input[2][3]
return output
@staticmethod
def create_adjoint_representation(T_ab=None, R_ab=None, P_ab=None):
"""Convert homogeneous transform (T_ab) or a combination rotation matrix (R_ab) and pose (P_ab)
into the adjoint representation. This can be used to transform wrenches (e.g., force and torque) between frames.
If T_ab is provided, R_ab and P_ab will be ignored.
:param T_ab: (np.Array) 4x4 homogeneous transformation matrix representing frame 'b' relative to frame 'a'
:param R_ab: (np.Array) 3x3 rotation matrix representing frame 'b' relative to frame 'a'
:param P_ab: (np.Array) 3x1 pose representing frame 'b' relative to frame 'a'
:return Ad_T: (np.Array) 6x6 adjoint representation of the transformation
"""
#Accomodation for input R_ab and P_ab
if (type(T_ab) == type(None)):
T_ab = RpToTrans(R_ab, P_ab)
Ad_T = homogeneous_to_adjoint(T_ab)
return Ad_T
@staticmethod
def wrenchToArray(wrench: Wrench):
"""Restructures wrench object into numpy array with order needed by wrench reinterpretation math, namely, torque first then forces.
:param wrench: (geometry_msgs.Wrench) Input wrench.
:return: (np.Array) 1x6 numpy array
"""
return np.array([wrench.torque.x, wrench.torque.y, wrench.torque.z, wrench.force.x, wrench.force.y, wrench.force.z])
    @staticmethod
    def arrayToWrench(array: np.ndarray):
        """Restructures output 1x6 mathematical array representation of a wrench into a wrench object.
        Inverse of wrenchToArray: elements [0:3] are torque, [3:6] are force.
        :param array: (np.Array) 1x6 numpy array ordered [torque, force].
        :return: (geometry_msgs.Wrench) Return wrench.
        """
        # NOTE(review): Wrench fields are Vector3, but Point is constructed here.
        # Both expose x/y/z so downstream attribute access works — confirm this is
        # intentional before relying on message type checks.
        return Wrench(Point(*list(array[3:])), Point(*list(array[:3])))
    @staticmethod
    def transform_wrench(transform: TransformStamped, wrench: Wrench, invert=False, log=False):
        """Transform a wrench object by the given transform object.
        :param transform: (geometry_msgs.TransformStamped) Transform to apply
        :param wrench: (geometry_msgs.Wrench) Wrench object to transform.
        :param invert: (bool) Whether to interpret the tansformation's inverse, i.e. transform "from child to parent" instead of "from parent to child"
        :param log: (bool) Emit a throttled debug log of the transform and matrix.
        :return: (geometry.msgs.Wrench) changed wrench
        """
        # Convert the TF into a homogeneous matrix, then apply it through the
        # adjoint-based wrench transformation.
        matrix = AssemblyTools.to_homogeneous(transform.transform.rotation, transform.transform.translation)
        if(log):
            rospy.loginfo_throttle(2, Fore.RED + " Transform passed in is " + str(transform) + " and matrix passed in is \n" + str(matrix) + Style.RESET_ALL)
        if(invert):
            matrix = trfm.inverse_matrix(matrix)
        return AssemblyTools.transform_wrench_by_matrix(matrix, AssemblyTools.wrenchToArray(wrench))
@staticmethod
def transform_wrench_by_matrix(T_ab, wrench):
"""Use the homogeneous transform (T_ab) to transform a given wrench using an adjoint transformation (see create_adjoint_representation).
:param T_ab: (np.Array) 4x4 homogeneous transformation matrix representing frame 'b' relative to frame 'a'
:param wrench: (np.Array) 6x1 representation of a wrench relative to frame 'a'. This should include forces and torques as np.array([torque, force])
:return wrench_transformed: (np.Array) 6x1 representation of a wrench relative to frame 'b'. This should include forces and torques as np.array([torque, force])
"""
Ad_T = AssemblyTools.create_adjoint_representation(T_ab)
wrench_transformed = np.matmul(Ad_T.T, wrench)
return AssemblyTools.arrayToWrench(wrench_transformed)
@staticmethod
def matrix_to_tf(input, base_frame, child_frame):
"""Converts matrix back into a TF.
:param input: (np.Array) 4x4 homogeneous transformation matrix
:param base_frame: (string) base frame for new pose.
:return: (geometry_msgs.TransformStamped) Transform based on input.
"""
pose = AssemblyTools.matrix_to_pose(input, base_frame)
output = AssemblyTools.swap_pose_tf(pose, child_frame)
return output
@staticmethod
def swap_pose_tf(input, child_frame):
"""Swaps pose for tf and vice-versa.
:param input: (geometry_msgs.PoseStamped or geometry_msgs.TransformStamped) Input data type.
:param child_frame: (string) Child frame name if converting Pose to Transform.
:return: (geometry_msgs.TransformStamped or geometry_msgs.PoseStamped) Output data, of the other type from input.
"""
if('PoseStamped' in str(type(input))):
output = TransformStamped()
output.header = input.header
# output.transform = input.pose
[output.transform.translation.x, output.transform.translation.y, output.transform.translation.z] = [input.pose.position.x, input.pose.position.y, input.pose.position.z]
output.transform.rotation = input.pose.orientation
output.child_frame_id = child_frame
return output
else:
if('TransformStamped' in str(type(input))):
output = PoseStamped()
output.header = input.header
output.pose = input.transform
return output
rospy.logerr("Invalid input to swap_pose_tf !!!")
def create_wrench(self, force, torque):
"""Composes a standard wrench object from human-readable vectors.
:param force: (list of floats) x,y,z force values
:param torque: (list of floats) torques about x,y,z
:return: (geometry.msgs.WrenchStamped) Output wrench.
"""
wrench_stamped = WrenchStamped()
wrench = Wrench()
# create wrench
wrench.force.x, wrench.force.y, wrench.force.z = force
wrench.torque.x, wrench.torque.y, wrench.torque.z = torque
# create header
wrench_stamped.header.seq = self._seq
wrench_stamped.header.stamp = rospy.get_rostime()
wrench_stamped.header.frame_id = "base_link"
self._seq+=1
wrench_stamped.wrench = wrench
return wrench_stamped
    def update_average_wrench(self):
        """Create a very simple moving average of the incoming wrench readings and store it as self.average.wrench.
        Also re-expresses the averaged wrench in the world (target hole) frame and
        publishes it for debug visualization.
        """
        self._average_wrench_gripper = self.filters.average_wrench(self.current_wrench.wrench)
        #Get current angle from gripper to hole:
        transform_world_rotation:TransformStamped = self.tf_buffer.lookup_transform('tool0', 'target_hole_position', rospy.Time(0), rospy.Duration(1.25))
        #We want to rotate this only, not reinterpret F/T components.
        #We reinterpret based on the position of the TCP (but ignore the relative rotation). In addition, the wrench is internally measured at the load cell and has a built-in transformation to tool0 which is 5cm forward. We have to undo that transformation to get accurate transformation.
        offset =Point(self.tool_data[self.activeTCP]["transform"].transform.translation.x, self.tool_data[self.activeTCP]["transform"].transform.translation.y, self.tool_data[self.activeTCP]["transform"].transform.translation.z - .05)
        transform_world_rotation.transform.translation = offset
        #Execute reinterpret-to-tcp and rotate-to-world simultaneously:
        self._average_wrench_world = AssemblyTools.transform_wrench(transform_world_rotation, self._average_wrench_gripper) #This works
        #Output the wrench for debug visualization
        guy = self.create_wrench([0,0,0], [0,0,0])
        guy.wrench = self._average_wrench_world
        # guy.header.frame_id = "tool0"
        guy.header.frame_id = "target_hole_position"
        # guy.header.frame_id = self.reference_frames['tcp'].child_frame_id
        self._adj_wrench_pub.publish(guy)
    # Probably not needed, delete when certain:
    # def weighted_average_wrenches(self, wrench1, scale1, wrench2, scale2):
    #     """Returns a simple linear interpolation between wrenches.
    #     :param wrench1:(geometry_msgs.WrenchStamped) First input wrench
    #     :param scale1: (float) Weight of first input wrench
    #     :param wrench2:(geometry_msgs.WrenchStamped) Second input wrench
    #     :param scale2: (float) Weight of second input wrench
    #     :return: (geometry_msgs.WrenchStamped)
    #     """
    #     newForce = (self.as_array(wrench1.force) * scale1 + self.as_array(wrench2.force) * scale2) * 1/(scale1 + scale2)
    #     newTorque = (self.as_array(wrench1.torque) * scale1 + self.as_array(wrench2.torque) * scale2) * 1/(scale1 + scale2)
    #     return self.create_wrench([newForce[0], newForce[1], newForce[2]], [newTorque[0], newTorque[1], newTorque[2]]).wrench
    def update_avg_speed(self):
        """Updates a simple moving average of robot tcp speed in mm/s. A speed is calculated from the difference between a
        previous pose (.1 s in the past) and the current pose; this speed is filtered and stored as self.average_speed.
        Skips the update for the first 0.5 s after start so the TF history can populate.
        """
        curr_time = rospy.get_rostime() - self._start_time
        if(curr_time.to_sec() > rospy.Duration(.5).to_sec()):
            try:
                earlierPosition = self.tf_buffer.lookup_transform("base_link", self.tool_data[self.activeTCP]['transform'].child_frame_id,
                rospy.Time.now() - rospy.Duration(.1), rospy.Duration(2.0))
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                # Deliberately re-raised: a TF failure here means the loop cannot continue safely.
                raise
            #Speed Diff: distance moved / time between poses
            positionDiff = self.as_array(self.current_pose.transform.translation) - self.as_array(earlierPosition.transform.translation)
            timeDiff = ((self.current_pose.header.stamp) - (earlierPosition.header.stamp)).to_sec()
            if(timeDiff > 0.0): #Update only if we're using a new pose; also, avoid divide by zero
                speedDiff = positionDiff / timeDiff
                #Moving averate weighted toward old speed; response is independent of rate selected.
                # self.average_speed = self.average_speed * (1-10/self._rate_selected) + speedDiff * (10/self._rate_selected)
                # rospy.logwarn_throttle(2.0, "Speed is currently about " + str(speedDiff))
                self.average_speed = self.filters.average_speed(speedDiff)
        else:
            rospy.logwarn_throttle(1.0, "Too early to report past time!" + str(curr_time.to_sec()))
def publish_plotted_values(self):
"""Publishes critical data for plotting node to process.
"""
self.avg_wrench_pub.publish(self._average_wrench_world)
self.avg_speed_pub.publish(Point(self.average_speed[0], self.average_speed[1],self.average_speed[2]))
self.rel_position_pub.publish(self.current_pose.transform.translation)
# Send a dictionary as plain text to expose some additional info
status_dict = dict({('state', self.state), ('tcp_name', str(self.tool_data[self.activeTCP]['transform'].child_frame_id) )})
if(self.surface_height != 0.0):
# If we have located the work surface
status_dict['surface_height']=str(self.surface_height)
self.status_pub.publish(str(status_dict))
def as_array(self, vec):
"""Takes a Point and returns a Numpy array.
:param vec: (geometry_msgs.Point) Vector in serialized ROS format.
:return: (numpy.Array) Vector in 3x1 numpy array format.
"""
return np.array([vec.x, vec.y, vec.z])
#See if the force/speed (any vector) is within a 3-d bound. Technically, it is a box, with sqrt(2)*bound okay at diagonals.
def vectorRegionCompare_symmetrical(self, input, bounds_max):
"""See ``vectorRegionCompare``_. Compares an input to boundaries element-wise. Essentially checks whether a vector
is within a rectangular region. This version assumes min values to be the negative of max values.
:param input: (list of floats) x,y,z of a vector to check.
:param bounds_max: (list of floats) x,y,z max value of each element.
:return: (bool) Whether the vector falls within the region.
"""
#initialize a minimum list
bounds_min = [0,0,0]
#Each min value is the negative of the max value
#Create bounds_min to be the negative of bounds_max. symmetrical, duh....
bounds_min[0] = bounds_max[0] * -1.0
bounds_min[1] = bounds_max[1] * -1.0
bounds_min[2] = bounds_max[2] * -1.0
return self.vectorRegionCompare(input, bounds_max, bounds_min)
# bounds_max and bounds_min let you set a range for each dimension.
#This just compares if you are in the cube described above.
def vectorRegionCompare(self, input, bounds_max, bounds_min):
""".. vectorRegionCompare Compares an input to boundaries element-wise. Essentially checks whether a vector is within a rectangular region.
:param input: (list of floats) x,y,z of a vector to check.
:param bounds_max: (list of floats) x,y,z max value of each element.
:param bounds_min: (list of floats) x,y,z min value of each element.
:return: (bool) Whether the vector falls within the region.
"""
#Simply compares abs. val.s of input's elements to a vector of maximums and returns whether it exceeds
#if(symmetrical):
# bounds_min[0], bounds_min[1], bounds_min[2] = bounds_max[0] * -1, bounds_max[1] * -1, bounds_max[2] * -1
#TODO - convert to a process of numpy arrays! They process way faster because that library is written in C++
#Note - actually Numpy's allclose() method may be perfect here.
if( bounds_max[0] >= input[0] >= bounds_min[0]):
if( bounds_max[1] >= input[1] >= bounds_min[1]):
if( bounds_max[2] >= input[2] >= bounds_min[2]):
return True
return False
    #TODO: Make the parameters of function part of the constructor or something...
    def force_cap_check(self, danger_force=[45, 45, 45], danger_transverse_force=[3.5, 3.5, 3.5], warning_force=[25, 25, 25], warning_transverse_force=[2, 2, 2]):
        """Checks whether any forces or torques are dangerously high. There are two levels of response:
            *Elevated levels of force cause this program to pause for 1s. If forces remain high after pause,
            the system will enter a freewheeling state
            *Dangerously high forces will kill this program immediately to prevent damage.
        :return: (Bool) True if all is safe; False if a warning stop is requested.
        """
        # NOTE(review): list defaults are mutable; they are never mutated here so
        # this is harmless, but tuples would be the safer convention.
        #Calculate acceptable torque from transverse forces
        radius = np.linalg.norm(self.as_array(self.tool_data[self.activeTCP]['transform'].transform.translation))
        #Set a minimum radius to always permit some torque
        radius = max(3, radius)
        rospy.loginfo_once("For TCP " + self.activeTCP + " moment arm is coming out to " + str(radius))
        # Torque limits scale with the moment arm from wrist to TCP.
        warning_torque=[warning_transverse_force[a]*radius for a in range(3)]
        danger_torque=[danger_transverse_force[b]*radius for b in range(3)]
        rospy.loginfo_once("So torques are limited to " + str(warning_torque) + str(danger_torque))
        # Hard limit: exceeding the danger envelope kills the whole application.
        if(not (self.vectorRegionCompare_symmetrical(self.as_array(self.current_wrench.wrench.force), danger_force)
            and self.vectorRegionCompare_symmetrical(self.as_array(self.current_wrench.wrench.torque), danger_torque))):
            rospy.logerr("*Very* high force/torque detected! " + str(self.current_wrench.wrench))
            rospy.logerr("Killing program.")
            quit() # kills the program. Since the node is required, it kills the ROS application.
            return False
        if(self.vectorRegionCompare_symmetrical(self.as_array(self.current_wrench.wrench.force), warning_force)):
            if(self.vectorRegionCompare_symmetrical(self.as_array(self.current_wrench.wrench.torque), warning_torque)):
                return True
        # Soft limit: first offense pauses 1 s; a second consecutive offense
        # returns False to request a warning stop.
        rospy.logerr("High force/torque detected! " + str(self.current_wrench.wrench))
        if(self.highForceWarning):
            self.highForceWarning = False
            return False
        else:
            rospy.logerr("Sleeping for 1s to damp oscillations...")
            self.highForceWarning = True
            rospy.sleep(1) #Want the system to stop for a second in hopes that it prevents higher forces/torques. May not be helping.
        return True
class AssemblyFilters():
    """Averages a signal based on a history log of previous values. Window size is normallized to different
    frequency values using _rate_selected; window should be for 100hz cycle time.
    """

    def __init__(self, window=15, rate_selected=100):
        """Set up per-signal moving-average bookkeeping.
        :param window: (int) Number of samples to average, specified for a 100 Hz loop.
        :param rate_selected: (int) Actual loop rate in Hz; windows are rescaled by rate_selected/100.
        """
        #Simple Moving Average Parameters
        self._rate_selected = rate_selected
        self._buffer_window = dict()  # per-key window sizes (100 Hz basis)
        self._buffer_window["wrench"] = window
        self._data_buffer = dict()  # per-key sample history arrays

    def average_wrench(self, input):
        """Return a Wrench whose force and torque components are moving averages of the input's.
        :param input: (geometry_msgs.Wrench) Latest raw wrench sample.
        :return: (geometry_msgs.Wrench) Smoothed wrench.
        """
        force = self.average_threes(input.force, 'force')
        torque = self.average_threes(input.torque, 'torque')
        return Wrench(self.dict_to_point(force), self.dict_to_point(torque))

    def average_speed(self, input):
        """Takes speed as a list of components, returns smoothed version
        :param input: (numpy.Array) Speed vector
        :return: (numpy.Array) Smoothed speed vector
        """
        speed = self.average_threes(Point(input[0], input[1], input[2]), 'speed')
        return np.array([speed['x'], speed['y'], speed['z']])

    def average_threes(self, input, name):
        """Returns the moving average of a dict of x,y,z values
        :param input: (geometry_msgs.msg.Point) A point with x,y,z properties
        :param name: (string) Name to use for buffer dictionary
        :return: (dict) x,y,z dictionary of the averaged values.
        """
        vals = self.point_to_dict(input)
        for k, v in vals.items():
            # Use the window configured at construction. (Previously a hard-coded
            # 15 was passed here, which silently ignored the constructor's
            # 'window' argument.)
            vals[k] = self.simple_moving_average(v, self._buffer_window["wrench"], key=name + '_' + k)
        return vals

    def point_to_dict(self, input):
        """Convert an x/y/z message into a plain dict."""
        return {"x": input.x, "y": input.y, "z": input.z}

    def dict_to_point(self, input):
        """Convert an x/y/z dict back into a Point message."""
        return Point(input["x"], input["y"], input["z"])

    def simple_moving_average(self, new_data_point, window=None, key="wrench"):
        """Append a sample to the keyed buffer and return the current moving average.
        :param new_data_point: (float) Latest sample value.
        :param window: (int) Window size (100 Hz basis); only stored the first time a key is seen.
        :param key: (string) Which signal buffer to use.
        :return: (numpy.Array) Length-1 array holding the current average.
        """
        if key not in self._data_buffer:
            self._data_buffer[key] = np.array([])
            self._buffer_window[key] = window
        # Rescale the window to the actual loop rate (window is specified for 100 Hz).
        window = int(np.floor(self._buffer_window[key] * self._rate_selected / 100))
        # Fill up the first window while returning the average of what we have;
        # afterwards keep a constant-length sliding window.
        if len(self._data_buffer[key]) < window:
            self._data_buffer[key] = np.append(self._data_buffer[key], new_data_point)
            avg = self.calc_moving_average(self._data_buffer[key], len(self._data_buffer[key]))
        else:
            self._data_buffer[key] = np.append(self._data_buffer[key], new_data_point)  # append new datapoint to the end
            self._data_buffer[key] = np.delete(self._data_buffer[key], 0)  # pop the first element
            avg = self.calc_moving_average(self._data_buffer[key], window)
        return avg

    def calc_moving_average(self, buffered_data, w):  # w is the window
        """Mean of the trailing w samples via convolution with a box kernel."""
        return np.convolve(buffered_data, np.ones(w), 'valid') / w
if __name__ == '__main__':
    # Entry point: register this process as a ROS node before any pub/sub setup.
    rospy.init_node("demo_assembly_application_compliance")
| [
"rospy.logerr",
"geometry_msgs.msg.TransformStamped",
"rospy.init_node",
"geometry_msgs.msg.Wrench",
"tf2_ros.StaticTransformBroadcaster",
"numpy.array",
"controller_manager_msgs.srv.ListControllers",
"rospy.Rate",
"tf2_geometry_msgs.do_transform_pose",
"numpy.sin",
"numpy.mod",
"geometry_msgs... | [((43659, 43714), 'rospy.init_node', 'rospy.init_node', (['"""demo_assembly_application_compliance"""'], {}), "('demo_assembly_application_compliance')\n", (43674, 43714), False, 'import rospy\n'), ((1019, 1118), 'rospy.Publisher', 'rospy.Publisher', (['"""/cartesian_compliance_controller/target_wrench"""', 'WrenchStamped'], {'queue_size': '(10)'}), "('/cartesian_compliance_controller/target_wrench',\n WrenchStamped, queue_size=10)\n", (1034, 1118), False, 'import rospy\n'), ((1153, 1247), 'rospy.Publisher', 'rospy.Publisher', (['"""cartesian_compliance_controller/target_frame"""', 'PoseStamped'], {'queue_size': '(2)'}), "('cartesian_compliance_controller/target_frame', PoseStamped,\n queue_size=2)\n", (1168, 1247), False, 'import rospy\n'), ((1283, 1352), 'rospy.Publisher', 'rospy.Publisher', (['"""adjusted_wrench_force"""', 'WrenchStamped'], {'queue_size': '(2)'}), "('adjusted_wrench_force', WrenchStamped, queue_size=2)\n", (1298, 1352), False, 'import rospy\n'), ((1419, 1486), 'rospy.Publisher', 'rospy.Publisher', (['"""/assembly_tools/avg_wrench"""', 'Wrench'], {'queue_size': '(5)'}), "('/assembly_tools/avg_wrench', Wrench, queue_size=5)\n", (1434, 1486), False, 'import rospy\n'), ((1525, 1590), 'rospy.Publisher', 'rospy.Publisher', (['"""/assembly_tools/avg_speed"""', 'Point'], {'queue_size': '(5)'}), "('/assembly_tools/avg_speed', Point, queue_size=5)\n", (1540, 1590), False, 'import rospy\n'), ((1629, 1697), 'rospy.Publisher', 'rospy.Publisher', (['"""/assembly_tools/rel_position"""', 'Point'], {'queue_size': '(5)'}), "('/assembly_tools/rel_position', Point, queue_size=5)\n", (1644, 1697), False, 'import rospy\n'), ((1737, 1800), 'rospy.Publisher', 'rospy.Publisher', (['"""/assembly_tools/status"""', 'String'], {'queue_size': '(5)'}), "('/assembly_tools/status', String, queue_size=5)\n", (1752, 1800), False, 'import rospy\n'), ((1840, 1972), 'rospy.Subscriber', 'rospy.Subscriber', 
(['"""/cartesian_compliance_controller/ft_sensor_wrench/"""', 'WrenchStamped', 'self.callback_update_wrench'], {'queue_size': '(2)'}), "('/cartesian_compliance_controller/ft_sensor_wrench/',\n WrenchStamped, self.callback_update_wrench, queue_size=2)\n", (1856, 1972), False, 'import rospy\n'), ((2259, 2300), 'tf2_ros.TransformListener', 'tf2_ros.TransformListener', (['self.tf_buffer'], {}), '(self.tf_buffer)\n', (2284, 2300), False, 'import tf2_ros\n'), ((2339, 2375), 'tf2_ros.StaticTransformBroadcaster', 'tf2_ros.StaticTransformBroadcaster', ([], {}), '()\n', (2373, 2375), False, 'import tf2_ros\n'), ((2725, 2756), 'rospy.Rate', 'rospy.Rate', (['self._rate_selected'], {}), '(self._rate_selected)\n', (2735, 2756), False, 'import rospy\n'), ((4410, 4418), 'geometry_msgs.msg.Wrench', 'Wrench', ([], {}), '()\n', (4416, 4418), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((4588, 4613), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4596, 4613), True, 'import numpy as np\n'), ((5126, 5161), 'rospy.get_param', 'rospy.get_param', (['"""/task/target_peg"""'], {}), "('/task/target_peg')\n", (5141, 5161), False, 'import rospy\n'), ((5204, 5240), 'rospy.get_param', 'rospy.get_param', (['"""/task/target_hole"""'], {}), "('/task/target_hole')\n", (5219, 5240), False, 'import rospy\n'), ((5283, 5320), 'rospy.get_param', 'rospy.get_param', (['"""/task/starting_tcp"""'], {}), "('/task/starting_tcp')\n", (5298, 5320), False, 'import rospy\n'), ((5477, 5520), 'rospy.get_param', 'rospy.get_param', (['"""/algorithm/spiral_params"""'], {}), "('/algorithm/spiral_params')\n", (5492, 5520), False, 'import rospy\n'), ((5632, 5721), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_peg + '/grasping/pinch_grasping/locations')"], {}), "('/objects/' + self.target_peg +\n '/grasping/pinch_grasping/locations')\n", (5647, 5721), False, 'import rospy\n'), 
((5904, 5922), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (5920, 5922), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((7509, 7557), 'rospy.get_param', 'rospy.get_param', (['"""/task/assumed_starting_height"""'], {}), "('/task/assumed_starting_height')\n", (7524, 7557), False, 'import rospy\n'), ((7616, 7655), 'rospy.get_param', 'rospy.get_param', (['"""/task/restart_height"""'], {}), "('/task/restart_height')\n", (7631, 7655), False, 'import rospy\n'), ((8090, 8150), 'rospy.get_param', 'rospy.get_param', (['"""/environment_state/task_frame/orientation"""'], {}), "('/environment_state/task_frame/orientation')\n", (8105, 8150), False, 'import rospy\n'), ((8324, 8394), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_hole + '/local_orientation')"], {}), "('/objects/' + self.target_hole + '/local_orientation')\n", (8339, 8394), False, 'import rospy\n'), ((8667, 8755), 'tf2_geometry_msgs.do_transform_pose', 'tf2_geometry_msgs.do_transform_pose', (['pose_task_board_to_hole', 'tf_robot_to_task_board'], {}), '(pose_task_board_to_hole,\n tf_robot_to_task_board)\n', (8702, 8755), False, 'import tf2_geometry_msgs\n'), ((12016, 12034), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (12032, 12034), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((13132, 13145), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (13143, 13145), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((13204, 13223), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (13221, 13223), False, 'import rospy\n'), ((17923, 17941), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (17939, 17941), False, 
'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((19674, 19925), 'geometry_msgs.msg.Point', 'Point', (["(-1 * self.tool_data[self.activeTCP]['transform'].transform.translation.x)", "(-1 * self.tool_data[self.activeTCP]['transform'].transform.translation.y)", "(-1 * (self.tool_data[self.activeTCP]['transform'].transform.translation.z -\n 0.05))"], {}), "(-1 * self.tool_data[self.activeTCP]['transform'].transform.\n translation.x, -1 * self.tool_data[self.activeTCP]['transform'].\n transform.translation.y, -1 * (self.tool_data[self.activeTCP][\n 'transform'].transform.translation.z - 0.05))\n", (19679, 19925), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((20850, 20863), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (20861, 20863), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((20924, 20931), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (20929, 20931), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((20953, 20965), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (20963, 20965), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((21313, 21332), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (21330, 21332), False, 'import rospy\n'), ((23024, 23037), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (23035, 23037), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((23068, 23087), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (23085, 
23087), False, 'import rospy\n'), ((23148, 23182), 'tf.transformations.quaternion_from_matrix', 'trfm.quaternion_from_matrix', (['input'], {}), '(input)\n', (23175, 23182), True, 'import tf.transformations as trfm\n'), ((24427, 24455), 'modern_robotics.Adjoint', 'homogeneous_to_adjoint', (['T_ab'], {}), '(T_ab)\n', (24449, 24455), True, 'from modern_robotics import Adjoint as homogeneous_to_adjoint, RpToTrans\n'), ((24806, 24919), 'numpy.array', 'np.array', (['[wrench.torque.x, wrench.torque.y, wrench.torque.z, wrench.force.x, wrench.\n force.y, wrench.force.z]'], {}), '([wrench.torque.x, wrench.torque.y, wrench.torque.z, wrench.force.x,\n wrench.force.y, wrench.force.z])\n', (24814, 24919), True, 'import numpy as np\n'), ((27057, 27082), 'numpy.matmul', 'np.matmul', (['Ad_T.T', 'wrench'], {}), '(Ad_T.T, wrench)\n', (27066, 27082), True, 'import numpy as np\n'), ((28791, 28840), 'rospy.logerr', 'rospy.logerr', (['"""Invalid input to swap_pose_tf !!!"""'], {}), "('Invalid input to swap_pose_tf !!!')\n", (28803, 28840), False, 'import rospy\n'), ((29177, 29192), 'geometry_msgs.msg.WrenchStamped', 'WrenchStamped', ([], {}), '()\n', (29190, 29192), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((29210, 29218), 'geometry_msgs.msg.Wrench', 'Wrench', ([], {}), '()\n', (29216, 29218), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((29493, 29512), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (29510, 29512), False, 'import rospy\n'), ((30532, 30760), 'geometry_msgs.msg.Point', 'Point', (["self.tool_data[self.activeTCP]['transform'].transform.translation.x", "self.tool_data[self.activeTCP]['transform'].transform.translation.y", "(self.tool_data[self.activeTCP]['transform'].transform.translation.z - 0.05)"], {}), 
"(self.tool_data[self.activeTCP]['transform'].transform.translation.x,\n self.tool_data[self.activeTCP]['transform'].transform.translation.y, \n self.tool_data[self.activeTCP]['transform'].transform.translation.z - 0.05)\n", (30537, 30760), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((35097, 35128), 'numpy.array', 'np.array', (['[vec.x, vec.y, vec.z]'], {}), '([vec.x, vec.y, vec.z])\n', (35105, 35128), True, 'import numpy as np\n'), ((41720, 41766), 'numpy.array', 'np.array', (["[speed['x'], speed['y'], speed['z']]"], {}), "([speed['x'], speed['y'], speed['z']])\n", (41728, 41766), True, 'import numpy as np\n'), ((42408, 42449), 'geometry_msgs.msg.Point', 'Point', (["input['x']", "input['y']", "input['z']"], {}), "(input['x'], input['y'], input['z'])\n", (42413, 42449), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((2179, 2201), 'rospy.Duration', 'rospy.Duration', (['(1200.0)'], {}), '(1200.0)\n', (2193, 2201), False, 'import rospy\n'), ((2575, 2593), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (2591, 2593), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((2619, 2637), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (2635, 2637), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((2935, 2954), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (2952, 2954), False, 'import rospy\n'), ((9433, 9504), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_peg + '/dimensions/diameter')"], {}), "('/objects/' + self.target_peg + '/dimensions/diameter')\n", (9448, 9504), False, 'import rospy\n'), ((9541, 9618), 
'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_peg + '/tolerance/upper_tolerance')"], {}), "('/objects/' + self.target_peg + '/tolerance/upper_tolerance')\n", (9556, 9618), False, 'import rospy\n'), ((9651, 9728), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_peg + '/tolerance/lower_tolerance')"], {}), "('/objects/' + self.target_peg + '/tolerance/lower_tolerance')\n", (9666, 9728), False, 'import rospy\n'), ((9761, 9833), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_hole + '/dimensions/diameter')"], {}), "('/objects/' + self.target_hole + '/dimensions/diameter')\n", (9776, 9833), False, 'import rospy\n'), ((9870, 9948), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_hole + '/tolerance/upper_tolerance')"], {}), "('/objects/' + self.target_hole + '/tolerance/upper_tolerance')\n", (9885, 9948), False, 'import rospy\n'), ((9981, 10059), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_hole + '/tolerance/lower_tolerance')"], {}), "('/objects/' + self.target_hole + '/tolerance/lower_tolerance')\n", (9996, 10059), False, 'import rospy\n'), ((10096, 10182), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_peg + '/dimensions/min_insertion_depth')"], {}), "('/objects/' + self.target_peg +\n '/dimensions/min_insertion_depth')\n", (10111, 10182), False, 'import rospy\n'), ((11288, 11334), 'rospy.logerr', 'rospy.logerr', (['"""Trying to publish headless TF!"""'], {}), "('Trying to publish headless TF!')\n", (11300, 11334), False, 'import rospy\n'), ((13294, 13391), 'tf.transformations.quaternion_from_euler', 'trfm.quaternion_from_euler', (['(ori[0] * np.pi / 180)', '(ori[1] * np.pi / 180)', '(ori[2] * np.pi / 180)'], {}), '(ori[0] * np.pi / 180, ori[1] * np.pi / 180, ori[\n 2] * np.pi / 180)\n', (13320, 13391), True, 'import tf.transformations as trfm\n'), ((13408, 13458), 'geometry_msgs.msg.Point', 'Point', (['(pos[0] / 1000)', '(pos[1] / 1000)', 
'(pos[2] / 1000)'], {}), '(pos[0] / 1000, pos[1] / 1000, pos[2] / 1000)\n', (13413, 13458), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((13453, 13503), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['tempQ[0]', 'tempQ[1]', 'tempQ[2]', 'tempQ[3]'], {}), '(tempQ[0], tempQ[1], tempQ[2], tempQ[3])\n', (13463, 13503), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((14226, 14330), 'rospy.logerr_throttle', 'rospy.logerr_throttle', (['(2)', '("Tool selection key error! No key \'" + tool_name + "\' in tool dictionary.")'], {}), '(2, "Tool selection key error! No key \'" + tool_name +\n "\' in tool dictionary.")\n', (14247, 14330), False, 'import rospy\n'), ((14740, 14759), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (14757, 14759), False, 'import rospy\n'), ((14982, 15031), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * frequency * curr_time_numpy)'], {}), '(2.0 * np.pi * frequency * curr_time_numpy)\n', (14988, 15031), True, 'import numpy as np\n'), ((15060, 15109), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * frequency * curr_time_numpy)'], {}), '(2.0 * np.pi * frequency * curr_time_numpy)\n', (15066, 15109), True, 'import numpy as np\n'), ((19620, 19633), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (19630, 19633), False, 'import rospy\n'), ((19635, 19655), 'rospy.Duration', 'rospy.Duration', (['(1.25)'], {}), '(1.25)\n', (19649, 19655), False, 'import rospy\n'), ((21742, 21803), 'tf.transformations.inverse_matrix', 'trfm.inverse_matrix', (["self.tool_data[self.activeTCP]['matrix']"], {}), "(self.tool_data[self.activeTCP]['matrix'])\n", (21761, 21803), True, 'import tf.transformations as trfm\n'), ((21863, 21894), 'numpy.dot', 'np.dot', (['goal_matrix', 'backing_mx'], {}), '(goal_matrix, backing_mx)\n', (21869, 21894), True, 'import numpy as np\n'), ((22532, 22574), 
'numpy.array', 'np.array', (['[quat.x, quat.y, quat.z, quat.w]'], {}), '([quat.x, quat.y, quat.z, quat.w])\n', (22540, 22574), True, 'import numpy as np\n'), ((24389, 24410), 'modern_robotics.RpToTrans', 'RpToTrans', (['R_ab', 'P_ab'], {}), '(R_ab, P_ab)\n', (24398, 24410), False, 'from modern_robotics import Adjoint as homogeneous_to_adjoint, RpToTrans\n'), ((26165, 26192), 'tf.transformations.inverse_matrix', 'trfm.inverse_matrix', (['matrix'], {}), '(matrix)\n', (26184, 26192), True, 'import tf.transformations as trfm\n'), ((28126, 28144), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (28142, 28144), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((30096, 30109), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (30106, 30109), False, 'import rospy\n'), ((30111, 30131), 'rospy.Duration', 'rospy.Duration', (['(1.25)'], {}), '(1.25)\n', (30125, 30131), False, 'import rospy\n'), ((32626, 32645), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (32643, 32645), False, 'import rospy\n'), ((34272, 34346), 'geometry_msgs.msg.Point', 'Point', (['self.average_speed[0]', 'self.average_speed[1]', 'self.average_speed[2]'], {}), '(self.average_speed[0], self.average_speed[1], self.average_speed[2])\n', (34277, 34346), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((39214, 39246), 'rospy.logerr', 'rospy.logerr', (['"""Killing program."""'], {}), "('Killing program.')\n", (39226, 39246), False, 'import rospy\n'), ((39858, 39913), 'rospy.logerr', 'rospy.logerr', (['"""Sleeping for 1s to damp oscillations..."""'], {}), "('Sleeping for 1s to damp oscillations...')\n", (39870, 39913), False, 'import rospy\n'), ((39967, 39981), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (39978, 39981), False, 'import rospy\n'), ((41658, 41693), 
'geometry_msgs.msg.Point', 'Point', (['input[0]', 'input[1]', 'input[2]'], {}), '(input[0], input[1], input[2])\n', (41663, 41693), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((42610, 42622), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (42618, 42622), True, 'import numpy as np\n'), ((42691, 42753), 'numpy.floor', 'np.floor', (['(self._buffer_window[key] * self._rate_selected / 100)'], {}), '(self._buffer_window[key] * self._rate_selected / 100)\n', (42699, 42753), True, 'import numpy as np\n'), ((43001, 43050), 'numpy.append', 'np.append', (['self._data_buffer[key]', 'new_data_point'], {}), '(self._data_buffer[key], new_data_point)\n', (43010, 43050), True, 'import numpy as np\n'), ((43198, 43247), 'numpy.append', 'np.append', (['self._data_buffer[key]', 'new_data_point'], {}), '(self._data_buffer[key], new_data_point)\n', (43207, 43247), True, 'import numpy as np\n'), ((43318, 43354), 'numpy.delete', 'np.delete', (['self._data_buffer[key]', '(0)'], {}), '(self._data_buffer[key], 0)\n', (43327, 43354), True, 'import numpy as np\n'), ((6971, 6984), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (6981, 6984), False, 'import rospy\n'), ((6986, 7005), 'rospy.Duration', 'rospy.Duration', (['(1.0)'], {}), '(1.0)\n', (7000, 7005), False, 'import rospy\n'), ((7955, 8012), 'rospy.get_param', 'rospy.get_param', (['"""/environment_state/task_frame/position"""'], {}), "('/environment_state/task_frame/position')\n", (7970, 8012), False, 'import rospy\n'), ((8183, 8250), 'rospy.get_param', 'rospy.get_param', (["('/objects/' + self.target_hole + '/local_position')"], {}), "('/objects/' + self.target_hole + '/local_position')\n", (8198, 8250), False, 'import rospy\n'), ((14892, 14953), 'numpy.mod', 'np.mod', (['(2.0 * np.pi * frequency * curr_time_numpy)', 'max_cycles'], {}), '(2.0 * np.pi * frequency * curr_time_numpy, max_cycles)\n', (14898, 14953), True, 'import numpy 
as np\n'), ((18278, 18291), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (18288, 18291), False, 'import rospy\n'), ((18293, 18313), 'rospy.Duration', 'rospy.Duration', (['(10.0)'], {}), '(10.0)\n', (18307, 18313), False, 'import rospy\n'), ((18470, 18483), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (18480, 18483), False, 'import rospy\n'), ((18485, 18505), 'rospy.Duration', 'rospy.Duration', (['(10.0)'], {}), '(10.0)\n', (18499, 18505), False, 'import rospy\n'), ((28648, 28661), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (28659, 28661), False, 'from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform\n'), ((43602, 43612), 'numpy.ones', 'np.ones', (['w'], {}), '(w)\n', (43609, 43612), True, 'import numpy as np\n'), ((32697, 32716), 'rospy.Duration', 'rospy.Duration', (['(0.5)'], {}), '(0.5)\n', (32711, 32716), False, 'import rospy\n'), ((32943, 32962), 'rospy.Duration', 'rospy.Duration', (['(2.0)'], {}), '(2.0)\n', (32957, 32962), False, 'import rospy\n'), ((4691, 4708), 'controller_manager_msgs.srv.ListControllers', 'ListControllers', ([], {}), '()\n', (4706, 4708), False, 'from controller_manager_msgs.srv import SwitchController, LoadController, ListControllers\n'), ((32904, 32920), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (32918, 32920), False, 'import rospy\n'), ((32923, 32942), 'rospy.Duration', 'rospy.Duration', (['(0.1)'], {}), '(0.1)\n', (32937, 32942), False, 'import rospy\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
use this code to extract these four metrics:
1. ROUGE
2. METEOR
3. REPETITION WITHIN SUMMARY
4. OVERLAP WITH ARTICLE
5. AVG SENTS and LEN SUMMARIES
"""
import os
import glob
import json
import pyrouge
import hashlib
import logging
import subprocess
import numpy as np
from nltk import ngrams
import tensorflow as tf
from collections import Counter
from subprocess import CalledProcessError
from sklearn.feature_extraction.text import CountVectorizer
################ ARTICLE OVERLAP ################
def get_overlap_all(summary_path, article_path, num, gold=False):
    """Print the average n-gram overlap between each summary and its source article.

    :param summary_path: dir containing "decoded/" (or "reference/" when gold=True)
    :param article_path: dir with "articles/*.txt", or a single file with one article per line
    :param num: largest n-gram length to evaluate
    :param gold: when True, score the reference summaries instead of the decoded ones
    """
    if os.path.isdir(article_path):
        art_path = os.path.join(article_path, "articles/*.txt")
        art_files = sorted(glob.glob(art_path))
    else:
        # Single file: each line is one (possibly multi-sentence) article.
        with open(article_path, 'r') as art_reader:  # close the handle instead of leaking it
            art_files = art_reader.readlines()
    if gold:
        sum_path = os.path.join(summary_path, "reference/*.txt")  # should be decoded for all except gold labels
    else:
        sum_path = os.path.join(summary_path, "decoded/*.txt")
    sum_files = sorted(glob.glob(sum_path))
    # ngram length -> accumulated overlap percentage across all summaries
    match_count = dict()
    for idx, sum_file in enumerate(sum_files):
        if os.path.isdir(article_path):
            with open(art_files[idx], 'r') as art_reader:
                art = art_reader.read()
        else:
            art = art_files[idx]
        art = art.replace("\n", " ")
        with open(sum_file, 'r') as sum_reader:
            summ = sum_reader.read()
        summ = summ.replace("\n", " ")
        get_overlap(art, summ, match_count, num)
    print("")
    ngram_wanted = [2, 3, 5, 8, 10, 12, 15, 18, 20, 25]
    # .items() (not py2-only .iteritems()) keeps this runnable on Python 2 and 3
    for key, value in match_count.items():
        if key in ngram_wanted:
            print("%.3f"%(value/float(len(sum_files))))
# Recorded average overlap percentages, one per entry of
# ngram_wanted = [2, 3, 5, 8, 10, 12, 15, 18, 20, 25]
# (presumably from a previous get_overlap_all run -- TODO confirm provenance).
AVG_OVERLAP = [94.112, 87.054, 75.152, 59.769, 50.363, 41.681, 30.951, 22.996, 18.916, 11.902]
# Example usage:
# get_overlap('to ngramize it i', 'this is a foo bar sentences and i want to ngramize it', num=5)
def get_overlap(article, summary, match_count=None, num=5):
    """Accumulate, per n-gram length 1..num, the percentage of summary n-grams
    that also occur in the (first 400 tokens of the) article.

    :param article: (str) source article text
    :param summary: (str) summary text
    :param match_count: (dict) n -> running sum of overlap percentages; mutated in place
    :param num: (int) largest n-gram length to evaluate
    :return: (dict) the (possibly newly created) match_count
    """
    if match_count is None:
        match_count = dict()
    # Hoist the loop-invariant tokenizations; only the first 400 article tokens count.
    art_tokens = article.split()[:400]
    sum_tokens = summary.split()
    for n in range(1, num + 1):
        # zip of shifted slices yields the same consecutive-token tuples as nltk.ngrams.
        art_ngrams = set(zip(*(art_tokens[i:] for i in range(n))))  # set: O(1) membership
        sum_ngrams = list(zip(*(sum_tokens[i:] for i in range(n))))
        matches = sum(1 for gram in sum_ngrams if gram in art_ngrams)
        if n not in match_count:
            match_count[n] = 0  # initialize
        tot_len = float(len(sum_ngrams))
        if tot_len == 0:  # handle zero div error
            tot_len = 1
        match_count[n] += 100 * (matches / tot_len)  # update fraction
    return match_count
################ SUMMARY REPETITION ################
# Module-level bag-of-words vectorizer shared by count_bow_repeated_sentences.
vectorizer = CountVectorizer()
def count_repeated_sentences(base_path, gold=False):
    '''
    read all files from base_path and finds repetition in a file: exact and approximate

    :param base_path: dir containing "decoded/" (or "reference/" when gold=True)
    :param gold: when True, inspect the reference summaries instead of the decoded ones
    :return: (exact repeat word-offsets per summary, approx repeat word-offsets per summary,
              filenames with exact matches, filenames with approx-only matches)
    '''
    if gold:
        hyp_path = os.path.join(base_path, "reference/*.txt")  # should be decoded for all except gold labels
    else:
        hyp_path = os.path.join(base_path, "decoded/*.txt")
    hypsfilelist = sorted(glob.glob(hyp_path))
    corpus_repeat_fnames = []
    corpus_bow_repeat_fnames = []
    corpus_len_dist = dict()
    corpus_repeat_len_dist = dict()
    corpus_repeat_indices = []
    corpus_bow_repeat_indices = []
    for idx, fname in enumerate(hypsfilelist):
        with open(fname, 'r') as doc:  # close each handle instead of leaking it
            sentences = doc.readlines()
        sentences = [sentence.strip() for sentence in sentences]
        count_exact_repeated_sentences(sentences, corpus_repeat_fnames, fname, corpus_repeat_len_dist, corpus_repeat_indices)
        # histogram of all sentence lengths (chars), regardless of repetition
        for sentence in sentences:
            corpus_len_dist[len(sentence)] = 0 if len(sentence) not in corpus_len_dist else corpus_len_dist[len(sentence)] + 1
    print('\navg repetition: %.3f' %( 100 * (len(corpus_repeat_fnames)/float(len(hypsfilelist)))) )
    files_with_exact_matches = corpus_repeat_fnames
    # approx matches only come from count_bow_repeated_sentences (currently not invoked)
    files_with_approx_matches = sorted(set(corpus_bow_repeat_fnames) - set(corpus_repeat_fnames))
    return corpus_repeat_indices, corpus_bow_repeat_indices, files_with_exact_matches, files_with_approx_matches
def count_exact_repeated_sentences(sentences, corpus_repeat_fnames, fname, corpus_repeat_len_dist, corpus_repeat_indices):
    '''
    finds exact repetition by comparing hash of strings

    :param sentences: (list of str) stripped sentences of one summary
    :param corpus_repeat_fnames: (list) mutated: receives fname's basename when a repeat exists
    :param fname: (str) path of the summary file being checked
    :param corpus_repeat_len_dist: (dict) mutated: length histogram of repeated sentences
    :param corpus_repeat_indices: (list) mutated: per-summary list of word offsets of repeats
    '''
    # BUG FIX: md5 requires bytes on Python 3; .encode('utf-8') also works on
    # Python 2 for the ASCII text handled here.
    hashes = [hashlib.md5(sentence.encode('utf-8')).hexdigest() for sentence in sentences]
    # duplicate hashes imply duplicate sentences in the list
    if len(hashes) > len(set(hashes)):
        corpus_repeat_fnames.append(fname.split('/')[-1])
    # hashes that occur more than once
    repeated_hash = [k for k, v in Counter(hashes).items() if v > 1]
    repeat_indices = []
    for hsh in repeated_hash:
        # indices of every sentence sharing this hash
        indices = [i for i, x in enumerate(hashes) if x == hsh]
        indx = indices[0]
        corpus_repeat_len_dist[len(sentences[indx])] = 0 if len(sentences[indx]) not in corpus_repeat_len_dist else corpus_repeat_len_dist[len(sentences[indx])] + 1
        for r_idx in indices:
            # word offset of the sentence start within the whole summary
            repeat_indx = len(" ".join(s for s in sentences[:r_idx]).split(" "))  # start from 0 so no need to add +1
            repeat_indices.append(repeat_indx)
    corpus_repeat_indices.append(repeat_indices)
# NOTE(review): previously flagged "TODO: this need some fixing"; the epsilon and
# pair-index bugs below are now fixed, but the function is still unused upstream.
def count_bow_repeated_sentences(sentences, corpus_bow_repeat_fnames, fname, corpus_bow_repeat_indices):
    '''
    finds sentence pairs whose bag-of-words vectors overlap >= 90% (approximate repetition)

    :param sentences: (list of str) stripped sentences of one summary
    :param corpus_bow_repeat_fnames: (list) mutated: receives fname's basename when a repeat exists
    :param fname: (str) path of the summary file being checked
    :param corpus_bow_repeat_indices: (list) mutated: per-summary word offsets of repeats
    '''
    repeat = False
    repeat_indices = []
    indices = []
    X = vectorizer.fit_transform(sentences)
    X = X.toarray()
    X = X == 1  # boolean presence mask
    for idx1, row1 in enumerate(X[:-1]):
        for idx2, row2 in enumerate(X[idx1+1:]):
            # BUG FIX: original epsilon was "10^-7", which is XOR, not 1e-7.
            if np.sum(row1 & row2)/(np.sum(row1) + 1e-7) >= 0.9:
                repeat = True
                # BUG FIX: row2 sits at absolute index idx1+1+idx2 (enumerate
                # restarts at 0 on the slice), not always idx1+1.
                repeat_indices.extend([idx1, idx1+1+idx2])
    for r_idx in repeat_indices:
        # word offset of the sentence start within the whole summary
        repeat_indx = len(" ".join(s for s in sentences[:r_idx]).split(" "))  # start from 0 so no need to add +1
        indices.append(repeat_indx)
    if repeat:
        corpus_bow_repeat_fnames.append(fname.split('/')[-1])
    corpus_bow_repeat_indices.append(indices)
################ AVG LEN + SENTS ################
def get_avg_stats(base_path, gold=False):
    """Print the average sentence count and word count of the summaries in base_path.

    :param base_path: dir containing "decoded/" (or "reference/" when gold=True)
    :param gold: when True, measure the reference summaries instead of the decoded ones
    """
    if gold:
        hyp_path = os.path.join(base_path, "reference/*.txt")  # should be decoded for all except gold labels
    else:
        hyp_path = os.path.join(base_path, "decoded/*.txt")
    hypsfilelist = sorted(glob.glob(hyp_path))
    total_nsentence = 0
    total_words = 0
    for f in hypsfilelist:
        with open(f, 'r') as reader:  # close each handle instead of leaking it
            hyp = reader.read()
        hyp = hyp.replace("\n", " ")
        total_nsentence += len(hyp.strip().split("."))  # sentences are separated by "."
        total_words += len(hyp.strip().split())  # words are separated by whitespace
    avg_nsentence, avg_length = total_nsentence/float(len(hypsfilelist)), total_words/float(len(hypsfilelist))
    print("\navg num sentences per summary: %.3f"%avg_nsentence)
    print("avg length of a summary: %.3f"%avg_length)
################ METEOR ################
def evaluate_meteor(base_path, exact=False):
    """Run the METEOR jar on reference/decoded summary pairs and print its scores.

    :param base_path: dir containing "reference/*.txt" and "decoded/*.txt"
    :param exact: when True, use the exact matcher only; otherwise exact+stem+syn+para
    """
    ref_path = os.path.join(base_path, "reference/*.txt")
    hyp_path = os.path.join(base_path, "decoded/*.txt")
    refsfilelist = sorted(glob.glob(ref_path))
    hypsfilelist = sorted(glob.glob(hyp_path))
    refs = []
    hyps = []
    for f in refsfilelist:
        with open(f, 'r') as reader:  # close each handle instead of leaking it
            ref = reader.read()
        refs.append(ref.replace("\n", " "))
    ref_filename = os.path.join(base_path, "_temp_refs")  # temp file
    with open(ref_filename, "w") as myfile:
        for ref in refs:
            myfile.write("%s\n"%ref)
    for f in hypsfilelist:
        with open(f, 'r') as reader:
            hyp = reader.read()
        hyps.append(hyp.replace("\n", " "))
    hyp_filename = os.path.join(base_path, "_temp_hyps")  # temp file
    with open(hyp_filename, "w") as myfile:
        for hyp in hyps:
            myfile.write("%s\n"%hyp)
    assert len(refs) == len(hyps), "length of references and hypothesis are different."
    # NOTE(review): shell=True with interpolated paths is shell-injection-prone;
    # the paths are trusted local dirs here, but an argv list + shell=False is safer.
    if exact:
        # exact matcher only
        cmd = 'java -Xmx2G -jar meteor-1.5/meteor-1.5.jar "%s" "%s" -norm -m exact' % (hyp_filename, ref_filename)
    else:
        # exact + stem + syn + para
        cmd = 'java -Xmx2G -jar meteor-1.5/meteor-1.5.jar "%s" "%s" -norm' % (hyp_filename, ref_filename)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except CalledProcessError as e:
        output = e.output
    # remove the temp files
    if os.path.exists(ref_filename):
        os.remove(ref_filename)
    if os.path.exists(hyp_filename):
        os.remove(hyp_filename)
    # parse METEOR's stdout for the module list and the final score
    final_score = None
    result_str = ""
    wanted = ["Modules"]
    for _l in output.split('\n'):
        l = _l.strip()
        if len(l) == 0:
            continue
        tokens = l.split(":")
        if len(tokens) != 2:
            continue
        if tokens[0] == "Final score":
            final_score = float(tokens[1].strip())*100.0
            result_str += "%s"%l
            break
        elif tokens[0] in wanted and not tokens[0].startswith("Segment"):
            result_str += "%s\n"%l
    print("\n METEOR SCORES: \n%s"%result_str)
################ ROUGE ################
def rouge_eval(base_path):
    """Run pyrouge over base_path/reference vs base_path/decoded and log the result."""
    scorer = pyrouge.Rouge155()
    scorer.model_filename_pattern = '#ID#_reference.txt'
    scorer.system_filename_pattern = '(\d+)_decoded.txt'
    scorer.model_dir = os.path.join(base_path, "reference")
    scorer.system_dir = os.path.join(base_path, "decoded")
    # pyrouge is chatty at INFO level; raise the bar to WARNING.
    logging.getLogger('global').setLevel(logging.WARNING)
    raw_output = scorer.convert_and_evaluate()
    rouge_log(scorer.output_to_dict(raw_output), base_path)
def rouge_log(results_dict, dir_to_write):
    """Log ROUGE results to screen and write to file.
    Args:
      results_dict: the dictionary returned by pyrouge
      dir_to_write: the directory where we will write the results to"""
    # Build the report once, then print it and persist it to disk.
    parts = []
    for metric in ["1", "2", "l"]:
        parts.append("\nROUGE-%s:\n" % metric)
        for stat in ["f_score", "recall", "precision"]:
            key = "rouge_%s_%s" % (metric, stat)
            val = results_dict[key]
            # _cb / _ce are the lower / upper confidence-interval bounds.
            val_cb = results_dict[key + "_cb"]
            val_ce = results_dict[key + "_ce"]
            parts.append("%s: %.4f with confidence interval (%.4f, %.4f)\n"
                         % (key, val, val_cb, val_ce))
    log_str = "".join(parts)
    print(log_str)  # log to screen
    results_file = os.path.join(dir_to_write, "ROUGE_results.txt")
    tf.logging.info("Writing final ROUGE results to %s...", results_file)
    with open(results_file, "w") as f:
        f.write(log_str)
def extract_from_json(base_path):
    """Average decoding statistics stored in ``base_path/attn_vis/*.json``.

    Each file ``%06d_attn_vis_data.json`` holds either a scalar ``log_prob``
    or a list ``log_probs``, plus scalar ``avg_log_prob`` and ``avg_pgen``
    entries.  Prints the three averages over all files.

    Parameters
    ----------
    base_path : str
        Directory containing an ``attn_vis`` subdirectory of json files.
    """
    attn_vis_path = os.path.join(base_path, "attn_vis")
    num = len(glob.glob(attn_vis_path + "/*.json"))
    if num == 0:
        # Guard: the original crashed with ZeroDivisionError on an empty dir.
        print("No attn_vis json files found in %s" % attn_vis_path)
        return
    total_log_prob = 0.0
    total_sum_log_prob = 0.0
    total_pgen = 0.0
    for idx in range(num):  # files are numbered 000000 .. num-1
        # `with` closes the handle (the original's json.load(open(...)) leaked it).
        with open(attn_vis_path + "/%06d_attn_vis_data.json" % idx) as fh:
            json_data = json.load(fh)
        try:
            total_log_prob += json_data['log_prob']  # one value
        except KeyError:
            # Fix: the bare `except:` also swallowed typos and interrupts;
            # only a missing key should fall back to the list variant.
            total_log_prob += sum(json_data['log_probs'])  # list
        total_sum_log_prob += json_data["avg_log_prob"]  # one value
        total_pgen += json_data["avg_pgen"]  # one value
    print("avg_log_prob: %.3f" % (total_log_prob / num))
    print("avg_sum_log_prob: %.3f" % (total_sum_log_prob / num))
    print("avg_pgen: %.3f" % (total_pgen / num))
def get_all_stats(base_path, article_path, gold=False, scores=False, exact=False, baseline=False):
    """Run the full evaluation suite for one decode directory.

    ROUGE + METEOR run for system outputs (``scores`` and not ``gold``) and
    again for ``baseline``; the order-independent statistics always run.
    """
    def _score_metrics():
        # Shared ROUGE + METEOR pass (default modules and exact-match METEOR).
        rouge_eval(base_path)
        evaluate_meteor(base_path)
        evaluate_meteor(base_path, exact=True)

    if scores and not gold:
        _score_metrics()
        # extract_from_json(base_path)
    if baseline:
        _score_metrics()
    # ordering of samples does not matter in the following stats
    get_avg_stats(base_path, gold=gold)
    count_repeated_sentences(base_path, gold=gold)
    get_overlap_all(base_path, article_path, num=30, gold=gold)
# NOTE(review): machine-specific absolute paths -- adjust for your checkout.
# this points to all the merged test articles
article_path = "/home/leena/Documents/thesis/pointer-gen/test_output/all_test_article_truncated.txt"
# this points to all the merged validation articles
val_article_path = "/home/leena/Documents/thesis/pointer-gen/test_output/all_val_article_truncated.txt"
###############################################
# Below Experiments were run
###############################################
#### these would be reported on the test dataset ####
#print("\n***Baseline***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/test_output/temp/", "/home/leena/Documents/thesis/pointer-gen/test_output/temp/", baseline=True)
#print("\n***Reference***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/test_output/", article_path, gold=True, scores=True)
#print("\n***Pre Coverage***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/dont_delete_8000_decode_test_400maxenc_4beam_35mindec_120maxdec_ckpt-237470/all_decode_test_400maxenc_4beam_35mindec_120maxdec_ckpt-237470/", article_path, scores=True)
#
#print("\n***Coverage***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/cov-org-dont-delete/all_decode_test_400maxenc_4beam_35mindec_120maxdec_ckpt-238410/", article_path, scores=True)
# OUR PROPOSED METHOD OUTPUTS [Below 2 didn't work as well so we moved to the increasing penalty one]
#print("\n***Penalty without coverage***\n")
#get_all_stats("/home/leena/Downloads/decode_hinge_4_04_precov_test_full/decode_test_400maxenc_4beam_35mindec_120maxdec_final_test_0_6000penalty-ckpt-237470/", article_path, scores=True)
#
#print("\n***Penalty with coverage***\n")
#get_all_stats("/home/leena/Downloads/decode_hinge_5_04_cov_test_full/decode_test_400maxenc_4beam_35mindec_120maxdec_final_test_cov_0_6000penalty-ckpt-238410/", article_path, scores=True)
##### increasing penalty [Final proposed method]
#print("\n*** Inc Penalty without coverage***\n")
#get_all_stats("/home/leena/Downloads/DECODE_NONCOV_FINAL_1/decode_test_400maxenc_4beam_35mindec_120maxdec_NONCOV_NONLOG_FINAL_0_1400penalty-ckpt-237470/", article_path, scores=True)
#print("\n***Inc Penalty with coverage***\n")
#get_all_stats("/home/leena/Downloads/DECODED_COV_FINAL_NONLOG/decode_test_400maxenc_4beam_35mindec_120maxdec_COV_NONLOG_FINAL_0_3000penalty-ckpt-238410/", article_path, scores=True)
###################################################
#### these would be reported on the val dataset ####
#### pre cov param search experiments ####
#print("\n***pre cov***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/decode_val_400maxenc_4beam_35mindec_120maxdec_pre_cov_val_ckpt-237470/", val_article_path, scores=True)
#print("\n***100-44***\n")
#print("\n***CE 1 40***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/decode_val_400maxenc_4beam_35mindec_120maxdec_pre_cov_val_ce_ckpt-237470/", val_article_path, scores=True)
#print("\n***CE 3 45***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_kenlm-penalty-ckpt-237470/", val_article_path, scores=True)
#print("\n***CE 2 45***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/decode_val_400maxenc_4beam_35mindec_120maxdec_pre_cov_pen_2_target_45_ckpt-237470/", val_article_path, scores=True)
#print("\n***CE 2.5 53***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_kenlm-penalty-ckpt-237470/", val_article_path, scores=True)
#print("\n***40 40***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_40_04penalty-ckpt-237470/", val_article_path, scores=True)
#print("\n***60 40***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_60_04penalty-ckpt-237470/", val_article_path, scores=True)
#print("\n***Hinge margin 5 40 ***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/decode_val_400maxenc_4beam_35mindec_120maxdec_pre_cov_margin_5_target_40ckpt-237470/", val_article_path, scores=True)
#print("\n***Hinge margin 7 40 ***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/decode_val_400maxenc_4beam_35mindec_120maxdec_pre_cov_margin_7_target_40ckpt-237470/", val_article_path, scores=True)
#print("\n***Hinge margin 6 40 ***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/pre-coverage/decode_val_400maxenc_4beam_35mindec_120maxdec_pre_cov_margin_6_target_40ckpt-237470/", val_article_path, scores=True)
#print("\n***Hinge margin 4 40 ***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_hinge_4_04penalty-ckpt-237470/", val_article_path, scores=True)
#print("\n***Non Log Inc***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_non_cov_increasing_nonlog_45_04_01penalty-ckpt-237470/", val_article_path, scores=True)
#### cov param search experiments ####
#print("\n***cov***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_ckpt-238410/", val_article_path, scores=True)
#print("\n***100-44***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/decode_val_400maxenc_4beam_35mindec_120maxdec_trial-ckpt-238410/", val_article_path, scores=True)
#print("\n***80-44***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/decode_val_400maxenc_4beam_35mindec_120maxdec_trial-80-44-ckpt-238410/", val_article_path, scores=True)
#print("\n***80-53***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/decode_val_400maxenc_4beam_35mindec_120maxdec_trial-80-53-ckpt-238410/", val_article_path, scores=True)
#print("\n***70-50***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/decode_val_400maxenc_4beam_35mindec_120maxdec_trial-70-50-ckpt-238410/", val_article_path, scores=True)
#print("\n***80-40***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/decode_val_400maxenc_4beam_35mindec_120maxdec_trial-80-40-ckpt-238410/", val_article_path, scores=True)
#print("\n***70 40***\n")
#get_all_stats("/home/leena/Documents/thesis/pointer-gen/log/cov/decode_val_400maxenc_4beam_35mindec_120maxdec_cov_penalty_70_target_40_ckpt-238410/", val_article_path, scores=True)
#print("\n***Hinge 5 .4***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_hinge_5_04penalty-ckpt-238410/", val_article_path, scores=True)
#print("\n***Hinge 7 .4***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_hinge_7_04penalty-ckpt-238410/", val_article_path, scores=True)
#print("\n***Hinge 4 .4***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_hinge_4_04penalty-ckpt-238410/", val_article_path, scores=True)
#print("\n***Log Inc***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_increasing5_04penalty-ckpt-238410/", val_article_path, scores=True)
#print("\n***Non Log Inc***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_increasing_nonlog_5_04penalty-ckpt-238410/", val_article_path, scores=True)
#print("\n***Non Log Inc 5 04 1***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_increasing_nonlog_5_04_01penalty-ckpt-238410/", val_article_path, scores=True)
#print("\n***Log Inc 5 04 5***\n")
#get_all_stats("/home/leena/Downloads/decode_val_400maxenc_4beam_35mindec_120maxdec_increasing_log_5_04_05penalty-ckpt-238410/", val_article_path, scores=True)
| [
"subprocess.check_output",
"os.path.exists",
"logging.getLogger",
"hashlib.md5",
"sklearn.feature_extraction.text.CountVectorizer",
"tensorflow.logging.info",
"os.path.join",
"collections.Counter",
"pyrouge.Rouge155",
"numpy.sum",
"os.path.isdir",
"nltk.ngrams",
"glob.glob",
"os.remove"
] | [((3759, 3776), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (3774, 3776), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((653, 680), 'os.path.isdir', 'os.path.isdir', (['article_path'], {}), '(article_path)\n', (666, 680), False, 'import os\n'), ((9147, 9189), 'os.path.join', 'os.path.join', (['base_path', '"""reference/*.txt"""'], {}), "(base_path, 'reference/*.txt')\n", (9159, 9189), False, 'import os\n'), ((9203, 9243), 'os.path.join', 'os.path.join', (['base_path', '"""decoded/*.txt"""'], {}), "(base_path, 'decoded/*.txt')\n", (9215, 9243), False, 'import os\n'), ((9599, 9636), 'os.path.join', 'os.path.join', (['base_path', '"""_temp_refs"""'], {}), "(base_path, '_temp_refs')\n", (9611, 9636), False, 'import os\n'), ((9904, 9941), 'os.path.join', 'os.path.join', (['base_path', '"""_temp_hyps"""'], {}), "(base_path, '_temp_hyps')\n", (9916, 9941), False, 'import os\n'), ((10620, 10648), 'os.path.exists', 'os.path.exists', (['ref_filename'], {}), '(ref_filename)\n', (10634, 10648), False, 'import os\n'), ((10686, 10714), 'os.path.exists', 'os.path.exists', (['hyp_filename'], {}), '(hyp_filename)\n', (10700, 10714), False, 'import os\n'), ((11566, 11602), 'os.path.join', 'os.path.join', (['base_path', '"""reference"""'], {}), "(base_path, 'reference')\n", (11578, 11602), False, 'import os\n'), ((11615, 11649), 'os.path.join', 'os.path.join', (['base_path', '"""decoded"""'], {}), "(base_path, 'decoded')\n", (11627, 11649), False, 'import os\n'), ((11659, 11677), 'pyrouge.Rouge155', 'pyrouge.Rouge155', ([], {}), '()\n', (11675, 11677), False, 'import pyrouge\n'), ((12733, 12780), 'os.path.join', 'os.path.join', (['dir_to_write', '"""ROUGE_results.txt"""'], {}), "(dir_to_write, 'ROUGE_results.txt')\n", (12745, 12780), False, 'import os\n'), ((12784, 12853), 'tensorflow.logging.info', 'tf.logging.info', (['"""Writing final ROUGE results to %s..."""', 'results_file'], {}), "('Writing final ROUGE 
results to %s...', results_file)\n", (12799, 12853), True, 'import tensorflow as tf\n'), ((12970, 13005), 'os.path.join', 'os.path.join', (['base_path', '"""attn_vis"""'], {}), "(base_path, 'attn_vis')\n", (12982, 13005), False, 'import os\n'), ((701, 745), 'os.path.join', 'os.path.join', (['article_path', '"""articles/*.txt"""'], {}), "(article_path, 'articles/*.txt')\n", (713, 745), False, 'import os\n'), ((983, 1028), 'os.path.join', 'os.path.join', (['summary_path', '"""reference/*.txt"""'], {}), "(summary_path, 'reference/*.txt')\n", (995, 1028), False, 'import os\n'), ((1104, 1147), 'os.path.join', 'os.path.join', (['summary_path', '"""decoded/*.txt"""'], {}), "(summary_path, 'decoded/*.txt')\n", (1116, 1147), False, 'import os\n'), ((1181, 1200), 'glob.glob', 'glob.glob', (['sum_path'], {}), '(sum_path)\n', (1190, 1200), False, 'import glob\n'), ((1527, 1554), 'os.path.isdir', 'os.path.isdir', (['article_path'], {}), '(article_path)\n', (1540, 1554), False, 'import os\n'), ((3959, 4001), 'os.path.join', 'os.path.join', (['base_path', '"""reference/*.txt"""'], {}), "(base_path, 'reference/*.txt')\n", (3971, 4001), False, 'import os\n'), ((4074, 4114), 'os.path.join', 'os.path.join', (['base_path', '"""decoded/*.txt"""'], {}), "(base_path, 'decoded/*.txt')\n", (4086, 4114), False, 'import os\n'), ((4147, 4166), 'glob.glob', 'glob.glob', (['hyp_path'], {}), '(hyp_path)\n', (4156, 4166), False, 'import glob\n'), ((8015, 8057), 'os.path.join', 'os.path.join', (['base_path', '"""reference/*.txt"""'], {}), "(base_path, 'reference/*.txt')\n", (8027, 8057), False, 'import os\n'), ((8134, 8174), 'os.path.join', 'os.path.join', (['base_path', '"""decoded/*.txt"""'], {}), "(base_path, 'decoded/*.txt')\n", (8146, 8174), False, 'import os\n'), ((8215, 8234), 'glob.glob', 'glob.glob', (['hyp_path'], {}), '(hyp_path)\n', (8224, 8234), False, 'import glob\n'), ((9271, 9290), 'glob.glob', 'glob.glob', (['ref_path'], {}), '(ref_path)\n', (9280, 9290), False, 'import glob\n'), 
((9316, 9335), 'glob.glob', 'glob.glob', (['hyp_path'], {}), '(hyp_path)\n', (9325, 9335), False, 'import glob\n'), ((10465, 10531), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(cmd, stderr=subprocess.STDOUT, shell=True)\n', (10488, 10531), False, 'import subprocess\n'), ((10656, 10679), 'os.remove', 'os.remove', (['ref_filename'], {}), '(ref_filename)\n', (10665, 10679), False, 'import os\n'), ((10722, 10745), 'os.remove', 'os.remove', (['hyp_filename'], {}), '(hyp_filename)\n', (10731, 10745), False, 'import os\n'), ((13021, 13057), 'glob.glob', 'glob.glob', (["(attn_vis_path + '/*.json')"], {}), "(attn_vis_path + '/*.json')\n", (13030, 13057), False, 'import glob\n'), ((773, 792), 'glob.glob', 'glob.glob', (['art_path'], {}), '(art_path)\n', (782, 792), False, 'import glob\n'), ((2790, 2820), 'nltk.ngrams', 'ngrams', (['art_split[:400]', '(n + 1)'], {}), '(art_split[:400], n + 1)\n', (2796, 2820), False, 'from nltk import ngrams\n'), ((11829, 11856), 'logging.getLogger', 'logging.getLogger', (['"""global"""'], {}), "('global')\n", (11846, 11856), False, 'import logging\n'), ((5821, 5842), 'hashlib.md5', 'hashlib.md5', (['sentence'], {}), '(sentence)\n', (5832, 5842), False, 'import hashlib\n'), ((6091, 6106), 'collections.Counter', 'Counter', (['hashes'], {}), '(hashes)\n', (6098, 6106), False, 'from collections import Counter\n'), ((7268, 7287), 'numpy.sum', 'np.sum', (['(row1 & row2)'], {}), '(row1 & row2)\n', (7274, 7287), True, 'import numpy as np\n'), ((7289, 7301), 'numpy.sum', 'np.sum', (['row1'], {}), '(row1)\n', (7295, 7301), True, 'import numpy as np\n')] |
# Start by loading packages
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
import scipy
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
import seaborn as sns
import pandas as pd
import json
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
import pickle
import warnings
warnings.filterwarnings("ignore")
print('---All modules loaded')
# Load data
# Fixed problem size for the India village dataset; num_partitions is
# presumably the number of ground-truth communities -- it is not used
# elsewhere in this script (NOTE(review): confirm).
num_nodes = 1991
num_partitions = 12
# load data
# NOTE: pickle.load is only acceptable because this is a trusted local file.
with open('data/India_database.p', 'rb') as f:
    database = pickle.load(f)
# Build two identical graphs: G stays raw; nG was intended to receive noisy
# extra edges (the noise-injection block below is commented out).
G = nx.Graph()
nG = nx.Graph()
for i in range(num_nodes):
    G.add_node(i)
    nG.add_node(i)
for edge in database['edges']:
    G.add_edge(edge[0], edge[1])
    nG.add_edge(edge[0], edge[1])
start_edges = nx.number_of_edges(G)
# # add noise
# for j in range(int( 1*G.number_of_edges() )):
#     x1 = int(num_nodes * np.random.rand())
#     x2 = int(num_nodes * np.random.rand())
#     if database['label'][x1] != database['label'][x2]:
#         nG.add_edge(x1, x2)
print('---Data loaded. {:3d} edges in raw version \n'.format(G.number_of_edges()))
# print('---Added {:d} edges to create noisy version \n'.format(nx.number_of_edges(nG)-start_edges))
# Bootstrap based on betweenness centrality
print('---Computing betweenness centrality')
bc = nx.betweenness_centrality(G)
# Rank nodes by betweenness centrality, highest first.
bcsorted = sorted(bc.items(), key=lambda x: x[1], reverse=True)
# bootstrap
num_bootstrap = 10
size_bootstrap= 30
# De-duplicate the node labels while preserving order.
# NOTE(review): `nodes` is not used later in this script -- confirm.
nodes = []
for n in database['idx2node'].values():
    if n not in nodes:
        nodes.append(n)
print('---Performing bootstrap. Selecting {:d} samples with {:d} nodes each'.format(num_bootstrap,size_bootstrap))
# Keep only nodes whose betweenness centrality exceeds the 5e-4 threshold.
top = []
for n in bcsorted:
    if n[1] > 5e-4:
        top.append(n[0])
# For each bootstrap sample: draw size_bootstrap distinct high-centrality
# nodes, take the induced subgraph of G, and precompute its representations:
# uniform node weights, dense adjacency matrix, and normalized heat kernels
# at t = 3, 7 and 11.
samples = []
AList = []
HKList3 = []
HKList7 = []
HKList11 = []
pList = []
lambdas=[]
for i in range(0,num_bootstrap):
    select = np.random.choice(top,size_bootstrap,replace=False)
    sG = nx.Graph()
    for n in select:
        sG.add_node(n)
    for e in G.edges():
        if e[0] in select and e[1] in select:
            sG.add_edge(e[0],e[1])
    samples.append(sG)
    pList.append(ot.unif(nx.number_of_nodes(sG)))
    AList.append(nx.adjacency_matrix(sG).toarray())
    HKList3.append(sgw.undirected_normalized_heat_kernel(sG,3))
    HKList7.append(sgw.undirected_normalized_heat_kernel(sG,7))
    HKList11.append(sgw.undirected_normalized_heat_kernel(sG,11))
    # Uniform barycenter weights; relies on Python 3 true division
    # (under Python 2 this would be integer division and yield 0).
    lambdas.append(1/num_bootstrap)
print('---Bootstrap completed. Computing GW averages')
# GW barycenter computation
N = size_bootstrap # size of targeted barycenter
p = ot.unif(N) #weights of targeted barycenter
num_runs = 10 # each call to gromov_barycenters gives random initialization,
# we will iterate this several times
def run_frechet(CList):
    """Estimate the GW barycenter ``num_runs`` times and score each estimate.

    Every call to ``ot.gromov.gromov_barycenters`` is randomly initialised;
    for each run we record the wall-clock time and the empirical Frechet
    loss, i.e. the mean squared GW distance between the barycenter and each
    bootstrap sample in ``CList``.

    Returns (frechet_loss, runtimes), two lists of length ``num_runs``.
    """
    runtimes, frechet_loss = [], []
    for _ in range(num_runs):
        tic = time.time()
        barycenter = ot.gromov.gromov_barycenters(
            N, CList, pList, p, lambdas, 'square_loss', max_iter=100, tol=1e-3)
        runtimes.append(time.time() - tic)
        # Frechet loss computation: GW distance to every bootstrap sample.
        dists = []
        for s in range(num_bootstrap):
            _, log = ot.gromov.gromov_wasserstein(
                barycenter, CList[s], p, pList[s], 'square_loss', log=True)
            dists.append(log['gw_dist'])
        frechet_loss.append(1.0 / num_bootstrap * sum(d ** 2 for d in dists))
    return frechet_loss, runtimes
# Accumulators shared across all representation runs (long format: one entry
# per (representation, run) pair).
res_times = []
res_loss = []
res_loss_centered = []
representation = []

# The four structural representations get the identical evaluation protocol;
# a single loop replaces four copy-pasted blocks (output is unchanged).
for rep_name, CList in [('adj', AList), ('HK3', HKList3),
                        ('HK7', HKList7), ('HK11', HKList11)]:
    frechet_loss, runtimes = run_frechet(CList)
    # Per-representation mean, used to center the losses for the variance tests.
    ave_loss = np.mean(frechet_loss)
    for s in range(num_runs):
        res_times.append(runtimes[s])
        res_loss.append(frechet_loss[s])
        res_loss_centered.append(frechet_loss[s] - ave_loss)
        representation.append(rep_name)
    print('---Finished run with ' + rep_name)
# Assemble the long-format results table: one row per (representation, run).
res = {'representation':representation, 'loss':res_loss,
       'log-loss':np.log(res_loss), 'centered-loss':res_loss_centered,
       'runtime':res_times}
df = pd.DataFrame(res)
# Perform Bartlett tests
# Compare the variance of the centered Frechet losses of the adjacency
# representation against each heat-kernel representation.
# NOTE(review): `scipy.stats` is accessed via a bare `import scipy` above;
# on some scipy versions the stats submodule is not auto-imported -- confirm.
a = df[df['representation']=='adj']['centered-loss']
b = df[df['representation']=='HK3']['centered-loss']
c = df[df['representation']=='HK7']['centered-loss']
d = df[df['representation']=='HK11']['centered-loss']
_,pab = scipy.stats.bartlett(a,b)
_,pac = scipy.stats.bartlett(a,c)
_,pad = scipy.stats.bartlett(a,d)
# 0.017 ~ 0.05 / 3: Bonferroni correction for the three pairwise tests.
print('---p={:3.3e} for Bartlett test of adj-HK3. Significance level needed after Bonferroni correction = 0.017'.format(pab))
print('---p={:3.3e} for Bartlett test of adj-HK7. Significance level needed after Bonferroni correction = 0.017'.format(pac))
print('---p={:3.3e} for Bartlett test of adj-HK11. Significance level needed after Bonferroni correction = 0.017'.format(pad))
# Keep the same three lines so they can be appended to the results file below.
p_vals = ['---p={:3.3e} for Bartlett test of adj-HK3. Significance level needed after Bonferroni correction = 0.017'.format(pab),
          '---p={:3.3e} for Bartlett test of adj-HK7. Significance level needed after Bonferroni correction = 0.017'.format(pac),
          '---p={:3.3e} for Bartlett test of adj-HK11. Significance level needed after Bonferroni correction = 0.017'.format(pad)]
matplotlib.style.use('ggplot')
# Three boxen plots: raw loss, centered loss and runtime per representation.
# plt.figure()
sns.catplot(x='representation',y='loss',data=df,kind='boxen')
plt.title('Village data: loss-representation')
plt.savefig('res_gwa-village-loss.pdf',dpi=300,format='pdf',bbox_inches='tight')
# plt.figure()
sns.catplot(x='representation',y='centered-loss',data=df,kind='boxen')
plt.title('Village data: centered loss-representation')
plt.savefig('res_gwa-village-closs.pdf',dpi=300,format='pdf',bbox_inches='tight')
# plt.figure()
sns.catplot(x='representation',y='runtime',data=df,kind='boxen')
plt.title('Village data: runtime-representation')
plt.savefig('res_gwa-village-runtime.pdf',dpi=300,format='pdf',bbox_inches='tight')
plt.show()
# Save results
# Summary table: mean/variance of the Frechet loss and mean runtime per
# representation.
tab_cols = ['Representation','Frechet loss average','Frechet loss variance','Runtime']
tab_rows = []
tab_rows.append(['Adj',
                 np.mean(df[df['representation']=='adj']['loss']),
                 np.var(df[df['representation']=='adj']['loss']),
                 np.mean(df[df['representation']=='adj']['runtime'])])
tab_rows.append(['HK3',
                 np.mean(df[df['representation']=='HK3']['loss']),
                 np.var(df[df['representation']=='HK3']['loss']),
                 np.mean(df[df['representation']=='HK3']['runtime'])])
tab_rows.append(['HK7',
                 np.mean(df[df['representation']=='HK7']['loss']),
                 np.var(df[df['representation']=='HK7']['loss']),
                 np.mean(df[df['representation']=='HK7']['runtime'])])
tab_rows.append(['HK11',
                 np.mean(df[df['representation']=='HK11']['loss']),
                 np.var(df[df['representation']=='HK11']['loss']),
                 np.mean(df[df['representation']=='HK11']['runtime'])])
res_tab = pd.DataFrame(tab_rows,columns=tab_cols)
res_tab.to_csv('res_gwa_village.txt',header=True, index=False, sep='\t')
# Append the Bartlett p-value lines to the same results file.
with open('res_gwa_village.txt', 'a') as outfile:
    json.dump(p_vals,outfile,indent=1)
"scipy.stats.bartlett",
"numpy.log",
"seaborn.catplot",
"numpy.var",
"matplotlib.style.use",
"networkx.betweenness_centrality",
"ot.gromov.gromov_barycenters",
"numpy.mean",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"numpy.random.choice",
"networkx.adjacency_matrix",
"pickle.load",
... | [((518, 551), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (541, 551), False, 'import warnings\n'), ((729, 739), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (737, 739), True, 'import networkx as nx\n'), ((745, 755), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (753, 755), True, 'import networkx as nx\n'), ((937, 958), 'networkx.number_of_edges', 'nx.number_of_edges', (['G'], {}), '(G)\n', (955, 958), True, 'import networkx as nx\n'), ((1493, 1521), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['G'], {}), '(G)\n', (1518, 1521), True, 'import networkx as nx\n'), ((2791, 2801), 'ot.unif', 'ot.unif', (['N'], {}), '(N)\n', (2798, 2801), False, 'import ot\n'), ((3852, 3873), 'numpy.mean', 'np.mean', (['frechet_loss'], {}), '(frechet_loss)\n', (3859, 3873), True, 'import numpy as np\n'), ((4172, 4193), 'numpy.mean', 'np.mean', (['frechet_loss'], {}), '(frechet_loss)\n', (4179, 4193), True, 'import numpy as np\n'), ((4492, 4513), 'numpy.mean', 'np.mean', (['frechet_loss'], {}), '(frechet_loss)\n', (4499, 4513), True, 'import numpy as np\n'), ((4818, 4839), 'numpy.mean', 'np.mean', (['frechet_loss'], {}), '(frechet_loss)\n', (4825, 4839), True, 'import numpy as np\n'), ((5231, 5248), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (5243, 5248), True, 'import pandas as pd\n'), ((5498, 5524), 'scipy.stats.bartlett', 'scipy.stats.bartlett', (['a', 'b'], {}), '(a, b)\n', (5518, 5524), False, 'import scipy\n'), ((5532, 5558), 'scipy.stats.bartlett', 'scipy.stats.bartlett', (['a', 'c'], {}), '(a, c)\n', (5552, 5558), False, 'import scipy\n'), ((5566, 5592), 'scipy.stats.bartlett', 'scipy.stats.bartlett', (['a', 'd'], {}), '(a, d)\n', (5586, 5592), False, 'import scipy\n'), ((6369, 6399), 'matplotlib.style.use', 'matplotlib.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (6389, 6399), False, 'import matplotlib\n'), ((6415, 6479), 'seaborn.catplot', 'sns.catplot', ([], {'x': 
'"""representation"""', 'y': '"""loss"""', 'data': 'df', 'kind': '"""boxen"""'}), "(x='representation', y='loss', data=df, kind='boxen')\n", (6426, 6479), True, 'import seaborn as sns\n'), ((6477, 6523), 'matplotlib.pyplot.title', 'plt.title', (['"""Village data: loss-representation"""'], {}), "('Village data: loss-representation')\n", (6486, 6523), True, 'import matplotlib.pyplot as plt\n'), ((6524, 6612), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""res_gwa-village-loss.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('res_gwa-village-loss.pdf', dpi=300, format='pdf', bbox_inches=\n 'tight')\n", (6535, 6612), True, 'import matplotlib.pyplot as plt\n'), ((6622, 6695), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""representation"""', 'y': '"""centered-loss"""', 'data': 'df', 'kind': '"""boxen"""'}), "(x='representation', y='centered-loss', data=df, kind='boxen')\n", (6633, 6695), True, 'import seaborn as sns\n'), ((6693, 6748), 'matplotlib.pyplot.title', 'plt.title', (['"""Village data: centered loss-representation"""'], {}), "('Village data: centered loss-representation')\n", (6702, 6748), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6838), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""res_gwa-village-closs.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('res_gwa-village-closs.pdf', dpi=300, format='pdf', bbox_inches\n ='tight')\n", (6760, 6838), True, 'import matplotlib.pyplot as plt\n'), ((6847, 6914), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""representation"""', 'y': '"""runtime"""', 'data': 'df', 'kind': '"""boxen"""'}), "(x='representation', y='runtime', data=df, kind='boxen')\n", (6858, 6914), True, 'import seaborn as sns\n'), ((6912, 6961), 'matplotlib.pyplot.title', 'plt.title', (['"""Village data: runtime-representation"""'], {}), "('Village data: runtime-representation')\n", (6921, 6961), True, 'import matplotlib.pyplot as plt\n'), ((6962, 7052), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""res_gwa-village-runtime.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('res_gwa-village-runtime.pdf', dpi=300, format='pdf',\n bbox_inches='tight')\n", (6973, 7052), True, 'import matplotlib.pyplot as plt\n'), ((7047, 7057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7055, 7057), True, 'import matplotlib.pyplot as plt\n'), ((8093, 8133), 'pandas.DataFrame', 'pd.DataFrame', (['tab_rows'], {'columns': 'tab_cols'}), '(tab_rows, columns=tab_cols)\n', (8105, 8133), True, 'import pandas as pd\n'), ((710, 724), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (721, 724), False, 'import pickle\n'), ((2068, 2120), 'numpy.random.choice', 'np.random.choice', (['top', 'size_bootstrap'], {'replace': '(False)'}), '(top, size_bootstrap, replace=False)\n', (2084, 2120), True, 'import numpy as np\n'), ((2128, 2138), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2136, 2138), True, 'import networkx as nx\n'), ((5144, 5160), 'numpy.log', 'np.log', (['res_loss'], {}), '(res_loss)\n', (5150, 5160), True, 'import numpy as np\n'), ((8261, 8297), 'json.dump', 'json.dump', (['p_vals', 'outfile'], {'indent': '(1)'}), '(p_vals, outfile, indent=1)\n', (8270, 8297), False, 'import json\n'), ((2437, 2481), 'spectralGW.undirected_normalized_heat_kernel', 'sgw.undirected_normalized_heat_kernel', (['sG', '(3)'], {}), '(sG, 3)\n', (2474, 2481), True, 'import spectralGW as sgw\n'), ((2501, 2545), 'spectralGW.undirected_normalized_heat_kernel', 'sgw.undirected_normalized_heat_kernel', (['sG', '(7)'], {}), '(sG, 7)\n', (2538, 2545), True, 'import spectralGW as sgw\n'), ((2566, 2611), 'spectralGW.undirected_normalized_heat_kernel', 'sgw.undirected_normalized_heat_kernel', (['sG', '(11)'], {}), '(sG, 11)\n', (2603, 2611), True, 'import spectralGW as sgw\n'), ((3082, 3093), 'time.time', 'time.time', ([], {}), '()\n', (3091, 3093), False, 'import time\n'), ((3112, 3213), 
'ot.gromov.gromov_barycenters', 'ot.gromov.gromov_barycenters', (['N', 'CList', 'pList', 'p', 'lambdas', '"""square_loss"""'], {'max_iter': '(100)', 'tol': '(0.001)'}), "(N, CList, pList, p, lambdas, 'square_loss',\n max_iter=100, tol=0.001)\n", (3140, 3213), False, 'import ot\n'), ((3216, 3227), 'time.time', 'time.time', ([], {}), '()\n', (3225, 3227), False, 'import time\n'), ((7216, 7266), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'adj']['loss']"], {}), "(df[df['representation'] == 'adj']['loss'])\n", (7223, 7266), True, 'import numpy as np\n'), ((7282, 7331), 'numpy.var', 'np.var', (["df[df['representation'] == 'adj']['loss']"], {}), "(df[df['representation'] == 'adj']['loss'])\n", (7288, 7331), True, 'import numpy as np\n'), ((7346, 7399), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'adj']['runtime']"], {}), "(df[df['representation'] == 'adj']['runtime'])\n", (7353, 7399), True, 'import numpy as np\n'), ((7442, 7492), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'HK3']['loss']"], {}), "(df[df['representation'] == 'HK3']['loss'])\n", (7449, 7492), True, 'import numpy as np\n'), ((7508, 7557), 'numpy.var', 'np.var', (["df[df['representation'] == 'HK3']['loss']"], {}), "(df[df['representation'] == 'HK3']['loss'])\n", (7514, 7557), True, 'import numpy as np\n'), ((7572, 7625), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'HK3']['runtime']"], {}), "(df[df['representation'] == 'HK3']['runtime'])\n", (7579, 7625), True, 'import numpy as np\n'), ((7668, 7718), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'HK7']['loss']"], {}), "(df[df['representation'] == 'HK7']['loss'])\n", (7675, 7718), True, 'import numpy as np\n'), ((7734, 7783), 'numpy.var', 'np.var', (["df[df['representation'] == 'HK7']['loss']"], {}), "(df[df['representation'] == 'HK7']['loss'])\n", (7740, 7783), True, 'import numpy as np\n'), ((7798, 7851), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'HK7']['runtime']"], {}), 
"(df[df['representation'] == 'HK7']['runtime'])\n", (7805, 7851), True, 'import numpy as np\n'), ((7895, 7946), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'HK11']['loss']"], {}), "(df[df['representation'] == 'HK11']['loss'])\n", (7902, 7946), True, 'import numpy as np\n'), ((7962, 8012), 'numpy.var', 'np.var', (["df[df['representation'] == 'HK11']['loss']"], {}), "(df[df['representation'] == 'HK11']['loss'])\n", (7968, 8012), True, 'import numpy as np\n'), ((8027, 8081), 'numpy.mean', 'np.mean', (["df[df['representation'] == 'HK11']['runtime']"], {}), "(df[df['representation'] == 'HK11']['runtime'])\n", (8034, 8081), True, 'import numpy as np\n'), ((2341, 2363), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['sG'], {}), '(sG)\n', (2359, 2363), True, 'import networkx as nx\n'), ((3377, 3466), 'ot.gromov.gromov_wasserstein', 'ot.gromov.gromov_wasserstein', (['gwa_adj', 'CList[s]', 'p', 'pList[s]', '"""square_loss"""'], {'log': '(True)'}), "(gwa_adj, CList[s], p, pList[s], 'square_loss',\n log=True)\n", (3405, 3466), False, 'import ot\n'), ((2383, 2406), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['sG'], {}), '(sG)\n', (2402, 2406), True, 'import networkx as nx\n')] |
# -*- coding: utf-8 -*-
from Models import Regression
import pandas as pd
import numpy as np

# Training data and the feature matrix to predict on.
dataset = pd.read_csv('Data.csv')
X = np.array([[14, 41, 1020, 72], [10, 40, 1010, 90]])

# Train every registered Regression subclass and keep the best scorer.
best_model = None
for regression in Regression.__subclasses__():
    model = regression(dataset)
    model.train_regressor()
    # Bug fix: identity check (`is None`) instead of `== None`; the first
    # trained model always claims the empty slot.
    if best_model is None or best_model.score() < model.score():
        best_model = model

# Fail with a clear message instead of an AttributeError below when no
# Regression subclass is registered.
if best_model is None:
    raise RuntimeError('No Regression subclasses are registered.')

print('The best model is: ' + best_model.__class__.__name__)
print(best_model.score())
print(best_model.predict(X))
"numpy.array",
"Models.Regression.__subclasses__",
"pandas.read_csv"
] | [((104, 127), 'pandas.read_csv', 'pd.read_csv', (['"""Data.csv"""'], {}), "('Data.csv')\n", (115, 127), True, 'import pandas as pd\n'), ((132, 182), 'numpy.array', 'np.array', (['[[14, 41, 1020, 72], [10, 40, 1010, 90]]'], {}), '([[14, 41, 1020, 72], [10, 40, 1010, 90]])\n', (140, 182), True, 'import numpy as np\n'), ((214, 241), 'Models.Regression.__subclasses__', 'Regression.__subclasses__', ([], {}), '()\n', (239, 241), False, 'from Models import Regression\n')] |
import copy
import time
import optuna
import warnings
import numpy as np
import pandas as pd
from datetime import datetime
from sklearn.model_selection import KFold
from sklearn.metrics import SCORERS
class OptunaGridSearch:
    """Optuna-based hyper-parameter search wrapper.

    Works with any model supported by Amplo.AutoML.Modelling; the parameter
    search space is predefined per model class in :meth:`get_params`.
    """

    def __init__(self, model, cv=None, scoring='accuracy', verbose=0, timeout=3600,
                 candidates=250):
        """
        Wrapper for Optuna Grid Search. Takes any model supported by
        Amplo.AutoML.Modelling. The parameter search space is predefined
        for each model.

        Parameters
        ----------
        model obj: Model object to optimize
        cv obj: Scikit CV object (defaults to a 10-fold KFold)
        scoring str: Key into scikit-learn's SCORERS, or a scorer callable
        verbose int: How much to print
        timeout int: Time limit of optimization [s]
        candidates int: Maximum number of candidates to evaluate
        """
        self.model = model
        if hasattr(model, 'is_fitted'):
            assert not model.is_fitted(), 'Model already fitted'
        # Build the default splitter here instead of in the signature to avoid
        # sharing one mutable default object across instances.
        self.cv = KFold(n_splits=10) if cv is None else cv
        self.scoring = SCORERS[scoring] if isinstance(scoring, str) else scoring
        self.verbose = verbose
        self.timeout = timeout
        self.nTrials = candidates
        self.x, self.y = None, None
        self.binary = True
        self.samples = None

        # Model specific settings: a plain linear regression has no tunable
        # parameters, so a single trial suffices.
        if type(self.model).__name__ == 'LinearRegression':
            self.nTrials = 1

        # Input tests
        assert model is not None, 'Need to provide a model'
        if scoring is None:
            if 'Classifier' in type(model).__name__:
                self.scoring = SCORERS['accuracy']
            elif 'Regressor' in type(model).__name__:
                self.scoring = SCORERS['neg_mean_squared_error']
            else:
                raise ValueError('Model mode unknown')

    def get_params(self, trial):
        """Return the Optuna search space for ``self.model``'s class.

        Hyper-parameters are drawn from ``trial``; unsupported model classes
        yield a warning and an empty dict.
        """
        # todo support suggest log uniform
        if type(self.model).__name__ == 'LinearRegression':
            return {}
        elif type(self.model).__name__ == 'Lasso' or \
                'Ridge' in type(self.model).__name__:
            return {
                'alpha': trial.suggest_uniform('alpha', 0, 10),
            }
        elif 'SV' in type(self.model).__name__:
            return {
                'gamma': trial.suggest_categorical('gamma', ['scale', 'auto', 0.001, 0.01, 0.1, 0.5, 1]),
                'C': trial.suggest_uniform('C', 0, 10),
            }
        elif 'KNeighbors' in type(self.model).__name__:
            return {
                'n_neighbors': trial.suggest_int('n_neighbors', 5, min(50, int(self.samples / 10))),
                'weights': trial.suggest_categorical('weights', ['uniform', 'distance']),
                'leaf_size': trial.suggest_int('leaf_size', 1, min(100, int(self.samples / 10))),
            }

        # Regression models
        elif type(self.model).__name__ == 'DecisionTreeRegressor':
            return {
                'criterion': trial.suggest_categorical('criterion', ['squared_error', 'friedman_mse', 'absolute_error', 'poisson']),
                # Bug fix: was `trial.sugest_int`, which raises AttributeError.
                'max_depth': trial.suggest_int('max_depth', 3, min(25, int(np.log2(self.samples)))),
            }
        elif type(self.model).__name__ == 'BaggingRegressor':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 10, 250),
                'max_samples': trial.suggest_uniform('max_samples', 0.5, 1),
                'max_features': trial.suggest_uniform('max_features', 0.5, 1),
            }
        elif type(self.model).__name__ == 'CatBoostRegressor':
            return dict(n_estimators=trial.suggest_int('n_estimators', 500, 2000), verbose=0, early_stopping_rounds=100,
                        od_pval=1e-5,
                        loss_function=trial.suggest_categorical('loss_function', ['MAE', 'RMSE']),
                        learning_rate=trial.suggest_loguniform('learning_rate', 0.001, 0.5),
                        l2_leaf_reg=trial.suggest_uniform('l2_leaf_reg', 0, 10),
                        depth=trial.suggest_int('depth', 3, min(10, int(np.log2(self.samples)))),
                        min_data_in_leaf=trial.suggest_int('min_data_in_leaf', 1, min(1000, int(self.samples / 10))),
                        grow_policy=trial.suggest_categorical('grow_policy',
                                                               ['SymmetricTree', 'Depthwise', 'Lossguide']))
        elif type(self.model).__name__ == 'GradientBoostingRegressor':
            return {
                'loss': trial.suggest_categorical('loss', ['ls', 'lad', 'huber']),
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
                'max_depth': trial.suggest_int('max_depth', 3, min(10, int(np.log2(self.samples)))),
                'n_estimators': trial.suggest_int('n_estimators', 100, 1000),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, min(1000, int(self.samples / 10))),
                'max_features': trial.suggest_uniform('max_features', 0.5, 1),
                'subsample': trial.suggest_uniform('subsample', 0.5, 1),
            }
        elif type(self.model).__name__ == 'HistGradientBoostingRegressor':
            return {
                'loss': trial.suggest_categorical('loss', ['least_squares', 'least_absolute_deviation']),
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
                'max_iter': trial.suggest_int('max_iter', 100, 250),
                'max_leaf_nodes': trial.suggest_int('max_leaf_nodes', 30, 150),
                'max_depth': trial.suggest_int('max_depth', 3, min(10, int(np.log2(self.samples)))),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, min(1000, int(self.samples / 10))),
                'l2_regularization': trial.suggest_uniform('l2_regularization', 0, 10),
                'max_bins': trial.suggest_int('max_bins', 100, 255),
                # NOTE(review): a list value for `early_stopping` looks
                # unintended (set_params would receive [True]) -- confirm.
                'early_stopping': [True],
            }
        elif type(self.model).__name__ == 'RandomForestRegressor':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 50, 1000),
                'criterion': trial.suggest_categorical('criterion', ['squared_error', 'absolute_error']),
                'max_depth': trial.suggest_int('max_depth', 3, min(15, int(np.log2(self.samples)))),
                'max_features': trial.suggest_categorical('max_features', ['auto', 'sqrt']),
                'min_samples_split': trial.suggest_int('min_samples_split', 2, 50),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, min(1000, int(self.samples / 10))),
                'bootstrap': trial.suggest_categorical('bootstrap', [True, False]),
            }
        elif type(self.model).__name__ == 'XGBRegressor':
            param = {
                "objective": 'reg:squarederror',
                "eval_metric": "rmse",
                "booster": trial.suggest_categorical("booster", ["gbtree", "gblinear", "dart"]),
                "lambda": trial.suggest_loguniform("lambda", 1e-8, 1.0),
                "alpha": trial.suggest_loguniform("alpha", 1e-8, 1.0),
                "callbacks": optuna.integration.XGBoostPruningCallback(trial, "validation-rmse"),
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
            }
            # Tree boosters take additional structural parameters.
            if param["booster"] == "gbtree" or param["booster"] == "dart":
                param["max_depth"] = trial.suggest_int("max_depth", 1, min(10, int(np.log2(self.samples))))
                param["eta"] = trial.suggest_loguniform("eta", 1e-8, 1.0)
                param["gamma"] = trial.suggest_loguniform("gamma", 1e-8, 1.0)
                param["grow_policy"] = trial.suggest_categorical("grow_policy", ["depthwise", "lossguide"])
            if param["booster"] == "dart":
                param["sample_type"] = trial.suggest_categorical("sample_type", ["uniform", "weighted"])
                param["normalize_type"] = trial.suggest_categorical("normalize_type", ["tree", "forest"])
                param["rate_drop"] = trial.suggest_loguniform("rate_drop", 1e-8, 1.0)
                param["skip_drop"] = trial.suggest_loguniform("skip_drop", 1e-8, 1.0)
            return param
        elif type(self.model).__name__ == 'LGBMRegressor':
            return {
                'num_leaves': trial.suggest_int('num_leaves', 10, 150),
                'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 1, min(1000, int(self.samples / 10))),
                'min_sum_hessian_in_leaf': trial.suggest_uniform('min_sum_hessian_in_leaf', 1e-3, 0.5),
                'subsample': trial.suggest_uniform('subsample', 0.5, 1),
                'colsample_bytree': trial.suggest_uniform('colsample_bytree', 0, 1),
                'reg_alpha': trial.suggest_uniform('reg_alpha', 0, 1),
                'reg_lambda': trial.suggest_uniform('reg_lambda', 0, 1),
                'callbacks': [optuna.integration.LightGBMPruningCallback(trial, "mean_absolute_error", "valid_1")],
            }

        # Classifiers
        elif type(self.model).__name__ == 'BaggingClassifier':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 10, 250),
                'max_samples': trial.suggest_uniform('max_samples', 0.5, 1),
                'max_features': trial.suggest_uniform('max_features', 0.5, 1),
            }
        elif type(self.model).__name__ == 'CatBoostClassifier':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 500, 2000),
                "verbose": 0,
                'early_stopping_rounds': 100,
                'od_pval': 1e-5,
                'loss_function': 'Logloss' if self.binary else 'MultiClass',
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
                'l2_leaf_reg': trial.suggest_uniform('l2_leaf_reg', 0, 10),
                'depth': trial.suggest_int('depth', 1, min(10, int(np.log2(self.samples)))),
                'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 1, min(1000, int(self.samples / 10))),
                'grow_policy': trial.suggest_categorical('grow_policy', ['SymmetricTree', 'Depthwise', 'Lossguide']),
            }
        elif type(self.model).__name__ == 'GradientBoostingClassifier':
            return {
                'loss': trial.suggest_categorical('loss', ['deviance', 'exponential']),
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
                'max_depth': trial.suggest_int('max_depth', 3, min(15, int(np.log2(self.samples)))),
                'n_estimators': trial.suggest_int('n_estimators', 100, 250),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, min(1000, int(self.samples / 10))),
                'max_features': trial.suggest_uniform('max_features', 0.5, 1),
                'subsample': trial.suggest_uniform('subsample', 0.5, 1),
            }
        elif type(self.model).__name__ == 'HistGradientBoostingClassifier':
            return {
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
                'max_iter': trial.suggest_int('max_iter', 100, 1000),
                'max_leaf_nodes': trial.suggest_int('max_leaf_nodes', 30, 150),
                'max_depth': trial.suggest_int('max_depth', 3, min(10, int(np.log2(self.samples)))),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, min(1000, int(self.samples / 10))),
                'l2_regularization': trial.suggest_uniform('l2_regularization', 0, 10),
                'max_bins': trial.suggest_int('max_bins', 100, 255),
                # NOTE(review): a list value for `early_stopping` looks
                # unintended (set_params would receive [True]) -- confirm.
                'early_stopping': [True],
            }
        elif type(self.model).__name__ == 'RandomForestClassifier':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 50, 1000),
                'criterion': trial.suggest_categorical('criterion', ['gini', 'entropy']),
                'max_depth': trial.suggest_int('max_depth', 3, min(15, int(np.log2(self.samples)))),
                'max_features': trial.suggest_categorical('max_features', ['auto', 'sqrt']),
                'min_samples_split': trial.suggest_int('min_samples_split', 2, 50),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, min(1000, int(self.samples / 10))),
                'bootstrap': trial.suggest_categorical('bootstrap', [True, False]),
            }
        elif type(self.model).__name__ == 'XGBClassifier':
            param = {
                "objective": "binary:logistic" if self.binary else 'multi:softprob',
                "eval_metric": "logloss",
                "booster": trial.suggest_categorical("booster", ["gbtree", "gblinear", "dart"]),
                "lambda": trial.suggest_loguniform("lambda", 1e-8, 1.0),
                "alpha": trial.suggest_loguniform("alpha", 1e-8, 1.0),
                "callbacks": optuna.integration.XGBoostPruningCallback(trial, "validation-logloss"),
                'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
            }
            # Tree boosters take additional structural parameters.
            if param["booster"] == "gbtree" or param["booster"] == "dart":
                param["max_depth"] = trial.suggest_int("max_depth", 1, min(10, int(np.log2(self.samples))))
                param["eta"] = trial.suggest_loguniform("eta", 1e-8, 1.0)
                param["gamma"] = trial.suggest_loguniform("gamma", 1e-8, 1.0)
                param["grow_policy"] = trial.suggest_categorical("grow_policy", ["depthwise", "lossguide"])
            if param["booster"] == "dart":
                param["sample_type"] = trial.suggest_categorical("sample_type", ["uniform", "weighted"])
                param["normalize_type"] = trial.suggest_categorical("normalize_type", ["tree", "forest"])
                param["rate_drop"] = trial.suggest_loguniform("rate_drop", 1e-8, 1.0)
                param["skip_drop"] = trial.suggest_loguniform("skip_drop", 1e-8, 1.0)
            return param
        elif type(self.model).__name__ == 'LGBMClassifier':
            return {
                "objective": "binary" if self.binary else 'multiclass',
                "metric": trial.suggest_categorical("metric", ['binary_error', 'auc', 'average_precision',
                                                               'binary_logloss']) if self.binary else
                trial.suggest_categorical('metric', ['multi_error', 'multi_logloss', 'auc_mu']),
                "verbosity": -1,
                "boosting_type": "gbdt",
                "lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0),
                "lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0),
                "num_leaves": trial.suggest_int("num_leaves", 10, 5000),
                "max_depth": trial.suggest_int("max_depth", 5, 20),
                "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 1, min(1000, int(self.samples / 10))),
                "min_gain_to_split": trial.suggest_uniform("min_gain_to_split", 0, 5),
                "feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0),
                "bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.4, 1.0),
                "bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
                'callbacks': [optuna.integration.LightGBMPruningCallback(trial, "neg_log_loss", "valid_1")],
            }
        else:
            # Raise error if nothing is returned
            warnings.warn('Hyper parameter tuning not implemented for {}'.format(type(self.model).__name__))
            return {}

    def fit(self, x, y):
        """Run the Optuna study on (x, y) and return a results DataFrame.

        Parameters
        ----------
        x (pd.DataFrame): Feature matrix.
        y (pd.Series or single-column pd.DataFrame): Target.

        Returns
        -------
        pd.DataFrame with one row per trial (params, mean/std objective,
        worst case, and timing statistics).
        """
        if isinstance(y, pd.DataFrame):
            assert len(y.keys()) == 1, 'Multiple target columns not supported.'
            y = y[y.keys()[0]]
        assert isinstance(x, pd.DataFrame), 'X should be Pandas DataFrame'
        assert isinstance(y, pd.Series), 'Y should be Pandas Series or DataFrame'

        # Set mode
        self.binary = y.nunique() == 2
        self.samples = len(y)

        # Store
        self.x, self.y = x, y

        # Set up study; fixed sampler seed makes searches reproducible.
        study = optuna.create_study(sampler=optuna.samplers.TPESampler(seed=236868), direction='maximize')
        study.optimize(self.objective, timeout=self.timeout, n_trials=self.nTrials)

        # Parse results
        optuna_results = study.trials_dataframe()
        results = pd.DataFrame({
            'date': datetime.today().strftime('%d %b %y'),
            'model': type(self.model).__name__,
            'params': [trial.params for trial in study.get_trials()],
            'mean_objective': optuna_results['value'],
            'std_objective': optuna_results['user_attrs_std_value'],
            'worst_case': optuna_results['value'] - optuna_results['user_attrs_std_value'],
            'mean_time': optuna_results['user_attrs_mean_time'],
            'std_time': optuna_results['user_attrs_std_time']
        })

        return results

    def objective(self, trial):
        """Single Optuna trial: cross-validated score of one parameter set."""
        # Metrics
        scores = []
        times = []
        master = copy.deepcopy(self.model)

        # Cross Validation
        for t, v in self.cv.split(self.x, self.y):
            # Split data
            xt, xv, yt, yv = self.x.iloc[t], self.x.iloc[v], self.y.iloc[t], self.y.iloc[v]

            # Train a fresh copy so trials never share fitted state.
            t_start = time.time()
            model = copy.deepcopy(master)
            model.set_params(**self.get_params(trial))
            model.fit(xt, yt)

            # Results
            scores.append(self.scoring(model, xv, yv))
            times.append(time.time() - t_start)

        # Set manual metrics
        trial.set_user_attr('mean_time', np.mean(times))
        trial.set_user_attr('std_time', np.std(times))
        trial.set_user_attr('std_value', np.std(scores))

        # Stop trial (avoid overwriting)
        if trial.number == self.nTrials:
            trial.study.stop()

        return np.mean(scores)
| [
"numpy.mean",
"copy.deepcopy",
"numpy.log2",
"optuna.integration.XGBoostPruningCallback",
"optuna.integration.LightGBMPruningCallback",
"numpy.std",
"datetime.datetime.today",
"optuna.samplers.TPESampler",
"sklearn.model_selection.KFold",
"time.time"
] | [((261, 279), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (266, 279), False, 'from sklearn.model_selection import KFold\n'), ((17055, 17080), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (17068, 17080), False, 'import copy\n'), ((17920, 17935), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (17927, 17935), True, 'import numpy as np\n'), ((17326, 17337), 'time.time', 'time.time', ([], {}), '()\n', (17335, 17337), False, 'import time\n'), ((17358, 17379), 'copy.deepcopy', 'copy.deepcopy', (['master'], {}), '(master)\n', (17371, 17379), False, 'import copy\n'), ((17662, 17676), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (17669, 17676), True, 'import numpy as np\n'), ((17718, 17731), 'numpy.std', 'np.std', (['times'], {}), '(times)\n', (17724, 17731), True, 'import numpy as np\n'), ((17774, 17788), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (17780, 17788), True, 'import numpy as np\n'), ((16146, 16185), 'optuna.samplers.TPESampler', 'optuna.samplers.TPESampler', ([], {'seed': '(236868)'}), '(seed=236868)\n', (16172, 16185), False, 'import optuna\n'), ((17568, 17579), 'time.time', 'time.time', ([], {}), '()\n', (17577, 17579), False, 'import time\n'), ((16421, 16437), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (16435, 16437), False, 'from datetime import datetime\n'), ((3107, 3128), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (3114, 3128), True, 'import numpy as np\n'), ((4043, 4064), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (4050, 4064), True, 'import numpy as np\n'), ((4710, 4731), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (4717, 4731), True, 'import numpy as np\n'), ((7157, 7224), 'optuna.integration.XGBoostPruningCallback', 'optuna.integration.XGBoostPruningCallback', (['trial', '"""validation-rmse"""'], {}), "(trial, 'validation-rmse')\n", (7198, 7224), 
False, 'import optuna\n'), ((5607, 5628), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (5614, 5628), True, 'import numpy as np\n'), ((6305, 6326), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (6312, 6326), True, 'import numpy as np\n'), ((8923, 9010), 'optuna.integration.LightGBMPruningCallback', 'optuna.integration.LightGBMPruningCallback', (['trial', '"""mean_absolute_error"""', '"""valid_1"""'], {}), "(trial, 'mean_absolute_error',\n 'valid_1')\n", (8965, 9010), False, 'import optuna\n'), ((7486, 7507), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (7493, 7507), True, 'import numpy as np\n'), ((9956, 9977), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (9963, 9977), True, 'import numpy as np\n'), ((10571, 10592), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (10578, 10592), True, 'import numpy as np\n'), ((12938, 13008), 'optuna.integration.XGBoostPruningCallback', 'optuna.integration.XGBoostPruningCallback', (['trial', '"""validation-logloss"""'], {}), "(trial, 'validation-logloss')\n", (12979, 13008), False, 'import optuna\n'), ((11363, 11384), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (11370, 11384), True, 'import numpy as np\n'), ((12046, 12067), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (12053, 12067), True, 'import numpy as np\n'), ((15321, 15397), 'optuna.integration.LightGBMPruningCallback', 'optuna.integration.LightGBMPruningCallback', (['trial', '"""neg_log_loss"""', '"""valid_1"""'], {}), "(trial, 'neg_log_loss', 'valid_1')\n", (15363, 15397), False, 'import optuna\n'), ((13270, 13291), 'numpy.log2', 'np.log2', (['self.samples'], {}), '(self.samples)\n', (13277, 13291), True, 'import numpy as np\n')] |
import numpy as np
from cumm import tensorview as tv
from spconv.utils import Point2VoxelCPU3d
from spconv.pytorch.utils import PointToVoxel, gather_features_by_pc_voxel_id
import torch

# Bug fix: numpy was imported twice; the duplicate import is removed.
np.random.seed(50051)  # fixed seed so voxelization demo output is reproducible

# voxel gen source code: spconv/csrc/sparse/pointops.py
gen = PointToVoxel(vsize_xyz=[1, 1, 4],
                   coors_range_xyz=[-10, -4, -2, 10, 4, 2],
                   num_point_features=4,
                   max_num_voxels=100,
                   max_num_points_per_voxel=5)
# One point with 4 features (x, y, z, intensity-like value).
pc = np.array([
    [1.1, 1.9, 1.3, 121.34253],
])
print(pc.shape)
pc_th = torch.from_numpy(pc)
voxels, indices, num_per_voxel = gen(pc_th)
# NOTE(review): `indices` from PointToVoxel is typically 2-D (num_voxels, 3);
# permute(3, 2, 1, 0) requires a 4-D tensor -- confirm the intended axes.
indices = indices.permute(3,2,1,0)
print("voxels = {}".format(voxels))
print("indices = {}".format(indices))
print("num_per_voxel = {}".format(num_per_voxel))
| [
"numpy.array",
"spconv.pytorch.utils.PointToVoxel",
"numpy.random.seed",
"torch.from_numpy"
] | [((208, 229), 'numpy.random.seed', 'np.random.seed', (['(50051)'], {}), '(50051)\n', (222, 229), True, 'import numpy as np\n'), ((292, 440), 'spconv.pytorch.utils.PointToVoxel', 'PointToVoxel', ([], {'vsize_xyz': '[1, 1, 4]', 'coors_range_xyz': '[-10, -4, -2, 10, 4, 2]', 'num_point_features': '(4)', 'max_num_voxels': '(100)', 'max_num_points_per_voxel': '(5)'}), '(vsize_xyz=[1, 1, 4], coors_range_xyz=[-10, -4, -2, 10, 4, 2],\n num_point_features=4, max_num_voxels=100, max_num_points_per_voxel=5)\n', (304, 440), False, 'from spconv.pytorch.utils import PointToVoxel, gather_features_by_pc_voxel_id\n'), ((519, 557), 'numpy.array', 'np.array', (['[[1.1, 1.9, 1.3, 121.34253]]'], {}), '([[1.1, 1.9, 1.3, 121.34253]])\n', (527, 557), True, 'import numpy as np\n'), ((589, 609), 'torch.from_numpy', 'torch.from_numpy', (['pc'], {}), '(pc)\n', (605, 609), False, 'import torch\n')] |
## An Eve optimizer implementation in Chainer
# By <NAME>
# https://github.com/muupan/chainer-eve
# Modified by <NAME>
from __future__ import division
import math
import numpy
from chainer import optimizer
from chainer.optimizers import adam
# Default hyperparameter values shared by the Eve optimizer and its update
# rules (see the Eve class docstring below for the meaning of each field).
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.alpha = 0.001  # coefficient of learning rate
_default_hyperparam.beta1 = 0.9  # decay rate of the first order moment
_default_hyperparam.beta2 = 0.999  # decay rate of the second order moment
_default_hyperparam.beta3 = 0.999  # decay rate of the objective-dependent coefficient
_default_hyperparam.c = 10.0  # clipping constant for the objective-dependent coefficient
_default_hyperparam.eps = 1e-8  # small value for numerical stability
_default_hyperparam.eta = 1.0  # schedule multiplier (e.g. for warm restarts)
_default_hyperparam.f_star = 0.0  # minimum value the loss function can take
_default_hyperparam.weight_decay_rate = 0
_default_hyperparam.amsgrad = False
_default_hyperparam.adabound = False
def _learning_rate(hp, t, d_tilde):
if t == 0:
raise RuntimeError(
'Can\'t determine the learning rate of Eve optimizer '
'because the update steps have not been started.')
fix1 = 1. - math.pow(hp.beta1, t)
fix2 = 1. - math.pow(hp.beta2, t)
return (hp.alpha / d_tilde) * math.sqrt(fix2) / fix1
class EveRule(adam.AdamRule):
    """Update rule of the Eve optimization algorithm.

    See: https://arxiv.org/abs/1611.01505v3

    The owning optimizer must assign :attr:`d_tilde` before :meth:`update`
    is called; the effective learning rate is scaled by it.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        alpha (float): Coefficient of learning rate.
        beta1 (float): Exponential decay rate of the first order moment.
        beta2 (float): Exponential decay rate of the second order moment.
        eps (float): Small value for the numerical stability.
        eta (float): Schedule multiplier, can be used for warm restarts.
        weight_decay_rate (float): Weight decay rate.
        amsgrad (bool): Whether to use the AMSGrad variant of Eve.
    """

    # Objective-dependent coefficient; set by the Eve optimizer each step.
    d_tilde = None

    @property
    def lr(self):
        current_d = self.d_tilde
        assert current_d is not None
        return _learning_rate(self.hyperparam, self.t, current_d)
class Eve(optimizer.GradientMethod):
    """Eve optimizer.

    See: https://arxiv.org/abs/1611.01505v3

    Args:
        alpha (float): Coefficient of learning rate.
        beta1 (float): Exponential decay rate of the first order moment.
        beta2 (float): Exponential decay rate of the second order moment.
        beta3 (float): Exponential decay rate of the objective-dependent
            coefficient of learning rate.
        c (float): Constant used to clip the objective-dependent coefficient.
        eps (float): Small value for the numerical stability.
        eta (float): Schedule multiplier, can be used for warm restarts.
        f_star (float): Minimum value that the loss function can take.
        weight_decay_rate (float): Weight decay rate.
        amsgrad (bool): Whether to use AMSGrad variant of Eve.
    """

    def __init__(self,
                 alpha=_default_hyperparam.alpha,
                 beta1=_default_hyperparam.beta1,
                 beta2=_default_hyperparam.beta2,
                 beta3=_default_hyperparam.beta3,
                 c=_default_hyperparam.c,
                 eps=_default_hyperparam.eps,
                 eta=_default_hyperparam.eta,
                 f_star=_default_hyperparam.f_star,
                 weight_decay_rate=_default_hyperparam.weight_decay_rate,
                 amsgrad=_default_hyperparam.amsgrad,
                 adabound=_default_hyperparam.adabound,
                 ):
        super(Eve, self).__init__()
        self.hyperparam.alpha = alpha
        self.hyperparam.beta1 = beta1
        self.hyperparam.beta2 = beta2
        self.hyperparam.beta3 = beta3
        self.hyperparam.c = c
        self.hyperparam.eps = eps
        self.hyperparam.eta = eta
        self.hyperparam.f_star = f_star
        self.hyperparam.weight_decay_rate = weight_decay_rate
        self.hyperparam.amsgrad = amsgrad
        self.hyperparam.adabound = adabound

    # Expose each hyperparameter as an attribute of the optimizer.
    alpha = optimizer.HyperparameterProxy('alpha')
    beta1 = optimizer.HyperparameterProxy('beta1')
    beta2 = optimizer.HyperparameterProxy('beta2')
    beta3 = optimizer.HyperparameterProxy('beta3')
    c = optimizer.HyperparameterProxy('c')
    eps = optimizer.HyperparameterProxy('eps')
    eta = optimizer.HyperparameterProxy('eta')
    f_star = optimizer.HyperparameterProxy('f_star')
    weight_decay_rate = optimizer.HyperparameterProxy('weight_decay_rate')
    amsgrad = optimizer.HyperparameterProxy('amsgrad')

    def setup(self, link):
        """Sets a target link and initializes the optimizer states.

        Given link is set to the :attr:`target` attribute. It also prepares the
        optimizer state dictionaries corresponding to all parameters in the
        link hierarchy. The existing states are discarded.

        Args:
            link (~chainer.Link): Target link object.

        Returns:
            The optimizer instance.

        .. note::
            As of v4.0.0, this function returns the optimizer instance itself
            so that you can instantiate and setup the optimizer in one line,
            e.g., ``optimizer = SomeOptimizer().setup(link)``.
        """
        super(Eve, self).setup(link)
        # d_tilde and f stay NaN until the first update provides a loss value.
        self.d_tilde = numpy.nan
        self.f = numpy.nan
        return self

    def create_update_rule(self):
        """Create an :class:`EveRule` sharing this optimizer's hyperparams."""
        return EveRule(self.hyperparam)

    @property
    def lr(self):
        return _learning_rate(self.hyperparam, self.t, self.d_tilde)

    def update(self, loss, *args, **kwds):
        """Updates parameters using an already-computed loss.

        Unlike other optimizers, Eve needs the loss value itself to update
        its objective-dependent coefficient, so the computed loss variable is
        required.

        Args:
            loss (~chainer.Variable): Computed loss to be minimized.
            *args, **kwds: Unused; kept for interface compatibility.
        """
        # Fix: removed the unused `use_cleargrads` local that was never read.
        loss_value = float(loss.array)
        self.reallocate_cleared_grads()
        self.call_hooks('pre')
        self.t += 1
        # Refresh d_tilde before the per-parameter rules consume it.
        self._update_d_tilde_and_f(loss_value)
        for param in self.target.params():
            param.update_rule.d_tilde = self.d_tilde
            param.update()
        self.reallocate_cleared_grads()
        self.call_hooks('post')

    def serialize(self, serializer):
        """Serializes or deserializes the optimizer.

        It only saves or loads the following things:

        - Optimizer states
        - Global states (:attr:`t`, :attr:`epoch`, :attr:`d_tilde`, and
          :attr:`f`)

        **It does not save or load the parameters of the target link.** They
        should be separately saved or loaded.

        Args:
            serializer (~chainer.AbstractSerializer): Serializer or
                deserializer object.
        """
        super(Eve, self).serialize(serializer)
        self.d_tilde = serializer('d_tilde', self.d_tilde)
        self.f = serializer('f', self.f)

    def _update_d_tilde_and_f(self, loss):
        """Update the smoothed loss-change coefficient and remember the loss."""
        if self.t > 1:
            # Relative loss change, clipped to [1/c, c] before smoothing.
            d = abs(loss - self.f) / (min(loss, self.f) - self.f_star)
            d_hat = numpy.clip(d, 1/self.c, self.c)
            self.d_tilde = self.beta3 * self.d_tilde + (1 - self.beta3) * d_hat
        else:
            self.d_tilde = 1
        self.f = loss
| [
"numpy.clip",
"math.pow",
"math.sqrt",
"chainer.optimizer.HyperparameterProxy",
"chainer.optimizer.Hyperparameter"
] | [((270, 296), 'chainer.optimizer.Hyperparameter', 'optimizer.Hyperparameter', ([], {}), '()\n', (294, 296), False, 'from chainer import optimizer\n'), ((3911, 3949), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""alpha"""'], {}), "('alpha')\n", (3940, 3949), False, 'from chainer import optimizer\n'), ((3962, 4000), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""beta1"""'], {}), "('beta1')\n", (3991, 4000), False, 'from chainer import optimizer\n'), ((4013, 4051), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""beta2"""'], {}), "('beta2')\n", (4042, 4051), False, 'from chainer import optimizer\n'), ((4064, 4102), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""beta3"""'], {}), "('beta3')\n", (4093, 4102), False, 'from chainer import optimizer\n'), ((4111, 4145), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""c"""'], {}), "('c')\n", (4140, 4145), False, 'from chainer import optimizer\n'), ((4156, 4192), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""eps"""'], {}), "('eps')\n", (4185, 4192), False, 'from chainer import optimizer\n'), ((4203, 4239), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""eta"""'], {}), "('eta')\n", (4232, 4239), False, 'from chainer import optimizer\n'), ((4253, 4292), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""f_star"""'], {}), "('f_star')\n", (4282, 4292), False, 'from chainer import optimizer\n'), ((4317, 4367), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""weight_decay_rate"""'], {}), "('weight_decay_rate')\n", (4346, 4367), False, 'from chainer import optimizer\n'), ((4382, 4422), 'chainer.optimizer.HyperparameterProxy', 'optimizer.HyperparameterProxy', (['"""amsgrad"""'], {}), "('amsgrad')\n", (4411, 4422), False, 'from chainer import 
optimizer\n'), ((896, 917), 'math.pow', 'math.pow', (['hp.beta1', 't'], {}), '(hp.beta1, t)\n', (904, 917), False, 'import math\n'), ((934, 955), 'math.pow', 'math.pow', (['hp.beta2', 't'], {}), '(hp.beta2, t)\n', (942, 955), False, 'import math\n'), ((990, 1005), 'math.sqrt', 'math.sqrt', (['fix2'], {}), '(fix2)\n', (999, 1005), False, 'import math\n'), ((7096, 7129), 'numpy.clip', 'numpy.clip', (['d', '(1 / self.c)', 'self.c'], {}), '(d, 1 / self.c, self.c)\n', (7106, 7129), False, 'import numpy\n')] |
import cantera as ct
import numpy as np
import funcs.simulation.cell_size as cs
# Tolerances used by the tests below when comparing computed cell sizes and
# induction lengths against reference values: relative for ordinary
# magnitudes, absolute for near-zero values.
relTol = 1e-4
absTol = 1e-6
# noinspection PyProtectedMember
class TestAgainstDemo:
# Test calculated values against demo script
original_cell_sizes = {
'Gavrikov': 1.9316316546518768e-02,
'Ng': 6.5644825968914763e-03,
'Westbrook': 3.4852910825972942e-03
}
original_induction_lengths = {
'Gavrikov': 0.0001137734347788197,
'Ng': 0.00014438224858385156,
'Westbrook': 0.00012018245112404462
}
cj_speed = 1967.8454767711942
init_press = 100000
init_temp = 300
mechanism = 'Mevel2017.cti'
def test_induction_lengths(self):
c = cs.CellSize()
c(
base_mechanism=self.mechanism,
initial_temp=self.init_temp,
initial_press=self.init_press,
fuel='H2',
oxidizer='O2:1, N2:3.76',
equivalence=1,
diluent='None',
diluent_mol_frac=0,
cj_speed=self.cj_speed
)
assert (
all([[
abs(length - c.induction_length[correlation]) / length < relTol
for correlation, length in
self.original_induction_lengths.items()
], [
abs(length - c.induction_length[correlation]) < absTol
for correlation, length in
self.original_induction_lengths.items()
]
])
)
def test_cell_sizes(self):
c = cs.CellSize()
test = c(
base_mechanism=self.mechanism,
initial_temp=self.init_temp,
initial_press=self.init_press,
fuel='H2',
oxidizer='O2:1, N2:3.76',
equivalence=1,
diluent='None',
diluent_mol_frac=0,
cj_speed=self.cj_speed
)
assert (
all([[
abs(cell - test[correlation]) / cell < relTol
for correlation, cell in self.original_cell_sizes.items()
], [
abs(cell - test[correlation]) < absTol
for correlation, cell in self.original_cell_sizes.items()
]]
)
)
def test_build_gas_no_dilution(self):
c = cs.CellSize()
undiluted = {'H2': 2 / 3, 'O2': 1 / 3}
# should not dilute with diluent=None
c.mechanism = 'Mevel2017.cti'
c.initial_temp = self.init_temp
c.initial_press = self.init_press
c.fuel = 'H2'
c.oxidizer = 'O2'
c.inert = None
c.equivalence = 1
c.diluent = None
c.diluent_mol_frac = 0.5
c.perturbed_reaction = -1
test = c._build_gas_object().mole_fraction_dict()
check_none = [
np.isclose(undiluted[key], value) for key, value in test.items()
]
# should not dilute with diluent_mol_frac=0
c.diluent = 'AR'
c.diluent_mol_frac = 0
test = c._build_gas_object().mole_fraction_dict()
check_zero = [
np.isclose(undiluted[key], value) for key, value in test.items()
]
assert all([check_none, check_zero])
def test_build_gas_with_dilution(self):
c = cs.CellSize()
c.mechanism = 'Mevel2017.cti'
c.initial_temp = self.init_temp
c.initial_press = self.init_press
c.fuel = 'H2'
c.oxidizer = 'O2'
c.inert = None
c.equivalence = 1
c.diluent = 'AR'
c.diluent_mol_frac = 0.1
c.perturbed_reaction = -1
test = c._build_gas_object().mole_fraction_dict()
check = [
np.isclose(test['H2'] / test['O2'], 2),
np.isclose(test['AR'], 0.1)
]
assert all(check)
def test_perturbed(self):
c = cs.CellSize()
pert = 3
pert_frac = 0.01
c(
base_mechanism=self.mechanism,
initial_temp=self.init_temp,
initial_press=self.init_press,
fuel='H2',
oxidizer='O2:1, N2:3.76',
equivalence=1,
diluent=None,
diluent_mol_frac=0,
cj_speed=self.cj_speed,
perturbed_reaction=pert,
perturbation_fraction=pert_frac
)
n_rxns = c.base_gas.n_reactions
correct_multipliers = np.ones(n_rxns)
correct_multipliers[pert] = 1 + pert_frac
multipliers = [c.base_gas.multiplier(i) for i in range(n_rxns)]
assert np.allclose(multipliers, correct_multipliers)
def test_perturbed_diluted(self):
c = cs.CellSize()
pert = 3
pert_frac = 0.01
c(
base_mechanism='Mevel2017.cti',
initial_temp=300,
initial_press=101325,
fuel='H2',
oxidizer='O2',
equivalence=1,
diluent='AR',
diluent_mol_frac=0.02,
cj_speed=2834.9809153464994,
perturbed_reaction=pert,
perturbation_fraction=pert_frac
)
n_rxns = c.base_gas.n_reactions
correct_multipliers = np.ones(n_rxns)
correct_multipliers[pert] = 1 + pert_frac
multipliers = [c.base_gas.multiplier(i) for i in range(n_rxns)]
assert np.allclose(multipliers, correct_multipliers)
def test_perturbed_inert(self):
c = cs.CellSize()
pert = 3
pert_frac = 0.01
inert = 'AR'
c(
base_mechanism='Mevel2017.cti',
initial_temp=300,
initial_press=101325,
fuel='H2',
oxidizer='O2',
equivalence=1,
diluent='AR',
diluent_mol_frac=0.02,
cj_speed=2834.9809153464994,
perturbed_reaction=pert,
perturbation_fraction=pert_frac,
inert=inert
)
gas = ct.Solution(self.mechanism)
rxns = [rxn for rxn in gas.reactions() if
inert not in rxn.reactants or inert not in rxn.products]
checks = [c.base_gas.n_reactions == len(rxns)]
checks += [rxn0 == rxn1 for rxn0, rxn1 in
zip(rxns, c.base_gas.reactions())]
if __name__ == '__main__': # pragma: no cover
import subprocess
from tests.test_simulation.test_database import remove_stragglers
try:
subprocess.check_call(
'pytest test_cell_size.py -vv --noconftest --cov '
'--cov-report html'
)
except subprocess.CalledProcessError as e:
# clean up in case of an unexpected error cropping up
remove_stragglers()
raise e
remove_stragglers()
| [
"numpy.allclose",
"numpy.isclose",
"numpy.ones",
"subprocess.check_call",
"funcs.simulation.cell_size.CellSize",
"cantera.Solution",
"tests.test_simulation.test_database.remove_stragglers"
] | [((6643, 6662), 'tests.test_simulation.test_database.remove_stragglers', 'remove_stragglers', ([], {}), '()\n', (6660, 6662), False, 'from tests.test_simulation.test_database import remove_stragglers\n'), ((706, 719), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (717, 719), True, 'import funcs.simulation.cell_size as cs\n'), ((1536, 1549), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (1547, 1549), True, 'import funcs.simulation.cell_size as cs\n'), ((2300, 2313), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (2311, 2313), True, 'import funcs.simulation.cell_size as cs\n'), ((3264, 3277), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (3275, 3277), True, 'import funcs.simulation.cell_size as cs\n'), ((3834, 3847), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (3845, 3847), True, 'import funcs.simulation.cell_size as cs\n'), ((4371, 4386), 'numpy.ones', 'np.ones', (['n_rxns'], {}), '(n_rxns)\n', (4378, 4386), True, 'import numpy as np\n'), ((4524, 4569), 'numpy.allclose', 'np.allclose', (['multipliers', 'correct_multipliers'], {}), '(multipliers, correct_multipliers)\n', (4535, 4569), True, 'import numpy as np\n'), ((4621, 4634), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (4632, 4634), True, 'import funcs.simulation.cell_size as cs\n'), ((5136, 5151), 'numpy.ones', 'np.ones', (['n_rxns'], {}), '(n_rxns)\n', (5143, 5151), True, 'import numpy as np\n'), ((5289, 5334), 'numpy.allclose', 'np.allclose', (['multipliers', 'correct_multipliers'], {}), '(multipliers, correct_multipliers)\n', (5300, 5334), True, 'import numpy as np\n'), ((5384, 5397), 'funcs.simulation.cell_size.CellSize', 'cs.CellSize', ([], {}), '()\n', (5395, 5397), True, 'import funcs.simulation.cell_size as cs\n'), ((5889, 5916), 'cantera.Solution', 'ct.Solution', (['self.mechanism'], {}), '(self.mechanism)\n', (5900, 5916), True, 'import 
cantera as ct\n'), ((6357, 6452), 'subprocess.check_call', 'subprocess.check_call', (['"""pytest test_cell_size.py -vv --noconftest --cov --cov-report html"""'], {}), "(\n 'pytest test_cell_size.py -vv --noconftest --cov --cov-report html')\n", (6378, 6452), False, 'import subprocess\n'), ((2810, 2843), 'numpy.isclose', 'np.isclose', (['undiluted[key]', 'value'], {}), '(undiluted[key], value)\n', (2820, 2843), True, 'import numpy as np\n'), ((3087, 3120), 'numpy.isclose', 'np.isclose', (['undiluted[key]', 'value'], {}), '(undiluted[key], value)\n', (3097, 3120), True, 'import numpy as np\n'), ((3675, 3713), 'numpy.isclose', 'np.isclose', (["(test['H2'] / test['O2'])", '(2)'], {}), "(test['H2'] / test['O2'], 2)\n", (3685, 3713), True, 'import numpy as np\n'), ((3727, 3754), 'numpy.isclose', 'np.isclose', (["test['AR']", '(0.1)'], {}), "(test['AR'], 0.1)\n", (3737, 3754), True, 'import numpy as np\n'), ((6602, 6621), 'tests.test_simulation.test_database.remove_stragglers', 'remove_stragglers', ([], {}), '()\n', (6619, 6621), False, 'from tests.test_simulation.test_database import remove_stragglers\n')] |
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
from carla.client import VehicleControl
from carla.agent import Agent
import matplotlib.pyplot as plt
import psutil
import gc
gc.enable()
import pickle
import time
import os
import ZGH_BRL_VBVC as ZGH
class ZGH_BRL_Agent(Agent):
"""
Agent implementation class for using with Carla 8.4
"""
def __init__(self, trained_model_pth=None, verbose=False,
start_as_test=False, save_models=True,
segment_image=False):
super(ZGH_BRL_Agent, self).__init__()
self.segment_image = segment_image
self.save_models_at_end_of_phase = save_models
self.verbose = verbose
self.show_img = False
self.trained_model_pth = trained_model_pth
self.dirs = ['Reach goal', 'Unknown', 'Lane follow', 'Left', 'Right', 'Forward']
self._dist_for_success = 2.0 # Also used by driving_benchmark.py for detecting success
self.reset_counter = 0
if self.trained_model_pth is not None:
print("Loading model from: {}".format(self.trained_model_pth))
self.load_model(self.trained_model_pth)
print(self.info.episode)
if start_as_test:
print("The loaded model will be put in test mode (No learning)")
for agent in self.learner:
agent.isLearning = False
self.info.phase = 4
self.info.episode = self.info.epi_max_III
self.save_models_at_end_of_phase = False
else:
print("Model training will be resumed from checkpoint")
# No need to do anything since everything is already loaded
else:
self.info, self.learner = ZGH.Initialization.init_all()
self.info.was_reset = True
self.info.reset_collision()
self.reset = True
self.frame_cnt = 0
self.train_frq = 7
self.intermediate_frq = 10
self.last_control = VehicleControl()
# Default value for master action (This will change during phase 1)
self.master_action = 0
self.old_target_dist = 0.
if self.segment_image:
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import sys
sys.path.append("fcn")
from fcn.prepare_psp import get_psp_resnet50_ade
from fcn.prepare_EncNet import get_encnet_resnet101_ade
self.use_psp = False
pretrained = False
freeze_up_to = None
aux = True
fusion_method = None
norm_layer = nn.BatchNorm2d
if self.use_psp:
my_nclass = 9
test_model_file = 'path_to_weights' + '/pspnet_001.pth'
single_model, _ = get_psp_resnet50_ade(pretrained, my_nclass, freeze_up_to,
aux, norm_layer, fusion_method)
else:
my_nclass = 10
test_model_file = 'path_to_weights' + '/EncNet_no_dataAug_epoch145.pth'
single_model = get_encnet_resnet101_ade(my_nclass, pretrained, se_loss=True)
single_model.cuda()
bestparams = torch.load(test_model_file)
single_model.load_state_dict(bestparams['model_state'])
single_model.aux = False
single_model.eval()
del single_model.auxlayer
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# Depth and Normal are using the same normalization values as for RBGs now.
])
self.sem_model = single_model
self.memory_used = psutil.virtual_memory().used
if self.verbose:
print("Initialization of BRL agent done!")
def load_model(self, fname):
data = pickle.load(open(fname, 'rb'))
self.info = data['info']
self.learner = data['agents']
def save_model(self):
fname = "{}/models/ZGH_phase{}_episodes{}_{}.pck".format(os.path.dirname(os.path.realpath(__file__)),
self.info.phase, self.info.episode, time.time())
data = {'info': self.info, 'agents': self.learner}
pickle.dump(data, open(fname, 'wb'))
def print_memory_used(self):
print(psutil.virtual_memory().used - self.memory_used)
def run_step(self, measurements, sensor_data, directions, target):
"""
Defines the steps taken by the agent
:param measurements: World measurements
:param sensor_data: Data from specified sensors
:param directions: Direction to take in intersection (Left, Right, Forward)
:param target: ???
:return: VehicleControl message
"""
if True:
self.frame_cnt += 1
if measurements.player_measurements.intersection_offroad > 0.99:
# Do nothing when outside of road
return VehicleControl()
# After each reset the timestamp is reset, wait until simulator is ready (t_delay ms)
t_delay = 3000
if measurements.game_timestamp < t_delay:
# Less than X ms since episode start
self.reset = True
self.last_control = VehicleControl()
self.info.was_reset = True
return self.last_control
elif self.reset:
self.info.was_reset = True
self.reset = False
self.reset_counter += 1
self.frame_cnt = 0
self.old_target_dist = self._dist_to_target(measurements, target)
if self.frame_cnt < self.train_frq:
# Resend signal
return self.last_control
else:
self.frame_cnt = 0
# Get sensor data of interest
if self.info.phase == 1 and self.frame_cnt % (self.train_frq * 20) == 0:
self.master_action = np.random.choice([0, 3])
if self.frame_cnt % self.train_frq == 0:
if self.segment_image:
self.sem_image_GT = sensor_data.get('CameraSem', None)
if self.sem_image_GT is not None:
self.sem_image_GT = self.sem_image_GT.data
rgb_img = sensor_data.get('Camera')
if rgb_img is not None:
rgb_img = rgb_img.data.copy() # np.expand_dims(rgb_img.data, axis=0)
import torch
with torch.no_grad():
output = self.sem_model(torch.unsqueeze(self.transform(rgb_img).cuda(), 0))
score_maps = output[0].data
_, predict = torch.max(score_maps, 1)
self.sem_image = predict.cpu().numpy().astype('uint8')
if self.use_psp:
# Process image to keep same labels (9 classes)
self.sem_image[self.sem_image == 7] = 10
self.sem_image[self.sem_image == 4] = 7
self.sem_image[self.sem_image == 1] = 4
self.sem_image[self.sem_image == 0] = 1
self.sem_image[self.sem_image == 6] = 9
self.sem_image[self.sem_image == 3] = 6
self.sem_image[self.sem_image == 8] = 11
self.sem_image[self.sem_image == 5] = 8
self.sem_image[self.sem_image == 255] = 3
else:
# Process image to keep same labels (10 classes)
self.sem_image[(self.sem_image >= 2) & (self.sem_image <= 9)] += 2
self.sem_image[self.sem_image == 255] = 3
self.sem_image[self.sem_image == 1] = 2
self.sem_image[self.sem_image == 0] = 1
# Clear some memory
del rgb_img
del output
del score_maps
del predict
# Specify ROI
dh = 40
dw = 5
# Shape: height x width
self.sem_image = np.transpose(self.sem_image, axes=(1, 2, 0))[:, :, 0]
self.sem_image = self.sem_image[dh:self.sem_image.shape[0]-dh, dw:self.sem_image.shape[1]-dw]
else:
self.sem_image = None
else:
self.sem_image = sensor_data.get('CameraSem', None)
if self.sem_image is not None:
self.sem_image = self.sem_image.data
if self.sem_image is not None:
if self.show_img:
plt.imshow(self.sem_image)
plt.waitforbuttonpress()
# Depth Maps
self.depth_image = sensor_data.get('CameraDepth', None)
if self.depth_image is not None:
self.depth_image = self.depth_image.data
brl_epi = self._progress(self.sem_image, self.depth_image, measurements, target, directions)
gc.collect()
return self.last_control
def _progress(self, image, depth_image, measurements, target, directions):
"""
Function for performing all in main if (train /predict)
Will update the self.last_control
image: Input image
measurements; Object with all vehicle information
target: Information about the target position
directions: Top-level planning instructions (Forward, Backward, Left, Right)
"""
# self.info.episode += 1
action = None
if self.info.episode < self.info.epi_max_II:
if self.info.phase != 2:
self.info.phase = 2
print("\nStarting phase 2")
self.learner[0].isLearning = True
else:
if self.info.phase != 4:
print("\nStarting test phase")
print("reset_counter: ", self.reset_counter)
self.info.phase = 4
self.learner[0].isLearning = False
print("", end='\r')
print("Episode: {}\t speed: {} \t direction: {}".format(self.info.episode,
measurements.player_measurements.forward_speed * 3.6,
self.dirs[int(directions)]), end=" ", flush=True)
# Update state of all agents (agents.nx is calculated)
self.learner[0].update_state(image, depth_image, self.segment_image)
# Handle if simulation has been reset or in the start when no motor action performed
if self.info.episode == 1 or self.info.episode == self.info.epi_max_II or self.info.was_reset:
self.info.was_reset = False
self.last_control = self._get_control_signal(0)
else:
# Based on the updated state (agent.nx),
# motor_rewarding, br_learning, updating_brl_components and log_statistics are done
dist = self._dist_to_target(measurements, target)
self.learner = ZGH.Rewarding.get_motor_reward(self.info, self.learner, measurements, image,
self.old_target_dist - dist)
self.old_target_dist = dist
self.learner[0].perform_brl_learning()
self.learner[0].update_learning_parameters()
self.learner[0].update_log_statistics()
# Update the current state as well as
self.learner[0].x = self.learner[6].nx
self.learner[0].update_components()
self.learner, action, action_type = ZGH.DecisionMaking.action_selection(self.learner,
self.dirs[int(directions)])
if action is not None:
self.last_control = self._get_control_signal(action)
self.info.episode += 1
if self.verbose:
print("Episode nr. {}".format(self.info.episode), end="\n")
print("Action: {}\tAction type: {}".format(action, action_type))
print("Control msg: {}".format(self.last_control), end="\r")
return self.info.episode
@staticmethod
def _dist_to_target(measurements, target):
"""
Calculates the bird distance from current position to goal
:param measurments:
:param target:
:return: Distance from player to target
"""
lp = measurements.player_measurements.transform.location
lt = target.location
return np.sqrt((lp.x - lt.x) ** 2 + (lp.y - lt.y) ** 2 + (lp.z - lt.z) ** 2)
@staticmethod
def _get_control_signal(action):
"""
Convert an action index to a VehicleControl() msg
:param action: Action index
:return: VehicleControl msg
"""
control = VehicleControl()
control.reverse = False
control.steer = 0.
control.throttle = 0.
control.brake = 0.
if action == 0: # Fast forward
control.throttle = 0.5
elif action == 1: # right turn
control.steer = 0.4
control.throttle = 0.35
elif action == 2: # left turn
control.steer = -0.4
control.throttle = 0.35
elif action == 3: # reverse
control.reverse = True
control.throttle = 0.4
return control
| [
"numpy.sqrt",
"torch.max",
"psutil.virtual_memory",
"fcn.prepare_EncNet.get_encnet_resnet101_ade",
"fcn.prepare_psp.get_psp_resnet50_ade",
"sys.path.append",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.waitforbuttonpress",
"gc.enable",
"torchvision.transforms.ToTensor",
"numpy.random.choice",... | [((261, 272), 'gc.enable', 'gc.enable', ([], {}), '()\n', (270, 272), False, 'import gc\n'), ((2097, 2113), 'carla.client.VehicleControl', 'VehicleControl', ([], {}), '()\n', (2111, 2113), False, 'from carla.client import VehicleControl\n'), ((9683, 9695), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9693, 9695), False, 'import gc\n'), ((13205, 13274), 'numpy.sqrt', 'np.sqrt', (['((lp.x - lt.x) ** 2 + (lp.y - lt.y) ** 2 + (lp.z - lt.z) ** 2)'], {}), '((lp.x - lt.x) ** 2 + (lp.y - lt.y) ** 2 + (lp.z - lt.z) ** 2)\n', (13212, 13274), True, 'import numpy as np\n'), ((13503, 13519), 'carla.client.VehicleControl', 'VehicleControl', ([], {}), '()\n', (13517, 13519), False, 'from carla.client import VehicleControl\n'), ((1850, 1879), 'ZGH_BRL_VBVC.Initialization.init_all', 'ZGH.Initialization.init_all', ([], {}), '()\n', (1877, 1879), True, 'import ZGH_BRL_VBVC as ZGH\n'), ((2440, 2462), 'sys.path.append', 'sys.path.append', (['"""fcn"""'], {}), "('fcn')\n", (2455, 2462), False, 'import sys\n'), ((3385, 3412), 'torch.load', 'torch.load', (['test_model_file'], {}), '(test_model_file)\n', (3395, 3412), False, 'import torch\n'), ((3949, 3972), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (3970, 3972), False, 'import psutil\n'), ((4448, 4459), 'time.time', 'time.time', ([], {}), '()\n', (4457, 4459), False, 'import time\n'), ((11721, 11830), 'ZGH_BRL_VBVC.Rewarding.get_motor_reward', 'ZGH.Rewarding.get_motor_reward', (['self.info', 'self.learner', 'measurements', 'image', '(self.old_target_dist - dist)'], {}), '(self.info, self.learner, measurements, image,\n self.old_target_dist - dist)\n', (11751, 11830), True, 'import ZGH_BRL_VBVC as ZGH\n'), ((2952, 3045), 'fcn.prepare_psp.get_psp_resnet50_ade', 'get_psp_resnet50_ade', (['pretrained', 'my_nclass', 'freeze_up_to', 'aux', 'norm_layer', 'fusion_method'], {}), '(pretrained, my_nclass, freeze_up_to, aux, norm_layer,\n fusion_method)\n', (2972, 3045), False, 'from 
fcn.prepare_psp import get_psp_resnet50_ade\n'), ((3265, 3326), 'fcn.prepare_EncNet.get_encnet_resnet101_ade', 'get_encnet_resnet101_ade', (['my_nclass', 'pretrained'], {'se_loss': '(True)'}), '(my_nclass, pretrained, se_loss=True)\n', (3289, 3326), False, 'from fcn.prepare_EncNet import get_encnet_resnet101_ade\n'), ((4318, 4344), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4334, 4344), False, 'import os\n'), ((5261, 5277), 'carla.client.VehicleControl', 'VehicleControl', ([], {}), '()\n', (5275, 5277), False, 'from carla.client import VehicleControl\n'), ((5581, 5597), 'carla.client.VehicleControl', 'VehicleControl', ([], {}), '()\n', (5595, 5597), False, 'from carla.client import VehicleControl\n'), ((6288, 6312), 'numpy.random.choice', 'np.random.choice', (['[0, 3]'], {}), '([0, 3])\n', (6304, 6312), True, 'import numpy as np\n'), ((3655, 3676), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3674, 3676), True, 'import torchvision.transforms as transforms\n'), ((3694, 3769), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3714, 3769), True, 'import torchvision.transforms as transforms\n'), ((4614, 4637), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (4635, 4637), False, 'import psutil\n'), ((9277, 9303), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.sem_image'], {}), '(self.sem_image)\n', (9287, 9303), True, 'import matplotlib.pyplot as plt\n'), ((9328, 9352), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (9350, 9352), True, 'import matplotlib.pyplot as plt\n'), ((6864, 6879), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6877, 6879), False, 'import torch\n'), ((7083, 7107), 'torch.max', 'torch.max', (['score_maps', '(1)'], {}), '(score_maps, 1)\n', (7092, 7107), False, 'import 
torch\n'), ((8716, 8760), 'numpy.transpose', 'np.transpose', (['self.sem_image'], {'axes': '(1, 2, 0)'}), '(self.sem_image, axes=(1, 2, 0))\n', (8728, 8760), True, 'import numpy as np\n')] |
from os.path import join
from numpy import sqrt, pi, linspace, array, zeros
from numpy.testing import assert_almost_equal
from multiprocessing import cpu_count
import pytest
from SciDataTool.Functions.Plot.plot_2D import plot_2D
from pyleecan.Classes.OPdq import OPdq
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.Electrical import Electrical
from pyleecan.Classes.EEC_PMSM import EEC_PMSM
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.VarLoadCurrent import VarLoadCurrent
from pyleecan.Functions.load import load
from pyleecan.Functions.Plot import dict_2D
from pyleecan.definitions import DATA_DIR
from Tests import save_validation_path as save_path
is_show_fig = False
@pytest.mark.long_5s
@pytest.mark.MagFEMM
@pytest.mark.EEC_PMSM
@pytest.mark.IPMSM
@pytest.mark.periodicity
@pytest.mark.SingleOP
def test_EEC_PMSM(nb_worker=int(0.5 * cpu_count())):
"""Validation of the PMSM Electrical Equivalent Circuit by comparing torque with MagFEMM"""
Toyota_Prius = load(join(DATA_DIR, "Machine", "Toyota_Prius.json"))
simu = Simu1(name="test_EEC_PMSM", machine=Toyota_Prius)
# Definition of the input
simu.input = InputCurrent(OP=OPdq(N0=2000), Nt_tot=8 * 16, Na_tot=2048)
simu.input.set_Id_Iq(I0=250 / sqrt(2), Phi0=60 * pi / 180)
# Definition of the magnetic simulation
simu_mag = simu.copy()
simu_mag.mag = MagFEMM(
is_periodicity_a=True, is_periodicity_t=True, nb_worker=nb_worker, T_mag=60
)
# Definition of the electrical simulation
simu.elec = Electrical()
simu.elec.eec = EEC_PMSM(
fluxlink=MagFEMM(
is_periodicity_t=True,
is_periodicity_a=True,
nb_worker=nb_worker,
T_mag=60,
),
)
out = simu.run()
out_mag = simu_mag.run()
# from Yang et al, 2013
assert out.elec.Tem_av_ref == pytest.approx(82.1, rel=0.1)
assert out_mag.mag.Tem_av == pytest.approx(82, rel=0.1)
# Plot 3-phase current function of time
if is_show_fig:
out.elec.get_Is().plot_2D_Data(
"time",
"phase[]",
# save_path=join(save_path, "EEC_FEMM_IPMSM_currents.png"),
# is_show_fig=False,
**dict_2D
)
return out
@pytest.mark.long_5s
@pytest.mark.long_1m
@pytest.mark.MagFEMM
@pytest.mark.EEC_PMSM
@pytest.mark.IPMSM
@pytest.mark.periodicity
def test_EEC_PMSM_sync_rel(nb_worker=int(0.5 * cpu_count())):
"""Validation of the PMSM Electrical Equivalent Circuit with the Prius machine
Compute Torque from EEC results and compare with Yang et al, 2013
"""
Toyota_Prius = load(join(DATA_DIR, "Machine", "Toyota_Prius.json"))
simu = Simu1(name="test_EEC_PMSM_sync_rel", machine=Toyota_Prius)
# Definition of the input
simu.input = InputCurrent(
OP=OPdq(N0=2000, Tem_av_ref=79), Nt_tot=8 * 16, Na_tot=2048
)
simu.input.set_Id_Iq(I0=250 / sqrt(2), Phi0=60 * pi / 180)
# Definition of the simulation (FEMM)
simu.elec = Electrical()
simu.elec.eec = EEC_PMSM(
fluxlink=MagFEMM(
is_periodicity_t=True,
is_periodicity_a=True,
nb_worker=nb_worker,
T_mag=60,
),
)
# Creating the Operating point matrix
Tem_av_ref = array([79, 125, 160, 192, 237, 281, 319, 343, 353, 332, 266, 164, 22])
N_simu = Tem_av_ref.size
Phi0_ref = linspace(60 * pi / 180, 180 * pi / 180, N_simu)
OP_matrix = zeros((N_simu, 4))
# Set N0 = 2000 [rpm] for all simulation
OP_matrix[:, 0] = 2000
# Set I0 = 250/sqrt(2) [A] (RMS) for all simulations
OP_matrix[:, 1] = 250 / sqrt(2)
# Set Phi0 from 60 to 180
OP_matrix[:, 2] = Phi0_ref
# Set reference torque from Yang et al, 2013
OP_matrix[:, 3] = Tem_av_ref
simu.var_simu = VarLoadCurrent(
is_torque=True, OP_matrix=OP_matrix, type_OP_matrix=0, is_keep_all_output=True
)
out = simu.run()
Tem_eec = [out_ii.elec.Tem_av_ref for out_ii in out.output_list]
Tem_sync = zeros(N_simu)
Tem_rel = zeros(N_simu)
for ii, out_ii in enumerate(out.output_list):
Tem_sync[ii], Tem_rel[ii] = out_ii.elec.eec.comp_torque_sync_rel()
Tem2 = Tem_sync + Tem_rel
assert_almost_equal(Tem_eec - Tem2, 0, decimal=12)
if is_show_fig:
plot_2D(
array([x * 180 / pi for x in out.xoutput_dict["Phi0"].result]),
[Tem_eec, Tem_av_ref],
legend_list=["Pyleecan", "Yang et al, 2013"],
xlabel="Current angle [deg]",
ylabel="Electrical torque [N.m]",
title="Electrical torque vs current angle",
**dict_2D
)
plot_2D(
array([x * 180 / pi for x in out.xoutput_dict["Phi0"].result]),
[Tem_eec, Tem_sync, Tem_rel],
legend_list=["Overall", "Synchronous", "Reluctant"],
xlabel="Current angle [deg]",
ylabel="Electrical torque [N.m]",
title="Electrical torque vs current angle",
**dict_2D
)
return out
# To run it without pytest
if __name__ == "__main__":
out = test_EEC_PMSM()
out = test_EEC_PMSM_sync_rel()
print("Done")
| [
"pytest.approx",
"numpy.sqrt",
"pyleecan.Classes.Simu1.Simu1",
"pyleecan.Classes.OPdq.OPdq",
"os.path.join",
"multiprocessing.cpu_count",
"pyleecan.Classes.Electrical.Electrical",
"pyleecan.Classes.MagFEMM.MagFEMM",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.testing.assert_almost_e... | [((1139, 1188), 'pyleecan.Classes.Simu1.Simu1', 'Simu1', ([], {'name': '"""test_EEC_PMSM"""', 'machine': 'Toyota_Prius'}), "(name='test_EEC_PMSM', machine=Toyota_Prius)\n", (1144, 1188), False, 'from pyleecan.Classes.Simu1 import Simu1\n'), ((1450, 1538), 'pyleecan.Classes.MagFEMM.MagFEMM', 'MagFEMM', ([], {'is_periodicity_a': '(True)', 'is_periodicity_t': '(True)', 'nb_worker': 'nb_worker', 'T_mag': '(60)'}), '(is_periodicity_a=True, is_periodicity_t=True, nb_worker=nb_worker,\n T_mag=60)\n', (1457, 1538), False, 'from pyleecan.Classes.MagFEMM import MagFEMM\n'), ((1612, 1624), 'pyleecan.Classes.Electrical.Electrical', 'Electrical', ([], {}), '()\n', (1622, 1624), False, 'from pyleecan.Classes.Electrical import Electrical\n'), ((2765, 2823), 'pyleecan.Classes.Simu1.Simu1', 'Simu1', ([], {'name': '"""test_EEC_PMSM_sync_rel"""', 'machine': 'Toyota_Prius'}), "(name='test_EEC_PMSM_sync_rel', machine=Toyota_Prius)\n", (2770, 2823), False, 'from pyleecan.Classes.Simu1 import Simu1\n'), ((3082, 3094), 'pyleecan.Classes.Electrical.Electrical', 'Electrical', ([], {}), '()\n', (3092, 3094), False, 'from pyleecan.Classes.Electrical import Electrical\n'), ((3353, 3423), 'numpy.array', 'array', (['[79, 125, 160, 192, 237, 281, 319, 343, 353, 332, 266, 164, 22]'], {}), '([79, 125, 160, 192, 237, 281, 319, 343, 353, 332, 266, 164, 22])\n', (3358, 3423), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((3468, 3515), 'numpy.linspace', 'linspace', (['(60 * pi / 180)', '(180 * pi / 180)', 'N_simu'], {}), '(60 * pi / 180, 180 * pi / 180, N_simu)\n', (3476, 3515), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((3532, 3550), 'numpy.zeros', 'zeros', (['(N_simu, 4)'], {}), '((N_simu, 4))\n', (3537, 3550), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((3880, 3978), 'pyleecan.Classes.VarLoadCurrent.VarLoadCurrent', 'VarLoadCurrent', ([], {'is_torque': '(True)', 'OP_matrix': 'OP_matrix', 
'type_OP_matrix': '(0)', 'is_keep_all_output': '(True)'}), '(is_torque=True, OP_matrix=OP_matrix, type_OP_matrix=0,\n is_keep_all_output=True)\n', (3894, 3978), False, 'from pyleecan.Classes.VarLoadCurrent import VarLoadCurrent\n'), ((4097, 4110), 'numpy.zeros', 'zeros', (['N_simu'], {}), '(N_simu)\n', (4102, 4110), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((4125, 4138), 'numpy.zeros', 'zeros', (['N_simu'], {}), '(N_simu)\n', (4130, 4138), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((4299, 4349), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(Tem_eec - Tem2)', '(0)'], {'decimal': '(12)'}), '(Tem_eec - Tem2, 0, decimal=12)\n', (4318, 4349), False, 'from numpy.testing import assert_almost_equal\n'), ((1080, 1126), 'os.path.join', 'join', (['DATA_DIR', '"""Machine"""', '"""Toyota_Prius.json"""'], {}), "(DATA_DIR, 'Machine', 'Toyota_Prius.json')\n", (1084, 1126), False, 'from os.path import join\n'), ((1937, 1965), 'pytest.approx', 'pytest.approx', (['(82.1)'], {'rel': '(0.1)'}), '(82.1, rel=0.1)\n', (1950, 1965), False, 'import pytest\n'), ((1999, 2025), 'pytest.approx', 'pytest.approx', (['(82)'], {'rel': '(0.1)'}), '(82, rel=0.1)\n', (2012, 2025), False, 'import pytest\n'), ((2706, 2752), 'os.path.join', 'join', (['DATA_DIR', '"""Machine"""', '"""Toyota_Prius.json"""'], {}), "(DATA_DIR, 'Machine', 'Toyota_Prius.json')\n", (2710, 2752), False, 'from os.path import join\n'), ((3708, 3715), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (3712, 3715), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((944, 955), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (953, 955), False, 'from multiprocessing import cpu_count\n'), ((1253, 1266), 'pyleecan.Classes.OPdq.OPdq', 'OPdq', ([], {'N0': '(2000)'}), '(N0=2000)\n', (1257, 1266), False, 'from pyleecan.Classes.OPdq import OPdq\n'), ((1672, 1760), 'pyleecan.Classes.MagFEMM.MagFEMM', 'MagFEMM', ([], {'is_periodicity_t': '(True)', 
'is_periodicity_a': '(True)', 'nb_worker': 'nb_worker', 'T_mag': '(60)'}), '(is_periodicity_t=True, is_periodicity_a=True, nb_worker=nb_worker,\n T_mag=60)\n', (1679, 1760), False, 'from pyleecan.Classes.MagFEMM import MagFEMM\n'), ((2505, 2516), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2514, 2516), False, 'from multiprocessing import cpu_count\n'), ((2897, 2925), 'pyleecan.Classes.OPdq.OPdq', 'OPdq', ([], {'N0': '(2000)', 'Tem_av_ref': '(79)'}), '(N0=2000, Tem_av_ref=79)\n', (2901, 2925), False, 'from pyleecan.Classes.OPdq import OPdq\n'), ((3142, 3230), 'pyleecan.Classes.MagFEMM.MagFEMM', 'MagFEMM', ([], {'is_periodicity_t': '(True)', 'is_periodicity_a': '(True)', 'nb_worker': 'nb_worker', 'T_mag': '(60)'}), '(is_periodicity_t=True, is_periodicity_a=True, nb_worker=nb_worker,\n T_mag=60)\n', (3149, 3230), False, 'from pyleecan.Classes.MagFEMM import MagFEMM\n'), ((4401, 4465), 'numpy.array', 'array', (["[(x * 180 / pi) for x in out.xoutput_dict['Phi0'].result]"], {}), "([(x * 180 / pi) for x in out.xoutput_dict['Phi0'].result])\n", (4406, 4465), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((4764, 4828), 'numpy.array', 'array', (["[(x * 180 / pi) for x in out.xoutput_dict['Phi0'].result]"], {}), "([(x * 180 / pi) for x in out.xoutput_dict['Phi0'].result])\n", (4769, 4828), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((1330, 1337), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1334, 1337), False, 'from numpy import sqrt, pi, linspace, array, zeros\n'), ((2994, 3001), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (2998, 3001), False, 'from numpy import sqrt, pi, linspace, array, zeros\n')] |
import numpy as np
class ClusterProcessor(object):
def __init__(self, dataset):
self.dataset = dataset
self.dtype = np.float32
def __len__(self):
return self.dataset.size
def build_adj(self, node, edge):
node = list(node)
abs2rel = {}
rel2abs = {}
for i, n in enumerate(node):
abs2rel[n] = i
rel2abs[i] = n
size = len(node)
adj = np.eye(size)
for e in edge:
w = 1.
if len(e) == 2:
e1, e2 = e
elif len(e) == 3:
e1, e2, dist = e
if not self.dataset.wo_weight:
w = 1. - dist
else:
raise ValueError('Unknown length of e: {}'.format(e))
v1 = abs2rel[e1]
v2 = abs2rel[e2]
adj[v1][v2] = w
adj[v2][v1] = w
if self.dataset.is_norm_adj:
adj /= adj.sum(axis=1, keepdims=True)
return adj, abs2rel, rel2abs
def build_features(self, node):
if self.dataset.featureless:
features = np.ones(len(node)).reshape(-1, 1)
else:
features = self.dataset.features[node, :]
return features
def __getitem__(self, idx):
raise NotImplementedError
| [
"numpy.eye"
] | [((442, 454), 'numpy.eye', 'np.eye', (['size'], {}), '(size)\n', (448, 454), True, 'import numpy as np\n')] |
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
    '''
    Functions to compute observable properties.
    '''
    def _compute_class_balance(self, class_balance=None, Y_dev=None):
        '''Build the joint distribution over the binary variables Y_0..Y_{v-1}.

        Priority: an explicit `class_balance` vector (normalized to sum to 1),
        else empirical frequencies from `Y_dev` (rows of +/-1 label vectors),
        else a uniform distribution over all 2**v label vectors.  `self.v` is
        expected to be defined by the class this Mixin is mixed into.
        '''
        # generate class balance of Ys
        Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
        cardinalities = [ 2 for i in range(self.v) ]  # every Y is binary
        if class_balance is not None:
            # Normalize so the probabilities sum to one.
            # NOTE(review): `/ sum(...)` assumes class_balance supports
            # elementwise division (e.g. a numpy array) -- confirm callers.
            class_balance = class_balance / sum(class_balance)
            cb = JointProbabilityDistribution(
                Ys_ordered, cardinalities, class_balance
            )
        elif Y_dev is not None:
            Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
            vals = { Y: (-1, 1) for Y in Ys_ordered }
            # Enumerate every +/-1 label vector in a fixed (sorted) order so
            # the counts line up with the distribution's value ordering.
            Y_vecs = sorted([
                [ vec_dict[Y] for Y in Ys_ordered ]
                for vec_dict in dict_product(vals)
            ])
            counts = {
                tuple(Y_vec): 0
                for Y_vec in Y_vecs
            }
            for data_point in Y_dev:
                counts[tuple(data_point)] += 1
            # Empirical distribution from dev-set label frequencies.
            cb = JointProbabilityDistribution(
                Ys_ordered, cardinalities,
                [
                    float(counts[tuple(Y_vec)]) / len(Y_dev)
                    for Y_vec in Y_vecs
                ])
        else:
            # No prior information: fall back to a uniform joint distribution.
            num_combinations = 2 ** self.v
            cb = JointProbabilityDistribution(
                Ys_ordered, cardinalities, [
                    1. / num_combinations for i in range(num_combinations)
                ])
        return cb
    def _compute_Y_marginals(self, Y_marginals):
        '''Fill `Y_marginals` (dict keyed by tuples of Y indices) with the
        corresponding marginal distributions of the class balance `self.cb`.

        Mutates and returns the input dict.
        '''
        for marginal in Y_marginals:
            nodes = [ 'Y_{}'.format(idx) for idx in marginal ]
            Y_marginals[marginal] = self.cb.marginal_distribution(
                nodes,
                inplace=False
            )
        return Y_marginals
    def _compute_Y_equals_one(self, Y_equals_one):
        '''For each factor (tuple of Y indices) compute the probability that
        the product of its Y values equals +1, under the class balance.

        Mutates (and, past this chunk, returns) the input dict.
        '''
        # compute from class balance
        for factor in Y_equals_one:
            nodes = [ 'Y_{}'.format(idx) for idx in factor ]
            Y_marginal = self.cb.marginal_distribution(
                nodes,
                inplace=False
            )
            vals = { Y: (-1, 1) for Y in nodes }
            Y_vecs = sorted([
                [ vec_dict[Y] for Y in nodes ]
                for vec_dict in dict_product(vals)
            ])
            # add up the probabilities of all the vectors whose values multiply to +1
            total_prob = 0
            for Y_vec in Y_vecs:
                if np.prod(Y_vec) == 1:
                    # reduce() pins each Y_i to its observed value; the code
                    # maps -1 to state index 0 and +1 to state index 1.
                    vector_prob = Y_marginal.reduce(
                        [
                            (Y_i, Y_val if Y_val == 1 else 0)
                            for Y_i, Y_val in zip(nodes, Y_vec)
                        ],
                        inplace=False
                    ).values
                    total_prob += vector_prob
            Y_equals_one[factor] = total_prob
return Y_equals_one | [
"numpy.prod",
"pgmpy.factors.discrete.JointProbabilityDistribution"
] | [((688, 758), 'pgmpy.factors.discrete.JointProbabilityDistribution', 'JointProbabilityDistribution', (['Ys_ordered', 'cardinalities', 'class_balance'], {}), '(Ys_ordered, cardinalities, class_balance)\n', (716, 758), False, 'from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor\n'), ((2783, 2797), 'numpy.prod', 'np.prod', (['Y_vec'], {}), '(Y_vec)\n', (2790, 2797), True, 'import numpy as np\n')] |
import numpy as np
from hand import Hand

# Monte Carlo estimate of the probability that an Affinity opening hand can
# deploy a 2-drop on turn 1 ("t1 2-drop") or a 3-drop on turn 1 ("t1 3-drop"),
# for each hand size from `starting_size` down to `mullto` (mulligans).
iterations = 250000
starting_size = 8  # inclusive
mullto = 7  # inclusive
hand = Hand("decklists/affinity.txt")
hand_types = ["t1 2-drop", "t1 3-drop"]
hand_counts = np.zeros(((starting_size + 1) - mullto, len(hand_types)))
totals = np.zeros(((starting_size + 1) - mullto, 1))
zero_creatures = ["Memnite", "Ornithopter"]
zero_others = "Welding Jar"
ones = ["Signal Pest", "Vault Skirge"]
twos = ["Arcbound Ravager", "Cranial Plating", "Steel Overseer"]
threes = ["Etched Champion", "Master of Etherium"]
lands = ["Darksteel Citadel", "Spire of Industry", "Glimmervoid", "Inkmoth Nexus", "Blinkmoth Nexus", "Island"]
for i in range(iterations):
    for j in range(0, (starting_size + 1) - mullto):
        hand.new_hand(starting_size - j)
        count_opal = hand.count_of("Mox Opal")
        count_lands = hand.count_of(lands)
        has_drum = hand.contains("Springleaf Drum")
        count_zero_creatures = hand.count_of(zero_creatures)
        count_zeros = count_zero_creatures + hand.count_of(zero_others) + hand.contains("Darksteel Citadel")
        count_ones = hand.count_of(ones) + has_drum * 1
        has_two = hand.contains(twos)
        has_three = hand.contains(threes)
        # Mox Opal turns on with two other zero-cost artifacts, or with
        # Springleaf Drum plus a free creature and a land.
        t1_opal = (count_zeros >= 2) or (has_drum and (count_zero_creatures > 0) and (count_lands > 0))
        t1_pay_opal = (count_ones > 0) and (count_zeros > 0) and (count_lands > 0)
        t1_mana = count_opal * t1_opal + (count_lands > 0) + ((not t1_opal) * t1_pay_opal * max((count_opal - 1), 0))
        results = [(t1_mana >= 2) and has_two, (t1_mana >= 3) and has_three]
        hand_counts[starting_size - j - mullto, :] += results
        totals[starting_size - j - mullto, :] += 1
p_hands = hand_counts / totals
# BUG FIX: the file was opened in binary mode ("wb") but written with str
# values, which raises TypeError in Python 3 (after the whole simulation had
# already run).  Open in text mode instead; the `with` block also closes the
# file, so the explicit close() was removed.
with open("output/affinity_output.csv", "w") as out_file:
    out_file.write(str(iterations) + " iterations\n\n")
    # hand size headers
    out_file.write("hand sizes/types,")
    for hand_type in hand_types:
        out_file.write(str(hand_type) + ",")
    # results
    out_file.write("\n")
    for size in reversed(range(len(p_hands))):
        out_file.write(str(size + mullto) + ",")
        for p in p_hands[size, :]:
            out_file.write(str(p) + ",")
        out_file.write("\n")
print(np.flip(p_hands, axis = 0)) | [
"numpy.flip",
"numpy.zeros",
"hand.Hand"
] | [((120, 150), 'hand.Hand', 'Hand', (['"""decklists/affinity.txt"""'], {}), "('decklists/affinity.txt')\n", (124, 150), False, 'from hand import Hand\n'), ((271, 312), 'numpy.zeros', 'np.zeros', (['(starting_size + 1 - mullto, 1)'], {}), '((starting_size + 1 - mullto, 1))\n', (279, 312), True, 'import numpy as np\n'), ((2293, 2317), 'numpy.flip', 'np.flip', (['p_hands'], {'axis': '(0)'}), '(p_hands, axis=0)\n', (2300, 2317), True, 'import numpy as np\n')] |
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.metrics import classification_report_imbalanced
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids
from imblearn.under_sampling import NearMiss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
def scatter_plot_2d(x_ls, y_ls):
    """Scatter-plot 2-D samples on the current axes, one color/marker per
    class label found in ``y_ls``."""
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    classes = np.unique(y_ls)
    cmap = ListedColormap(colors[:len(classes)])
    for idx, label in enumerate(classes):
        mask = y_ls == label
        plt.scatter(x=x_ls[mask, 0], y=x_ls[mask, 1],
                    alpha=.8, c=cmap(idx),
                    marker=markers[idx], label=label)
def deci_bdry_plot_2d(x_ls, y_ls, classifier, resolution=.02):
    """Shade a classifier's decision regions over a 2-D feature grid, then
    overlay the samples colored by class."""
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    classes = np.unique(y_ls)
    cmap = ListedColormap(colors[:len(classes)])
    # Build a dense mesh spanning the data (plus a 1-unit margin) and
    # classify every grid point to draw the decision surface.
    x1_min, x1_max = x_ls[:, 0].min() - 1, x_ls[:, 0].max() + 1
    x2_min, x2_max = x_ls[:, 1].min() - 1, x_ls[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    grid_points = np.array([xx1.ravel(), xx2.ravel()]).T
    Z = classifier.predict(grid_points).reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Overlay the actual samples, one color/marker per class.
    for idx, label in enumerate(classes):
        mask = y_ls == label
        plt.scatter(x=x_ls[mask, 0], y=x_ls[mask, 1],
                    alpha=.8, c=cmap(idx),
                    marker=markers[idx], label=label)
def multi_class_under_sampling():
    '''
    EXAMPLE: Multiclass classification with under-sampling.

    Builds an imbalanced iris dataset, under-samples it with NearMiss and
    ClusterCentroids, visualizes both resampled training sets, then trains a
    LinearSVC pipeline on each strategy and plots its decision boundary.
    '''
    RANDOM_STATE = 42
    iris = load_iris()
    X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
    # Keep two features only so everything can be plotted in 2-D.
    X_train, X_test, y_train, y_test = train_test_split(X[:, [1, 2]], y, random_state = RANDOM_STATE)
    nm = NearMiss(version = 1, random_state = RANDOM_STATE)
    X_resample_nm, y_resample_nm = nm.fit_sample(X_train, y_train)
    cc = ClusterCentroids(random_state = 0)
    X_resample_cc, y_resample_cc = cc.fit_sample(X_train, y_train)
    # Show the two under-sampled training sets side by side.
    # BUG FIX: the second panel previously re-plotted the NearMiss data; it
    # now shows the ClusterCentroids result.  scatter_plot_2d() draws on the
    # current axes and returns None, so select each axes with plt.sca()
    # instead of assigning its (None) return value.
    fig, (ax0, ax1) = plt.subplots(ncols = 2)
    plt.sca(ax0)
    scatter_plot_2d(X_resample_nm, y_resample_nm)
    plt.sca(ax1)
    scatter_plot_2d(X_resample_cc, y_resample_cc)
    plt.show()
    # Train a LinearSVC on each resampling strategy and compare the
    # resulting decision boundaries.
    # BUG FIX: the original referenced an undefined name `pipeline` and
    # called a nonexistent Axes.scatter_plot() method, both of which raised
    # at runtime.
    pipeline_nm = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE),
                                LinearSVC(random_state = RANDOM_STATE))
    pipeline_nm.fit(X_train, y_train)
    pipeline_cc = make_pipeline(ClusterCentroids(random_state = 0),
                                LinearSVC(random_state = RANDOM_STATE))
    pipeline_cc.fit(X_train, y_train)
    plt.figure()
    plt.subplot(211)
    deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_nm)
    plt.subplot(212)
    deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_cc)
    plt.show()
def wendy_try_iris():
    '''
    EXAMPLE: Multiclass classification with under-sampling
    '''
    RANDOM_STATE = 42
    iris = load_iris()
    # X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
    feature_names = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']
    X = pd.DataFrame(iris.data, columns=feature_names)
    y = pd.DataFrame(iris.target, columns=['Species'])
    # Attach the target as an extra column (df aliases X).
    df = X
    df['Species'] = y
    # Exploratory pair plot of the features -- intentionally disabled:
    # sns.set(style='whitegrid', context='notebook')
    # sns.pairplot(df, vars=feature_names, size=2.5, hue='Species')
    # plt.show()
    # Dimension reduction / pipeline evaluation also left disabled:
    # print(classification_report_imbalanced(y_test, pipeline_cc.predict(X_test)))
    # deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_cc)
if __name__ == '__main__':
wendy_try_iris() | [
"sklearn.datasets.load_iris",
"matplotlib.pyplot.contourf",
"numpy.unique",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"sklearn.svm.LinearSVC",
"imblearn.datasets.make_imbalance",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"imblearn.under_sampling.NearMiss",
"matp... | [((1686, 1733), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx1', 'xx2', 'Z'], {'alpha': '(0.4)', 'cmap': 'cmap'}), '(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n', (1698, 1733), True, 'import matplotlib.pyplot as plt\n'), ((2215, 2226), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (2224, 2226), False, 'from sklearn.datasets import load_iris\n'), ((2238, 2331), 'imblearn.datasets.make_imbalance', 'make_imbalance', (['iris.data', 'iris.target'], {'ratio': '{(0): 25, (1): 50, (2): 50}', 'random_state': '(0)'}), '(iris.data, iris.target, ratio={(0): 25, (1): 50, (2): 50},\n random_state=0)\n', (2252, 2331), False, 'from imblearn.datasets import make_imbalance\n'), ((2417, 2477), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X[:, [1, 2]]', 'y'], {'random_state': 'RANDOM_STATE'}), '(X[:, [1, 2]], y, random_state=RANDOM_STATE)\n', (2433, 2477), False, 'from sklearn.model_selection import train_test_split\n'), ((2633, 2679), 'imblearn.under_sampling.NearMiss', 'NearMiss', ([], {'version': '(1)', 'random_state': 'RANDOM_STATE'}), '(version=1, random_state=RANDOM_STATE)\n', (2641, 2679), False, 'from imblearn.under_sampling import NearMiss\n'), ((2761, 2793), 'imblearn.under_sampling.ClusterCentroids', 'ClusterCentroids', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2777, 2793), False, 'from imblearn.under_sampling import ClusterCentroids\n'), ((2922, 2943), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)'}), '(ncols=2)\n', (2934, 2943), True, 'import matplotlib.pyplot as plt\n'), ((3121, 3131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3129, 3131), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3615), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3613, 3615), True, 'import matplotlib.pyplot as plt\n'), ((4023, 4033), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4031, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4166, 4177), 
'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (4175, 4177), False, 'from sklearn.datasets import load_iris\n'), ((4284, 4383), 'pandas.DataFrame', 'pd.DataFrame', (['iris.data'], {'columns': "['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']"}), "(iris.data, columns=['Sepal_length', 'Sepal_width',\n 'Petal_length', 'Petal_width'])\n", (4296, 4383), True, 'import pandas as pd\n'), ((4390, 4436), 'pandas.DataFrame', 'pd.DataFrame', (['iris.target'], {'columns': "['Species']"}), "(iris.target, columns=['Species'])\n", (4402, 4436), True, 'import pandas as pd\n'), ((841, 856), 'numpy.unique', 'np.unique', (['y_ls'], {}), '(y_ls)\n', (850, 856), True, 'import numpy as np\n'), ((1508, 1545), 'numpy.arange', 'np.arange', (['x1_min', 'x1_max', 'resolution'], {}), '(x1_min, x1_max, resolution)\n', (1517, 1545), True, 'import numpy as np\n'), ((1547, 1584), 'numpy.arange', 'np.arange', (['x2_min', 'x2_max', 'resolution'], {}), '(x2_min, x2_max, resolution)\n', (1556, 1584), True, 'import numpy as np\n'), ((1862, 1877), 'numpy.unique', 'np.unique', (['y_ls'], {}), '(y_ls)\n', (1871, 1877), True, 'import numpy as np\n'), ((3807, 3853), 'imblearn.under_sampling.NearMiss', 'NearMiss', ([], {'version': '(1)', 'random_state': 'RANDOM_STATE'}), '(version=1, random_state=RANDOM_STATE)\n', (3815, 3853), False, 'from imblearn.under_sampling import NearMiss\n'), ((3859, 3895), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': 'RANDOM_STATE'}), '(random_state=RANDOM_STATE)\n', (3868, 3895), False, 'from sklearn.svm import LinearSVC\n'), ((763, 778), 'numpy.unique', 'np.unique', (['y_ls'], {}), '(y_ls)\n', (772, 778), True, 'import numpy as np\n'), ((1301, 1316), 'numpy.unique', 'np.unique', (['y_ls'], {}), '(y_ls)\n', (1310, 1316), True, 'import numpy as np\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import math
import numpy as np
import states.area
import states.face
import states.fail
import states.success
from challenge import Challenge
class NoseState:
    """Validates the "move your nose onto the target" step of a liveness challenge.

    Each frame is checked for (1) the face staying inside the challenge area,
    (2) the nose reaching the (tolerance-expanded) target box, and (3) a
    plausible head movement: a smooth nose trajectory plus enough landmark
    displacement between the first and current frames, taking head rotation
    (yaw) into account.
    """

    MAXIMUM_DURATION_IN_SECONDS = 10
    # Fractional padding added around the challenge area and nose target boxes.
    AREA_BOX_TOLERANCE = 0.05
    NOSE_BOX_TOLERANCE = 0.55
    # Maximum RMS residual of the quadratic fit of the nose trajectory
    # (normalized coordinates); larger means the path was too erratic.
    TRAJECTORY_ERROR_THRESHOLD = 0.01
    HISTOGRAM_BINS = 3
    # Base minimum Euclidean distance between the landmark histograms of the
    # first and current frames; scaled by the factors below.
    MIN_DIST = 0.10
    # Yaw angle (degrees) beyond which the face counts as rotated.
    ROTATION_THRESHOLD = 5.0
    MIN_DIST_FACTOR_ROTATED = 0.75
    MIN_DIST_FACTOR_NOT_ROTATED = 1.5

    def __init__(self, challenge, original_frame):
        """Store the challenge geometry (with tolerances applied) and the first frame."""
        self.challenge = challenge
        self.image_width = challenge['imageWidth']
        self.image_height = challenge['imageHeight']
        # Applying tolerance: expand the area box slightly in every direction.
        area_width_tolerance = challenge['areaWidth'] * NoseState.AREA_BOX_TOLERANCE
        area_height_tolerance = challenge['areaHeight'] * NoseState.AREA_BOX_TOLERANCE
        self.area_box = (challenge['areaLeft'] - area_width_tolerance,
                         challenge['areaTop'] - area_height_tolerance,
                         challenge['areaWidth'] + 2*area_width_tolerance,
                         challenge['areaHeight'] + 2*area_height_tolerance)
        nose_width_tolerance = challenge['noseWidth'] * NoseState.NOSE_BOX_TOLERANCE
        nose_height_tolerance = challenge['noseHeight'] * NoseState.NOSE_BOX_TOLERANCE
        self.nose_box = (challenge['noseLeft'] - nose_width_tolerance,
                         challenge['noseTop'] - nose_height_tolerance,
                         challenge['noseWidth'] + 2*nose_width_tolerance,
                         challenge['noseHeight'] + 2*nose_height_tolerance)
        # True when the nose target's center sits in the right half of the image.
        self.challenge_in_the_right = challenge['noseLeft'] + Challenge.NOSE_BOX_SIZE/2 > self.image_width/2
        self.original_frame = original_frame
        self.original_landmarks = original_frame['rekMetadata'][0]['Landmarks']
        # Normalized (X, Y) nose positions accumulated across processed frames.
        self.nose_trajectory = []

    def process(self, frame):
        """Process one frame.

        Returns False when the face left the challenge area, the result of
        verify_challenge once the nose reaches the target box, and None while
        the nose has not reached the target yet.
        """
        rek_metadata = frame['rekMetadata'][0]
        rek_face_bbox = [
            self.image_width * rek_metadata['BoundingBox']['Left'],
            self.image_height * rek_metadata['BoundingBox']['Top'],
            self.image_width * rek_metadata['BoundingBox']['Width'],
            self.image_height * rek_metadata['BoundingBox']['Height']
        ]
        if not states.area.AreaState.is_inside_area_box(self.area_box, rek_face_bbox):
            return False
        rek_landmarks = rek_metadata['Landmarks']
        rek_pose = rek_metadata['Pose']
        if self.is_inside_nose_box(rek_landmarks):
            verified = self.verify_challenge(rek_landmarks, rek_pose, self.challenge_in_the_right)
            return verified
        return None

    def is_inside_nose_box(self, landmarks):
        """Record the nose position and return True when it lies inside the target box."""
        for landmark in landmarks:
            if landmark['Type'] == 'nose':
                nose_left = self.image_width * landmark['X']
                nose_top = self.image_height * landmark['Y']
                # Side effect: extend the trajectory used later by verify_challenge.
                self.nose_trajectory.append((landmark['X'], landmark['Y']))
                return (self.nose_box[0] <= nose_left <= self.nose_box[0] + self.nose_box[2] and
                        self.nose_box[1] <= nose_top <= self.nose_box[1] + self.nose_box[3])
        return False

    def get_next_state_failure(self):
        """State to transition to when the challenge fails."""
        return states.fail.FailState()

    def get_next_state_success(self):
        """State to transition to when the challenge succeeds."""
        return states.success.SuccessState()

    def verify_challenge(self, current_landmarks, pose, challenge_in_the_right):
        """Decide whether the recorded movement constitutes a genuine head motion.

        First checks that the nose followed a smooth (near-quadratic) path,
        then that the face landmarks moved far enough between the first and
        current frames, with the required distance depending on head rotation.
        """
        # Validating continuous and smooth nose trajectory.
        nose_trajectory_x = [nose[0] for nose in self.nose_trajectory]
        nose_trajectory_y = [nose[1] for nose in self.nose_trajectory]
        _, residuals, _, _, _ = np.polyfit(nose_trajectory_x, nose_trajectory_y, 2, full=True)
        # BUG FIX: np.polyfit(full=True) returns an *empty* residual array when
        # the fit is exact or underdetermined (three or fewer points for a
        # quadratic); the old math.sqrt(residuals/len(...)) then raised a
        # TypeError.  Treat an empty residual array as zero fitting error.
        sum_sq_residual = float(residuals[0]) if len(residuals) > 0 else 0.0
        trajectory_error = math.sqrt(sum_sq_residual / len(self.nose_trajectory))
        if trajectory_error > NoseState.TRAJECTORY_ERROR_THRESHOLD:
            return False
        # Bin the landmarks of the first frame into a 2-D histogram.
        original_landmarks_x = [self.image_width * landmark['X'] for landmark in self.original_landmarks]
        original_landmarks_y = [self.image_height * landmark['Y'] for landmark in self.original_landmarks]
        original_histogram, _, _ = np.histogram2d(original_landmarks_x,
                                                  original_landmarks_y,
                                                  bins=NoseState.HISTOGRAM_BINS)
        original_histogram = np.reshape(original_histogram, NoseState.HISTOGRAM_BINS**2) / len(original_landmarks_x)
        # Bin the landmarks of the current (last) frame the same way.
        current_landmarks_x = [self.image_width * landmark['X'] for landmark in current_landmarks]
        current_landmarks_y = [self.image_height * landmark['Y'] for landmark in current_landmarks]
        current_histogram, _, _ = np.histogram2d(current_landmarks_x,
                                                 current_landmarks_y,
                                                 bins=NoseState.HISTOGRAM_BINS)
        current_histogram = np.reshape(current_histogram, NoseState.HISTOGRAM_BINS**2) / len(current_landmarks_x)
        # Euclidean distance between the normalized histograms measures how
        # much the landmark distribution shifted.  NOTE(review): each
        # histogram is binned over its own data range, so this compares
        # distribution shape rather than absolute position -- confirm intent.
        dist = np.linalg.norm(original_histogram - current_histogram)
        # Estimate left/right head rotation from the yaw angle.
        yaw = pose['Yaw']
        rotated_right = yaw > NoseState.ROTATION_THRESHOLD
        rotated_left = yaw < - NoseState.ROTATION_THRESHOLD
        rotated_face = rotated_left or rotated_right
        # A rotation toward the target lowers the required displacement; no
        # rotation raises it; a rotation away from the target is rejected.
        if (rotated_right and challenge_in_the_right) or (rotated_left and not challenge_in_the_right):
            min_dist = NoseState.MIN_DIST * NoseState.MIN_DIST_FACTOR_ROTATED
        elif not rotated_face:
            min_dist = NoseState.MIN_DIST * NoseState.MIN_DIST_FACTOR_NOT_ROTATED
        else:
            return False
        if dist > min_dist:
            return True
        return False
| [
"numpy.histogram2d",
"numpy.linalg.norm",
"numpy.reshape",
"numpy.polyfit"
] | [((3767, 3829), 'numpy.polyfit', 'np.polyfit', (['nose_trajectory_x', 'nose_trajectory_y', '(2)'], {'full': '(True)'}), '(nose_trajectory_x, nose_trajectory_y, 2, full=True)\n', (3777, 3829), True, 'import numpy as np\n'), ((4311, 4405), 'numpy.histogram2d', 'np.histogram2d', (['original_landmarks_x', 'original_landmarks_y'], {'bins': 'NoseState.HISTOGRAM_BINS'}), '(original_landmarks_x, original_landmarks_y, bins=NoseState.\n HISTOGRAM_BINS)\n', (4325, 4405), True, 'import numpy as np\n'), ((4915, 5007), 'numpy.histogram2d', 'np.histogram2d', (['current_landmarks_x', 'current_landmarks_y'], {'bins': 'NoseState.HISTOGRAM_BINS'}), '(current_landmarks_x, current_landmarks_y, bins=NoseState.\n HISTOGRAM_BINS)\n', (4929, 5007), True, 'import numpy as np\n'), ((5294, 5348), 'numpy.linalg.norm', 'np.linalg.norm', (['(original_histogram - current_histogram)'], {}), '(original_histogram - current_histogram)\n', (5308, 5348), True, 'import numpy as np\n'), ((4530, 4591), 'numpy.reshape', 'np.reshape', (['original_histogram', '(NoseState.HISTOGRAM_BINS ** 2)'], {}), '(original_histogram, NoseState.HISTOGRAM_BINS ** 2)\n', (4540, 4591), True, 'import numpy as np\n'), ((5129, 5189), 'numpy.reshape', 'np.reshape', (['current_histogram', '(NoseState.HISTOGRAM_BINS ** 2)'], {}), '(current_histogram, NoseState.HISTOGRAM_BINS ** 2)\n', (5139, 5189), True, 'import numpy as np\n')] |
# This file was generated
import array
import ctypes
import datetime
import threading
import nitclk._attributes as _attributes
import nitclk._converters as _converters
import nitclk._library_singleton as _library_singleton
import nitclk._visatype as _visatype
import nitclk.errors as errors
# Used for __repr__ and __str__
import pprint
pp = pprint.PrettyPrinter(indent=4)
_session_instance = None
_session_instance_lock = threading.Lock()
# Helper functions for creating ctypes needed for calling into the driver DLL
def get_ctypes_pointer_for_buffer(value=None, library_type=None, size=None):
    """Return a ctypes object (pointer or array) backing ``value``.

    Supports array.array (zero-copy pointer into its buffer), numpy.ndarray
    (zero-copy via numpy.ctypeslib), and list (copied into a new ctypes
    array).  With no value, allocates a zeroed ctypes array of ``size``
    elements, or returns None when library_type/size are missing.
    """
    if isinstance(value, array.array):
        # Point directly at the array's existing buffer -- no copy is made.
        assert library_type is not None, 'library_type is required for array.array'
        addr, _ = value.buffer_info()
        return ctypes.cast(addr, ctypes.POINTER(library_type))
    # String-based type check avoids importing numpy unless actually needed.
    if "'numpy.ndarray'" in str(type(value)):
        import numpy
        return numpy.ctypeslib.as_ctypes(value)
    if isinstance(value, list):
        assert library_type is not None, 'library_type is required for list'
        return (library_type * len(value))(*value)
    if library_type is not None and size is not None:
        return (library_type * size)()
    return None
class SessionReference(object):
'''Properties container for NI-TClk attributes.
Note: Constructing this class is an advanced use case and should not be needed in most circumstances.
'''
# This is needed during __init__. Without it, __setattr__ raises an exception
_is_frozen = False
exported_sync_pulse_output_terminal = _attributes.AttributeViString(2)
'''Type: str
Specifies the destination of the Sync Pulse. This property is most often used when synchronizing a multichassis system.
Values
Empty string. Empty string is a valid value, indicating that the signal is not exported.
PXI Devices - 'PXI_Trig0' through 'PXI_Trig7' and device-specific settings
PCI Devices - 'RTSI_0' through 'RTSI_7' and device-specific settings
Examples of Device-Specific Settings
- NI PXI-5122 supports 'PFI0' and 'PFI1'
- NI PXI-5421 supports 'PFI0', 'PFI1', 'PFI4', and 'PFI5'
- NI PXI-6551/6552 supports 'PFI0', 'PFI1', 'PFI2', and 'PFI3'
Default Value is empty string
'''
exported_tclk_output_terminal = _attributes.AttributeViString(9)
'''Type: str
Specifies the destination of the device's TClk signal.
Values
Empty string. Empty string is a valid value, indicating that the signal is not exported.
PXI Devices - 'PXI_Trig0' through 'PXI_Trig7' and device-specific settings
PCI Devices - 'RTSI_0' through 'RTSI_7' and device-specific settings
Examples of Device-Specific Settings
- NI PXI-5122 supports 'PFI0' and 'PFI1'
- NI PXI-5421 supports 'PFI0', 'PFI1', 'PFI4', and 'PFI5'
- NI PXI-6551/6552 supports 'PFI0', 'PFI1', 'PFI2', and 'PFI3'
Default Value is empty string
'''
pause_trigger_master_session = _attributes.AttributeSessionReference(6)
'''Type: Driver Session or nitclk.SessionReference
Specifies the pause trigger master session.
For external triggers, the session that originally receives the trigger. For None (no trigger configured) or software triggers, the session that originally generates the trigger.
'''
ref_trigger_master_session = _attributes.AttributeSessionReference(4)
'''Type: Driver Session or nitclk.SessionReference
Specifies the reference trigger master session.
For external triggers, the session that originally receives the trigger. For None (no trigger configured) or software triggers, the session that originally generates the trigger.
'''
sample_clock_delay = _attributes.AttributeViReal64TimeDeltaSeconds(11)
'''Type: float in seconds or datetime.timedelta
Specifies the sample clock delay.
Specifies the delay, in seconds, to apply to the session sample clock relative to the other synchronized sessions. During synchronization, NI-TClk aligns the sample clocks on the synchronized devices. If you want to delay the sample clocks, set this property before calling synchronize.
not supported for acquisition sessions.
Values - Between minus one and plus one period of the sample clock.
One sample clock period is equal to (1/sample clock rate). For example, for a session with sample rate of 100 MS/s, you can specify sample clock delays between -10.0 ns and +10.0 ns.
Default Value is 0
Note: Sample clock delay is supported for generation sessions only; it is
'''
sequencer_flag_master_session = _attributes.AttributeSessionReference(16)
'''Type: Driver Session or nitclk.SessionReference
Specifies the sequencer flag master session.
For external triggers, the session that originally receives the trigger.
For None (no trigger configured) or software triggers, the session that
originally generates the trigger.
'''
start_trigger_master_session = _attributes.AttributeSessionReference(3)
'''Type: Driver Session or nitclk.SessionReference
Specifies the start trigger master session.
For external triggers, the session that originally receives the trigger. For None (no trigger configured) or software triggers, the session that originally generates the trigger.
'''
sync_pulse_clock_source = _attributes.AttributeViString(10)
'''Type: str
Specifies the Sync Pulse Clock source. This property is typically used to synchronize PCI devices when you want to control RTSI 7 yourself. Make sure that a 10 MHz clock is driven onto RTSI 7.
Values
PCI Devices - 'RTSI_7' and 'None'
PXI Devices - 'PXI_CLK10' and 'None'
Default Value - 'None' directs synchronize to create the necessary routes. For PCI, one of the synchronized devices drives a 10 MHz clock on RTSI 7 unless that line is already being driven.
'''
sync_pulse_sender_sync_pulse_source = _attributes.AttributeViString(13)
'''Type: str
Specifies the external sync pulse source for the Sync Pulse Sender. You can use this source to synchronize the Sync Pulse Sender with an external non-TClk source.
Values
Empty string. Empty string is a valid value, indicating that the signal is not exported.
PXI Devices - 'PXI_Trig0' through 'PXI_Trig7' and device-specific settings
PCI Devices - 'RTSI_0' through 'RTSI_7' and device-specific settings
Examples of Device-Specific Settings
- NI PXI-5122 supports 'PFI0' and 'PFI1'
- NI PXI-5421 supports 'PFI0', 'PFI1', 'PFI4', and 'PFI5'
- NI PXI-6551/6552 supports 'PFI0', 'PFI1', 'PFI2', and 'PFI3'
Default Value is empty string
'''
sync_pulse_source = _attributes.AttributeViString(1)
'''Type: str
Specifies the Sync Pulse source. This property is most often used when synchronizing a multichassis system.
Values
Empty string
PXI Devices - 'PXI_Trig0' through 'PXI_Trig7' and device-specific settings
PCI Devices - 'RTSI_0' through 'RTSI_7' and device-specific settings
Examples of Device-Specific Settings
- NI PXI-5122 supports 'PFI0' and 'PFI1'
- NI PXI-5421 supports 'PFI0', 'PFI1', 'PFI2', and 'PFI3'
- NI PXI-6551/6552 supports 'PFI0', 'PFI1', 'PFI2', and 'PFI3'
Default Value - Empty string. This default value directs synchronize to set this property when all the synchronized devices are in one PXI chassis. To synchronize a multichassis system, you must set this property before calling synchronize.
'''
tclk_actual_period = _attributes.AttributeViReal64(8)
'''Type: float
Indicates the computed TClk period that will be used during the acquisition.
'''
def __init__(self, session_number, encoding='windows-1251'):
self._session_number = session_number
self._library = _library_singleton.get()
self._encoding = encoding
# We need a self._repeated_capability string for passing down to function calls on _Library class. We just need to set it to empty string.
self._repeated_capability = ''
# Store the parameter list for later printing in __repr__
param_list = []
param_list.append("session_number=" + pp.pformat(session_number))
param_list.append("encoding=" + pp.pformat(encoding))
self._param_list = ', '.join(param_list)
self._is_frozen = True
def __repr__(self):
return '{0}.{1}({2})'.format('nitclk', self.__class__.__name__, self._param_list)
def __setattr__(self, key, value):
if self._is_frozen and key not in dir(self):
raise AttributeError("'{0}' object has no attribute '{1}'".format(type(self).__name__, key))
object.__setattr__(self, key, value)
def _get_error_description(self, error_code):
'''_get_error_description
Returns the error description.
'''
try:
'''
It is expected for _get_error to raise when the session is invalid
(IVI spec requires GetError to fail).
Use _error_message instead. It doesn't require a session.
'''
error_string = self._get_extended_error_info()
return error_string
except errors.Error:
return "Failed to retrieve error description."
    def _get_tclk_session_reference(self):
        '''Return the underlying NI-TClk session handle.'''
        return self._session_number
    def _get_attribute_vi_real64(self, attribute_id):
        r'''_get_attribute_vi_real64

        Gets the value of an NI-TClk ViReal64 property.

        Tip:
        This method requires repeated capabilities. If called directly on the
        nitclk.Session object, then the method will use all repeated capabilities in the session.
        You can specify a subset of repeated capabilities using the Python index notation on an
        nitclk.Session repeated capabilities container, and calling this method on the result.

        Args:
            attribute_id (int): The ID of the property that you want to get. Supported property:
                sample_clock_delay

        Returns:
            value (float): The value that you are getting
        '''
        # Marshal the Python arguments into ctypes values for the driver call.
        session_ctype = _visatype.ViSession(self._session_number)  # case S110
        channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
        attribute_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
        value_ctype = _visatype.ViReal64()  # case S220 -- out-parameter filled by the driver
        error_code = self._library.niTClk_GetAttributeViReal64(session_ctype, channel_name_ctype, attribute_id_ctype, None if value_ctype is None else (ctypes.pointer(value_ctype)))
        # Raises on driver errors; warnings are not suppressed (ignore_warnings=False).
        errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
        return float(value_ctype.value)
    def _get_attribute_vi_session(self, attribute_id):
        r'''_get_attribute_vi_session

        Gets the value of an NI-TClk ViSession property.

        Tip:
        This method requires repeated capabilities. If called directly on the
        nitclk.Session object, then the method will use all repeated capabilities in the session.
        You can specify a subset of repeated capabilities using the Python index notation on an
        nitclk.Session repeated capabilities container, and calling this method on the result.

        Args:
            attribute_id (int): The ID of the property that you want to set. Supported properties:
                start_trigger_master_session
                ref_trigger_master_session
                script_trigger_master_session
                pause_trigger_master_session

        Returns:
            value (int): The value that you are getting
        '''
        # Marshal the Python arguments into ctypes values for the driver call.
        session_ctype = _visatype.ViSession(self._session_number)  # case S110
        channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
        attribute_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
        value_ctype = _visatype.ViSession()  # case S220 -- out-parameter filled by the driver
        error_code = self._library.niTClk_GetAttributeViSession(session_ctype, channel_name_ctype, attribute_id_ctype, None if value_ctype is None else (ctypes.pointer(value_ctype)))
        # Raises on driver errors; warnings are not suppressed (ignore_warnings=False).
        errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
        return int(value_ctype.value)
def _get_attribute_vi_string(self, attribute_id):
    r'''_get_attribute_vi_string

    Queries the value of an NI-TClk ViString property.

    The underlying C function takes a caller-supplied ViChar buffer plus its
    size; if the buffer (including the terminating NUL) is too small it
    copies what fits and returns the size required for the full value, and
    passing size 0 with a NULL buffer just reports the required size. This
    wrapper performs that two-step dance automatically.

    Tip:
    This method requires repeated capabilities. Called directly on the
    nitclk.Session object it uses every repeated capability in the session;
    a subset can be selected with Python index notation on an
    nitclk.Session repeated capabilities container.

    Args:
        attribute_id (int): The ID of the property that you want to get.
            Supported properties: sync_pulse_source,
            sync_pulse_clock_source, exported_sync_pulse_output_terminal

    Returns:
        value (str): The value that you are getting
    '''
    session = _visatype.ViSession(self._session_number)
    channel_name = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id = _visatype.ViAttr(attribute_id)
    # First pass: zero-size query; the (warning) return code is the number
    # of bytes required to hold the whole value.
    code = self._library.niTClk_GetAttributeViString(session, channel_name, attr_id, _visatype.ViInt32(), None)
    errors.handle_error(self, code, ignore_warnings=True, is_error_handling=False)
    # Second pass: allocate a buffer of the reported size and fetch the string.
    buf_size = _visatype.ViInt32(code)
    buf = (_visatype.ViChar * buf_size.value)()
    code = self._library.niTClk_GetAttributeViString(session, channel_name, attr_id, buf_size, buf)
    errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)
    return buf.value.decode(self._encoding)
def _get_extended_error_info(self):
    r'''_get_extended_error_info

    Reports extended error information for the most recent NI-TClk method
    that returned an error. Use the return values of the individual
    methods to establish which method failed, because once
    _get_extended_error_info has reported an errorString it does not
    report it again (an empty string is returned instead).

    Returns:
        error_string (str): Extended error description.
    '''
    # Size query: a NULL buffer makes the call report (as a warning return
    # code) the buffer length needed for the full description.
    size = _visatype.ViUInt32()
    code = self._library.niTClk_GetExtendedErrorInfo(None, size)
    errors.handle_error(self, code, ignore_warnings=True, is_error_handling=True)
    # Fetch the actual string with a buffer of the reported size.
    size = _visatype.ViUInt32(code)
    buf = (_visatype.ViChar * size.value)()
    code = self._library.niTClk_GetExtendedErrorInfo(buf, size)
    errors.handle_error(self, code, ignore_warnings=False, is_error_handling=True)
    return buf.value.decode(self._encoding)
def _set_attribute_vi_real64(self, attribute_id, value):
    r'''_set_attribute_vi_real64

    Sets the value of an NI-TClk ViReal64 property. This is a low-level
    method; NI-TClk provides high-level methods that set most of the
    properties, and those should be preferred where available.

    Tip:
    This method requires repeated capabilities. Called directly on the
    nitclk.Session object it uses every repeated capability in the session;
    a subset can be selected with Python index notation on an
    nitclk.Session repeated capabilities container.

    Args:
        attribute_id (int): The ID of the property that you want to set.
            Supported property: sample_clock_delay
        value (float): The value for the property
    '''
    session = _visatype.ViSession(self._session_number)
    channel_name = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id = _visatype.ViAttr(attribute_id)
    new_value = _visatype.ViReal64(value)
    code = self._library.niTClk_SetAttributeViReal64(session, channel_name, attr_id, new_value)
    errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)
def _set_attribute_vi_session(self, attribute_id, value):
    r'''_set_attribute_vi_session

    Sets the value of an NI-TClk ViSession property. This is a low-level
    method; NI-TClk provides high-level methods that set most of the
    properties, and those should be preferred where available.

    Tip:
    This method requires repeated capabilities. Called directly on the
    nitclk.Session object it uses every repeated capability in the session;
    a subset can be selected with Python index notation on an
    nitclk.Session repeated capabilities container.

    Args:
        attribute_id (int): The ID of the property that you want to set.
            Supported properties: start_trigger_master_session,
            ref_trigger_master_session, script_trigger_master_session,
            pause_trigger_master_session
        value (int): The value for the property
    '''
    session = _visatype.ViSession(self._session_number)
    channel_name = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id = _visatype.ViAttr(attribute_id)
    new_value = _visatype.ViSession(value)
    code = self._library.niTClk_SetAttributeViSession(session, channel_name, attr_id, new_value)
    errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)
def _set_attribute_vi_string(self, attribute_id, value):
    r'''_set_attribute_vi_string

    Sets the value of an NI-TClk ViString property. This is a low-level
    method; NI-TClk provides high-level methods that set most of the
    properties, and those should be preferred where available.

    Tip:
    This method requires repeated capabilities. Called directly on the
    nitclk.Session object it uses every repeated capability in the session;
    a subset can be selected with Python index notation on an
    nitclk.Session repeated capabilities container.

    Args:
        attribute_id (int): Pass the ID of the property that you want to
            set. Supported properties: sync_pulse_source,
            sync_pulse_clock_source, exported_sync_pulse_output_terminal
        value (str): Pass the value for the property
    '''
    session = _visatype.ViSession(self._session_number)
    channel_name = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id = _visatype.ViAttr(attribute_id)
    new_value = ctypes.create_string_buffer(value.encode(self._encoding))
    code = self._library.niTClk_SetAttributeViString(session, channel_name, attr_id, new_value)
    errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)
class _Session(object):
    '''Private class.

    Lets the code generator reuse the function templates shared with the
    other drivers; without it, separate templates differing only in
    indentation would be required.
    '''

    def __init__(self):
        self._library = _library_singleton.get()
        self._encoding = 'windows-1251'
        # No repeated capability objects to instantiate for this session.
        # Remember the (empty) constructor parameter list for __repr__.
        self._param_list = ', '.join([])
        self._is_frozen = True

    def _get_error_description(self, error_code):
        '''_get_error_description

        Returns the error description.
        '''
        # _get_error is expected to raise when the session is invalid (the
        # IVI spec requires GetError to fail), so use the session-free
        # extended-error query instead and fall back to a fixed message.
        try:
            return self._get_extended_error_info()
        except errors.Error:
            return "Failed to retrieve error description."

    # ------------------------- code-generated methods -------------------------

    def configure_for_homogeneous_triggers(self, sessions):
        '''configure_for_homogeneous_triggers

        Configures the properties commonly required for TClk synchronization
        of device sessions with homogeneous triggers in a single PXI chassis
        or a single PC: reference clocks, start triggers, reference
        triggers, script triggers, and pause triggers.

        Reference clocks are configured only when the internal sample
        clocks or internal sample clock timebases are used and the
        reference clock source is not configured (None): on PXI every
        device is clocked from the 10 MHz backplane clock (PXI_CLK10); on
        PCI one device exports its 10 MHz onboard clock to RTSI 7 and the
        others lock to it.

        For each trigger kind (start; reference and script on sessions that
        support them; pause on generation sessions), if the trigger is None
        on every session — or on every session but one — the configured (or
        one arbitrarily chosen) session implicitly exports the trigger, the
        remaining sessions are set up for digital-edge triggers from the
        exported source, and the matching *_trigger_master_session property
        is pointed at the exporting session. Trigger kinds already
        configured on all sessions are left untouched. For shared reference
        triggers on acquisition sessions, holdoff properties are set on the
        master so it does not recognize the trigger before the other
        sessions are ready; with differing sample clock rates/timebase
        rates/sample counts also set the driver holdoff properties
        yourself.

        If the steps appropriate for the given sessions cannot all be
        performed an error is returned; in that case use the instrument
        driver signal-routing methods together with the NI-TClk
        start/ref/script/pause_trigger_master_session properties.

        Note: TClk synchronization is not supported for pause triggers on
        acquisition sessions.

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        code = self._library.niTClk_ConfigureForHomogeneousTriggers(count, sessions_buf)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)

    def finish_sync_pulse_sender_synchronize(self, sessions, min_time=datetime.timedelta(seconds=0.0)):
        '''finish_sync_pulse_sender_synchronize

        Finishes synchronizing the Sync Pulse Sender.

        Args:
            sessions (list of (nimi-python Session class or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
            min_time (float in seconds or datetime.timedelta): Minimal
                period of TClk in seconds, 0.0 s to 0.050 s (50 ms).
                Values below the single-chassis minimum of 200 ns are
                coerced up to 200 ns; for multichassis synchronization,
                raise this to cover propagation delays through devices and
                cables.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        min_time_c = _converters.convert_timedelta_to_seconds_real64(min_time)
        code = self._library.niTClk_FinishSyncPulseSenderSynchronize(count, sessions_buf, min_time_c)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)

    def _get_extended_error_info(self):
        '''_get_extended_error_info

        Reports extended error information for the most recent NI-TClk
        method that returned an error. Use the return values of the
        individual methods to establish which one failed, because once
        this method has reported an errorString it does not report it
        again.

        Returns:
            error_string (str): Extended error description.
        '''
        # Size query: a NULL buffer makes the call report (as a warning
        # return code) the buffer length needed for the full description.
        size = _visatype.ViUInt32()
        code = self._library.niTClk_GetExtendedErrorInfo(None, size)
        errors.handle_error(self, code, ignore_warnings=True, is_error_handling=True)
        # Fetch the actual string with a buffer of the reported size.
        size = _visatype.ViUInt32(code)
        buf = (_visatype.ViChar * size.value)()
        code = self._library.niTClk_GetExtendedErrorInfo(buf, size)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=True)
        return buf.value.decode(self._encoding)

    def initiate(self, sessions):
        '''initiate

        Initiates the given acquisition or generation sessions, taking into
        consideration any special requirements needed for synchronization —
        e.g. the session exporting the TClk-synchronized start trigger is
        initiated only after all sessions importing it.

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        code = self._library.niTClk_Initiate(count, sessions_buf)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)

    def is_done(self, sessions):
        '''is_done

        Monitors the progress of the acquisitions and/or generations
        corresponding to sessions.

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.

        Returns:
            done (bool): True once every session has completed without
                errors, or as soon as any session reports an error.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        done = _visatype.ViBoolean()
        code = self._library.niTClk_IsDone(count, sessions_buf, ctypes.pointer(done))
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)
        return bool(done.value)

    def setup_for_sync_pulse_sender_synchronize(self, sessions, min_time=datetime.timedelta(seconds=0.0)):
        '''setup_for_sync_pulse_sender_synchronize

        Configures the TClks on all the devices and prepares the Sync
        Pulse Sender for synchronization.

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
            min_time (float in seconds or datetime.timedelta): Minimal
                period of TClk in seconds, 0.0 s to 0.050 s (50 ms).
                Values below the single-chassis minimum of 200 ns are
                coerced up to 200 ns; for multichassis synchronization,
                raise this to cover propagation delays through devices and
                cables.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        min_time_c = _converters.convert_timedelta_to_seconds_real64(min_time)
        code = self._library.niTClk_SetupForSyncPulseSenderSynchronize(count, sessions_buf, min_time_c)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)

    def synchronize(self, sessions, min_tclk_period=datetime.timedelta(seconds=0.0)):
        '''synchronize

        Synchronizes the TClk signals on the given sessions; afterwards the
        TClk signals from all sessions are synchronized.

        Note: Before using this method, verify that your system is
        configured as specified in the "PXI Trigger Lines and RTSI Lines"
        topic of the NI-TClk Synchronization Help
        (Start>>Programs>>National Instruments>>NI-TClk).

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
            min_tclk_period (float in seconds or datetime.timedelta):
                Minimal period of TClk in seconds, 0.0 s to 0.050 s
                (50 ms). Values below the single-chassis minimum of 200 ns
                are coerced up to 200 ns; for multichassis
                synchronization, raise this to cover propagation delays.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        period_c = _converters.convert_timedelta_to_seconds_real64(min_tclk_period)
        code = self._library.niTClk_Synchronize(count, sessions_buf, period_c)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)

    def synchronize_to_sync_pulse_sender(self, sessions, min_time=datetime.timedelta(seconds=0.0)):
        '''synchronize_to_sync_pulse_sender

        Synchronizes the other devices to the Sync Pulse Sender.

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
            min_time (float in seconds or datetime.timedelta): Minimal
                period of TClk in seconds, 0.0 s to 0.050 s (50 ms).
                Values below the single-chassis minimum of 200 ns are
                coerced up to 200 ns; for multichassis synchronization,
                raise this to cover propagation delays through devices and
                cables.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        min_time_c = _converters.convert_timedelta_to_seconds_real64(min_time)
        code = self._library.niTClk_SynchronizeToSyncPulseSender(count, sessions_buf, min_time_c)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)

    def wait_until_done(self, sessions, timeout=datetime.timedelta(seconds=0.0)):
        '''wait_until_done

        Blocks until the acquisitions and/or generations corresponding to
        sessions are done, or until a timeout error is returned. The
        operation status is checked periodically and control returns to the
        caller on successful completion or on any error (including
        timeout). Most useful for finite operations expected to complete
        within a known time.

        Args:
            sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
            timeout (float in seconds or datetime.timedelta): How long to
                wait for the sessions to complete before returning a
                timeout error.
        '''
        count = _visatype.ViUInt32(0 if sessions is None else len(sessions))
        sessions_buf = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)
        timeout_c = _converters.convert_timedelta_to_seconds_real64(timeout)
        code = self._library.niTClk_WaitUntilDone(count, sessions_buf, timeout_c)
        errors.handle_error(self, code, ignore_warnings=False, is_error_handling=False)
def configure_for_homogeneous_triggers(sessions):
    '''configure_for_homogeneous_triggers

    Configures the properties commonly required for TClk synchronization of
    device sessions with homogeneous triggers in a single PXI chassis or a
    single PC: reference clocks, start triggers, reference triggers, script
    triggers, and pause triggers.

    Reference clocks are configured only when the internal sample clocks or
    internal sample clock timebases are used and the reference clock source
    is not configured (None): on PXI every device is clocked from the
    10 MHz backplane clock (PXI_CLK10); on PCI one device exports its
    10 MHz onboard clock to RTSI 7 and the others lock to it.

    For each trigger kind (start; reference and script on sessions that
    support them; pause on generation sessions), if the trigger is None on
    every session — or on every session but one — the configured (or one
    arbitrarily chosen) session implicitly exports the trigger, the
    remaining sessions are set up for digital-edge triggers from the
    exported source, and the matching *_trigger_master_session property is
    pointed at the exporting session. Trigger kinds already configured on
    all sessions are left untouched. For shared reference triggers on
    acquisition sessions, holdoff properties are set on the master so it
    does not recognize the trigger before the other sessions are ready;
    with differing sample clock rates/timebase rates/sample counts also set
    the driver holdoff properties yourself.

    If the steps appropriate for the given sessions cannot all be performed
    an error is returned; in that case use the instrument driver
    signal-routing methods together with the NI-TClk
    start/ref/script/pause_trigger_master_session properties.

    Note: TClk synchronization is not supported for pause triggers on
    acquisition sessions.

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
    '''
    # Delegate to a throwaway private session that wraps the C library.
    session = _Session()
    return session.configure_for_homogeneous_triggers(sessions)
def finish_sync_pulse_sender_synchronize(sessions, min_time):
    """Finish synchronizing the Sync Pulse Sender.

    Args:
        sessions (list of (nimi-python Session class or nitclk.SessionReference)):
            sessions that are being synchronized.
        min_time (float in seconds or datetime.timedelta): minimal TClk period,
            supported between 0.0 s and 0.050 s (50 ms). Values below the
            200 ns single chassis/PC minimum are coerced to 200 ns. For
            multichassis synchronization, increase this value to account for
            propagation delays through the devices and cables.
    """
    tclk = _Session()
    return tclk.finish_sync_pulse_sender_synchronize(sessions, min_time)
def initiate(sessions):
    """Initiate the given acquisition/generation sessions in TClk-aware order.

    The session exporting the TClk-synchronized start trigger is not started
    until every session importing that trigger has been initiated.

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)):
            sessions that are being synchronized.
    """
    tclk = _Session()
    return tclk.initiate(sessions)
def is_done(sessions):
    """Monitor the progress of the acquisitions and/or generations of the sessions.

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)):
            sessions that are being synchronized.

    Returns:
        bool: True when the operation is done, i.e. every session completed
        without errors, or as soon as any one of the sessions reports an error.
    """
    tclk = _Session()
    return tclk.is_done(sessions)
def setup_for_sync_pulse_sender_synchronize(sessions, min_time):
    """Configure the TClks on all devices and prepare the Sync Pulse Sender.

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)):
            sessions that are being synchronized.
        min_time (float in seconds or datetime.timedelta): minimal TClk period,
            supported between 0.0 s and 0.050 s (50 ms). Values below the
            200 ns single chassis/PC minimum are coerced to 200 ns. For
            multichassis synchronization, increase this value to account for
            propagation delays through the devices and cables.
    """
    tclk = _Session()
    return tclk.setup_for_sync_pulse_sender_synchronize(sessions, min_time)
def synchronize(sessions, min_tclk_period):
    """Synchronize the TClk signals on the given sessions.

    After this call returns, the TClk signals from all sessions are
    synchronized. Note: before using this method, verify that the system is
    configured as specified in the "PXI Trigger Lines and RTSI Lines" topic of
    the NI-TClk Synchronization Help (Start>>Programs>>National
    Instruments>>NI-TClk).

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)):
            sessions that are being synchronized.
        min_tclk_period (float in seconds or datetime.timedelta): minimal TClk
            period, supported between 0.0 s and 0.050 s (50 ms). Values below
            the 200 ns single chassis/PC minimum are coerced to 200 ns. For
            multichassis synchronization, increase this value to account for
            propagation delays through the devices and cables.
    """
    tclk = _Session()
    return tclk.synchronize(sessions, min_tclk_period)
def synchronize_to_sync_pulse_sender(sessions, min_time):
    """Synchronize the other devices to the Sync Pulse Sender.

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)):
            sessions that are being synchronized.
        min_time (float in seconds or datetime.timedelta): minimal TClk period,
            supported between 0.0 s and 0.050 s (50 ms). Values below the
            200 ns single chassis/PC minimum are coerced to 200 ns. For
            multichassis synchronization, increase this value to account for
            propagation delays through the devices and cables.
    """
    tclk = _Session()
    return tclk.synchronize_to_sync_pulse_sender(sessions, min_time)
def wait_until_done(sessions, timeout):
    """Block until the operations of the sessions are done or *timeout* elapses.

    Periodically checks the operation status and returns control to the
    calling program when the operation completes successfully or an error
    (including a timeout error) occurs. Most useful for finite data operations
    expected to complete within a certain time.

    Args:
        sessions (list of (Driver Session or nitclk.SessionReference)):
            sessions that are being synchronized.
        timeout (float in seconds or datetime.timedelta): how long to wait for
            the sessions to complete; if exceeded, a timeout error is returned.
    """
    tclk = _Session()
    return tclk.wait_until_done(sessions, timeout)
| [
"nitclk._visatype.ViBoolean",
"ctypes.pointer",
"datetime.timedelta",
"nitclk._visatype.ViAttr",
"nitclk.errors.handle_error",
"threading.Lock",
"pprint.PrettyPrinter",
"nitclk._attributes.AttributeViReal64",
"nitclk._converters.convert_timedelta_to_seconds_real64",
"nitclk._visatype.ViSession",
... | [((345, 375), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (365, 375), False, 'import pprint\n'), ((427, 443), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (441, 443), False, 'import threading\n'), ((1612, 1644), 'nitclk._attributes.AttributeViString', '_attributes.AttributeViString', (['(2)'], {}), '(2)\n', (1641, 1644), True, 'import nitclk._attributes as _attributes\n'), ((2352, 2384), 'nitclk._attributes.AttributeViString', '_attributes.AttributeViString', (['(9)'], {}), '(9)\n', (2381, 2384), True, 'import nitclk._attributes as _attributes\n'), ((3025, 3065), 'nitclk._attributes.AttributeSessionReference', '_attributes.AttributeSessionReference', (['(6)'], {}), '(6)\n', (3062, 3065), True, 'import nitclk._attributes as _attributes\n'), ((3396, 3436), 'nitclk._attributes.AttributeSessionReference', '_attributes.AttributeSessionReference', (['(4)'], {}), '(4)\n', (3433, 3436), True, 'import nitclk._attributes as _attributes\n'), ((3763, 3812), 'nitclk._attributes.AttributeViReal64TimeDeltaSeconds', '_attributes.AttributeViReal64TimeDeltaSeconds', (['(11)'], {}), '(11)\n', (3808, 3812), True, 'import nitclk._attributes as _attributes\n'), ((4650, 4691), 'nitclk._attributes.AttributeSessionReference', '_attributes.AttributeSessionReference', (['(16)'], {}), '(16)\n', (4687, 4691), True, 'import nitclk._attributes as _attributes\n'), ((5031, 5071), 'nitclk._attributes.AttributeSessionReference', '_attributes.AttributeSessionReference', (['(3)'], {}), '(3)\n', (5068, 5071), True, 'import nitclk._attributes as _attributes\n'), ((5399, 5432), 'nitclk._attributes.AttributeViString', '_attributes.AttributeViString', (['(10)'], {}), '(10)\n', (5428, 5432), True, 'import nitclk._attributes as _attributes\n'), ((5991, 6024), 'nitclk._attributes.AttributeViString', '_attributes.AttributeViString', (['(13)'], {}), '(13)\n', (6020, 6024), True, 'import nitclk._attributes as _attributes\n'), ((6764, 6796), 
'nitclk._attributes.AttributeViString', '_attributes.AttributeViString', (['(1)'], {}), '(1)\n', (6793, 6796), True, 'import nitclk._attributes as _attributes\n'), ((7618, 7650), 'nitclk._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(8)'], {}), '(8)\n', (7647, 7650), True, 'import nitclk._attributes as _attributes\n'), ((7896, 7920), 'nitclk._library_singleton.get', '_library_singleton.get', ([], {}), '()\n', (7918, 7920), True, 'import nitclk._library_singleton as _library_singleton\n'), ((10236, 10277), 'nitclk._visatype.ViSession', '_visatype.ViSession', (['self._session_number'], {}), '(self._session_number)\n', (10255, 10277), True, 'import nitclk._visatype as _visatype\n'), ((10440, 10470), 'nitclk._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (10456, 10470), True, 'import nitclk._visatype as _visatype\n'), ((10506, 10526), 'nitclk._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (10524, 10526), True, 'import nitclk._visatype as _visatype\n'), ((10730, 10819), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (10749, 10819), True, 'import nitclk.errors as errors\n'), ((11792, 11833), 'nitclk._visatype.ViSession', '_visatype.ViSession', (['self._session_number'], {}), '(self._session_number)\n', (11811, 11833), True, 'import nitclk._visatype as _visatype\n'), ((11996, 12026), 'nitclk._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (12012, 12026), True, 'import nitclk._visatype as _visatype\n'), ((12062, 12083), 'nitclk._visatype.ViSession', '_visatype.ViSession', ([], {}), '()\n', (12081, 12083), True, 'import nitclk._visatype as _visatype\n'), ((12288, 12377), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': 
'(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (12307, 12377), True, 'import nitclk.errors as errors\n'), ((14053, 14094), 'nitclk._visatype.ViSession', '_visatype.ViSession', (['self._session_number'], {}), '(self._session_number)\n', (14072, 14094), True, 'import nitclk._visatype as _visatype\n'), ((14257, 14287), 'nitclk._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (14273, 14287), True, 'import nitclk._visatype as _visatype\n'), ((14326, 14345), 'nitclk._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (14343, 14345), True, 'import nitclk._visatype as _visatype\n'), ((14554, 14642), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=False)\n', (14573, 14642), True, 'import nitclk.errors as errors\n'), ((14664, 14693), 'nitclk._visatype.ViInt32', '_visatype.ViInt32', (['error_code'], {}), '(error_code)\n', (14681, 14693), True, 'import nitclk._visatype as _visatype\n'), ((14941, 15030), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (14960, 15030), True, 'import nitclk.errors as errors\n'), ((15959, 15979), 'nitclk._visatype.ViUInt32', '_visatype.ViUInt32', ([], {}), '()\n', (15977, 15979), True, 'import nitclk._visatype as _visatype\n'), ((16109, 16196), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=True)\n', (16128, 16196), True, 'import nitclk.errors as errors\n'), ((16227, 16257), 'nitclk._visatype.ViUInt32', '_visatype.ViUInt32', (['error_code'], {}), '(error_code)\n', (16245, 16257), True, 'import 
nitclk._visatype as _visatype\n'), ((16482, 16570), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=True)\n', (16501, 16570), True, 'import nitclk.errors as errors\n'), ((17669, 17710), 'nitclk._visatype.ViSession', '_visatype.ViSession', (['self._session_number'], {}), '(self._session_number)\n', (17688, 17710), True, 'import nitclk._visatype as _visatype\n'), ((17873, 17903), 'nitclk._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (17889, 17903), True, 'import nitclk._visatype as _visatype\n'), ((17939, 17964), 'nitclk._visatype.ViReal64', '_visatype.ViReal64', (['value'], {}), '(value)\n', (17957, 17964), True, 'import nitclk._visatype as _visatype\n'), ((18117, 18206), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (18136, 18206), True, 'import nitclk.errors as errors\n'), ((19405, 19446), 'nitclk._visatype.ViSession', '_visatype.ViSession', (['self._session_number'], {}), '(self._session_number)\n', (19424, 19446), True, 'import nitclk._visatype as _visatype\n'), ((19609, 19639), 'nitclk._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (19625, 19639), True, 'import nitclk._visatype as _visatype\n'), ((19675, 19701), 'nitclk._visatype.ViSession', '_visatype.ViSession', (['value'], {}), '(value)\n', (19694, 19701), True, 'import nitclk._visatype as _visatype\n'), ((19855, 19944), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (19874, 19944), True, 'import nitclk.errors as errors\n'), ((21098, 21139), 
'nitclk._visatype.ViSession', '_visatype.ViSession', (['self._session_number'], {}), '(self._session_number)\n', (21117, 21139), True, 'import nitclk._visatype as _visatype\n'), ((21302, 21332), 'nitclk._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (21318, 21332), True, 'import nitclk._visatype as _visatype\n'), ((21578, 21667), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (21597, 21667), True, 'import nitclk.errors as errors\n'), ((21976, 22000), 'nitclk._library_singleton.get', '_library_singleton.get', ([], {}), '()\n', (21998, 22000), True, 'import nitclk._library_singleton as _library_singleton\n'), ((32279, 32368), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (32298, 32368), True, 'import nitclk.errors as errors\n'), ((32451, 32482), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.0)'}), '(seconds=0.0)\n', (32469, 32482), False, 'import datetime\n'), ((33579, 33636), 'nitclk._converters.convert_timedelta_to_seconds_real64', '_converters.convert_timedelta_to_seconds_real64', (['min_time'], {}), '(min_time)\n', (33626, 33636), True, 'import nitclk._converters as _converters\n'), ((33786, 33875), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (33805, 33875), True, 'import nitclk.errors as errors\n'), ((34763, 34783), 'nitclk._visatype.ViUInt32', '_visatype.ViUInt32', ([], {}), '()\n', (34781, 34783), True, 'import nitclk._visatype as _visatype\n'), ((34913, 35000), 'nitclk.errors.handle_error', 
'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=True)\n', (34932, 35000), True, 'import nitclk.errors as errors\n'), ((35031, 35061), 'nitclk._visatype.ViUInt32', '_visatype.ViUInt32', (['error_code'], {}), '(error_code)\n', (35049, 35061), True, 'import nitclk._visatype as _visatype\n'), ((35286, 35374), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=True)\n', (35305, 35374), True, 'import nitclk.errors as errors\n'), ((36390, 36479), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (36409, 36479), True, 'import nitclk.errors as errors\n'), ((37347, 37368), 'nitclk._visatype.ViBoolean', '_visatype.ViBoolean', ([], {}), '()\n', (37366, 37368), True, 'import nitclk._visatype as _visatype\n'), ((37538, 37627), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (37557, 37627), True, 'import nitclk.errors as errors\n'), ((37736, 37767), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.0)'}), '(seconds=0.0)\n', (37754, 37767), False, 'import datetime\n'), ((38905, 38962), 'nitclk._converters.convert_timedelta_to_seconds_real64', '_converters.convert_timedelta_to_seconds_real64', (['min_time'], {}), '(min_time)\n', (38952, 38962), True, 'import nitclk._converters as _converters\n'), ((39114, 39203), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, 
error_code, ignore_warnings=False,\n is_error_handling=False)\n', (39133, 39203), True, 'import nitclk.errors as errors\n'), ((39268, 39299), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.0)'}), '(seconds=0.0)\n', (39286, 39299), False, 'import datetime\n'), ((40753, 40817), 'nitclk._converters.convert_timedelta_to_seconds_real64', '_converters.convert_timedelta_to_seconds_real64', (['min_tclk_period'], {}), '(min_tclk_period)\n', (40800, 40817), True, 'import nitclk._converters as _converters\n'), ((40953, 41042), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (40972, 41042), True, 'import nitclk.errors as errors\n'), ((41121, 41152), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.0)'}), '(seconds=0.0)\n', (41139, 41152), False, 'import datetime\n'), ((42245, 42302), 'nitclk._converters.convert_timedelta_to_seconds_real64', '_converters.convert_timedelta_to_seconds_real64', (['min_time'], {}), '(min_time)\n', (42292, 42302), True, 'import nitclk._converters as _converters\n'), ((42448, 42537), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (42467, 42537), True, 'import nitclk.errors as errors\n'), ((42598, 42629), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.0)'}), '(seconds=0.0)\n', (42616, 42629), False, 'import datetime\n'), ((43921, 43977), 'nitclk._converters.convert_timedelta_to_seconds_real64', '_converters.convert_timedelta_to_seconds_real64', (['timeout'], {}), '(timeout)\n', (43968, 43977), True, 'import nitclk._converters as _converters\n'), ((44107, 44196), 'nitclk.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': 
'(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (44126, 44196), True, 'import nitclk.errors as errors\n'), ((795, 823), 'ctypes.POINTER', 'ctypes.POINTER', (['library_type'], {}), '(library_type)\n', (809, 823), False, 'import ctypes\n'), ((918, 950), 'numpy.ctypeslib.as_ctypes', 'numpy.ctypeslib.as_ctypes', (['value'], {}), '(value)\n', (943, 950), False, 'import numpy\n'), ((10692, 10719), 'ctypes.pointer', 'ctypes.pointer', (['value_ctype'], {}), '(value_ctype)\n', (10706, 10719), False, 'import ctypes\n'), ((12250, 12277), 'ctypes.pointer', 'ctypes.pointer', (['value_ctype'], {}), '(value_ctype)\n', (12264, 12277), False, 'import ctypes\n'), ((32052, 32111), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (32101, 32111), True, 'import nitclk._converters as _converters\n'), ((33446, 33505), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (33495, 33505), True, 'import nitclk._converters as _converters\n'), ((36186, 36245), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (36235, 36245), True, 'import nitclk._converters as _converters\n'), ((37218, 37277), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (37267, 37277), True, 'import nitclk._converters as _converters\n'), ((37501, 37527), 'ctypes.pointer', 'ctypes.pointer', (['done_ctype'], {}), '(done_ctype)\n', (37515, 37527), False, 'import ctypes\n'), ((38772, 38831), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (38821, 38831), True, 'import 
nitclk._converters as _converters\n'), ((40613, 40672), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (40662, 40672), True, 'import nitclk._converters as _converters\n'), ((42112, 42171), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (42161, 42171), True, 'import nitclk._converters as _converters\n'), ((43789, 43848), 'nitclk._converters.convert_to_nitclk_session_number_list', '_converters.convert_to_nitclk_session_number_list', (['sessions'], {}), '(sessions)\n', (43838, 43848), True, 'import nitclk._converters as _converters\n')] |
from random import random
import numpy as np
from math import e
def degrau(u):
    """Step (Heaviside) activation function: 1 for u >= 0, otherwise 0."""
    return 1 if u >= 0 else 0
def degrauBipolar(u):
    """Bipolar step activation: 1 for u > 0, 0 for u == 0, -1 for u < 0."""
    # (u > 0) - (u < 0) is the integer sign of u: 1, 0 or -1.
    return (u > 0) - (u < 0)
def linear(u):
    """Identity (linear) activation function: g(u) = u."""
    return u
def logistica(u,beta):
return 1/(1 + e**(-beta*u))
def tangenteHiperbolica(u, beta):
    """Hyperbolic-tangent-shaped activation: (1 - e**(-beta*u)) / (1 + e**(-beta*u)).

    Equivalent to tanh(beta*u/2). Evaluated in a numerically stable way: the
    naive single-expression form raises OverflowError when beta*u is a large
    negative number, because e**x overflows for x greater than roughly 709.

    Args:
        u: activation potential of the neuron.
        beta: slope parameter of the activation.

    Returns:
        float: value in the interval [-1, 1].
    """
    x = beta * u
    if x >= 0:
        z = e ** -x
        return (1 - z) / (1 + z)
    # For x < 0, multiply numerator and denominator by e**x so that only the
    # (negative) argument itself is exponentiated, avoiding overflow.
    z = e ** x
    return (z - 1) / (z + 1)
def camadaSimples(entrada_e_peso=(), entrada_e_peso_prod=(), limiarDeAtivacao=(), funcaoDeAtivacao=(), beta=None):
    """Evaluate a single-layer network of neurons, printing each computation step.

    Args:
        entrada_e_peso: per-input sequence of [(input value, weight)] pairs.
        entrada_e_peso_prod: per-neuron rows with the input*weight products.
        limiarDeAtivacao: activation threshold (theta) of each neuron.
        funcaoDeAtivacao: activation function code per neuron:
            1=step, 2=linear, 3=logistic, 4=hyperbolic tangent, 5=bipolar step.
        beta: slope parameter required by the logistic (3) and hyperbolic
            tangent (4) activations. Previously this was read from an implicit
            global variable; it is now an explicit, optional parameter (the
            positional interface of existing callers is unchanged).

    Returns:
        list: the output g(u) of each neuron.
    """
    potenciaisDeAtivacao = []  # activation potential u of each neuron
    saidas = []                # output g(u) of each neuron

    # Step 1: print the symbolic expression u = sum(x_i * w_i) - theta.
    print("\nPasso 1 [u = Σ - θ]:")
    for indexEntrada, produtos in enumerate(entrada_e_peso_prod):
        print("u"+str(indexEntrada+1)+" = ", end="")
        for indexCamada in range(len(produtos)):
            if indexCamada != len(produtos)-1:  # not the last term: continue the sum on the same line
                print("x"+str(indexCamada+1)+"*w("+str(indexCamada+1)+","+str(indexEntrada+1)+") + ", end="")
                continue
            print("x"+str(indexCamada+1)+"*w("+str(indexCamada+1)+","+str(indexEntrada+1)+") - θ"+str(indexEntrada+1))

    # Step 2: print the numeric expression and accumulate the potential u.
    print("\nPasso 2 [u = Σ - θ]:")
    for indexEntrada, produtos in enumerate(entrada_e_peso_prod):
        print("u"+str(indexEntrada+1)+" = ", end="")
        for indexCamada in range(len(produtos)):
            if indexCamada != len(produtos)-1:  # not the last term: continue the sum on the same line
                print(str(entrada_e_peso[indexCamada][indexEntrada][0])+"*"+str(entrada_e_peso[indexCamada][indexEntrada][1])+" + ", end="")
                continue
            u = sum(produtos)-limiarDeAtivacao[indexEntrada]
            print(str(entrada_e_peso[indexCamada][indexEntrada][0])+"*"+str(entrada_e_peso[indexCamada][indexEntrada][1]),
                  "-", limiarDeAtivacao[indexEntrada], "=>", u)
            potenciaisDeAtivacao.append(u)

    # Step 3: apply each neuron's activation function to its potential.
    k = 0  # default output when an unknown activation code is given
    print("\nPasso 3 g(u):")
    for indicePotencial, potencial in enumerate(potenciaisDeAtivacao):
        if funcaoDeAtivacao[indicePotencial] == 1:
            k = degrau(potencial)
        elif funcaoDeAtivacao[indicePotencial] == 2:
            k = linear(potencial)
        elif funcaoDeAtivacao[indicePotencial] == 3:
            k = logistica(potencial, beta)
        elif funcaoDeAtivacao[indicePotencial] == 4:
            k = tangenteHiperbolica(potencial, beta)
        elif funcaoDeAtivacao[indicePotencial] == 5:
            k = degrauBipolar(potencial)
        saidas.append(k)
        print("g(u"+str(indicePotencial+1)+") =", k)
    return saidas
if __name__ == '__main__':
    # Samples: [x1, x2, d(x)] - two inputs and the desired output d(x).
    amostras = [[0.9, 0.1, 1],
                [0.6, 0.5, 1],
                [0.2, 0.8, -1],
                [0.7, 0.2, 1],
                [0.5, 0.4, -1],
                [0.4, 0.6, 1],
                [0.25, 0.8, -1],
                [0.1, 0.9, -1],
                [0.3, 0.7, -1],
                [0.0, 1.0, -1]]
    for indexAm in range(len(amostras)):
        print("\n\nAmostra", indexAm+1)
        for epoca in range(1, 101):  # at most 100 epochs per sample
            quantidadeDeSaidas = 1  # the given samples always have a single output
            entrada_e_peso = []        # (input, weight) pair per input
            entrada_e_peso_prod = []   # input * weight product per input
            limiarDeAtivacao = []      # activation threshold of each neuron
            funcaoDeAtivacao = []      # activation function code of each neuron
            for entrada in amostras[indexAm][:2]:
                # Pair each sample input with a fresh random weight.
                entrada_e_peso.append([(entrada, random())])
            for linha in entrada_e_peso:
                # Product of the input with its weight, for each neuron.
                entrada_e_peso_prod.append(np.prod(linha, axis=1))
            for _ in range(quantidadeDeSaidas):
                limiarDeAtivacao.append(random())
                funcaoDeAtivacao.append(5)  # bipolar step used for the random trials
            beta = None
            if (3 in funcaoDeAtivacao) or (4 in funcaoDeAtivacao):
                beta = 1  # beta fixed at 1 whenever a sigmoid-like activation is used
            entrada_e_peso = np.array(entrada_e_peso)
            entrada_e_peso_prod = np.array(entrada_e_peso_prod).transpose()
            y = camadaSimples(entrada_e_peso, entrada_e_peso_prod, limiarDeAtivacao, funcaoDeAtivacao)[0]
            if y != amostras[indexAm][-1]:  # compare obtained vs desired output
                continue
            print("Solução encontrada na época", epoca)
            break
| [
"numpy.array",
"random.random",
"numpy.prod"
] | [((4321, 4345), 'numpy.array', 'np.array', (['entrada_e_peso'], {}), '(entrada_e_peso)\n', (4329, 4345), True, 'import numpy as np\n'), ((3896, 3918), 'numpy.prod', 'np.prod', (['linha'], {'axis': '(1)'}), '(linha, axis=1)\n', (3903, 3918), True, 'import numpy as np\n'), ((4008, 4016), 'random.random', 'random', ([], {}), '()\n', (4014, 4016), False, 'from random import random\n'), ((4380, 4409), 'numpy.array', 'np.array', (['entrada_e_peso_prod'], {}), '(entrada_e_peso_prod)\n', (4388, 4409), True, 'import numpy as np\n'), ((3665, 3673), 'random.random', 'random', ([], {}), '()\n', (3671, 3673), False, 'from random import random\n')] |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import List, Union, Dict
import numpy as np
import matplotlib as plt
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype
from qf_lib.analysis.strategy_monitoring.pnl_calculator import PnLCalculator
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.analysis.common.abstract_document import AbstractDocument
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.backtesting.portfolio.transaction import Transaction
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.exceptions.future_contracts_exceptions import NoValidTickerException
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.common.utils.dateutils.timer import SettableTimer
from qf_lib.common.utils.error_handling import ErrorHandling
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.futures.futures_adjustment_method import FuturesAdjustmentMethod
from qf_lib.containers.futures.futures_chain import FuturesChain
from qf_lib.containers.series.prices_series import PricesSeries
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
from qf_lib.data_providers.data_provider import DataProvider
from qf_lib.documents_utils.document_exporting.element.df_table import DFTable
from qf_lib.documents_utils.document_exporting.element.heading import HeadingElement
from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement
from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement
from qf_lib.documents_utils.document_exporting.pdf_exporter import PDFExporter
from qf_lib.settings import Settings
@ErrorHandling.class_error_logging()
class AssetPerfAndDrawdownSheet(AbstractDocument):
"""
For each of the given tickers, provides performance and drawdown comparison of the strategy vs buy and hold.
    It also computes the performance contribution and PnL of each of the assets with the given frequency (either yearly
or monthly).
Note: It is assumed that at the beginning no positions are open in the portfolio.
Parameters
-----------
category_to_model_tickers: Dict[str, List[Ticker]]
Dictionary mapping a string, which denotes a category / sector etc, into a list of tickers. The categories are
used to provide aggregated information about performance contribution in each of them (e.g. to compute
performance contribution of different sectors, a dictionary mapping sector names into tickers objects).
contract_ticker_mapper: ContractTickerMapper
An instance of the ContractTickerMapper used to map the tickers into corresponding contracts, which are
used in the Transactions objects
transactions: Union[List[Transaction], str]
Either list of Transaction objects or a path to the Transactions file.
start_date: datetime
end_date: datetime
Dates to used as start and end date for the statistics
data_provider: DataProvider
Data provider used to download the prices and future contracts information, necessary to compute Buy and Hold
benchmark performance
settings: Settings
Necessary settings
pdf_exporter: PDFExporter
Used to export the document to PDF
title: str
Title of the document, will be a part of the filename. Do not use special characters.
initial_cash: int
Initial cash in the portfolio (used to compute the performance contribution for each asset)
frequency: Frequency
Frequency which should be used to compute the performance contribution. Currently only Yearly and Monthly
frequencies are supported.
"""
def __init__(self, category_to_model_tickers: Dict[str, List[Ticker]], contract_ticker_mapper: ContractTickerMapper,
transactions: Union[List[Transaction], str], start_date: datetime, end_date: datetime,
data_provider: DataProvider, settings: Settings, pdf_exporter: PDFExporter,
title: str = "Assets Monitoring Sheet", initial_cash: int = 10000000,
frequency: Frequency = Frequency.YEARLY):
super().__init__(settings, pdf_exporter, title=title)
self.tickers = [t for tickers_list in category_to_model_tickers.values() for t in tickers_list]
self._ticker_to_category = {ticker: c for c, tickers_list in category_to_model_tickers.items()
for ticker in tickers_list}
self._contract_ticker_mapper = contract_ticker_mapper
self._pnl_calculator = PnLCalculator(data_provider, contract_ticker_mapper)
self.transactions = self._parse_transactions_file(transactions) if isinstance(transactions, str) \
else transactions
self._start_date = start_date
self._end_date = end_date
self._data_provider = data_provider
self._initial_cash = initial_cash
if frequency not in (Frequency.MONTHLY, Frequency.YEARLY):
raise NotImplementedError("Only monthly and yearly frequencies are currently supported.")
self._frequency = frequency
self._max_columns_per_page = 7
self._logger = qf_logger.getChild(self.__class__.__name__)
def build_document(self):
self._add_header()
self.document.add_element(ParagraphElement("\n"))
ticker_to_pnl_series = self._compute_pnl()
self._add_pnl_and_performance_contribution_tables(ticker_to_pnl_series)
self._add_performance_statistics(ticker_to_pnl_series)
def _parse_transactions_file(self, path_to_transactions_file: str) -> List[Transaction]:
""" Parse the Transactions csv file created by the Monitor and generate a list of transactions objects. """
transactions_df = pd.read_csv(path_to_transactions_file)
transactions = [
Transaction(
time=pd.to_datetime(row.loc["Timestamp"]),
contract=Contract(
symbol=row.loc["Contract symbol"],
security_type=row.loc["Security type"],
exchange=row.loc["Exchange"],
contract_size=row.loc["Contract size"]
),
quantity=row.loc["Quantity"],
price=row.loc["Price"],
commission=row.loc["Commission"]
) for _, row in transactions_df.iterrows()
]
return transactions
def _compute_pnl(self) -> Dict[Ticker, PricesSeries]:
""" Computes PnL time series for each of the tickers. """
ticker_to_pnl_series = {ticker: self._pnl_calculator.compute_pnl(ticker, self.transactions, self._start_date,
self._end_date) for ticker in self.tickers}
return ticker_to_pnl_series
def _add_performance_statistics(self, ticker_to_pnl_series: Dict[Ticker, PricesSeries]):
""" Generate performance and drawdown plots, which provide the comparison between the strategy performance
and Buy and Hold performance for each of the assets.
"""
self.document.add_element(NewPageElement())
self.document.add_element(HeadingElement(level=2, text="Performance and Drawdowns - Strategy vs Buy and Hold"))
self.document.add_element(ParagraphElement("\n"))
for ticker in self.tickers:
grid = self._get_new_grid()
buy_and_hold_returns = self._generate_buy_and_hold_returns(ticker)
strategy_exposure_series = ticker_to_pnl_series[ticker].to_simple_returns().fillna(0.0)
strategy_exposure_series = strategy_exposure_series.where(strategy_exposure_series == 0.0).fillna(1.0)
strategy_returns = buy_and_hold_returns * strategy_exposure_series
strategy_returns = strategy_returns.dropna()
strategy_returns.name = "Strategy"
if len(strategy_returns) > 0:
perf_chart = self._get_perf_chart([buy_and_hold_returns, strategy_returns], False,
"Performance - {}".format(ticker.name))
underwater_chart = self._get_underwater_chart(strategy_returns.to_prices(),
title="Drawdown - {}".format(ticker.name),
benchmark_series=buy_and_hold_returns.to_prices(),
rotate_x_axis=True)
grid.add_chart(perf_chart)
grid.add_chart(underwater_chart)
self.document.add_element(grid)
else:
self._logger.warning("No data is available for {}. No plots will be generated.".format(ticker.name))
def _generate_buy_and_hold_returns(self, ticker: Ticker) -> SimpleReturnsSeries:
""" Computes series of simple returns, which would be returned by the Buy and Hold strategy. """
if isinstance(ticker, FutureTicker):
try:
ticker.initialize_data_provider(SettableTimer(self._end_date), self._data_provider)
futures_chain = FuturesChain(ticker, self._data_provider, FuturesAdjustmentMethod.BACK_ADJUSTED)
prices_series = futures_chain.get_price(PriceField.Close, self._start_date, self._end_date)
except NoValidTickerException:
prices_series = PricesSeries()
else:
prices_series = self._data_provider.get_price(ticker, PriceField.Close, self._start_date, self._end_date)
returns_tms = prices_series.to_simple_returns().replace([-np.inf, np.inf], np.nan).fillna(0.0)
returns_tms.name = "Buy and Hold"
return returns_tms
def _add_pnl_and_performance_contribution_tables(self, ticker_to_pnl: Dict[Ticker, PricesSeries]):
# For each ticker compute the PnL for each period (each year, month etc)
pnl_df = QFDataFrame.from_dict(ticker_to_pnl)
agg_performance = pnl_df.groupby(pd.Grouper(key=pnl_df.index.name, freq=self._frequency.to_pandas_freq())) \
.apply(lambda s: s.iloc[-1] - s.iloc[0])
# Format the column labels, so that they point exactly to the considered time frame
column_labels_format = {
Frequency.YEARLY: "%Y",
Frequency.MONTHLY: "%b %Y",
}
columns_format = column_labels_format[self._frequency]
performance_df = agg_performance.rename(index=lambda timestamp: timestamp.strftime(columns_format))
# Transpose the original data frame, so that performance for each period is presented in a separate column
performance_df = performance_df.transpose()
performance_df.index = performance_df.index.set_names("Asset")
performance_df = performance_df.reset_index()
performance_df["Asset"] = performance_df["Asset"].apply(lambda t: t.name)
performance_tables = self._create_performance_tables(performance_df.copy())
performance_contribution_tables = self._create_performance_contribution_tables(performance_df.copy())
# Add the text and all figures into the document
self.document.add_element(HeadingElement(level=2, text="Profit and Loss"))
self.document.add_element(ParagraphElement("The following tables provide the details on the Total profit and "
"loss for each asset (notional in currency units)."))
self.document.add_element(ParagraphElement("\n"))
for table in performance_tables:
self.document.add_element(HeadingElement(level=3, text="Performance between: {} - {}".format(
table.model.data.columns[1], table.model.data.columns[-1])))
self.document.add_element(table)
self.document.add_element(ParagraphElement("\n"))
self.document.add_element(NewPageElement())
# Add performance contribution table
self.document.add_element(HeadingElement(level=2, text="Performance contribution"))
for table in performance_contribution_tables:
self.document.add_element(HeadingElement(level=3, text="Performance contribution between {} - {}".format(
table.model.data.columns[1], table.model.data.columns[-1])))
self.document.add_element(table)
def _create_performance_tables(self, performance_df: QFDataFrame) -> List[DFTable]:
""" Create a formatted DFTable out of the performance_df data frame. """
numeric_columns = [col for col in performance_df.columns if is_numeric_dtype(performance_df[col])]
performance_df[numeric_columns] = performance_df[numeric_columns].applymap(lambda x: '{:,.0f}'.format(x))
performance_df = performance_df.set_index("Asset").sort_index()
# Divide the performance df into a number of data frames, so that each of them contains up to
# self.max_col_per_page columns, but keep the first column of the original df in all of them
split_dfs = np.array_split(performance_df, np.ceil(performance_df.num_of_columns / self._max_columns_per_page),
axis=1)
df_tables = [DFTable(df.reset_index(), css_classes=['table', 'shrink-font', 'right-align', 'wide-first-column'])
for df in split_dfs]
return df_tables
def _create_performance_contribution_tables(self, performance_df: QFDataFrame) -> List[DFTable]:
"""
Create a list of DFTables with assets names in the index and different years / months in columns, which contains
details on the performance contribution for each asset.
"""
# Create a QFSeries which contains the initial amount of cash in the portfolio for each year / month
numeric_columns = [col for col in performance_df.columns if is_numeric_dtype(performance_df[col])]
portfolio_values = performance_df[numeric_columns].sum().shift(fill_value=self._initial_cash).cumsum()
performance_df[numeric_columns] = performance_df[numeric_columns] / portfolio_values[numeric_columns]
# Add category column and aggregate data accordingly
ticker_name_to_category = {t.name: category for t, category in self._ticker_to_category.items()}
performance_df["Category"] = performance_df["Asset"].apply(lambda t: ticker_name_to_category[t])
all_categories = list(set(ticker_name_to_category.values()))
performance_df = performance_df.sort_values(by=["Category", "Asset"])
performance_df = performance_df.groupby("Category").apply(
lambda d: pd.concat([PricesDataFrame({**{"Asset": [d.name], "Category": [d.name]},
**{c: [d[c].sum()] for c in numeric_columns}}), d],
ignore_index=True)).drop(columns=["Category"])
# Add the Total Performance row (divide by 2 as the df contains already aggregated data for each group)
total_sum_row = performance_df[numeric_columns].sum() / 2
total_sum_row["Asset"] = "Total Performance"
performance_df = performance_df.append(total_sum_row, ignore_index=True)
# Format the rows using the percentage formatter
performance_df[numeric_columns] = performance_df[numeric_columns].applymap(lambda x: '{:.2%}'.format(x))
# Divide the performance dataframe into a number of dataframes, so that each of them contains up to
# self._max_columns_per_page columns
split_dfs = np.array_split(performance_df.set_index("Asset"),
np.ceil((performance_df.num_of_columns - 1) / self._max_columns_per_page), axis=1)
df_tables = [DFTable(df.reset_index(), css_classes=['table', 'shrink-font', 'right-align', 'wide-first-column'])
for df in split_dfs]
# Get the indices of rows, which contain category info
category_indices = performance_df[performance_df["Asset"].isin(all_categories)].index
for df_table in df_tables:
# Add table formatting, highlight rows showing the total contribution of the given category
df_table.add_rows_styles(category_indices, {"font-weight": "bold", "font-size": "0.95em",
"background-color": "#cbd0d2"})
df_table.add_rows_styles([performance_df.index[-1]], {"font-weight": "bold", "font-size": "0.95em",
"background-color": "#b9bcbd"})
return df_tables
def save(self, report_dir: str = ""):
# Set the style for the report
plt.style.use(['tearsheet'])
filename = "%Y_%m_%d-%H%M {}.pdf".format(self.title)
filename = datetime.now().strftime(filename)
return self.pdf_exporter.generate([self.document], report_dir, filename)
| [
"numpy.ceil",
"qf_lib.containers.dataframe.qf_dataframe.QFDataFrame.from_dict",
"qf_lib.backtesting.contract.contract.Contract",
"qf_lib.common.utils.dateutils.timer.SettableTimer",
"pandas.read_csv",
"qf_lib.common.utils.logging.qf_parent_logger.qf_logger.getChild",
"qf_lib.documents_utils.document_exp... | [((2685, 2720), 'qf_lib.common.utils.error_handling.ErrorHandling.class_error_logging', 'ErrorHandling.class_error_logging', ([], {}), '()\n', (2718, 2720), False, 'from qf_lib.common.utils.error_handling import ErrorHandling\n'), ((5596, 5648), 'qf_lib.analysis.strategy_monitoring.pnl_calculator.PnLCalculator', 'PnLCalculator', (['data_provider', 'contract_ticker_mapper'], {}), '(data_provider, contract_ticker_mapper)\n', (5609, 5648), False, 'from qf_lib.analysis.strategy_monitoring.pnl_calculator import PnLCalculator\n'), ((6215, 6258), 'qf_lib.common.utils.logging.qf_parent_logger.qf_logger.getChild', 'qf_logger.getChild', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (6233, 6258), False, 'from qf_lib.common.utils.logging.qf_parent_logger import qf_logger\n'), ((6806, 6844), 'pandas.read_csv', 'pd.read_csv', (['path_to_transactions_file'], {}), '(path_to_transactions_file)\n', (6817, 6844), True, 'import pandas as pd\n'), ((10995, 11031), 'qf_lib.containers.dataframe.qf_dataframe.QFDataFrame.from_dict', 'QFDataFrame.from_dict', (['ticker_to_pnl'], {}), '(ticker_to_pnl)\n', (11016, 11031), False, 'from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame\n'), ((17725, 17753), 'matplotlib.style.use', 'plt.style.use', (["['tearsheet']"], {}), "(['tearsheet'])\n", (17738, 17753), True, 'import matplotlib as plt\n'), ((6351, 6373), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (6367, 6373), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((8172, 8188), 'qf_lib.documents_utils.document_exporting.element.new_page.NewPageElement', 'NewPageElement', ([], {}), '()\n', (8186, 8188), False, 'from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement\n'), ((8224, 8313), 
'qf_lib.documents_utils.document_exporting.element.heading.HeadingElement', 'HeadingElement', ([], {'level': '(2)', 'text': '"""Performance and Drawdowns - Strategy vs Buy and Hold"""'}), "(level=2, text=\n 'Performance and Drawdowns - Strategy vs Buy and Hold')\n", (8238, 8313), False, 'from qf_lib.documents_utils.document_exporting.element.heading import HeadingElement\n'), ((8344, 8366), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (8360, 8366), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((12247, 12294), 'qf_lib.documents_utils.document_exporting.element.heading.HeadingElement', 'HeadingElement', ([], {'level': '(2)', 'text': '"""Profit and Loss"""'}), "(level=2, text='Profit and Loss')\n", (12261, 12294), False, 'from qf_lib.documents_utils.document_exporting.element.heading import HeadingElement\n'), ((12330, 12474), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""The following tables provide the details on the Total profit and loss for each asset (notional in currency units)."""'], {}), "(\n 'The following tables provide the details on the Total profit and loss for each asset (notional in currency units).'\n )\n", (12346, 12474), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((12554, 12576), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (12570, 12576), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((12945, 12961), 'qf_lib.documents_utils.document_exporting.element.new_page.NewPageElement', 'NewPageElement', ([], {}), '()\n', (12959, 12961), False, 'from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement\n'), ((13043, 13099), 
'qf_lib.documents_utils.document_exporting.element.heading.HeadingElement', 'HeadingElement', ([], {'level': '(2)', 'text': '"""Performance contribution"""'}), "(level=2, text='Performance contribution')\n", (13057, 13099), False, 'from qf_lib.documents_utils.document_exporting.element.heading import HeadingElement\n'), ((14113, 14180), 'numpy.ceil', 'np.ceil', (['(performance_df.num_of_columns / self._max_columns_per_page)'], {}), '(performance_df.num_of_columns / self._max_columns_per_page)\n', (14120, 14180), True, 'import numpy as np\n'), ((16666, 16739), 'numpy.ceil', 'np.ceil', (['((performance_df.num_of_columns - 1) / self._max_columns_per_page)'], {}), '((performance_df.num_of_columns - 1) / self._max_columns_per_page)\n', (16673, 16739), True, 'import numpy as np\n'), ((10209, 10294), 'qf_lib.containers.futures.futures_chain.FuturesChain', 'FuturesChain', (['ticker', 'self._data_provider', 'FuturesAdjustmentMethod.BACK_ADJUSTED'], {}), '(ticker, self._data_provider, FuturesAdjustmentMethod.BACK_ADJUSTED\n )\n', (10221, 10294), False, 'from qf_lib.containers.futures.futures_chain import FuturesChain\n'), ((12886, 12908), 'qf_lib.documents_utils.document_exporting.element.paragraph.ParagraphElement', 'ParagraphElement', (['"""\n"""'], {}), "('\\n')\n", (12902, 12908), False, 'from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\n'), ((13633, 13670), 'pandas.core.dtypes.common.is_numeric_dtype', 'is_numeric_dtype', (['performance_df[col]'], {}), '(performance_df[col])\n', (13649, 13670), False, 'from pandas.core.dtypes.common import is_numeric_dtype\n'), ((14901, 14938), 'pandas.core.dtypes.common.is_numeric_dtype', 'is_numeric_dtype', (['performance_df[col]'], {}), '(performance_df[col])\n', (14917, 14938), False, 'from pandas.core.dtypes.common import is_numeric_dtype\n'), ((17834, 17848), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17846, 17848), False, 'from datetime import datetime\n'), ((6916, 6952), 
'pandas.to_datetime', 'pd.to_datetime', (["row.loc['Timestamp']"], {}), "(row.loc['Timestamp'])\n", (6930, 6952), True, 'import pandas as pd\n'), ((6979, 7142), 'qf_lib.backtesting.contract.contract.Contract', 'Contract', ([], {'symbol': "row.loc['Contract symbol']", 'security_type': "row.loc['Security type']", 'exchange': "row.loc['Exchange']", 'contract_size': "row.loc['Contract size']"}), "(symbol=row.loc['Contract symbol'], security_type=row.loc[\n 'Security type'], exchange=row.loc['Exchange'], contract_size=row.loc[\n 'Contract size'])\n", (6987, 7142), False, 'from qf_lib.backtesting.contract.contract import Contract\n'), ((10125, 10154), 'qf_lib.common.utils.dateutils.timer.SettableTimer', 'SettableTimer', (['self._end_date'], {}), '(self._end_date)\n', (10138, 10154), False, 'from qf_lib.common.utils.dateutils.timer import SettableTimer\n'), ((10473, 10487), 'qf_lib.containers.series.prices_series.PricesSeries', 'PricesSeries', ([], {}), '()\n', (10485, 10487), False, 'from qf_lib.containers.series.prices_series import PricesSeries\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os

import numpy as np

from mindboggle.mio import colors
from mindboggle.mio.colors import distinguishable_colors, label_adjacency_matrix
if __name__ == "__main__":
    description = ('calculate colormap for labeled image;'
                   'calculated result is stored in output_dirname/colors.npy')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('label_filename', help='path to the label image')
    parser.add_argument('output_dirname', help='path to the folder storing '
                                               'temporary files and result')
    parser.add_argument('-v', '--verbose', action='store_true', default=False)
    args = parser.parse_args()

    if not os.path.isdir(args.output_dirname):
        os.makedirs(args.output_dirname)

    # Intermediate results are cached as .npy files so reruns can skip the
    # expensive steps that already completed.
    matrix_filename = os.path.join(args.output_dirname, 'matrix.npy')
    colormap_filename = os.path.join(args.output_dirname, 'colormap.npy')
    labels_filename = os.path.join(args.output_dirname, 'labels.npy')
    colors_filename = os.path.join(args.output_dirname, 'colors.npy')

    # Step 1: label adjacency matrix (which labels touch which).
    if args.verbose:
        print('finding adjacency maps...')
    if not os.path.isfile(matrix_filename) or \
            not os.path.isfile(labels_filename):
        labels, matrix = label_adjacency_matrix(args.label_filename,
                                                out_dir=args.output_dirname)[:2]
        # DataFrame.as_matrix() was removed in pandas 1.0; .values is the
        # equivalent. The first column (label ids) is dropped.
        matrix = matrix.values[:, 1:]
        np.save(matrix_filename, matrix)
        np.save(labels_filename, labels)
    else:
        labels = np.load(labels_filename)
        matrix = np.load(matrix_filename)

    # Step 2: a set of mutually distinguishable colors, one per label.
    if args.verbose:
        print('finding colormap...')
    if not os.path.isfile(colormap_filename):
        num_colors = len(labels)
        colormap = distinguishable_colors(ncolors=num_colors,
                                          plot_colormap=False,
                                          save_csv=False,
                                          out_dir=args.output_dirname)
        np.save(colormap_filename, colormap)
    else:
        colormap = np.load(colormap_filename)

    # Step 3: assign colors so that adjacent labels get distinguishable ones.
    if args.verbose:
        print('finding label colors')
    if not os.path.isfile(colors_filename):
        # Fix: the ``colors`` module itself was never imported before, so this
        # call used to raise NameError (see the import block at the top).
        label_colors = colors.group_colors(colormap,
                                            args.label_filename,
                                            IDs=labels,
                                            adjacency_matrix=matrix,
                                            out_dir=args.output_dirname,
                                            plot_colors=False,
                                            plot_graphs=False)
        np.save(colors_filename, label_colors)
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"mindboggle.mio.colors.distinguishable_colors",
"os.path.isfile",
"os.path.isdir",
"mindboggle.mio.colors.label_adjacency_matrix",
"numpy.load",
"numpy.save"
] | [((356, 404), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (379, 404), False, 'import argparse\n'), ((863, 910), 'os.path.join', 'os.path.join', (['args.output_dirname', '"""matrix.npy"""'], {}), "(args.output_dirname, 'matrix.npy')\n", (875, 910), False, 'import os\n'), ((935, 984), 'os.path.join', 'os.path.join', (['args.output_dirname', '"""colormap.npy"""'], {}), "(args.output_dirname, 'colormap.npy')\n", (947, 984), False, 'import os\n'), ((1007, 1054), 'os.path.join', 'os.path.join', (['args.output_dirname', '"""labels.npy"""'], {}), "(args.output_dirname, 'labels.npy')\n", (1019, 1054), False, 'import os\n'), ((1077, 1124), 'os.path.join', 'os.path.join', (['args.output_dirname', '"""colors.npy"""'], {}), "(args.output_dirname, 'colors.npy')\n", (1089, 1124), False, 'import os\n'), ((755, 789), 'os.path.isdir', 'os.path.isdir', (['args.output_dirname'], {}), '(args.output_dirname)\n', (768, 789), False, 'import os\n'), ((799, 831), 'os.makedirs', 'os.makedirs', (['args.output_dirname'], {}), '(args.output_dirname)\n', (810, 831), False, 'import os\n'), ((1489, 1521), 'numpy.save', 'np.save', (['matrix_filename', 'matrix'], {}), '(matrix_filename, matrix)\n', (1496, 1521), True, 'import numpy as np\n'), ((1530, 1562), 'numpy.save', 'np.save', (['labels_filename', 'labels'], {}), '(labels_filename, labels)\n', (1537, 1562), True, 'import numpy as np\n'), ((1590, 1614), 'numpy.load', 'np.load', (['labels_filename'], {}), '(labels_filename)\n', (1597, 1614), True, 'import numpy as np\n'), ((1632, 1656), 'numpy.load', 'np.load', (['matrix_filename'], {}), '(matrix_filename)\n', (1639, 1656), True, 'import numpy as np\n'), ((1728, 1761), 'os.path.isfile', 'os.path.isfile', (['colormap_filename'], {}), '(colormap_filename)\n', (1742, 1761), False, 'import os\n'), ((1815, 1928), 'mindboggle.mio.colors.distinguishable_colors', 'distinguishable_colors', ([], {'ncolors': 'num_colors', 
'plot_colormap': '(False)', 'save_csv': '(False)', 'out_dir': 'args.output_dirname'}), '(ncolors=num_colors, plot_colormap=False, save_csv=\n False, out_dir=args.output_dirname)\n', (1837, 1928), False, 'from mindboggle.mio.colors import distinguishable_colors, label_adjacency_matrix\n'), ((2058, 2094), 'numpy.save', 'np.save', (['colormap_filename', 'colormap'], {}), '(colormap_filename, colormap)\n', (2065, 2094), True, 'import numpy as np\n'), ((2124, 2150), 'numpy.load', 'np.load', (['colormap_filename'], {}), '(colormap_filename)\n', (2131, 2150), True, 'import numpy as np\n'), ((2223, 2254), 'os.path.isfile', 'os.path.isfile', (['colors_filename'], {}), '(colors_filename)\n', (2237, 2254), False, 'import os\n'), ((2703, 2741), 'numpy.save', 'np.save', (['colors_filename', 'label_colors'], {}), '(colors_filename, label_colors)\n', (2710, 2741), True, 'import numpy as np\n'), ((1202, 1233), 'os.path.isfile', 'os.path.isfile', (['matrix_filename'], {}), '(matrix_filename)\n', (1216, 1233), False, 'import os\n'), ((1255, 1286), 'os.path.isfile', 'os.path.isfile', (['labels_filename'], {}), '(labels_filename)\n', (1269, 1286), False, 'import os\n'), ((1313, 1385), 'mindboggle.mio.colors.label_adjacency_matrix', 'label_adjacency_matrix', (['args.label_filename'], {'out_dir': 'args.output_dirname'}), '(args.label_filename, out_dir=args.output_dirname)\n', (1335, 1385), False, 'from mindboggle.mio.colors import distinguishable_colors, label_adjacency_matrix\n')] |
import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from railrl.visualization import visualization_util as vu
from railrl.torch.vae.skew.common import prob_to_weight
def _scatter_subplot(position, samples, title, xlim, ylim):
    """Draw ``samples`` (first two columns) as a scatter into 2x2 subplot ``position``."""
    plt.subplot(2, 2, position)
    plt.plot(samples[:, 0], samples[:, 1], '.')
    if xlim is not None:
        plt.xlim(*xlim)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.title(title)


def visualize_vae_samples(
        epoch, training_data, vae,
        report, dynamics,
        n_vis=1000,
        xlim=(-1.5, 1.5),
        ylim=(-1.5, 1.5)
):
    """Plot a 2x2 grid of VAE diagnostics for ``epoch`` and attach it to ``report``.

    The four panels show: samples generated by ``vae``, those samples after
    applying ``dynamics``, (a subsample of) the training data, and the VAE's
    reconstructions of that subsample.

    :param epoch: epoch number used in the figure / report titles.
    :param training_data: array of 2D points (only the first two columns are plotted);
        subsampled so that at most roughly ``n_vis`` points are shown.
    :param vae: model exposing ``reconstruct(data)`` and ``sample(n)``.
    :param report: report object exposing ``add_image(img, caption)``.
    :param dynamics: callable projecting generated samples (e.g. environment dynamics).
    :param n_vis: number of samples to generate / visualize.
    :param xlim, ylim: axis limits shared by all panels; ``None`` leaves them automatic.
    :return: the saved figure image that was added to the report.
    """
    plt.figure()
    plt.suptitle("Epoch {}".format(epoch))
    n_samples = len(training_data)
    skip_factor = max(n_samples // n_vis, 1)
    training_data = training_data[::skip_factor]
    reconstructed_samples = vae.reconstruct(training_data)
    generated_samples = vae.sample(n_vis)
    projected_generated_samples = dynamics(generated_samples)
    # The four repeated subplot stanzas are factored into one helper.
    _scatter_subplot(1, generated_samples, "Generated Samples", xlim, ylim)
    _scatter_subplot(2, projected_generated_samples, "Projected Generated Samples",
                     xlim, ylim)
    _scatter_subplot(3, training_data, "Training Data", xlim, ylim)
    _scatter_subplot(4, reconstructed_samples, "Reconstruction", xlim, ylim)
    fig = plt.gcf()
    sample_img = vu.save_image(fig)
    report.add_image(sample_img, "Epoch {} Samples".format(epoch))
    return sample_img
def visualize_vae(vae, skew_config, report,
                  resolution=20,
                  title="VAE Heatmap"):
    """Add probability and skew-weight heatmaps of ``vae`` to ``report``.

    Both heatmaps are rendered over the plot ranges reported by the VAE
    itself. Returns the probability-heatmap image.
    """
    xlim, ylim = vae.get_plot_ranges()

    show_prob_heatmap(vae, xlim=xlim, ylim=ylim, resolution=resolution)
    prob_heatmap_img = vu.save_image(plt.gcf())
    report.add_image(prob_heatmap_img, "Prob " + title)

    show_weight_heatmap(
        vae, skew_config, xlim=xlim, ylim=ylim, resolution=resolution,
    )
    weight_heatmap_img = vu.save_image(plt.gcf())
    report.add_image(weight_heatmap_img, "Weight " + title)

    return prob_heatmap_img
def show_weight_heatmap(
        vae, skew_config,
        xlim, ylim,
        resolution=20,
):
    """Plot a heatmap of the skew weights derived from the VAE's density."""
    def batch_weight(batch):
        density = vae.compute_density(batch)
        return prob_to_weight(density, skew_config)

    weight_map = vu.make_heat_map(batch_weight, xlim, ylim,
                                  resolution=resolution, batch=True)
    vu.plot_heatmap(weight_map)
def show_prob_heatmap(
        vae,
        xlim, ylim,
        resolution=20,
):
    """Plot a heatmap of the VAE's density over the given x/y ranges."""
    # vae.compute_density already has the (batch) -> densities signature
    # that make_heat_map expects, so it is passed straight through.
    prob_map = vu.make_heat_map(vae.compute_density, xlim, ylim,
                                resolution=resolution, batch=True)
    vu.plot_heatmap(prob_map)
def visualize_histogram(histogram, skew_config, report, title=""):
    """Render weight and probability heatmaps of a 2D histogram into ``report``.

    :param histogram: object exposing ``pvals`` (2D probability array),
        ``xy_range`` ((xrange, yrange) plot extents) and ``num_bins``.
    :param skew_config: configuration passed to ``prob_to_weight``.
    :param report: report object exposing ``add_image`` / ``add_text``.
    :param title: prefix for the image captions.
    :return: the last saved heatmap image (the probability heatmap).
    """
    prob = histogram.pvals
    weights = prob_to_weight(prob, skew_config)
    xrange, yrange = histogram.xy_range
    extent = [xrange[0], xrange[1], yrange[0], yrange[1]]
    for name, values in [
        ('Weight Heatmap', weights),
        ('Prob Heatmap', prob),
    ]:
        plt.figure()
        fig = plt.gcf()
        ax = plt.gca()
        values = values.copy()
        # Empty bins are hidden (NaN) rather than drawn as zeros.
        values[values == 0] = np.nan
        heatmap_img = ax.imshow(
            np.swapaxes(values, 0, 1),  # imshow uses first axis as y-axis
            extent=extent,
            cmap=plt.get_cmap('plasma'),
            interpolation='nearest',
            aspect='auto',
            # Fix: matplotlib only accepts 'upper'/'lower' here; the previous
            # 'bottom' value raises ValueError on matplotlib >= 3 (older
            # versions silently treated it as 'lower').
            origin='lower',  # <-- Important! By default top left is (0, 0)
            # norm=LogNorm(),
        )
        divider = make_axes_locatable(ax)
        legend_axis = divider.append_axes('right', size='5%', pad=0.05)
        fig.colorbar(heatmap_img, cax=legend_axis, orientation='vertical')
        heatmap_img = vu.save_image(fig)
        if histogram.num_bins < 5:
            # For tiny histograms also dump the raw probabilities as text.
            pvals_str = np.array2string(histogram.pvals, precision=3)
            report.add_text(pvals_str)
        report.add_image(heatmap_img, "{} {}".format(title, name))
    return heatmap_img
def progressbar(it, prefix="", size=60):
    """Yield the items of ``it`` while drawing a text progress bar on stdout.

    :param it: a sized iterable (``len(it)`` must be defined).
    :param prefix: text printed before the bar.
    :param size: width of the bar in characters.
    """
    count = len(it)

    def _show(_i):
        # Guard against count == 0 (empty iterable), which previously raised
        # ZeroDivisionError; an empty run is drawn as a full bar.
        x = size * _i // count if count else size
        sys.stdout.write(
            "%s[%s%s] %i/%i\r" % (prefix, "#" * x, "." * (size - x), _i, count))
        sys.stdout.flush()

    _show(0)
    for i, item in enumerate(it):
        yield item
        _show(i + 1)
    sys.stdout.write("\n")
    sys.stdout.flush()
"railrl.visualization.visualization_util.plot_heatmap",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"railrl.torch.vae.skew.common.prob_to_weight",
"numpy.array2string",
"numpy.swapaxes",
"matplotlib.pyplot.figu... | [((407, 419), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (417, 419), True, 'from matplotlib import pyplot as plt\n'), ((759, 779), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (770, 779), True, 'from matplotlib import pyplot as plt\n'), ((784, 847), 'matplotlib.pyplot.plot', 'plt.plot', (['generated_samples[:, 0]', 'generated_samples[:, 1]', '"""."""'], {}), "(generated_samples[:, 0], generated_samples[:, 1], '.')\n", (792, 847), True, 'from matplotlib import pyplot as plt\n'), ((950, 980), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Samples"""'], {}), "('Generated Samples')\n", (959, 980), True, 'from matplotlib import pyplot as plt\n'), ((985, 1005), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (996, 1005), True, 'from matplotlib import pyplot as plt\n'), ((1010, 1098), 'matplotlib.pyplot.plot', 'plt.plot', (['projected_generated_samples[:, 0]', 'projected_generated_samples[:, 1]', '"""."""'], {}), "(projected_generated_samples[:, 0], projected_generated_samples[:, \n 1], '.')\n", (1018, 1098), True, 'from matplotlib import pyplot as plt\n'), ((1209, 1249), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected Generated Samples"""'], {}), "('Projected Generated Samples')\n", (1218, 1249), True, 'from matplotlib import pyplot as plt\n'), ((1254, 1274), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (1265, 1274), True, 'from matplotlib import pyplot as plt\n'), ((1279, 1334), 'matplotlib.pyplot.plot', 'plt.plot', (['training_data[:, 0]', 'training_data[:, 1]', '"""."""'], {}), "(training_data[:, 0], training_data[:, 1], '.')\n", (1287, 1334), True, 'from matplotlib import pyplot as plt\n'), ((1437, 1463), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Data"""'], {}), "('Training Data')\n", (1446, 1463), True, 'from matplotlib import pyplot as plt\n'), ((1468, 1488), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (1479, 1488), True, 'from matplotlib import pyplot as plt\n'), ((1493, 1564), 'matplotlib.pyplot.plot', 'plt.plot', (['reconstructed_samples[:, 0]', 'reconstructed_samples[:, 1]', '"""."""'], {}), "(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')\n", (1501, 1564), True, 'from matplotlib import pyplot as plt\n'), ((1667, 1694), 'matplotlib.pyplot.title', 'plt.title', (['"""Reconstruction"""'], {}), "('Reconstruction')\n", (1676, 1694), True, 'from matplotlib import pyplot as plt\n'), ((1706, 1715), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1713, 1715), True, 'from matplotlib import pyplot as plt\n'), ((1733, 1751), 'railrl.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (1746, 1751), True, 'from railrl.visualization import visualization_util as vu\n'), ((2082, 2091), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2089, 2091), True, 'from matplotlib import pyplot as plt\n'), ((2115, 2133), 'railrl.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (2128, 2133), True, 'from railrl.visualization import visualization_util as vu\n'), ((2303, 2312), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2310, 2312), True, 'from matplotlib import pyplot as plt\n'), ((2331, 2349), 'railrl.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (2344, 2349), True, 'from railrl.visualization import visualization_util as vu\n'), ((2669, 2748), 'railrl.visualization.visualization_util.make_heat_map', 'vu.make_heat_map', (['get_prob_batch', 'xlim', 'ylim'], {'resolution': 'resolution', 'batch': '(True)'}), '(get_prob_batch, xlim, ylim, resolution=resolution, batch=True)\n', (2685, 2748), True, 'from railrl.visualization import visualization_util as vu\n'), ((2785, 2810), 'railrl.visualization.visualization_util.plot_heatmap', 'vu.plot_heatmap', 
(['heat_map'], {}), '(heat_map)\n', (2800, 2810), True, 'from railrl.visualization import visualization_util as vu\n'), ((2985, 3064), 'railrl.visualization.visualization_util.make_heat_map', 'vu.make_heat_map', (['get_prob_batch', 'xlim', 'ylim'], {'resolution': 'resolution', 'batch': '(True)'}), '(get_prob_batch, xlim, ylim, resolution=resolution, batch=True)\n', (3001, 3064), True, 'from railrl.visualization import visualization_util as vu\n'), ((3101, 3126), 'railrl.visualization.visualization_util.plot_heatmap', 'vu.plot_heatmap', (['heat_map'], {}), '(heat_map)\n', (3116, 3126), True, 'from railrl.visualization import visualization_util as vu\n'), ((3237, 3270), 'railrl.torch.vae.skew.common.prob_to_weight', 'prob_to_weight', (['prob', 'skew_config'], {}), '(prob, skew_config)\n', (3251, 3270), False, 'from railrl.torch.vae.skew.common import prob_to_weight\n'), ((4772, 4794), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (4788, 4794), False, 'import sys\n'), ((4799, 4817), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4815, 4817), False, 'import sys\n'), ((881, 896), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (889, 896), True, 'from matplotlib import pyplot as plt\n'), ((930, 945), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (938, 945), True, 'from matplotlib import pyplot as plt\n'), ((1140, 1155), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (1148, 1155), True, 'from matplotlib import pyplot as plt\n'), ((1189, 1204), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (1197, 1204), True, 'from matplotlib import pyplot as plt\n'), ((1368, 1383), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (1376, 1383), True, 'from matplotlib import pyplot as plt\n'), ((1417, 1432), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (1425, 1432), True, 'from matplotlib import pyplot as plt\n'), ((1598, 
1613), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (1606, 1613), True, 'from matplotlib import pyplot as plt\n'), ((1647, 1662), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (1655, 1662), True, 'from matplotlib import pyplot as plt\n'), ((2619, 2652), 'railrl.torch.vae.skew.common.prob_to_weight', 'prob_to_weight', (['prob', 'skew_config'], {}), '(prob, skew_config)\n', (2633, 2652), False, 'from railrl.torch.vae.skew.common import prob_to_weight\n'), ((3479, 3491), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3489, 3491), True, 'from matplotlib import pyplot as plt\n'), ((3506, 3515), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3513, 3515), True, 'from matplotlib import pyplot as plt\n'), ((3529, 3538), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3536, 3538), True, 'from matplotlib import pyplot as plt\n'), ((3982, 4005), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (4001, 4005), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((4175, 4193), 'railrl.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (4188, 4193), True, 'from railrl.visualization import visualization_util as vu\n'), ((4554, 4643), 'sys.stdout.write', 'sys.stdout.write', (["('%s[%s%s] %i/%i\\r' % (prefix, '#' * x, '.' * (size - x), _i, count))"], {}), "('%s[%s%s] %i/%i\\r' % (prefix, '#' * x, '.' 
* (size - x),\n _i, count))\n", (4570, 4643), False, 'import sys\n'), ((4661, 4679), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4677, 4679), False, 'import sys\n'), ((3652, 3677), 'numpy.swapaxes', 'np.swapaxes', (['values', '(0)', '(1)'], {}), '(values, 0, 1)\n', (3663, 3677), True, 'import numpy as np\n'), ((4253, 4298), 'numpy.array2string', 'np.array2string', (['histogram.pvals'], {'precision': '(3)'}), '(histogram.pvals, precision=3)\n', (4268, 4298), True, 'import numpy as np\n'), ((3759, 3781), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (3771, 3781), True, 'from matplotlib import pyplot as plt\n')] |
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.optim import lr_scheduler
from torchvision import datasets, transforms, models
import copy
import time
import argparse
from sys import argv
import os
import json
import numpy as np
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
from PIL import Image
img = Image.open(image)
if img.size[0] > img.size[1]:
img.thumbnail((99999, 255))
else:
img.thumbnail((255, 99999))
left_margin = (img.width-224)/2
bottom_margin = (img.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
img = img.crop((left_margin, bottom_margin, right_margin,
top_margin))
img = np.array(img)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = (img - mean)/std
img = img.transpose((2, 0, 1))
return img
def get_cat_to_json(file_path):
with open(file_path, 'r') as f:
cat_to_name = json.load(f)
return cat_to_name
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image = process_image(image_path)
image_tensor = torch.from_numpy(image).float().to(device)
model_input = image_tensor.unsqueeze(0)
probs = torch.exp(model.forward(model_input))
top_probs, top_labs = probs.topk(topk)
top_probs = top_probs.cpu().detach().numpy().tolist()[0]
top_labs = top_labs.cpu().detach().numpy().tolist()[0]
idx_to_class = {val: key for key, val in model.class_to_idx.items() }
top_labels = [idx_to_class[lab] for lab in top_labs]
return top_probs, top_labels
def getClassifier(input_units = 25088, hidden_units = 4096):
classifier = nn.Sequential( nn.Linear(input_units, hidden_units, bias=True),
nn.ReLU(True),
nn.Dropout(0.5),
nn.Linear(hidden_units, hidden_units, bias=True),
nn.ReLU(True),
nn.Dropout(0.5),
nn.Linear(hidden_units, 102, bias=True),
nn.LogSoftmax(dim=1))
return classifier
def load_model_checkpoint(checkpoint_file_path):
checkpoint = torch.load(checkpoint_file_path)
model = None
architecture = checkpoint['arch']
if (architecture == "vgg19"):
model = models.vgg19(pretrained=True)
elif (architecture == "densenet121"):
model = models.densenet121(pretrained=True)
elif (architecture == "alexnet"):
model = models.alexnet(pretrained=True)
elif (architecture == "googlenet"):
model = models.alexnet(pretrained=True)
else:
print("Only vgg19, densenet121, alexnet or googlenet are supported!")
return
for param in model.parameters():
param.requires_grad = False
hidden_units = checkpoint["hidden_units"]
input_units = checkpoint["input_units"]
model.classifier = getClassifier(input_units, hidden_units)
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == '__main__':
useGPU = True
top_k = 5
hidden_units = 25088
image_path = "flowers/test/2/image_05100.jpg"
checkpoint_file_path = 'classifier.pth'
category_names_file_path = None
parser = argparse.ArgumentParser(description="Flowers classifier predict module")
parser.add_argument('--gpu', action="store_true", default=False)
parser.add_argument('--top_k', action="store", dest="top_k", type=int)
parser.add_argument('--category_names', action="store", dest="category_names")
if (len(argv) <= 1):
print("image_path & checkpoint_file_path are not provided! Exit")
exit(-1)
if (argv[1] != None):
image_path = argv[1]
if (argv[2] != None):
checkpoint_file_path = argv[2]
args = parser.parse_args(argv[3:])
if (args.top_k != None):
top_k = args.top_k
if (args.gpu != None):
useGPU = args.gpu
if (args.category_names != None):
category_names_file_path = args.category_names
device = torch.device("cuda" if torch.cuda.is_available() and useGPU else "cpu")
model = load_model_checkpoint(checkpoint_file_path)
model.to(device)
flowers_by_name = None
probs, classes = predict(image_path, model, top_k)
if (category_names_file_path != None):
cat_to_name = get_cat_to_json(category_names_file_path)
flowers_by_name = [cat_to_name[x] for x in classes]
print(probs)
print(classes)
if (flowers_by_name != None):
print(flowers_by_name)
| [
"torch.nn.ReLU",
"PIL.Image.open",
"torch.nn.Dropout",
"argparse.ArgumentParser",
"torchvision.models.vgg19",
"torch.load",
"torchvision.models.alexnet",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"json.load",
"torchvision.models... | [((455, 472), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (465, 472), False, 'from PIL import Image\n'), ((877, 908), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (885, 908), True, 'import numpy as np\n'), ((919, 950), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (927, 950), True, 'import numpy as np\n'), ((2450, 2482), 'torch.load', 'torch.load', (['checkpoint_file_path'], {}), '(checkpoint_file_path)\n', (2460, 2482), False, 'import torch\n'), ((3568, 3640), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Flowers classifier predict module"""'}), "(description='Flowers classifier predict module')\n", (3591, 3640), False, 'import argparse\n'), ((848, 861), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (856, 861), True, 'import numpy as np\n'), ((1120, 1132), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1129, 1132), False, 'import json\n'), ((1910, 1957), 'torch.nn.Linear', 'nn.Linear', (['input_units', 'hidden_units'], {'bias': '(True)'}), '(input_units, hidden_units, bias=True)\n', (1919, 1957), False, 'from torch import nn\n'), ((1991, 2004), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1998, 2004), False, 'from torch import nn\n'), ((2038, 2053), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2048, 2053), False, 'from torch import nn\n'), ((2087, 2135), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', 'hidden_units'], {'bias': '(True)'}), '(hidden_units, hidden_units, bias=True)\n', (2096, 2135), False, 'from torch import nn\n'), ((2169, 2182), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2176, 2182), False, 'from torch import nn\n'), ((2216, 2231), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2226, 2231), False, 'from torch import nn\n'), ((2265, 2304), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(102)'], {'bias': 
'(True)'}), '(hidden_units, 102, bias=True)\n', (2274, 2304), False, 'from torch import nn\n'), ((2338, 2358), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2351, 2358), False, 'from torch import nn\n'), ((2589, 2618), 'torchvision.models.vgg19', 'models.vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2601, 2618), False, 'from torchvision import datasets, transforms, models\n'), ((2677, 2712), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2695, 2712), False, 'from torchvision import datasets, transforms, models\n'), ((2767, 2798), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2781, 2798), False, 'from torchvision import datasets, transforms, models\n'), ((4386, 4411), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4409, 4411), False, 'import torch\n'), ((1350, 1373), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1366, 1373), False, 'import torch\n'), ((2855, 2886), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2869, 2886), False, 'from torchvision import datasets, transforms, models\n')] |
import numpy as np
import paddle
from math import sqrt
from sklearn.linear_model import LinearRegression
def cos_formula(a, b, c):
''' formula to calculate the angle between two edges
a and b are the edge lengths, c is the angle length.
'''
res = (a**2 + b**2 - c**2) / (2 * a * b)
# sanity check
res = -1. if res < -1. else res
res = 1. if res > 1. else res
return np.arccos(res)
def setxor(a, b):
n = len(a)
res = []
link = []
i, j = 0, 0
while i < n and j < n:
if a[i] == b[j]:
link.append(a[i])
i += 1
j += 1
elif a[i] < b[j]:
res.append(a[i])
i += 1
else:
res.append(b[j])
j += 1
if i < j:
res.append(a[-1])
elif i > j:
res.append(b[-1])
else:
link.append(a[-1])
return res, link
def rmse(y,f):
rmse = sqrt(((y - f)**2).mean(axis=0))
return rmse
def mae(y,f):
mae = (np.abs(y-f)).mean()
return mae
def sd(y,f):
f,y = f.reshape(-1,1),y.reshape(-1,1)
lr = LinearRegression()
lr.fit(f,y)
y_ = lr.predict(f)
sd = (((y - y_) ** 2).sum() / (len(y) - 1)) ** 0.5
return sd
def pearson(y,f):
rp = np.corrcoef(y, f)[0,1]
return rp
def generate_segment_id(index):
zeros = paddle.zeros(index[-1] + 1, dtype="int32")
index = index[:-1]
segments = paddle.scatter(
zeros, index, paddle.ones_like(
index, dtype="int32"), overwrite=False)
segments = paddle.cumsum(segments)[:-1] - 1
return segments | [
"numpy.abs",
"paddle.ones_like",
"numpy.arccos",
"numpy.corrcoef",
"paddle.cumsum",
"paddle.zeros",
"sklearn.linear_model.LinearRegression"
] | [((403, 417), 'numpy.arccos', 'np.arccos', (['res'], {}), '(res)\n', (412, 417), True, 'import numpy as np\n'), ((1107, 1125), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1123, 1125), False, 'from sklearn.linear_model import LinearRegression\n'), ((1344, 1386), 'paddle.zeros', 'paddle.zeros', (['(index[-1] + 1)'], {'dtype': '"""int32"""'}), "(index[-1] + 1, dtype='int32')\n", (1356, 1386), False, 'import paddle\n'), ((1262, 1279), 'numpy.corrcoef', 'np.corrcoef', (['y', 'f'], {}), '(y, f)\n', (1273, 1279), True, 'import numpy as np\n'), ((1463, 1501), 'paddle.ones_like', 'paddle.ones_like', (['index'], {'dtype': '"""int32"""'}), "(index, dtype='int32')\n", (1479, 1501), False, 'import paddle\n'), ((1007, 1020), 'numpy.abs', 'np.abs', (['(y - f)'], {}), '(y - f)\n', (1013, 1020), True, 'import numpy as np\n'), ((1552, 1575), 'paddle.cumsum', 'paddle.cumsum', (['segments'], {}), '(segments)\n', (1565, 1575), False, 'import paddle\n')] |
#!/usr/bin/env python
# coding: utf-8
import rospy
import geometry_msgs.msg
from sensor_msgs.msg import JointState
import numpy as np
class Arm_ik:
def __init__(self):
self._sub_pos = rospy.Subscriber("/arm_pos", geometry_msgs.msg.Point, self.pos_callback)
self.pub = rospy.Publisher("vv_kuwamai/master_joint_state", JointState, queue_size=10)
#初期位置
self.pos = geometry_msgs.msg.Point()
self.pos.x = 0.1
self.pos.z = 0.1
self.r = rospy.Rate(20)
#最大関節角速度
self.max_vel = 0.3
#初期角度
self.q = self.q_old = np.array([[0],
[0],
[np.pi/2]])
def pos_callback(self, message):
self.pos = message
#逆運動学計算
def ik(self):
while not rospy.is_shutdown():
#目標手先位置
r_ref = np.array([[self.pos.x],
[self.pos.y],
[self.pos.z]])
#特異姿勢回避
r_ref = self.singularity_avoidance(r_ref)
r = self.fk(self.q)
if np.linalg.norm(r - r_ref, ord=2) > 0.0001:
#数値計算
for i in range(10):
r = self.fk(self.q)
if np.linalg.norm(r - r_ref, ord=2) < 0.0001:
break
self.q = self.q - np.linalg.inv(self.J(self.q)).dot((r - r_ref))
self.angular_vel_limit()
js = JointState()
js.name=["joint_{}".format(i) for i in range(3)]
js.position = [self.q[0,0], self.q[1,0], self.q[2,0]]
self.pub.publish(js)
self.r.sleep()
#同次変換行列
def trans_m(self, a, alpha, d, theta):
m = np.array([[np.cos(theta), -np.sin(theta), 0., a],
[np.cos(alpha)*np.sin(theta), np.cos(alpha)*np.cos(theta), -np.sin(alpha), -np.sin(alpha)*d],
[np.sin(alpha)*np.sin(theta), np.sin(alpha)*np.cos(theta), np.cos(alpha), np.cos(alpha)*d],
[0., 0., 0., 1.]])
return m
#順運動学
def fk(self, theta):
tm0_1 = self.trans_m(0, 0, 0, theta[0,0]+np.pi)
tm1_2 = self.trans_m(0, np.pi/2, 0, theta[1,0]+np.pi/2)
tm2_3 = self.trans_m(0.1, 0, 0, theta[2,0])
tm3_4 = self.trans_m(0.1, 0, 0, 0)
pos = tm0_1.dot(tm1_2).dot(tm2_3).dot(tm3_4)[0:3,3:4]
return pos
#ヤコビ行列
def J(self, theta):
e = 1.0e-10
diff_q1 = (self.fk(theta+np.array([[e],[0.],[0.]]))-self.fk(theta))/e
diff_q2 = (self.fk(theta+np.array([[0.],[e],[0.]]))-self.fk(theta))/e
diff_q3 = (self.fk(theta+np.array([[0.],[0.],[e]]))-self.fk(theta))/e
return np.hstack((diff_q1, diff_q2, diff_q3))
#角速度制限
def angular_vel_limit(self):
q_diff = self.q - self.q_old
q_diff_max = np.abs(q_diff).max()
if(q_diff_max > self.max_vel):
rospy.loginfo("Too fast")
q_diff /= q_diff_max
q_diff *= self.max_vel
self.q = self.q_old + q_diff
self.q_old = self.q
#特異姿勢回避
def singularity_avoidance(self, r_ref):
#コントローラ位置がアームの可動範囲を超えた際はスケール
r_ref_norm = np.linalg.norm(r_ref, ord=2)
if r_ref_norm > 0.19:
rospy.loginfo("Out of movable range")
r_ref /= r_ref_norm
r_ref *= 0.19
#目標位置がz軸上付近にある際はどける
r_ref_xy_norm = np.linalg.norm(r_ref[0:2], ord=2)
if r_ref_xy_norm < 0.01:
rospy.loginfo("Avoid singular configuration")
r_ref[0] = 0.01
return r_ref
if __name__ == '__main__':
try:
rospy.init_node('arm_ik')
arm_ik = Arm_ik()
arm_ik.ik()
rospy.spin()
except rospy.ROSInterruptException:
pass
| [
"numpy.abs",
"rospy.Subscriber",
"rospy.is_shutdown",
"numpy.hstack",
"rospy.init_node",
"sensor_msgs.msg.JointState",
"numpy.array",
"rospy.Rate",
"rospy.spin",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"rospy.Publisher",
"rospy.loginfo"
] | [((198, 270), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/arm_pos"""', 'geometry_msgs.msg.Point', 'self.pos_callback'], {}), "('/arm_pos', geometry_msgs.msg.Point, self.pos_callback)\n", (214, 270), False, 'import rospy\n'), ((290, 365), 'rospy.Publisher', 'rospy.Publisher', (['"""vv_kuwamai/master_joint_state"""', 'JointState'], {'queue_size': '(10)'}), "('vv_kuwamai/master_joint_state', JointState, queue_size=10)\n", (305, 365), False, 'import rospy\n'), ((502, 516), 'rospy.Rate', 'rospy.Rate', (['(20)'], {}), '(20)\n', (512, 516), False, 'import rospy\n'), ((607, 640), 'numpy.array', 'np.array', (['[[0], [0], [np.pi / 2]]'], {}), '([[0], [0], [np.pi / 2]])\n', (615, 640), True, 'import numpy as np\n'), ((2799, 2837), 'numpy.hstack', 'np.hstack', (['(diff_q1, diff_q2, diff_q3)'], {}), '((diff_q1, diff_q2, diff_q3))\n', (2808, 2837), True, 'import numpy as np\n'), ((3293, 3321), 'numpy.linalg.norm', 'np.linalg.norm', (['r_ref'], {'ord': '(2)'}), '(r_ref, ord=2)\n', (3307, 3321), True, 'import numpy as np\n'), ((3514, 3547), 'numpy.linalg.norm', 'np.linalg.norm', (['r_ref[0:2]'], {'ord': '(2)'}), '(r_ref[0:2], ord=2)\n', (3528, 3547), True, 'import numpy as np\n'), ((3735, 3760), 'rospy.init_node', 'rospy.init_node', (['"""arm_ik"""'], {}), "('arm_ik')\n", (3750, 3760), False, 'import rospy\n'), ((3815, 3827), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3825, 3827), False, 'import rospy\n'), ((833, 852), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (850, 852), False, 'import rospy\n'), ((894, 946), 'numpy.array', 'np.array', (['[[self.pos.x], [self.pos.y], [self.pos.z]]'], {}), '([[self.pos.x], [self.pos.y], [self.pos.z]])\n', (902, 946), True, 'import numpy as np\n'), ((3014, 3039), 'rospy.loginfo', 'rospy.loginfo', (['"""Too fast"""'], {}), "('Too fast')\n", (3027, 3039), False, 'import rospy\n'), ((3365, 3402), 'rospy.loginfo', 'rospy.loginfo', (['"""Out of movable range"""'], {}), "('Out of movable range')\n", (3378, 3402), False, 
'import rospy\n'), ((3594, 3639), 'rospy.loginfo', 'rospy.loginfo', (['"""Avoid singular configuration"""'], {}), "('Avoid singular configuration')\n", (3607, 3639), False, 'import rospy\n'), ((1130, 1162), 'numpy.linalg.norm', 'np.linalg.norm', (['(r - r_ref)'], {'ord': '(2)'}), '(r - r_ref, ord=2)\n', (1144, 1162), True, 'import numpy as np\n'), ((1516, 1528), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (1526, 1528), False, 'from sensor_msgs.msg import JointState\n'), ((2941, 2955), 'numpy.abs', 'np.abs', (['q_diff'], {}), '(q_diff)\n', (2947, 2955), True, 'import numpy as np\n'), ((1811, 1824), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1817, 1824), True, 'import numpy as np\n'), ((2048, 2061), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (2054, 2061), True, 'import numpy as np\n'), ((1294, 1326), 'numpy.linalg.norm', 'np.linalg.norm', (['(r - r_ref)'], {'ord': '(2)'}), '(r - r_ref, ord=2)\n', (1308, 1326), True, 'import numpy as np\n'), ((1827, 1840), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1833, 1840), True, 'import numpy as np\n'), ((1873, 1886), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1879, 1886), True, 'import numpy as np\n'), ((1887, 1900), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1893, 1900), True, 'import numpy as np\n'), ((1902, 1915), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1908, 1915), True, 'import numpy as np\n'), ((1916, 1929), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1922, 1929), True, 'import numpy as np\n'), ((1932, 1945), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1938, 1945), True, 'import numpy as np\n'), ((1989, 2002), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1995, 2002), True, 'import numpy as np\n'), ((2003, 2016), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2009, 2016), True, 'import numpy as np\n'), ((2018, 2031), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (2024, 2031), True, 'import numpy 
as np\n'), ((2032, 2045), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2038, 2045), True, 'import numpy as np\n'), ((2064, 2077), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (2070, 2077), True, 'import numpy as np\n'), ((2583, 2612), 'numpy.array', 'np.array', (['[[e], [0.0], [0.0]]'], {}), '([[e], [0.0], [0.0]])\n', (2591, 2612), True, 'import numpy as np\n'), ((2661, 2690), 'numpy.array', 'np.array', (['[[0.0], [e], [0.0]]'], {}), '([[0.0], [e], [0.0]])\n', (2669, 2690), True, 'import numpy as np\n'), ((2739, 2768), 'numpy.array', 'np.array', (['[[0.0], [0.0], [e]]'], {}), '([[0.0], [0.0], [e]])\n', (2747, 2768), True, 'import numpy as np\n'), ((1948, 1961), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1954, 1961), True, 'import numpy as np\n')] |
import numpy as np
import torch as th
class EpidemicModel(th.nn.Module):
"""Score driven epidemic model."""
def __init__(self):
super(EpidemicModel, self).__init__()
self.alpha = th.nn.Parameter(th.tensor(0.0, requires_grad=True))
self.beta = th.nn.Parameter(th.tensor(0.0, requires_grad=True))
self.gamma = th.nn.Parameter(th.tensor(0.0, requires_grad=True))
def forward(self, x, y):
"""Run forward step.
Parameters
----------
x : torch.float
Cumulative number of cases.
y : torch.float
New cases.
Returns
-------
log_lams, omegas
Time varying parameters.
"""
self.omega = self.alpha / (1 - self.beta)
self.score = 0
log_lams = []
omegas = []
for t in range(x.shape[0]):
self.omega = self.alpha + self.beta * self.omega + self.gamma * self.score
log_lam = th.log(x[t]) + self.omega
self.score = (y[t] - th.exp(log_lam)) / th.sqrt(th.exp(log_lam))
log_lams.append(log_lam)
omegas.append(self.omega.detach().numpy())
return th.stack(log_lams), omegas
def predict(self, x, y, horizon):
"""Predict from model.
Predictions will be made for `horizon` steps after the input data.
Parameters
----------
x : torch.float
Cumulative number of cases.
y : torch.float
New cases.
horizon : int
Number of periods to predict.
Returns
-------
pred : numpy.array
Predictions.
"""
log_lam, omegas = self.forward(x, y)
omega = omegas[-1]
x_last = x[-1]
y_pred = []
for h in th.arange(0, horizon):
y_pred_t = x_last * np.exp(omega)
omega = self.alpha.detach().numpy() + self.beta.detach().numpy() * omega
x_last = x_last + y_pred_t
y_pred.append(y_pred_t)
return np.append(np.exp(log_lam.detach().numpy()), np.array(y_pred))
class EpidemicModelUnitRoot(th.nn.Module):
"""Score driven epidemic model with unit root dynamics."""
def __init__(self):
super(EpidemicModelUnitRoot, self).__init__()
self.omega0 = th.nn.Parameter(th.tensor(0.0, requires_grad=True))
self.gamma = th.nn.Parameter(th.tensor(0.0, requires_grad=True))
def forward(self, x, y):
"""Run forward step.
Parameters
----------
x : torch.float
Cumulative number of cases.
y : torch.float
New cases.
Returns
-------
log_lams, omegas
Time varying parameters.
"""
omega = self.omega0
self.score = 0
log_lams = []
omegas = []
for t in range(x.shape[0]):
omega = omega + self.gamma * self.score
log_lam = th.log(x[t]) + omega
self.score = (y[t] - th.exp(log_lam)) / th.sqrt(th.exp(log_lam))
log_lams.append(log_lam)
omegas.append(omega.detach().numpy())
return th.stack(log_lams), omegas
def predict(self, x, y, horizon):
"""Predict from model.
Predictions will be made for `horizon` steps after the input data.
Parameters
----------
x : torch.float
Cumulative number of cases.
y : torch.float
New cases.
horizon : int
Number of periods to predict.
Returns
-------
pred : numpy.array
Predictions.
"""
log_lam, omegas = self.forward(x, y)
omega = omegas[-1]
x_last = x[-1]
y_pred = []
for h in th.arange(0, horizon):
y_pred_t = x_last * np.exp(omega)
x_last = x_last + y_pred_t
y_pred.append(y_pred_t)
return np.append(np.exp(log_lam.detach().numpy()), np.array(y_pred))
class PoissonLogLikelihood(th.nn.Module):
"""Compute the average Poisson log likelihood."""
def __init__(self):
super(PoissonLogLikelihood, self).__init__()
def forward(self, log_lam, target, max_val=1e6):
"""Run forward step.
Parameters
----------
log_lam : torch.float
Log of predicted new cases.
target : torch.float
Actual new cases.
max_val : int
Number to replace missing values in objective by.
Returns
-------
objective
Average objective.
"""
objective = th.exp(log_lam) - target * log_lam
objective = th.where(
th.isnan(objective), th.full_like(objective, max_val), objective
)
return th.mean(objective)
| [
"torch.log",
"torch.mean",
"torch.stack",
"torch.exp",
"torch.full_like",
"numpy.exp",
"torch.tensor",
"numpy.array",
"torch.isnan",
"torch.arange"
] | [((1808, 1829), 'torch.arange', 'th.arange', (['(0)', 'horizon'], {}), '(0, horizon)\n', (1817, 1829), True, 'import torch as th\n'), ((3787, 3808), 'torch.arange', 'th.arange', (['(0)', 'horizon'], {}), '(0, horizon)\n', (3796, 3808), True, 'import torch as th\n'), ((4800, 4818), 'torch.mean', 'th.mean', (['objective'], {}), '(objective)\n', (4807, 4818), True, 'import torch as th\n'), ((222, 256), 'torch.tensor', 'th.tensor', (['(0.0)'], {'requires_grad': '(True)'}), '(0.0, requires_grad=True)\n', (231, 256), True, 'import torch as th\n'), ((294, 328), 'torch.tensor', 'th.tensor', (['(0.0)'], {'requires_grad': '(True)'}), '(0.0, requires_grad=True)\n', (303, 328), True, 'import torch as th\n'), ((367, 401), 'torch.tensor', 'th.tensor', (['(0.0)'], {'requires_grad': '(True)'}), '(0.0, requires_grad=True)\n', (376, 401), True, 'import torch as th\n'), ((1191, 1209), 'torch.stack', 'th.stack', (['log_lams'], {}), '(log_lams)\n', (1199, 1209), True, 'import torch as th\n'), ((2097, 2113), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (2105, 2113), True, 'import numpy as np\n'), ((2340, 2374), 'torch.tensor', 'th.tensor', (['(0.0)'], {'requires_grad': '(True)'}), '(0.0, requires_grad=True)\n', (2349, 2374), True, 'import torch as th\n'), ((2413, 2447), 'torch.tensor', 'th.tensor', (['(0.0)'], {'requires_grad': '(True)'}), '(0.0, requires_grad=True)\n', (2422, 2447), True, 'import torch as th\n'), ((3170, 3188), 'torch.stack', 'th.stack', (['log_lams'], {}), '(log_lams)\n', (3178, 3188), True, 'import torch as th\n'), ((3991, 4007), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (3999, 4007), True, 'import numpy as np\n'), ((4633, 4648), 'torch.exp', 'th.exp', (['log_lam'], {}), '(log_lam)\n', (4639, 4648), True, 'import torch as th\n'), ((4710, 4729), 'torch.isnan', 'th.isnan', (['objective'], {}), '(objective)\n', (4718, 4729), True, 'import torch as th\n'), ((4731, 4763), 'torch.full_like', 'th.full_like', (['objective', 'max_val'], {}), 
'(objective, max_val)\n', (4743, 4763), True, 'import torch as th\n'), ((980, 992), 'torch.log', 'th.log', (['x[t]'], {}), '(x[t])\n', (986, 992), True, 'import torch as th\n'), ((1863, 1876), 'numpy.exp', 'np.exp', (['omega'], {}), '(omega)\n', (1869, 1876), True, 'import numpy as np\n'), ((2969, 2981), 'torch.log', 'th.log', (['x[t]'], {}), '(x[t])\n', (2975, 2981), True, 'import torch as th\n'), ((3842, 3855), 'numpy.exp', 'np.exp', (['omega'], {}), '(omega)\n', (3848, 3855), True, 'import numpy as np\n'), ((1039, 1054), 'torch.exp', 'th.exp', (['log_lam'], {}), '(log_lam)\n', (1045, 1054), True, 'import torch as th\n'), ((1066, 1081), 'torch.exp', 'th.exp', (['log_lam'], {}), '(log_lam)\n', (1072, 1081), True, 'import torch as th\n'), ((3023, 3038), 'torch.exp', 'th.exp', (['log_lam'], {}), '(log_lam)\n', (3029, 3038), True, 'import torch as th\n'), ((3050, 3065), 'torch.exp', 'th.exp', (['log_lam'], {}), '(log_lam)\n', (3056, 3065), True, 'import torch as th\n')] |
from data import *
from utilities import *
from networks import *
import matplotlib.pyplot as plt
import numpy as np
num_known_classes = 65 #25
num_all_classes = 65
def skip(data, label, is_train):
return False
batch_size = 32
def transform(data, label, is_train):
label = one_hot(num_all_classes,label)
data = tl.prepro.crop(data, 224, 224, is_random=is_train)
data = np.transpose(data, [2, 0, 1])
data = np.asarray(data, np.float32) / 255.0
return data, label
ds = FileListDataset('/mnt/datasets/office-home/product_0-64_val.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
product = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
ds = FileListDataset('/mnt/datasets/office-home/real_world_0-64_test.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
real_world = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
ds = FileListDataset('/mnt/datasets/office-home/art_0-64_test.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
art = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
ds = FileListDataset('/mnt/datasets/office-home/clipart_0-64_test.txt', '/mnt/datasets/office-home/', transform=transform, skip_pred=skip, is_train=True, imsize=256)
clipart = CustomDataLoader(ds, batch_size=batch_size, num_threads=2)
setGPU('0')
discriminator_p = Discriminator(n = 25).cuda() # multi-binary classifier
discriminator_p.load_state_dict(torch.load('discriminator_p_office-home.pkl'))
feature_extractor = ResNetFc(model_name='resnet50')
cls = CLS(feature_extractor.output_num(), num_known_classes+1, bottle_neck_dim=256)
net = nn.Sequential(feature_extractor, cls).cuda()
score_pr = []
score_rw = []
score_ar = []
score_cl = []
label_pr = []
label_rw = []
label_ar = []
label_cl = []
def get_score(dataset):
ss = []
ll = []
for (i, (im, label)) in enumerate(dataset.generator()):
im = Variable(torch.from_numpy(im)).cuda()
f, __, __, __ = net.forward(im)
p = discriminator_p.forward(f).cpu().detach().numpy()
ss.append(p)
ll.append(label)
return np.vstack(ss), np.vstack(ll)
score_pr, label_pr = get_score(product)
score_rw, label_rw = get_score(real_world)
score_ar, label_ar = get_score(art)
score_cl, label_cl = get_score(clipart)
filename = "scores_office-home"
np.savez_compressed(filename,
product_score=score_pr, product_label=label_pr,
real_world_score=score_rw, real_world_label=label_rw,
art_score=score_ar, art_label=label_ar,
clipart_score=score_cl, clipart_label=label_cl)
| [
"numpy.savez_compressed",
"numpy.transpose",
"numpy.asarray",
"numpy.vstack"
] | [((2438, 2670), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'product_score': 'score_pr', 'product_label': 'label_pr', 'real_world_score': 'score_rw', 'real_world_label': 'label_rw', 'art_score': 'score_ar', 'art_label': 'label_ar', 'clipart_score': 'score_cl', 'clipart_label': 'label_cl'}), '(filename, product_score=score_pr, product_label=\n label_pr, real_world_score=score_rw, real_world_label=label_rw,\n art_score=score_ar, art_label=label_ar, clipart_score=score_cl,\n clipart_label=label_cl)\n', (2457, 2670), True, 'import numpy as np\n'), ((388, 417), 'numpy.transpose', 'np.transpose', (['data', '[2, 0, 1]'], {}), '(data, [2, 0, 1])\n', (400, 417), True, 'import numpy as np\n'), ((429, 457), 'numpy.asarray', 'np.asarray', (['data', 'np.float32'], {}), '(data, np.float32)\n', (439, 457), True, 'import numpy as np\n'), ((2215, 2228), 'numpy.vstack', 'np.vstack', (['ss'], {}), '(ss)\n', (2224, 2228), True, 'import numpy as np\n'), ((2230, 2243), 'numpy.vstack', 'np.vstack', (['ll'], {}), '(ll)\n', (2239, 2243), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 31 15:40:31 2021
@author: jessm
this is comparing the teporal cube slices to their impainted counterparts
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import QTable, Table, Column
from astropy import units as u
# Original (non-inpainted) temporal-cube slice, speckle mask, and table.
dimage=np.load('np_align30.npy')
dthresh=np.load('thresh_a30.npy')
dtable=np.load('table_a30.npy')
#reimage=np.load('np_rebinned5e7.npy')
#rethresh=np.load('thresh5e7.npy')
#retable=np.load('table5e7.npy')
# Inpainted counterparts of the same slice.
reimage=np.load('inpaint_a30.npy')
rethresh=np.load('thresh_inpaint_a30.npy')
retable=np.load('table_inpaint_a30.npy')
print(dtable.shape, retable.shape)

"""plot"""
# 2x2 grid: top row = original slice (full image / speckle-masked),
# bottom row = inpainted slice (full image / speckle-masked).
fig, axes = plt.subplots(2, 2, figsize=(10,10), sharex=False, sharey=False)
ax = axes.ravel()

ary=ax[0].imshow(dimage, origin='lower', cmap = "YlGnBu_r")
ax[0].set_title('Temporal Cube Slice of MEC Image')
plt.colorbar(ary, ax=ax[0], fraction=0.046, pad=0.04)

# Multiplying by the boolean/0-1 threshold mask keeps only speckle pixels.
imgplt=ax[1].imshow(dimage*dthresh, origin='lower', cmap = "YlGnBu_r")
ax[1].set_title('Masked Temporal MEC Image \nOnly Speckles')
plt.colorbar(imgplt, ax=ax[1], fraction=0.046, pad=0.04)

ary=ax[2].imshow(reimage, origin='lower', cmap = "YlGnBu_r")
ax[2].set_title('Inpainted MEC Image')
plt.colorbar(ary, ax=ax[2], fraction=0.046, pad=0.04)

imgplt=ax[3].imshow(reimage*rethresh, origin='lower', cmap = "YlGnBu_r")
ax[3].set_title('Masked Inpainted MEC Image \nOnly Speckles')
plt.colorbar(imgplt, ax=ax[3], fraction=0.046, pad=0.04)

plt.show()
"""table"""
middle=np.array([0,0,0,0])
retable=np.vstack((np.round(retable, decimals=2)))
dtable=np.vstack((np.round(dtable, decimals=2)))
#print(np.hstack([dname,rename]))
def reshape_rows(array1, array2):
    """Horizontally stack two 2-D arrays, zero-padding the one with fewer
    rows so that both have the same number of rows."""
    target_rows = max(array1.shape[0], array2.shape[0])

    def _zero_pad(arr):
        # Return *arr* extended with all-zero rows up to target_rows.
        if arr.shape[0] == target_rows:
            return arr
        padded = np.zeros((target_rows, arr.shape[1]), dtype=arr.dtype)
        padded[:arr.shape[0]] = arr
        return padded

    return np.hstack([_zero_pad(array1), _zero_pad(array2)])
# Place the inpainted and original tables side by side (the shorter one is
# zero-padded to the longer) and pretty-print the combined table.
sidebyside=reshape_rows(retable, dtable)
show=Table(sidebyside, names=('Pixels', 'Speckles', 'Percent', 'Intensity', 'InPixels', 'InSpeckles', 'InPercent', 'InAvg Intensity'))
show.pprint_all()
| [
"astropy.table.Table",
"numpy.hstack",
"matplotlib.pyplot.colorbar",
"numpy.array",
"numpy.load",
"matplotlib.pyplot.subplots",
"numpy.round",
"matplotlib.pyplot.show"
] | [((329, 354), 'numpy.load', 'np.load', (['"""np_align30.npy"""'], {}), "('np_align30.npy')\n", (336, 354), True, 'import numpy as np\n'), ((364, 389), 'numpy.load', 'np.load', (['"""thresh_a30.npy"""'], {}), "('thresh_a30.npy')\n", (371, 389), True, 'import numpy as np\n'), ((398, 422), 'numpy.load', 'np.load', (['"""table_a30.npy"""'], {}), "('table_a30.npy')\n", (405, 422), True, 'import numpy as np\n'), ((546, 572), 'numpy.load', 'np.load', (['"""inpaint_a30.npy"""'], {}), "('inpaint_a30.npy')\n", (553, 572), True, 'import numpy as np\n'), ((583, 616), 'numpy.load', 'np.load', (['"""thresh_inpaint_a30.npy"""'], {}), "('thresh_inpaint_a30.npy')\n", (590, 616), True, 'import numpy as np\n'), ((626, 658), 'numpy.load', 'np.load', (['"""table_inpaint_a30.npy"""'], {}), "('table_inpaint_a30.npy')\n", (633, 658), True, 'import numpy as np\n'), ((724, 788), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 10)', 'sharex': '(False)', 'sharey': '(False)'}), '(2, 2, figsize=(10, 10), sharex=False, sharey=False)\n', (736, 788), True, 'import matplotlib.pyplot as plt\n'), ((925, 978), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ary'], {'ax': 'ax[0]', 'fraction': '(0.046)', 'pad': '(0.04)'}), '(ary, ax=ax[0], fraction=0.046, pad=0.04)\n', (937, 978), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1173), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplt'], {'ax': 'ax[1]', 'fraction': '(0.046)', 'pad': '(0.04)'}), '(imgplt, ax=ax[1], fraction=0.046, pad=0.04)\n', (1129, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1333), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ary'], {'ax': 'ax[2]', 'fraction': '(0.046)', 'pad': '(0.04)'}), '(ary, ax=ax[2], fraction=0.046, pad=0.04)\n', (1292, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1531), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imgplt'], {'ax': 'ax[3]', 'fraction': '(0.046)', 'pad': '(0.04)'}), '(imgplt, ax=ax[3], fraction=0.046, 
pad=0.04)\n', (1487, 1531), True, 'import matplotlib.pyplot as plt\n'), ((1535, 1545), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1543, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1593), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1579, 1593), True, 'import numpy as np\n'), ((2325, 2458), 'astropy.table.Table', 'Table', (['sidebyside'], {'names': "('Pixels', 'Speckles', 'Percent', 'Intensity', 'InPixels', 'InSpeckles',\n 'InPercent', 'InAvg Intensity')"}), "(sidebyside, names=('Pixels', 'Speckles', 'Percent', 'Intensity',\n 'InPixels', 'InSpeckles', 'InPercent', 'InAvg Intensity'))\n", (2330, 2458), False, 'from astropy.table import QTable, Table, Column\n'), ((1613, 1642), 'numpy.round', 'np.round', (['retable'], {'decimals': '(2)'}), '(retable, decimals=2)\n', (1621, 1642), True, 'import numpy as np\n'), ((1664, 1692), 'numpy.round', 'np.round', (['dtable'], {'decimals': '(2)'}), '(dtable, decimals=2)\n', (1672, 1692), True, 'import numpy as np\n'), ((1922, 1951), 'numpy.hstack', 'np.hstack', (['[array1, resizea2]'], {}), '([array1, resizea2])\n', (1931, 1951), True, 'import numpy as np\n'), ((2133, 2162), 'numpy.hstack', 'np.hstack', (['[resizea1, array2]'], {}), '([resizea1, array2])\n', (2142, 2162), True, 'import numpy as np\n'), ((2217, 2244), 'numpy.hstack', 'np.hstack', (['[array1, array2]'], {}), '([array1, array2])\n', (2226, 2244), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def evolution_strategy(
    f,
    population_size,
    sigma,
    lr,
    initial_params,
    num_iters):
    """Maximize the reward function *f* with a simple evolution strategy.

    Parameters
    ----------
    f : callable
        Maps a 1-D parameter array to a scalar reward (higher is better).
    population_size : int
        Number of noisy offspring sampled per iteration.
    sigma : float
        Standard deviation of the Gaussian parameter noise.
    lr : float
        Learning rate of the parameter update.
    initial_params : array-like
        1-D array of starting parameters; it is NOT modified.
    num_iters : int
        Number of update iterations.

    Returns
    -------
    tuple of np.ndarray
        (optimized parameters, mean population reward per iteration).
    """
    # BUG FIX: work on a float copy so the caller's array is never mutated
    # in place (the original aliased initial_params and overwrote it).
    params = np.array(initial_params, dtype=float)
    num_params = len(params)
    reward_per_iteration = np.zeros(num_iters)

    for t in range(num_iters):
        # One row of Gaussian noise per offspring.
        N = np.random.randn(population_size, num_params)
        R = np.zeros(population_size)
        # Evaluate each perturbed parameter vector.
        for j in range(population_size):
            R[j] = f(params + sigma * N[j])

        # Standardize rewards so the update is scale-invariant.
        # NOTE: assumes f is not constant across the population (R.std() > 0).
        m = R.mean()
        A = (R - m) / R.std()
        reward_per_iteration[t] = m

        # Move the parameters along the reward-weighted average noise direction.
        update_rate = lr / sigma
        for i in range(num_params):
            params[i] = params[i] + update_rate * np.mean(N[:, i] * A)

    return params, reward_per_iteration
def reward_function(params):
    """Quadratic bowl with its maximum (reward 0) at (0, 4, -2)."""
    x0, x1, x2 = params[0], params[1], params[2]
    penalty = x0 ** 2 + 0.1 * (x1 - 4) ** 2 + 0.5 * (x2 + 2) ** 2
    return -penalty
# Call evolution strategy
# Optimize the toy quadratic reward from a random 3-D starting point.
best_params, rewards = evolution_strategy(
    f=reward_function,
    population_size=50,
    sigma=0.1,
    lr=3e-3,
    initial_params=np.random.randn(3),
    num_iters=500)

# plot the rewards per iteration
plt.plot(rewards)
plt.show()

# final params (should approach the optimum at (0, 4, -2))
print("Final params:", best_params)
| [
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((1739, 1756), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {}), '(rewards)\n', (1747, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1757, 1767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1765, 1767), True, 'import matplotlib.pyplot as plt\n'), ((267, 286), 'numpy.zeros', 'np.zeros', (['num_iters'], {}), '(num_iters)\n', (275, 286), True, 'import numpy as np\n'), ((434, 478), 'numpy.random.randn', 'np.random.randn', (['population_size', 'num_params'], {}), '(population_size, num_params)\n', (449, 478), True, 'import numpy as np\n'), ((522, 547), 'numpy.zeros', 'np.zeros', (['population_size'], {}), '(population_size)\n', (530, 547), True, 'import numpy as np\n'), ((1668, 1686), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (1683, 1686), True, 'import numpy as np\n'), ((1198, 1217), 'numpy.mean', 'np.mean', (['adjustment'], {}), '(adjustment)\n', (1205, 1217), True, 'import numpy as np\n')] |
import torch
import numpy as np
import pandas as pd
from analysis import generate_model_specs, load_data_as_table
from pathlib import Path
import matplotlib.pyplot as plt
# Default locations of experiment logs/data and the default figure size.
LOGS_DIR = Path('logs')
DATA_DIR = Path('data')
FIG_SIZE = (6, 4)
def plot_by_hyper(df: pd.DataFrame, x_name, y_name, **kwargs):
    """Plot *y_name* vs *x_name* for every run, overlaid with the
    across-run mean and standard-error bars."""
    fig = plt.figure(figsize=kwargs.get('figsize', FIG_SIZE))
    ax = fig.add_subplot(1, 1, 1)
    # One thin line per individual run.
    for _, run_df in df.groupby('run'):
        run_df.plot(x_name, y_name, ax=ax, linewidth=0.5, **kwargs)
    # Summary statistics across runs at each hyperparameter value.
    by_hyper = df.groupby(x_name)
    mean_y = by_hyper.mean()
    sem_y = by_hyper.agg(lambda col: col.std() / np.sqrt(col.count()))
    ax.errorbar(df[x_name].unique(), mean_y[y_name], yerr=sem_y[y_name],
                fmt='-k', marker='.', linewidth=1.5)
    ax.legend().remove()
    ax.set_ylabel(y_name)
    ax.set_title(f"{y_name} vs {x_name}")
if __name__ == "__main__":
l2_model_specs = list(generate_model_specs({'dataset': 'mnist', 'task': 'sup', 'run': range(9),
'l1': 0.0, 'drop': 0.0, 'l2': np.logspace(-5, -1, 9)}))
df_l2_metrics = load_data_as_table(l2_model_specs, ['test_acc'])
plot_by_hyper(df_l2_metrics, 'l2', 'test_acc', logx=True, figsize=(3, 2))
plt.show()
| [
"numpy.logspace",
"analysis.load_data_as_table",
"matplotlib.pyplot.show",
"pathlib.Path"
] | [((184, 196), 'pathlib.Path', 'Path', (['"""logs"""'], {}), "('logs')\n", (188, 196), False, 'from pathlib import Path\n'), ((208, 220), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (212, 220), False, 'from pathlib import Path\n'), ((1098, 1146), 'analysis.load_data_as_table', 'load_data_as_table', (['l2_model_specs', "['test_acc']"], {}), "(l2_model_specs, ['test_acc'])\n", (1116, 1146), False, 'from analysis import generate_model_specs, load_data_as_table\n'), ((1229, 1239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1237, 1239), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1074), 'numpy.logspace', 'np.logspace', (['(-5)', '(-1)', '(9)'], {}), '(-5, -1, 9)\n', (1063, 1074), True, 'import numpy as np\n')] |
# Copyright (c) 2016, the Cap authors.
#
# This file is subject to the Modified BSD License and may not be distributed
# without copyright and license information. Please refer to the file LICENSE
# for the text and further information on this license.
from matplotlib import pyplot
from numpy import array, append
from h5py import File
from sys import stdout, exit
# Public API of this helper module.
__all__ = [
    'initialize_data',
    'report_data',
    'save_data',
    'plot_data',
    'open_file_in_write_mode',
]
def initialize_data():
    """Return a fresh record with empty float arrays for the time,
    current, and voltage traces."""
    return {key: array([], dtype=float)
            for key in ('time', 'current', 'voltage')}
def report_data(data, time, device):
    """Append the current time step and the device's readings to *data*."""
    readings = {
        'time': time,
        'current': device.get_current(),
        'voltage': device.get_voltage(),
    }
    for key, value in readings.items():
        data[key] = append(data[key], value)
def save_data(data, path, fout):
    """Store the time/current/voltage arrays of *data* under *path* in the
    (h5py-file-like) mapping *fout*."""
    for key in ('time', 'current', 'voltage'):
        fout[path + '/' + key] = data[key]
def plot_data(data):
    """Plot the current and voltage traces against time on two stacked,
    time-sharing axes."""
    line_width = 3
    label_size = 30
    tick_size = 20
    fig, (current_ax, voltage_ax) = pyplot.subplots(2, sharex=True, figsize=(16, 12))
    # Top panel: current vs time.
    current_ax.plot(data['time'], data['current'], lw=line_width)
    current_ax.set_ylabel(r'$\mathrm{Current\ [A]}$', fontsize=label_size)
    current_ax.get_yaxis().set_tick_params(labelsize=tick_size)
    # Bottom panel: voltage vs time (carries the shared x label).
    voltage_ax.plot(data['time'], data['voltage'], lw=line_width)
    voltage_ax.set_ylabel(r'$\mathrm{Voltage\ [V]}$', fontsize=label_size)
    voltage_ax.set_xlabel(r'$\mathrm{Time\ [s]}$', fontsize=label_size)
    voltage_ax.get_xaxis().set_tick_params(labelsize=tick_size)
    voltage_ax.get_yaxis().set_tick_params(labelsize=tick_size)
def open_file_in_write_mode(filename):
    """Open *filename* as an HDF5 file for writing.

    If the file already exists, ask on stdin whether it should be
    overwritten; exit the program when the answer is no.

    Returns the open h5py.File handle.
    """
    try:
        # 'w-' fails when the file already exists instead of clobbering it.
        fout = File(filename, 'w-')
    except IOError:
        print("file '{0}' already exists...".format(filename))
        stdout.write('overwrite it? [Y/n] ')
        yes = set(['yes', 'y', ''])
        no = set(['no', 'n'])
        while True:
            # BUG FIX: raw_input() is Python 2 only and raises NameError on
            # Python 3; input() is the portable equivalent here.
            answer = input().lower()
            if answer in yes:
                fout = File(filename, 'w')
                break
            elif answer in no:
                exit(0)
            else:
                stdout.write("Please respond with 'yes' or 'no'")
    return fout
| [
"h5py.File",
"numpy.append",
"numpy.array",
"sys.exit",
"matplotlib.pyplot.subplots",
"sys.stdout.write"
] | [((717, 743), 'numpy.append', 'append', (["data['time']", 'time'], {}), "(data['time'], time)\n", (723, 743), False, 'from numpy import array, append\n'), ((1239, 1288), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(2)'], {'sharex': '(True)', 'figsize': '(16, 12)'}), '(2, sharex=True, figsize=(16, 12))\n', (1254, 1288), False, 'from matplotlib import pyplot\n'), ((543, 565), 'numpy.array', 'array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (548, 565), False, 'from numpy import array, append\n'), ((586, 608), 'numpy.array', 'array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (591, 608), False, 'from numpy import array, append\n'), ((629, 651), 'numpy.array', 'array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (634, 651), False, 'from numpy import array, append\n'), ((2023, 2043), 'h5py.File', 'File', (['filename', '"""w-"""'], {}), "(filename, 'w-')\n", (2027, 2043), False, 'from h5py import File\n'), ((2135, 2171), 'sys.stdout.write', 'stdout.write', (['"""overwrite it? [Y/n] """'], {}), "('overwrite it? [Y/n] ')\n", (2147, 2171), False, 'from sys import stdout, exit\n'), ((2352, 2371), 'h5py.File', 'File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (2356, 2371), False, 'from h5py import File\n'), ((2441, 2448), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2445, 2448), False, 'from sys import stdout, exit\n'), ((2483, 2532), 'sys.stdout.write', 'stdout.write', (['"""Please respond with \'yes\' or \'no\'"""'], {}), '("Please respond with \'yes\' or \'no\'")\n', (2495, 2532), False, 'from sys import stdout, exit\n')] |
from typing import Dict, List, Any
import numpy as np
from overrides import overrides
from .instance import TextInstance, IndexedInstance
from ..data_indexer import DataIndexer
class QuestionPassageInstance(TextInstance):
    """
    Base class for instances built from a question text and a passage text,
    where the passage contains the answer to the question.  ``_index_label``
    is left abstract, so use a subclass rather than this class directly.
    """
    def __init__(self, question_text: str, passage_text: str, label: Any, index: int=None):
        super(QuestionPassageInstance, self).__init__(label, index)
        self.question_text = question_text
        self.passage_text = passage_text

    def __str__(self):
        parts = [self.question_text, self.passage_text, str(self.label)]
        return 'QuestionPassageInstance(' + ', '.join(parts) + ')'

    @overrides
    def words(self) -> Dict[str, List[str]]:
        # Collect words from the question, then extend each namespace with
        # the words from the passage.
        question_words = self._words_from_text(self.question_text)
        passage_words = self._words_from_text(self.passage_text)
        for namespace, tokens in question_words.items():
            tokens.extend(passage_words[namespace])
        return question_words

    def _index_label(self, label: Any) -> List[int]:
        """
        Convert the label into a list of indices.  The label format is
        dataset-specific, so subclasses must implement this.
        """
        raise NotImplementedError

    @overrides
    def to_indexed_instance(self, data_indexer: DataIndexer):
        indexed_question = self._index_text(self.question_text, data_indexer)
        indexed_passage = self._index_text(self.passage_text, data_indexer)
        indexed_label = self._index_label(self.label)
        return IndexedQuestionPassageInstance(indexed_question,
                                              indexed_passage, indexed_label,
                                              self.index)
class IndexedQuestionPassageInstance(IndexedInstance):
    """
    Indexed counterpart of ``QuestionPassageInstance``, holding word indices
    for a (question, passage) pair.
    """
    def __init__(self,
                 question_indices: List[int],
                 passage_indices: List[int],
                 label: List[int],
                 index: int=None):
        super(IndexedQuestionPassageInstance, self).__init__(label, index)
        self.question_indices = question_indices
        self.passage_indices = passage_indices

    @classmethod
    @overrides
    def empty_instance(cls):
        return IndexedQuestionPassageInstance([], [], label=None, index=None)

    @overrides
    def get_lengths(self) -> Dict[str, int]:
        """
        Report padding lengths: the question length, the passage length and,
        when character lengths are available for both texts, the longest word
        length across question and passage.  Subclasses that add more fields
        should override this to enable padding on them too.
        """
        question_lengths = self._get_word_sequence_lengths(self.question_indices)
        passage_lengths = self._get_word_sequence_lengths(self.passage_indices)
        lengths = {
            # number of words to pad the question to
            'num_question_words': question_lengths['num_sentence_words'],
            # number of words to pad the passage to
            'num_passage_words': passage_lengths['num_sentence_words'],
        }
        has_char_lengths = ('num_word_characters' in question_lengths
                            and 'num_word_characters' in passage_lengths)
        if has_char_lengths:
            lengths['num_word_characters'] = max(question_lengths['num_word_characters'],
                                                 passage_lengths['num_word_characters'])
        return lengths

    @overrides
    def pad(self, max_lengths: Dict[str, int]):
        """
        Pad (or truncate) the question and passage word sequences, as well as
        the individual words inside them.
        """
        padding_lengths = dict(max_lengths)
        padding_lengths['num_sentence_words'] = padding_lengths['num_question_words']
        self.question_indices = self.pad_word_sequence(self.question_indices, padding_lengths)
        padding_lengths['num_sentence_words'] = padding_lengths['num_passage_words']
        self.passage_indices = self.pad_word_sequence(self.passage_indices, padding_lengths,
                                                      truncate_from_right=False)

    @overrides
    def as_training_data(self):
        inputs = (np.asarray(self.question_indices, dtype='int32'),
                  np.asarray(self.passage_indices, dtype='int32'))
        return inputs, np.asarray(self.label)
| [
"numpy.asarray"
] | [((4624, 4672), 'numpy.asarray', 'np.asarray', (['self.question_indices'], {'dtype': '"""int32"""'}), "(self.question_indices, dtype='int32')\n", (4634, 4672), True, 'import numpy as np\n'), ((4697, 4744), 'numpy.asarray', 'np.asarray', (['self.passage_indices'], {'dtype': '"""int32"""'}), "(self.passage_indices, dtype='int32')\n", (4707, 4744), True, 'import numpy as np\n'), ((4793, 4815), 'numpy.asarray', 'np.asarray', (['self.label'], {}), '(self.label)\n', (4803, 4815), True, 'import numpy as np\n')] |
# %%
"""
Tests of the encoder classes
"""
import numpy as np
import pytest
import gym
from gym_physx.envs.shaping import PlanBasedShaping
from gym_physx.encoders.config_encoder import ConfigEncoder
from gym_physx.wrappers import DesiredGoalEncoder
@pytest.mark.parametrize("n_trials", [20])
@pytest.mark.parametrize("fixed_finger_initial_position", [True, False])
@pytest.mark.parametrize("komo_plans", [True, False])
def test_config_encoder(n_trials, fixed_finger_initial_position, komo_plans):
"""
Test the ConfigEncoder class
"""
env = gym.make(
'gym_physx:physx-pushing-v0',
plan_based_shaping=PlanBasedShaping(shaping_mode='relaxed', width=0.5),
fixed_initial_config=None,
fixed_finger_initial_position=fixed_finger_initial_position,
plan_generator=None,
komo_plans=komo_plans
)
encoder = ConfigEncoder(
env.box_xy_min, env.box_xy_max,
env.plan_length, env.dim_plan,
fixed_finger_initial_position,
0 # always use n_keyframes=0 here
)
env = DesiredGoalEncoder(env, encoder)
for _ in range(n_trials):
observation = env.reset()
observation, _, _, info = env.step(env.action_space.sample())
assert env.observation_space.contains(observation)
if fixed_finger_initial_position:
assert observation['desired_goal'].shape == (4,)
assert np.all(observation['desired_goal'][:2] == info[
"original_plan"
].reshape(env.plan_length, env.dim_plan)[0, 3:5])
assert np.all(observation['desired_goal'][2:] == info[
"original_plan"
].reshape(env.plan_length, env.dim_plan)[-1, 3:5])
else:
assert observation['desired_goal'].shape == (6,)
assert np.all(observation['desired_goal'][:2] == info[
"original_plan"
].reshape(env.plan_length, env.dim_plan)[0, :2])
assert np.all(observation['desired_goal'][2:4] == info[
"original_plan"
].reshape(env.plan_length, env.dim_plan)[0, 3:5])
assert np.all(observation['desired_goal'][4:] == info[
"original_plan"
].reshape(env.plan_length, env.dim_plan)[-1, 3:5])
@pytest.mark.parametrize("n_trials", [100])
@pytest.mark.parametrize("fixed_finger_initial_position", [True, False])
@pytest.mark.parametrize("n_keyframes", [0, 1, 2, 3, 4, 5])
def test_reconstruction_from_config_encoding(
n_trials,
fixed_finger_initial_position,
n_keyframes
):
"""
Proof that there is a function (reconstruct_plan(encoding))
using which it is always possible to reconstruct the plan
from the config encoding
"""
env = gym.make(
'gym_physx:physx-pushing-v0',
plan_based_shaping=PlanBasedShaping(shaping_mode='relaxed', width=0.5),
fixed_initial_config=None,
fixed_finger_initial_position=fixed_finger_initial_position,
plan_generator=None,
komo_plans=False,
n_keyframes=n_keyframes,
plan_length=50*(1+n_keyframes)
)
encoder = ConfigEncoder(
env.box_xy_min, env.box_xy_max,
env.plan_length, env.dim_plan,
fixed_finger_initial_position,
n_keyframes
)
for _ in range(n_trials):
obs = env.reset()
encoding = encoder.encode(obs['desired_goal'])
# reconstruct plan only from encoding (and experiment parameters)
reconstructed_plan = reconstruct_plan(
encoding,
fixed_finger_initial_position,
n_keyframes
)
# assert correct reconstruction
assert np.max(np.abs(reconstructed_plan - obs['desired_goal'])) < 1e-14
def reconstruct_plan(
        encoding,
        fixed_finger_initial_position,
        n_keyframes
):
    """
    reconstruct plan from encoding

    Encoding layout (inferred from the slicing below — confirm against
    ConfigEncoder): optional finger start xy, then box start xy, box goal
    xy, and 2 values per intermediate keyframe.
    """
    # Quick check
    assert len(encoding) == 2*n_keyframes + (
        4 if fixed_finger_initial_position else 6
    )
    # this env is freshly created only to access its methods.
    new_env = gym.make(
        'gym_physx:physx-pushing-v0',
        plan_based_shaping=PlanBasedShaping(shaping_mode='relaxed', width=0.5),
        fixed_initial_config=None,
        fixed_finger_initial_position=fixed_finger_initial_position,
        plan_generator=None,
        komo_plans=False,
        n_keyframes=n_keyframes,
        plan_length=50*(1+n_keyframes)
    )
    # extract config from encoding
    # NOTE(review): 0.64 appears to be the finger's fixed z height — confirm.
    finger_position = np.array(
        [0, 0]) if fixed_finger_initial_position else encoding[:2]
    finger_position = np.array(
        list(finger_position) + [0.64]
    )
    offset = 0 if fixed_finger_initial_position else 2
    box_initial_position = encoding[offset:2+offset]
    box_goal_position = encoding[2+offset:4+offset]
    relevant_intermediate_frames = [
        encoding[4+offset+2*index:4+offset+2*index+2]
        for index in range(n_keyframes)
    ]
    # compile keyframes (xy positions lifted to the floor level)
    keyframes = [np.array(list(box_initial_position) + [new_env.floor_level])]
    for int_frame in relevant_intermediate_frames:
        keyframes.append(np.array(list(int_frame) + [new_env.floor_level]))
    # append goal twice
    keyframes.append(np.array(list(box_goal_position) + [new_env.floor_level]))
    keyframes.append(np.array(list(box_goal_position) + [new_env.floor_level]))
    # and treat second-to-last frame
    if n_keyframes == 0:
        # in this case, the first push is along the longest
        # direction
        first_dir = np.argmax(
            np.abs(keyframes[-1] - keyframes[0])
        )
        assert first_dir in [0, 1]
        second_dir = 0 if first_dir == 1 else 1
        keyframes[-2][first_dir] = keyframes[-1][first_dir]
        keyframes[-2][second_dir] = keyframes[0][second_dir]
    else:
        # in this case, the second-to-last push is
        # perpendicular to the third-to-last
        direction = 1 if keyframes[-4][0] == keyframes[-3][0] else 0
        keyframes[-2][direction] = keyframes[-3][direction]
    # create waypoints
    waypoints = np.array(
        new_env._get_waypoints(finger_position, keyframes)  # pylint: disable=protected-access
    )
    # return plan
    return new_env._densify_waypoints(waypoints)  # pylint: disable=protected-access
| [
"numpy.abs",
"gym_physx.wrappers.DesiredGoalEncoder",
"pytest.mark.parametrize",
"numpy.array",
"gym_physx.encoders.config_encoder.ConfigEncoder",
"gym_physx.envs.shaping.PlanBasedShaping"
] | [((251, 292), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_trials"""', '[20]'], {}), "('n_trials', [20])\n", (274, 292), False, 'import pytest\n'), ((294, 365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fixed_finger_initial_position"""', '[True, False]'], {}), "('fixed_finger_initial_position', [True, False])\n", (317, 365), False, 'import pytest\n'), ((367, 419), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""komo_plans"""', '[True, False]'], {}), "('komo_plans', [True, False])\n", (390, 419), False, 'import pytest\n'), ((2277, 2319), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_trials"""', '[100]'], {}), "('n_trials', [100])\n", (2300, 2319), False, 'import pytest\n'), ((2321, 2392), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fixed_finger_initial_position"""', '[True, False]'], {}), "('fixed_finger_initial_position', [True, False])\n", (2344, 2392), False, 'import pytest\n'), ((2394, 2452), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_keyframes"""', '[0, 1, 2, 3, 4, 5]'], {}), "('n_keyframes', [0, 1, 2, 3, 4, 5])\n", (2417, 2452), False, 'import pytest\n'), ((869, 983), 'gym_physx.encoders.config_encoder.ConfigEncoder', 'ConfigEncoder', (['env.box_xy_min', 'env.box_xy_max', 'env.plan_length', 'env.dim_plan', 'fixed_finger_initial_position', '(0)'], {}), '(env.box_xy_min, env.box_xy_max, env.plan_length, env.dim_plan,\n fixed_finger_initial_position, 0)\n', (882, 983), False, 'from gym_physx.encoders.config_encoder import ConfigEncoder\n'), ((1061, 1093), 'gym_physx.wrappers.DesiredGoalEncoder', 'DesiredGoalEncoder', (['env', 'encoder'], {}), '(env, encoder)\n', (1079, 1093), False, 'from gym_physx.wrappers import DesiredGoalEncoder\n'), ((3139, 3263), 'gym_physx.encoders.config_encoder.ConfigEncoder', 'ConfigEncoder', (['env.box_xy_min', 'env.box_xy_max', 'env.plan_length', 'env.dim_plan', 'fixed_finger_initial_position', 'n_keyframes'], {}), '(env.box_xy_min, 
env.box_xy_max, env.plan_length, env.dim_plan,\n fixed_finger_initial_position, n_keyframes)\n', (3152, 3263), False, 'from gym_physx.encoders.config_encoder import ConfigEncoder\n'), ((4526, 4542), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (4534, 4542), True, 'import numpy as np\n'), ((632, 683), 'gym_physx.envs.shaping.PlanBasedShaping', 'PlanBasedShaping', ([], {'shaping_mode': '"""relaxed"""', 'width': '(0.5)'}), "(shaping_mode='relaxed', width=0.5)\n", (648, 683), False, 'from gym_physx.envs.shaping import PlanBasedShaping\n'), ((2835, 2886), 'gym_physx.envs.shaping.PlanBasedShaping', 'PlanBasedShaping', ([], {'shaping_mode': '"""relaxed"""', 'width': '(0.5)'}), "(shaping_mode='relaxed', width=0.5)\n", (2851, 2886), False, 'from gym_physx.envs.shaping import PlanBasedShaping\n'), ((4178, 4229), 'gym_physx.envs.shaping.PlanBasedShaping', 'PlanBasedShaping', ([], {'shaping_mode': '"""relaxed"""', 'width': '(0.5)'}), "(shaping_mode='relaxed', width=0.5)\n", (4194, 4229), False, 'from gym_physx.envs.shaping import PlanBasedShaping\n'), ((5579, 5615), 'numpy.abs', 'np.abs', (['(keyframes[-1] - keyframes[0])'], {}), '(keyframes[-1] - keyframes[0])\n', (5585, 5615), True, 'import numpy as np\n'), ((3694, 3742), 'numpy.abs', 'np.abs', (["(reconstructed_plan - obs['desired_goal'])"], {}), "(reconstructed_plan - obs['desired_goal'])\n", (3700, 3742), True, 'import numpy as np\n')] |
import os
import wget
import glob
import shutil
import zipfile
import tempfile
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras.models import Model, load_model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.applications.inception_v3 import preprocess_input
from .plot import plot_image, plot_images, plot_train_history
# System temporary directory used as the download cache.
tempdir = tempfile.gettempdir()


def download(url):
    """Download *url* into the temp dir (skipped when already cached);
    return the local file path."""
    destination = os.path.join(tempdir, os.path.basename(url))
    if not os.path.exists(destination):
        wget.download(url, destination)
    return destination
return dest
def load_dataset(dataset='cats-and-dogs'):
    """Download and extract *dataset* under ./datasets; return
    (cat_paths, dog_paths, class_names)."""
    if not os.path.exists('datasets'):
        os.mkdir('datasets')
    if dataset == 'cats-and-dogs':
        archive = download('https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip')
        with zipfile.ZipFile(archive, 'r') as zip_ref:
            zip_ref.extractall('datasets/')
        # These two images in the Kaggle archive are known to be corrupt.
        os.remove('datasets/PetImages/Cat/666.jpg')
        os.remove('datasets/PetImages/Dog/11702.jpg')
    cat_paths = glob.glob('datasets/PetImages/Cat/*.jpg')
    dog_paths = glob.glob('datasets/PetImages/Dog/*.jpg')
    return cat_paths, dog_paths, ['Cat', 'Dog']
def split_train_test(cat_paths, dog_paths, train_ratio=0.7):
    """Randomly split the image paths and move them into train/ and test/
    class subfolders; return the two folder names."""
    cats_train, cats_test = train_test_split(cat_paths, test_size=1 - train_ratio)
    dogs_train, dogs_test = train_test_split(dog_paths, test_size=1 - train_ratio)
    # Make sure the destination folder tree exists.
    for folder in ('train', 'test', 'train/Cat', 'train/Dog', 'test/Cat', 'test/Dog'):
        if not os.path.exists(folder):
            os.mkdir(folder)
    moves = [
        (cats_train, 'train/Cat'),
        (dogs_train, 'train/Dog'),
        (cats_test, 'test/Cat'),
        (dogs_test, 'test/Dog'),
    ]
    for paths, destination in moves:
        for path in paths:
            shutil.move(path, destination)
    return 'train', 'test'
def create_inception3_model(n_classes):
    """Build an InceptionV3 transfer-learning classifier: frozen ImageNet
    base, global average pooling, dropout, and a softmax head."""
    base_model = keras.applications.InceptionV3(weights='imagenet', include_top=False)
    # Freeze the pretrained backbone.
    for layer in base_model.layers:
        layer.trainable = False
    pooled = GlobalAveragePooling2D(name='avg_pool')(base_model.output)
    dropped = Dropout(0.4)(pooled)
    predictions = Dense(n_classes, activation='softmax')(dropped)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def predict_inception3_model(model, X):
    """Return class probabilities for a single image *X* (batch axis is
    added and InceptionV3 preprocessing applied)."""
    batch = keras.applications.inception_v3.preprocess_input(np.expand_dims(X, axis=0))
    return model.predict(batch)[0]
def create_mobilenet2_model(n_classes):
    """Build a MobileNetV2 transfer-learning classifier: frozen ImageNet
    base, global average pooling, and a softmax head."""
    base_model = keras.applications.MobileNetV2(input_shape=(160, 160, 3), weights='imagenet', include_top=False)
    # Freeze the pretrained backbone.
    base_model.trainable = False
    pooled = GlobalAveragePooling2D(name='avg_pool')(base_model.output)
    predictions = Dense(n_classes, activation='softmax')(pooled)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def predict_mobilenet2_model(model, X):
    """Return class probabilities for a single image *X* (batch axis is
    added and MobileNetV2 preprocessing applied)."""
    batch = keras.applications.mobilenet_v2.preprocess_input(np.expand_dims(X, axis=0))
    return model.predict(batch)[0]
class ClassificationModel:
    """Thin wrapper around a transfer-learning Keras classifier.

    Supported backbones: 'inception3' (InceptionV3) and 'mobilenet2'
    (MobileNetV2).
    """
    def __init__(self, name, n_classes):
        self.name = name
        self.n_classes = n_classes
        # BUG FIX: initialize so plot_history() is safe before train()
        # (previously it raised AttributeError).
        self.history = None
        if self.name == 'inception3':
            self.model = create_inception3_model(self.n_classes)
        elif self.name == 'mobilenet2':
            self.model = create_mobilenet2_model(self.n_classes)
        else:
            # BUG FIX: an unknown name previously left self.model unset and
            # failed later with an AttributeError; fail fast instead.
            raise ValueError("unknown model name: {!r}".format(name))

    def preprocess_input(self, x):
        """Apply the backbone-specific input preprocessing."""
        if self.name == 'inception3':
            return keras.applications.inception_v3.preprocess_input(x)
        elif self.name == 'mobilenet2':
            return keras.applications.mobilenet_v2.preprocess_input(x)

    def train(self, train_generator, validation_generator, epochs=5):
        """Fit the model and keep the training history for later plotting."""
        # return model.fit(train_generator, epochs=epochs, steps_per_epoch=320, validation_data=validation_generator, validation_steps=60)
        self.history = self.model.fit(train_generator, epochs=epochs, validation_data=validation_generator)
        return self.history

    def predict(self, X):
        """Return class probabilities for a single image X."""
        if self.name == 'inception3':
            return predict_inception3_model(self.model, X)
        elif self.name == 'mobilenet2':
            return predict_mobilenet2_model(self.model, X)

    def plot_history(self):
        """Plot the most recent training history, if any."""
        if self.history:
            plot_train_history(self.history)

    def save(self, filename='model.h5'):
        """Persist the model (architecture + weights) to an HDF5 file."""
        self.model.save(filename)

    def load(self, filename='model.h5'):
        """Load a previously saved model from *filename*."""
        self.model = load_model(filename)
def create_model(name, n_classes):
  """Factory for a ClassificationModel; name is 'inception3' or 'mobilenet2'."""
  return ClassificationModel(name, n_classes)
| [
"wget.download",
"zipfile.ZipFile",
"keras.layers.Dense",
"os.remove",
"os.path.exists",
"shutil.move",
"keras.models.Model",
"os.mkdir",
"keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.applications.MobileNetV2",
"glob.glob",
"tensorflow.keras.applications.InceptionV3",
"sklearn.mod... | [((491, 512), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (510, 512), False, 'import tempfile\n'), ((549, 570), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (565, 570), False, 'import os\n'), ((582, 613), 'os.path.join', 'os.path.join', (['tempdir', 'basename'], {}), '(tempdir, basename)\n', (594, 613), False, 'import os\n'), ((1493, 1547), 'sklearn.model_selection.train_test_split', 'train_test_split', (['cat_paths'], {'test_size': '(1 - train_ratio)'}), '(cat_paths, test_size=1 - train_ratio)\n', (1509, 1547), False, 'from sklearn.model_selection import train_test_split\n'), ((1574, 1628), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dog_paths'], {'test_size': '(1 - train_ratio)'}), '(dog_paths, test_size=1 - train_ratio)\n', (1590, 1628), False, 'from sklearn.model_selection import train_test_split\n'), ((2190, 2259), 'tensorflow.keras.applications.InceptionV3', 'keras.applications.InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2220, 2259), False, 'from tensorflow import keras\n'), ((2481, 2532), 'keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', (2486, 2532), False, 'from keras.models import Model, load_model\n'), ((2694, 2719), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2708, 2719), True, 'import numpy as np\n'), ((2728, 2779), 'tensorflow.keras.applications.inception_v3.preprocess_input', 'keras.applications.inception_v3.preprocess_input', (['x'], {}), '(x)\n', (2776, 2779), False, 'from tensorflow import keras\n'), ((2888, 2989), 'tensorflow.keras.applications.MobileNetV2', 'keras.applications.MobileNetV2', ([], {'input_shape': '(160, 160, 3)', 'weights': '"""imagenet"""', 'include_top': '(False)'}), "(input_shape=(160, 160, 3), weights=\n 'imagenet', 
include_top=False)\n", (2918, 2989), False, 'from tensorflow import keras\n'), ((3157, 3208), 'keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', (3162, 3208), False, 'from keras.models import Model, load_model\n'), ((3370, 3395), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3384, 3395), True, 'import numpy as np\n'), ((3404, 3455), 'tensorflow.keras.applications.mobilenet_v2.preprocess_input', 'keras.applications.mobilenet_v2.preprocess_input', (['x'], {}), '(x)\n', (3452, 3455), False, 'from tensorflow import keras\n'), ((625, 645), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (639, 645), False, 'import os\n'), ((655, 679), 'wget.download', 'wget.download', (['url', 'dest'], {}), '(url, dest)\n', (668, 679), False, 'import wget\n'), ((752, 778), 'os.path.exists', 'os.path.exists', (['"""datasets"""'], {}), "('datasets')\n", (766, 778), False, 'import os\n'), ((788, 808), 'os.mkdir', 'os.mkdir', (['"""datasets"""'], {}), "('datasets')\n", (796, 808), False, 'import os\n'), ((1094, 1137), 'os.remove', 'os.remove', (['"""datasets/PetImages/Cat/666.jpg"""'], {}), "('datasets/PetImages/Cat/666.jpg')\n", (1103, 1137), False, 'import os\n'), ((1146, 1191), 'os.remove', 'os.remove', (['"""datasets/PetImages/Dog/11702.jpg"""'], {}), "('datasets/PetImages/Dog/11702.jpg')\n", (1155, 1191), False, 'import os\n'), ((1212, 1253), 'glob.glob', 'glob.glob', (['"""datasets/PetImages/Cat/*.jpg"""'], {}), "('datasets/PetImages/Cat/*.jpg')\n", (1221, 1253), False, 'import glob\n'), ((1274, 1315), 'glob.glob', 'glob.glob', (['"""datasets/PetImages/Dog/*.jpg"""'], {}), "('datasets/PetImages/Dog/*.jpg')\n", (1283, 1315), False, 'import glob\n'), ((1845, 1880), 'shutil.move', 'shutil.move', (['cat_train', '"""train/Cat"""'], {}), "(cat_train, 'train/Cat')\n", (1856, 1880), False, 'import shutil\n'), ((1922, 1957), 'shutil.move', 
'shutil.move', (['dog_train', '"""train/Dog"""'], {}), "(dog_train, 'train/Dog')\n", (1933, 1957), False, 'import shutil\n'), ((1997, 2030), 'shutil.move', 'shutil.move', (['cat_test', '"""test/Cat"""'], {}), "(cat_test, 'test/Cat')\n", (2008, 2030), False, 'import shutil\n'), ((2070, 2103), 'shutil.move', 'shutil.move', (['dog_test', '"""test/Dog"""'], {}), "(dog_test, 'test/Dog')\n", (2081, 2103), False, 'import shutil\n'), ((2336, 2348), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2343, 2348), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((2427, 2465), 'keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (2432, 2465), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((3026, 3065), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'name': '"""avg_pool"""'}), "(name='avg_pool')\n", (3048, 3065), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((3103, 3141), 'keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (3108, 3141), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Dropout\n'), ((4910, 4930), 'keras.models.load_model', 'load_model', (['filename'], {}), '(filename)\n', (4920, 4930), False, 'from keras.models import Model, load_model\n'), ((999, 1029), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file', '"""r"""'], {}), "(zip_file, 'r')\n", (1014, 1029), False, 'import zipfile\n'), ((1751, 1773), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1765, 1773), False, 'import os\n'), ((1787, 1803), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (1795, 1803), False, 'import os\n'), ((2349, 2388), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'name': '"""avg_pool"""'}), "(name='avg_pool')\n", (2371, 2388), False, 'from keras.layers import 
Dense, GlobalAveragePooling2D, Dropout\n'), ((3936, 3987), 'tensorflow.keras.applications.inception_v3.preprocess_input', 'keras.applications.inception_v3.preprocess_input', (['x'], {}), '(x)\n', (3984, 3987), False, 'from tensorflow import keras\n'), ((4047, 4098), 'tensorflow.keras.applications.mobilenet_v2.preprocess_input', 'keras.applications.mobilenet_v2.preprocess_input', (['x'], {}), '(x)\n', (4095, 4098), False, 'from tensorflow import keras\n')] |
"""
Defines some useful utilities for plotting the evolution of a Resonator Network
"""
import copy
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
from utils.encoding_decoding import cosine_sim
class LiveResonatorPlot(object):
  """
  A container for a Matplotlib plot we'll use for live visualization

  Parameters
  ----------
  plot_type : str
      One of {'vec_img_viz', 'sim_bar_plots'}. Specifies which plot to
      show. The 'vec_img_viz' plots just display the target and current states
      as images. The 'sim_bar_plots' plot shows the similarity between each of
      the factors and the codebook as well as displays the similarity between
      the model's estimate of the composite vector and the composite vector
      itself.
  target_vectors : dictionary
      Contains the target vectors for each factor.
  factor_ordering : list
      Order to display the factors in. Just nice for consistent plotting
  query_vec : ndarray, optional
      The original composite vector given as a query to the Resonator Network.
      It will usually be the product of target vectors, but in general can
      have some additional noise corruption so we provide this as an additional
      input. Only necessary for the 'sim_bar_plots' type plot.
  codebooks : dict, optional
      Keys label the factors and values are the corresponding codebooks.
      Only necessary for the 'sim_bar_plots' type plot.
  image_size : (int, int), optional
      If applicable, dimensions of image visualization for vectors (in pixels).
      Only necessary for the 'vec_img_viz' type plot.
  """
  def __init__(self, plot_type, target_vectors, factor_ordering,
               query_vec=None, codebooks=None, image_size=None):
    assert plot_type in ['vec_img_viz', 'sim_bar_plots']
    # All target vectors share one length; sample any one of them to get it.
    self.vec_size = len(
        target_vectors[np.random.choice(list(target_vectors.keys()))])
    self.factor_ordering = factor_ordering
    self.plot_type = plot_type
    plt.ion()  # interactive mode so draw calls don't block
    if plot_type == 'sim_bar_plots':
      assert codebooks is not None, 'please provide the codebooks as input'
      assert query_vec is not None, 'please provide the query vec as input'
      self.codebooks = copy.deepcopy(codebooks)
      self.query_vec = copy.deepcopy(query_vec)
      self.target_vectors = copy.deepcopy(target_vectors)
      self.barplot_refs = []  # hold references to the BarContainer objects
      # some constants to get GridSpec working for us
      mjm = 0.075  # {m}a{j}or {m}argin
      mnm = 1.0  # {m}i{n}or {m}argin
      vert_margin = 0.08
      horz_margin = 0.1
      gs_height = (((1.0 - 2*vert_margin) -
                    (mjm * (len(self.factor_ordering) - 1))) /
                   len(self.factor_ordering))
      fig = plt.figure(figsize=(15, 12))
      tab10colors = plt.get_cmap('tab10').colors
      # One band of axes per factor: current-state bars above target bars.
      for fac_idx in range(len(self.factor_ordering)):
        factor_label = self.factor_ordering[fac_idx]
        gs = GridSpec(6, 30)
        t = (1.0 - vert_margin) - fac_idx*gs_height - fac_idx*mjm
        gs.update(top=t, bottom=t - gs_height, left=horz_margin,
                  right=1.0-horz_margin, hspace=mnm, wspace=8*mnm)
        num_in_codebook = self.codebooks[factor_label].shape[1]
        # current states (bar heights are filled in later by UpdatePlot)
        t_ax = plt.subplot(gs[:3, :18])
        self.barplot_refs.append(t_ax.bar(np.arange(num_in_codebook),
                                          np.zeros((num_in_codebook,)),
                                          color=tab10colors[fac_idx], width=1))
        t_ax.spines['top'].set_visible(False)
        t_ax.spines['right'].set_visible(False)
        t_ax.get_xaxis().set_ticks([])
        t_ax.get_yaxis().set_ticks([-1.0, 0.0, 1.0])
        t_ax.set_ylabel('Similarity', fontsize=12)
        t_ax.set_ylim(-1, 1)
        t_ax.yaxis.set_tick_params(labelsize=12)
        t_ax.text(0.02, 0.95, 'Current State', horizontalalignment='left',
                  verticalalignment='top', transform=t_ax.transAxes,
                  color='k', fontsize=14)
        if fac_idx == 0:
          t_ax.set_title('Current state of each factor', fontsize=18)
        # target similarity plot (static; drawn once here)
        t_ax = plt.subplot(gs[3:, :18])
        target_csim = cosine_sim(
            target_vectors[factor_label],
            self.codebooks[factor_label])
        t_ax.bar(np.arange(num_in_codebook), target_csim,
                 color=tab10colors[fac_idx], width=1)
        t_ax.spines['top'].set_visible(False)
        t_ax.spines['right'].set_visible(False)
        t_ax.set_xlabel('Index in codebook', fontsize=12)
        t_ax.set_ylabel('Similarity', fontsize=12)
        t_ax.get_yaxis().set_ticks([-1.0, 0.0, 1.0])
        t_ax.text(0.02, 0.95, 'Target State', horizontalalignment='left',
                  verticalalignment='top', transform=t_ax.transAxes,
                  color='k', fontsize=14)
        if num_in_codebook > 10:
          t_ax.get_xaxis().set_ticks(
              np.arange(0, num_in_codebook, np.rint(num_in_codebook/10)))
        else:
          t_ax.get_xaxis().set_ticks(np.arange(num_in_codebook))
        t_ax.xaxis.set_tick_params(labelsize=12)
        t_ax.yaxis.set_tick_params(labelsize=12)
      # similarity between query composite and current estimated composite
      gs = GridSpec(3, 30)
      t_ax = plt.subplot(gs[1:2, 22:])
      # similarities to target
      self.lineplot_ref = Line2D([], [], color='k', linewidth=3)
      self.total_sim_saved = []
      t_ax.add_line(self.lineplot_ref)
      t_ax.set_ylim(-1.25, 1.25)
      t_ax.set_xlim(0, 20)  # x axis is extended every 20 steps in UpdatePlot
      t_ax.set_title(r'Similarity between $\mathbf{c}$ and $\hat{\mathbf{c}}$',
                     fontsize=18)
      t_ax.set_xlabel('Iteration number', fontsize=14)
      t_ax.set_ylabel('Cosine Similarity', fontsize=14)
      t_ax.xaxis.set_tick_params(labelsize=12)
      t_ax.yaxis.set_tick_params(labelsize=12)
      t_ax.yaxis.set_ticks([-1, 0, 1])
      t_ax.spines['top'].set_visible(False)
      t_ax.spines['right'].set_visible(False)
      self.sim_plot_ax_ref = t_ax
    if plot_type == 'vec_img_viz':
      if image_size is None:
        # we assume that vector is a square number and we will display
        # as a square image
        assert np.sqrt(self.vec_size) % 1 == 0
        self.image_size = (int(np.sqrt(self.vec_size)),
                           int(np.sqrt(self.vec_size)))
      else:
        self.image_size = image_size
      self.fig, self.axes = plt.subplots(
          len(factor_ordering), 2, figsize=(10, 15))
      self.fig.suptitle('Resonator State', fontsize='15')
      self.im_refs = []
      # One row per factor: target image on the left, live state on the right.
      for fac_idx in range(len(self.factor_ordering)):
        factor_label = self.factor_ordering[fac_idx]
        self.im_refs.append([])
        maxval = np.max(target_vectors[factor_label])
        minval = np.min(target_vectors[factor_label])
        targ_im = self.axes[fac_idx][0].imshow(
            np.reshape(target_vectors[factor_label],
                       self.image_size), cmap='gray', vmin=minval, vmax=maxval)
        self.axes[fac_idx][0].set_title(
            'Target vector for ' + factor_label)
        self.axes[fac_idx][0].axis('off')
        self.im_refs[fac_idx].append(targ_im)
        res_im = self.axes[fac_idx][1].imshow(
            np.zeros(self.image_size), cmap='gray', vmin=minval, vmax=maxval)
        self.axes[fac_idx][1].set_title(
            'Current state for ' + factor_label)
        self.axes[fac_idx][1].axis('off')
        self.im_refs[fac_idx].append(res_im)
    plt.show(block=False)
    plt.draw()

  def UpdatePlot(self, current_state, wait_interval=0.001):
    """Redraw the figure to reflect `current_state`.

    Parameters
    ----------
    current_state : dict
        Maps each factor label to its current state vector.
    wait_interval : float, optional
        Seconds to run the GUI event loop after updating the artists.
    """
    if self.plot_type == 'sim_bar_plots':
      for f_idx in range(len(self.factor_ordering)):
        csim = cosine_sim(current_state[self.factor_ordering[f_idx]],
                          self.codebooks[self.factor_ordering[f_idx]])
        # really slow, should find a faster visualization solution
        for rect, ht in zip(self.barplot_refs[f_idx], csim):
          rect.set_height(ht)
      # The composite estimate is the elementwise product of all factors.
      composite_est = np.product(np.array(
          [current_state[x] for x in current_state]), axis=0)
      self.total_sim_saved.append(cosine_sim(self.query_vec, composite_est))
      self.lineplot_ref.set_data(
          np.arange(len(self.total_sim_saved)), self.total_sim_saved)
      if len(self.total_sim_saved) % 20 == 0:
        self.sim_plot_ax_ref.set_xlim(0, len(self.total_sim_saved) + 20)
      # Color the similarity trace red whenever similarity just decreased.
      if (len(self.total_sim_saved) > 1 and
          not np.isclose(self.total_sim_saved[-1], self.total_sim_saved[-2])):
        if self.total_sim_saved[-1] < self.total_sim_saved[-2]:
          self.lineplot_ref.set_color('r')
        else:
          self.lineplot_ref.set_color('k')
    else:
      for f_idx in range(len(self.factor_ordering)):
        self.im_refs[f_idx][-1].set_data(
            np.reshape(current_state[self.factor_ordering[f_idx]],
                       self.image_size))
    pause_without_refocus(wait_interval)
    #^ we could get a LOT faster plotting by using something other than
    # plt.pause but this is quick and dirty...

  def ClosePlot(self):
    """Close the current figure window."""
    plt.close()
    # plus any other cleanup we may need
def pause_without_refocus(interval):
  """Run the GUI event loop for `interval` seconds for the active figure.

  Does nothing when the backend is non-interactive or no figure manager
  is active. The canvas is redrawn first if it has pending changes.
  """
  if plt.rcParams['backend'] not in matplotlib.rcsetup.interactive_bk:
    return
  manager = matplotlib._pylab_helpers.Gcf.get_active()
  if manager is None:
    return
  canvas = manager.canvas
  if canvas.figure.stale:
    canvas.draw()  # flush pending artist changes before spinning the loop
  canvas.start_event_loop(interval)
| [
"numpy.sqrt",
"numpy.array",
"utils.encoding_decoding.cosine_sim",
"copy.deepcopy",
"matplotlib.lines.Line2D",
"numpy.arange",
"numpy.reshape",
"numpy.max",
"matplotlib._pylab_helpers.Gcf.get_active",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"numpy.rint",
"numpy.min",
"ma... | [((2053, 2062), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2060, 2062), True, 'from matplotlib import pyplot as plt\n'), ((7627, 7648), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7635, 7648), True, 'from matplotlib import pyplot as plt\n'), ((7653, 7663), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7661, 7663), True, 'from matplotlib import pyplot as plt\n'), ((9214, 9225), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9223, 9225), True, 'from matplotlib import pyplot as plt\n'), ((9410, 9452), 'matplotlib._pylab_helpers.Gcf.get_active', 'matplotlib._pylab_helpers.Gcf.get_active', ([], {}), '()\n', (9450, 9452), False, 'import matplotlib\n'), ((2276, 2300), 'copy.deepcopy', 'copy.deepcopy', (['codebooks'], {}), '(codebooks)\n', (2289, 2300), False, 'import copy\n'), ((2324, 2348), 'copy.deepcopy', 'copy.deepcopy', (['query_vec'], {}), '(query_vec)\n', (2337, 2348), False, 'import copy\n'), ((2377, 2406), 'copy.deepcopy', 'copy.deepcopy', (['target_vectors'], {}), '(target_vectors)\n', (2390, 2406), False, 'import copy\n'), ((2830, 2858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (2840, 2858), True, 'from matplotlib import pyplot as plt\n'), ((5347, 5362), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(3)', '(30)'], {}), '(3, 30)\n', (5355, 5362), False, 'from matplotlib.gridspec import GridSpec\n'), ((5376, 5401), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1:2, 22:]'], {}), '(gs[1:2, 22:])\n', (5387, 5401), True, 'from matplotlib import pyplot as plt\n'), ((5459, 5497), 'matplotlib.lines.Line2D', 'Line2D', (['[]', '[]'], {'color': '"""k"""', 'linewidth': '(3)'}), "([], [], color='k', linewidth=3)\n", (5465, 5497), False, 'from matplotlib.lines import Line2D\n'), ((2879, 2900), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (2891, 2900), True, 'from matplotlib import pyplot as 
plt\n'), ((3030, 3045), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(6)', '(30)'], {}), '(6, 30)\n', (3038, 3045), False, 'from matplotlib.gridspec import GridSpec\n'), ((3350, 3374), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:3, :18]'], {}), '(gs[:3, :18])\n', (3361, 3374), True, 'from matplotlib import pyplot as plt\n'), ((4242, 4266), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[3:, :18]'], {}), '(gs[3:, :18])\n', (4253, 4266), True, 'from matplotlib import pyplot as plt\n'), ((4289, 4359), 'utils.encoding_decoding.cosine_sim', 'cosine_sim', (['target_vectors[factor_label]', 'self.codebooks[factor_label]'], {}), '(target_vectors[factor_label], self.codebooks[factor_label])\n', (4299, 4359), False, 'from utils.encoding_decoding import cosine_sim\n'), ((6869, 6905), 'numpy.max', 'np.max', (['target_vectors[factor_label]'], {}), '(target_vectors[factor_label])\n', (6875, 6905), True, 'import numpy as np\n'), ((6923, 6959), 'numpy.min', 'np.min', (['target_vectors[factor_label]'], {}), '(target_vectors[factor_label])\n', (6929, 6959), True, 'import numpy as np\n'), ((7835, 7939), 'utils.encoding_decoding.cosine_sim', 'cosine_sim', (['current_state[self.factor_ordering[f_idx]]', 'self.codebooks[self.factor_ordering[f_idx]]'], {}), '(current_state[self.factor_ordering[f_idx]], self.codebooks[self.\n factor_ordering[f_idx]])\n', (7845, 7939), False, 'from utils.encoding_decoding import cosine_sim\n'), ((8153, 8204), 'numpy.array', 'np.array', (['[current_state[x] for x in current_state]'], {}), '([current_state[x] for x in current_state])\n', (8161, 8204), True, 'import numpy as np\n'), ((8257, 8298), 'utils.encoding_decoding.cosine_sim', 'cosine_sim', (['self.query_vec', 'composite_est'], {}), '(self.query_vec, composite_est)\n', (8267, 8298), False, 'from utils.encoding_decoding import cosine_sim\n'), ((4402, 4428), 'numpy.arange', 'np.arange', (['num_in_codebook'], {}), '(num_in_codebook)\n', (4411, 4428), True, 'import numpy as np\n'), ((7020, 
7077), 'numpy.reshape', 'np.reshape', (['target_vectors[factor_label]', 'self.image_size'], {}), '(target_vectors[factor_label], self.image_size)\n', (7030, 7077), True, 'import numpy as np\n'), ((7379, 7404), 'numpy.zeros', 'np.zeros', (['self.image_size'], {}), '(self.image_size)\n', (7387, 7404), True, 'import numpy as np\n'), ((8583, 8645), 'numpy.isclose', 'np.isclose', (['self.total_sim_saved[-1]', 'self.total_sim_saved[-2]'], {}), '(self.total_sim_saved[-1], self.total_sim_saved[-2])\n', (8593, 8645), True, 'import numpy as np\n'), ((8929, 9000), 'numpy.reshape', 'np.reshape', (['current_state[self.factor_ordering[f_idx]]', 'self.image_size'], {}), '(current_state[self.factor_ordering[f_idx]], self.image_size)\n', (8939, 9000), True, 'import numpy as np\n'), ((3417, 3443), 'numpy.arange', 'np.arange', (['num_in_codebook'], {}), '(num_in_codebook)\n', (3426, 3443), True, 'import numpy as np\n'), ((3487, 3515), 'numpy.zeros', 'np.zeros', (['(num_in_codebook,)'], {}), '((num_in_codebook,))\n', (3495, 3515), True, 'import numpy as np\n'), ((5134, 5160), 'numpy.arange', 'np.arange', (['num_in_codebook'], {}), '(num_in_codebook)\n', (5143, 5160), True, 'import numpy as np\n'), ((6341, 6363), 'numpy.sqrt', 'np.sqrt', (['self.vec_size'], {}), '(self.vec_size)\n', (6348, 6363), True, 'import numpy as np\n'), ((6404, 6426), 'numpy.sqrt', 'np.sqrt', (['self.vec_size'], {}), '(self.vec_size)\n', (6411, 6426), True, 'import numpy as np\n'), ((6460, 6482), 'numpy.sqrt', 'np.sqrt', (['self.vec_size'], {}), '(self.vec_size)\n', (6467, 6482), True, 'import numpy as np\n'), ((5053, 5082), 'numpy.rint', 'np.rint', (['(num_in_codebook / 10)'], {}), '(num_in_codebook / 10)\n', (5060, 5082), True, 'import numpy as np\n')] |
#! /usr/bin/env python
'''
Produce a blackbody curve *** AB *** color look-up table, going from
[475-814] to [475-X], where X are the J_H_7_1 filters
'''
import numpy as np
from astropy.io import ascii
from astropy.table import Table
from astropy import constants as const
# Fundamental constants in CGS units (from astropy.constants):
# c = speed of light, h = Planck constant, k = Boltzmann constant.
c = const.c.cgs.value
h = const.h.cgs.value
k = const.k_B.cgs.value
def DoAll():
    """Build a blackbody AB-color lookup table and write 'AB_color1.txt'.

    Columns are temperature T (on a log-spaced grid from 1e3 to 1e5 K)
    and the AB colors [475W - X], where X runs over 814W and the
    J_H_7_1 filters.
    """
    # Filter central wavelengths in microns: F475W, F814W, then J_H_7_1.
    wv = np.array([0.476873, 0.782072, 0.590979, 0.817739, 1.022070,
                   1.240151, 1.535107, 1.830465, 1.326561])
    wv = wv * 1e-4  # microns -> cm
    # BUG FIX: np.linspace's sample count must be an integer; the original
    # passed 1e4 (a float), which raises TypeError on modern NumPy.
    T = 10**np.linspace(3, 5, 10000)
    col = [AB_color(wv[0], wv[i+1], T) for i in range(wv.size - 1)]
    tab = [T] + col
    nms = ('T', 'c0I', 'c01', 'c02', 'c03', 'c04', 'c05', 'c06', 'c07')
    fmt = {n: ('%5.2f' if n == 'T' else '%10.3f') for n in nms}
    t = Table(tab, names=nms)
    ascii.write(t, 'AB_color1.txt', format='fixed_width', delimiter='', formats=fmt)
def BB_lam(lam, T):
    """Planck blackbody spectral radiance B_lambda(lam, T), CGS units."""
    exponent = h * c / (lam * k * T)
    return 2 * h * c**2 / (lam**5 * (np.exp(exponent) - 1))
def AB_color(l1, l2, T):
    """AB color (magnitude difference) of a blackbody between bands l1, l2."""
    f1 = BB_lam(l1, T)
    f2 = BB_lam(l2, T)
    # (l1/l2)**2 converts the F_lambda ratio into an F_nu ratio for AB mags.
    return -2.5 * np.log10((f1 / f2) * (l1 / l2)**2)
if __name__ == '__main__':
    # Sanity-check that true (float) division is in effect, then build table.
    ratio = 3 / 2
    print('This should be 1.5: %.3f' % ratio)
    DoAll()
| [
"numpy.log10",
"astropy.io.ascii.write",
"astropy.table.Table",
"numpy.exp",
"numpy.array",
"numpy.linspace"
] | [((408, 512), 'numpy.array', 'np.array', (['[0.476873, 0.782072, 0.590979, 0.817739, 1.02207, 1.240151, 1.535107, \n 1.830465, 1.326561]'], {}), '([0.476873, 0.782072, 0.590979, 0.817739, 1.02207, 1.240151, \n 1.535107, 1.830465, 1.326561])\n', (416, 512), True, 'import numpy as np\n'), ((904, 925), 'astropy.table.Table', 'Table', (['tab'], {'names': 'nms'}), '(tab, names=nms)\n', (909, 925), False, 'from astropy.table import Table\n'), ((931, 1016), 'astropy.io.ascii.write', 'ascii.write', (['t', '"""AB_color1.txt"""'], {'format': '"""fixed_width"""', 'delimiter': '""""""', 'formats': 'fmt'}), "(t, 'AB_color1.txt', format='fixed_width', delimiter='', formats=fmt\n )\n", (942, 1016), False, 'from astropy.io import ascii\n'), ((530, 556), 'numpy.linspace', 'np.linspace', (['(3)', '(5)', '(10000.0)'], {}), '(3, 5, 10000.0)\n', (541, 556), True, 'import numpy as np\n'), ((1176, 1210), 'numpy.log10', 'np.log10', (['(f1 / f2 * (l1 / l2) ** 2)'], {}), '(f1 / f2 * (l1 / l2) ** 2)\n', (1184, 1210), True, 'import numpy as np\n'), ((1066, 1095), 'numpy.exp', 'np.exp', (['(h * c / (lam * k * T))'], {}), '(h * c / (lam * k * T))\n', (1072, 1095), True, 'import numpy as np\n')] |
import numpy as np
from itertools import product
from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation
class BasicRoomEnv(Env):
    """
    Basic empty room with stochastic transitions. Used for debugging.

    A 3x3 grid with no objects. The agent starts in the center cell and
    each chosen action succeeds with probability `prob` (otherwise the
    agent stays in place). States are (x, y) tuples.
    """
    def __init__(self, prob, use_pixels_as_observations=True):
        self.height = 3
        self.width = 3
        self.init_state = (1, 1)
        self.prob = prob  # probability that the chosen action executes
        self.nS = self.height * self.width
        self.nA = 5
        # NOTE(review): the positional argument 1 is presumably a horizon /
        # step parameter of the Env base class -- confirm against Env.__init__.
        super().__init__(1, use_pixels_as_observations=use_pixels_as_observations)
        self.num_features = 2
        self.default_action = Direction.get_number_from_direction(Direction.STAY)
        # Recomputed from the actual feature vector, overwriting the
        # placeholder value of 2 assigned above.
        self.num_features = len(self.s_to_f(self.init_state))
        self.reset()
        states = self.enumerate_states()
        self.make_transition_matrices(states, range(self.nA), self.nS, self.nA)
        self.make_f_matrix(self.nS, self.num_features)
    def enumerate_states(self):
        """Iterate over every (x, y) grid cell."""
        return product(range(self.width), range(self.height))
    def get_num_from_state(self, state):
        """Flatten an (x, y) state into a single integer index."""
        return np.ravel_multi_index(state, (self.width, self.height))
    def get_state_from_num(self, num):
        """Inverse of get_num_from_state: integer index -> (x, y)."""
        return np.unravel_index(num, (self.width, self.height))
    def s_to_f(self, s):
        # The feature vector is just the (x, y) position itself.
        return s
    def _obs_to_f(self, obs):
        # Recover the (x, y) position from the one-hot grid layer.
        return np.unravel_index(obs[0].argmax(), obs[0].shape)
    def _s_to_obs(self, s):
        # A single grid layer marking the agent's cell.
        layers = [[s]]
        obs = get_grid_representation(self.width, self.height, layers)
        return np.array(obs, dtype=np.float32)
        # render_width = 64
        # render_height = 64
        # x, y = s
        # obs = np.zeros((3, render_height, render_width), dtype=np.float32)
        # obs[
        #     :,
        #     y * render_height : (y + 1) * render_height,
        #     x * render_width : (x + 1) * render_width,
        # ] = 1
        # return obs
    def get_next_states(self, state, action):
        """Return [(probability, next_state, reward), ...] for (state, action).

        The action succeeds with probability `prob`; with probability
        1 - prob the agent stays where it is. Rewards are always 0.
        """
        # next_states = []
        # for a in range(self.nA):
        #     next_s = self.get_next_state(state, a)
        #     p = 1 - self.prob if a == action else self.prob / (self.nA - 1)
        #     next_states.append((p, next_s, 0))
        next_s = self.get_next_state(state, action)
        next_states = [(self.prob, next_s, 0), (1 - self.prob, state, 0)]
        return next_states
    def get_next_state(self, state, action):
        """Returns the next state given a state and an action."""
        action = int(action)
        if action == Direction.get_number_from_direction(Direction.STAY):
            pass
        elif action < len(Direction.ALL_DIRECTIONS):
            move_x, move_y = Direction.move_in_direction_number(state, action)
            # New position is legal
            if 0 <= move_x < self.width and 0 <= move_y < self.height:
                state = move_x, move_y
            else:
                # Move only changes orientation, which we already handled
                pass
        else:
            raise ValueError("Invalid action {}".format(action))
        return state
    def s_to_ansi(self, state):
        # NOTE(review): s_to_obs (no leading underscore) is presumably
        # provided by the Env base class on top of _s_to_obs -- confirm.
        return str(self.s_to_obs(state))
if __name__ == "__main__":
from gym.utils.play import play
env = BasicRoomEnv(1)
play(env, fps=5)
| [
"deep_rlsp.envs.gridworlds.env.get_grid_representation",
"deep_rlsp.envs.gridworlds.env.Direction.move_in_direction_number",
"numpy.ravel_multi_index",
"gym.utils.play.play",
"numpy.array",
"numpy.unravel_index",
"deep_rlsp.envs.gridworlds.env.Direction.get_number_from_direction"
] | [((3222, 3238), 'gym.utils.play.play', 'play', (['env'], {'fps': '(5)'}), '(env, fps=5)\n', (3226, 3238), False, 'from gym.utils.play import play\n'), ((622, 673), 'deep_rlsp.envs.gridworlds.env.Direction.get_number_from_direction', 'Direction.get_number_from_direction', (['Direction.STAY'], {}), '(Direction.STAY)\n', (657, 673), False, 'from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation\n'), ((1087, 1141), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['state', '(self.width, self.height)'], {}), '(state, (self.width, self.height))\n', (1107, 1141), True, 'import numpy as np\n'), ((1197, 1245), 'numpy.unravel_index', 'np.unravel_index', (['num', '(self.width, self.height)'], {}), '(num, (self.width, self.height))\n', (1213, 1245), True, 'import numpy as np\n'), ((1449, 1505), 'deep_rlsp.envs.gridworlds.env.get_grid_representation', 'get_grid_representation', (['self.width', 'self.height', 'layers'], {}), '(self.width, self.height, layers)\n', (1472, 1505), False, 'from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation\n'), ((1521, 1552), 'numpy.array', 'np.array', (['obs'], {'dtype': 'np.float32'}), '(obs, dtype=np.float32)\n', (1529, 1552), True, 'import numpy as np\n'), ((2490, 2541), 'deep_rlsp.envs.gridworlds.env.Direction.get_number_from_direction', 'Direction.get_number_from_direction', (['Direction.STAY'], {}), '(Direction.STAY)\n', (2525, 2541), False, 'from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation\n'), ((2642, 2691), 'deep_rlsp.envs.gridworlds.env.Direction.move_in_direction_number', 'Direction.move_in_direction_number', (['state', 'action'], {}), '(state, action)\n', (2676, 2691), False, 'from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation\n')] |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides functions to interface with scipy.sparse."""
from __future__ import absolute_import
from functools import reduce
from future.utils import iteritems
import itertools
import numpy
import numpy.linalg
import scipy
import scipy.sparse
import scipy.sparse.linalg
from fermilib.config import *
from fermilib.ops import FermionOperator, hermitian_conjugated, normal_ordered
from fermilib.utils import fourier_transform, Grid
from fermilib.utils._jellium import (momentum_vector, position_vector,
grid_indices)
from projectq.ops import QubitOperator
# Make global definitions: single-qubit operators as 2x2 sparse matrices.
# BUG FIX: identity_csc was built with format='csr' despite its name (and
# unlike every other *_csc constant below); use CSC for consistency. The
# matrix values are unchanged.
identity_csc = scipy.sparse.identity(2, format='csc', dtype=complex)
pauli_x_csc = scipy.sparse.csc_matrix([[0., 1.], [1., 0.]], dtype=complex)
pauli_y_csc = scipy.sparse.csc_matrix([[0., -1.j], [1.j, 0.]], dtype=complex)
pauli_z_csc = scipy.sparse.csc_matrix([[1., 0.], [0., -1.]], dtype=complex)
# Qubit raising/lowering operators: (X -/+ iY) / 2.
q_raise_csc = (pauli_x_csc - 1.j * pauli_y_csc) / 2.
q_lower_csc = (pauli_x_csc + 1.j * pauli_y_csc) / 2.
# Lookup from Pauli label to its sparse matrix.
pauli_matrix_map = {'I': identity_csc, 'X': pauli_x_csc,
                    'Y': pauli_y_csc, 'Z': pauli_z_csc}
def wrapped_kronecker(operator_1, operator_2):
    """Kronecker product of two sparse operators, returned in CSC format."""
    return scipy.sparse.kron(operator_1, operator_2, format='csc')
def kronecker_operators(*operator_args):
    """Fold the Kronecker product over a sequence of sparse.csc_matrix operators.

    Accepts the same arguments as functools.reduce after the function, i.e.
    an iterable of operators and, optionally, an initializer.
    """
    return reduce(wrapped_kronecker, *operator_args)
def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):
    """Make a matrix representation of a fermion ladder operator.

    Args:
        n_qubits(int): Number of qubits in the system Hilbert space.
        tensor_factor(int): Index of the qubit (fermionic mode) the ladder
            operator acts on.
        ladder_type: Truthy for a raising operator, falsy for a lowering
            operator.

    Returns:
        The corresponding SparseOperator.
    """
    # Identity on the qubits preceding tensor_factor.
    identities = [scipy.sparse.identity(
        2 ** tensor_factor, dtype=complex, format='csc')]
    # Jordan-Wigner parity (Pauli-Z) string on the qubits after tensor_factor.
    parities = (n_qubits - tensor_factor - 1) * [pauli_z_csc]
    if ladder_type:
        operator = kronecker_operators(identities + [q_raise_csc] + parities)
    else:
        operator = kronecker_operators(identities + [q_lower_csc] + parities)
    return operator
def jordan_wigner_sparse(fermion_operator, n_qubits=None):
    """Initialize a SparseOperator from a FermionOperator.

    Args:
        fermion_operator(FermionOperator): instance of the FermionOperator
            class.
        n_qubits(int): Number of qubits. Defaults to the number of qubits
            the operator acts on.

    Returns:
        The corresponding SparseOperator.
    """
    if n_qubits is None:
        from fermilib.utils import count_qubits
        n_qubits = count_qubits(fermion_operator)
    # Create a list of raising and lowering operators for each orbital.
    # jw_operators[i] = (lowering_i, raising_i) as Jordan-Wigner matrices.
    jw_operators = []
    for tensor_factor in range(n_qubits):
        jw_operators += [(jordan_wigner_ladder_sparse(n_qubits,
                                                      tensor_factor,
                                                      0),
                          jordan_wigner_ladder_sparse(n_qubits,
                                                      tensor_factor,
                                                      1))]
    # Construct the SparseOperator.
    n_hilbert = 2 ** n_qubits
    values_list = [[]]
    row_list = [[]]
    column_list = [[]]
    for term in fermion_operator.terms:
        coefficient = fermion_operator.terms[term]
        # Multiply out the ladder-operator string for this term.
        sparse_matrix = coefficient * scipy.sparse.identity(
            2 ** n_qubits, dtype=complex, format='csc')
        for ladder_operator in term:
            sparse_matrix = sparse_matrix * jw_operators[
                ladder_operator[0]][ladder_operator[1]]
        if coefficient:
            # Extract triplets from sparse_term.
            # NOTE(review): .data keeps explicit zeros while .nonzero() drops
            # them; the value and index lists could disagree in length if
            # explicit zeros survive the products above -- confirm.
            sparse_matrix = sparse_matrix.tocoo(copy=False)
            values_list.append(sparse_matrix.data)
            (row, column) = sparse_matrix.nonzero()
            row_list.append(row)
            column_list.append(column)
    # Assemble every term's triplets into a single sparse matrix.
    values_list = numpy.concatenate(values_list)
    row_list = numpy.concatenate(row_list)
    column_list = numpy.concatenate(column_list)
    sparse_operator = scipy.sparse.coo_matrix((
        values_list, (row_list, column_list)),
        shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
    sparse_operator.eliminate_zeros()
    return sparse_operator
def qubit_operator_sparse(qubit_operator, n_qubits=None):
    """Initialize a SparseOperator from a QubitOperator.

    Args:
        qubit_operator(QubitOperator): instance of the QubitOperator class.
        n_qubits (int): Number of qubits; inferred from the operator when
            omitted.

    Returns:
        The corresponding SparseOperator.

    Raises:
        ValueError: If n_qubits is smaller than the operator requires.
    """
    from fermilib.utils import count_qubits
    if n_qubits is None:
        n_qubits = count_qubits(qubit_operator)
    if n_qubits < count_qubits(qubit_operator):
        raise ValueError('Invalid number of qubits specified.')
    # Construct the SparseOperator.
    n_hilbert = 2 ** n_qubits
    # Seed the triplet lists with an empty list so numpy.concatenate
    # succeeds even for an operator with no terms.
    values_list = [[]]
    row_list = [[]]
    column_list = [[]]
    # Loop through the terms.
    for qubit_term in qubit_operator.terms:
        tensor_factor = 0
        coefficient = qubit_operator.terms[qubit_term]
        sparse_operators = [coefficient]
        for pauli_operator in qubit_term:
            # Grow space for missing identity operators.
            if pauli_operator[0] > tensor_factor:
                identity_qubits = pauli_operator[0] - tensor_factor
                identity = scipy.sparse.identity(
                    2 ** identity_qubits, dtype=complex, format='csc')
                sparse_operators += [identity]
            # Add actual operator to the list.
            sparse_operators += [pauli_matrix_map[pauli_operator[1]]]
            tensor_factor = pauli_operator[0] + 1
        # Grow space at end of string unless operator acted on final qubit.
        if tensor_factor < n_qubits or not qubit_term:
            identity_qubits = n_qubits - tensor_factor
            identity = scipy.sparse.identity(
                2 ** identity_qubits, dtype=complex, format='csc')
            sparse_operators += [identity]
        # Extract triplets from sparse_term.
        sparse_matrix = kronecker_operators(sparse_operators)
        values_list.append(sparse_matrix.tocoo(copy=False).data)
        # NOTE(review): nonzero() returns (rows, columns); unpacking it as
        # (column, row) transposes this term relative to the Kronecker
        # product (jordan_wigner_sparse unpacks the other way around).
        # Verify this is intentional before relying on terms whose matrix
        # is not symmetric (e.g. strings containing Pauli Y).
        (column, row) = sparse_matrix.nonzero()
        column_list.append(column)
        row_list.append(row)
    # Create sparse operator.
    values_list = numpy.concatenate(values_list)
    row_list = numpy.concatenate(row_list)
    column_list = numpy.concatenate(column_list)
    sparse_operator = scipy.sparse.coo_matrix((
        values_list, (row_list, column_list)),
        shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
    sparse_operator.eliminate_zeros()
    return sparse_operator
def jw_hartree_fock_state(n_electrons, n_orbitals):
    """Build the Hartree-Fock product state in the JW representation.

    The first n_electrons orbitals are occupied (|1>) and the remaining
    n_orbitals - n_electrons orbitals are unoccupied (|0>).

    Returns:
        A scipy.sparse CSR column vector of dimension 2 ** n_orbitals.
    """
    ket_one = scipy.sparse.csr_matrix([[0], [1]], dtype=float)
    ket_zero = scipy.sparse.csr_matrix([[1], [0]], dtype=float)
    state = 1.
    # Tensor the single-orbital kets together, occupied orbitals first.
    for _ in range(n_electrons):
        state = scipy.sparse.kron(state, ket_one, 'csr')
    for _ in range(n_orbitals - n_electrons):
        state = scipy.sparse.kron(state, ket_zero, 'csr')
    return state
def jw_number_indices(n_electrons, n_qubits):
    """Return the indices for n_electrons in n_qubits under JW encoding.

    Calculates the indices of all computational basis states of the
    2 ** n_qubits dimensional space in which exactly n_electrons of the
    n_qubits orbitals are occupied.  Useful for restricting generic
    operators or vectors to a fixed particle-number subspace.

    Args:
        n_electrons(int): Number of particles to restrict the operator to.
        n_qubits(int): Number of qubits defining the total state.

    Returns:
        indices(list): Indices of the constant-particle-number states.
    """
    indices = []
    for occupation in itertools.combinations(range(n_qubits), n_electrons):
        # Each occupied orbital n sets bit n of the basis-state index.
        index = 0
        for orbital in occupation:
            index += 2 ** orbital
        indices.append(index)
    return indices
def jw_number_restrict_operator(operator, n_electrons, n_qubits=None):
    """Restrict a Jordan-Wigner encoded operator to a fixed particle number.

    Args:
        operator(ndarray or sparse): Operator acting on the full
            2 ** n_qubits dimensional space.
        n_electrons(int): Number of particles to restrict the operator to.
        n_qubits(int): Number of qubits; inferred from the operator's
            dimension when omitted.

    Returns:
        new_operator(ndarray or sparse): The operator restricted to states
            with the given particle number.
    """
    if n_qubits is None:
        # Infer the qubit count from the Hilbert-space dimension.
        n_qubits = int(numpy.log2(operator.shape[0]))
    subspace = jw_number_indices(n_electrons, n_qubits)
    return operator[numpy.ix_(subspace, subspace)]
def get_density_matrix(states, probabilities):
    """Form the mixed-state density matrix sum_i p_i |psi_i><psi_i|.

    Args:
        states: Sequence of scipy.sparse column vectors.
        probabilities: Matching sequence of statistical weights.

    Returns:
        A scipy.sparse matrix of shape (dim, dim) with complex entries.
    """
    dim = states[0].shape[0]
    # Start from an all-zero sparse matrix and accumulate the projectors.
    rho = scipy.sparse.csc_matrix((dim, dim), dtype=complex)
    for weight, ket in zip(probabilities, states):
        rho = rho + weight * ket * ket.getH()
    return rho
def is_hermitian(sparse_operator):
    """Return True when the matrix equals its conjugate transpose.

    Entries of A - A^dagger whose magnitude does not exceed EQ_TOLERANCE
    are treated as numerical noise.
    """
    residual = sparse_operator - sparse_operator.getH()
    if not residual.nnz:
        return True
    largest_deviation = max(abs(entry) for entry in residual.data)
    return largest_deviation <= EQ_TOLERANCE
def get_ground_state(sparse_operator):
    """Compute the lowest eigenvalue and the corresponding eigenstate.

    Args:
        sparse_operator: Hermitian scipy.sparse matrix.

    Returns:
        eigenvalue: The lowest eigenvalue, a float.
        eigenstate: The lowest eigenstate in scipy.sparse csc format.

    Raises:
        ValueError: If sparse_operator is not Hermitian.
    """
    if not is_hermitian(sparse_operator):
        raise ValueError('sparse_operator must be Hermitian.')
    # 'SA' requests the smallest-algebraic eigenvalues first.
    eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
        sparse_operator, 2, which='SA', maxiter=1e7)
    ground_vector = scipy.sparse.csc_matrix(eigenvectors[:, 0])
    return eigenvalues[0], ground_vector.getH()
def sparse_eigenspectrum(sparse_operator):
    """Perform a dense diagonalization.

    Returns:
        eigenspectrum: numpy array of eigenvalues in ascending order.
    """
    # Hermitian operators get the specialized real-spectrum solver.
    solver = (numpy.linalg.eigvalsh if is_hermitian(sparse_operator)
              else numpy.linalg.eigvals)
    return numpy.sort(solver(sparse_operator.todense()))
def expectation(sparse_operator, state):
    """Compute the expectation value of an operator with a state.

    Args:
        state: scipy.sparse.csc vector representing a pure state, or a
            scipy.sparse.csc matrix representing a density matrix.

    Returns:
        The expectation value (a scalar).

    Raises:
        ValueError: Input state has invalid format.
    """
    operator_shape = sparse_operator.shape
    if state.shape == operator_shape:
        # Density matrix: <O> = Tr(rho O).
        return numpy.sum((state * sparse_operator).diagonal())
    if state.shape == (operator_shape[0], 1):
        # State vector: <O> = <psi|O|psi>.
        braket = state.getH() * sparse_operator * state
        return braket[0, 0]
    raise ValueError('Input state has invalid format.')
def expectation_computational_basis_state(operator, computational_basis_state):
    """Evaluate <b|operator|b> for a computational basis state |b>.

    Args:
        operator: Qubit or FermionOperator to evaluate expectation value of.
            If operator is a FermionOperator, it must be normal-ordered.
        computational_basis_state (scipy.sparse vector / list): normalized
            computational basis state (if scipy.sparse vector), or list of
            occupied orbitals.

    Returns:
        A real float giving the expectation value.

    Raises:
        TypeError: Incorrect operator or state type.
    """
    if isinstance(operator, QubitOperator):
        raise NotImplementedError('Not yet implemented for QubitOperators.')
    if not isinstance(operator, FermionOperator):
        raise TypeError('operator must be a FermionOperator.')
    occupied_orbitals = computational_basis_state
    if not isinstance(occupied_orbitals, list):
        # Decode the basis-state index into per-orbital occupation flags
        # (least significant bit corresponds to orbital 0).
        basis_index = occupied_orbitals.nonzero()[0][0]
        bits = bin(basis_index)[2:][::-1]
        occupied_orbitals = [digit == '1' for digit in bits]
    expectation_value = operator.terms.get((), 0.0)
    n_orbitals = len(occupied_orbitals)
    for i, occupied in enumerate(occupied_orbitals):
        if not occupied:
            continue
        # Number operator a_i^dagger a_i on an occupied orbital.
        expectation_value += operator.terms.get(((i, 1), (i, 0)), 0.0)
        for j in range(i + 1, n_orbitals):
            # Normal-ordered two-body density term enters with a minus sign.
            expectation_value -= operator.terms.get(
                ((j, 1), (i, 1), (j, 0), (i, 0)), 0.0)
    return expectation_value
def expectation_db_operator_with_pw_basis_state(
        operator, plane_wave_occ_orbitals, n_spatial_orbitals, grid,
        spinless):
    """Expectation of a dual-basis operator with a plane wave basis state.

    Args:
        operator: Dual-basis representation of FermionOperator to evaluate
            expectation value of. Can have at most 3-body terms.
        plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
        n_spatial_orbitals (int): Number of spatial orbitals.
        grid (fermilib.utils.Grid): The grid used for discretization.
        spinless (bool): Whether the system is spinless.

    Returns:
        A real float giving the expectation value.
    """
    # Dispatch each term to the matching k-body helper; the normalization
    # is n_spatial_orbitals raised to the body count (= len(action) // 2).
    helpers = {
        2: expectation_one_body_db_operator_computational_basis_state,
        4: expectation_two_body_db_operator_computational_basis_state,
        6: expectation_three_body_db_operator_computational_basis_state,
    }
    expectation_value = operator.terms.get((), 0.0)
    for action, coefficient in iteritems(operator.terms):
        helper = helpers.get(len(action))
        if helper is None:
            # Identity term already counted; anything else is unsupported.
            continue
        normalization = n_spatial_orbitals ** (len(action) // 2)
        expectation_value += coefficient * (
            helper(action, plane_wave_occ_orbitals, grid, spinless) /
            normalization)
    return expectation_value
def expectation_one_body_db_operator_computational_basis_state(
        dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
    """Expectation of a 1-body dual-basis operator in a plane-wave state.

    Args:
        dual_basis_action: Dual-basis action of FermionOperator to
            evaluate expectation value of.
        plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
        grid (fermilib.utils.Grid): The grid used for discretization.
        spinless (bool): Whether the system is spinless.

    Returns:
        A real float giving the expectation value.
    """
    mode_p = dual_basis_action[0][0]
    mode_q = dual_basis_action[1][0]
    r_p = position_vector(grid_indices(mode_p, grid, spinless), grid)
    r_q = position_vector(grid_indices(mode_q, grid, spinless), grid)
    displacement = r_p - r_q
    total = 0.0
    for orbital in plane_wave_occ_orbitals:
        # The Fourier transform is spin-conserving: with spin, p, q and
        # the occupied orbital must all share the same parity (spin).
        if spinless or (mode_p % 2 == mode_q % 2 == orbital % 2):
            k_orbital = momentum_vector(
                grid_indices(orbital, grid, spinless), grid)
            total += numpy.exp(-1j * k_orbital.dot(displacement))
    return total
def expectation_two_body_db_operator_computational_basis_state(
        dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
    """Compute expectation value of a 2-body dual-basis operator with a
    plane wave computational basis state.

    Args:
        dual_basis_action: Dual-basis action of FermionOperator to
            evaluate expectation value of.
        plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
        grid (fermilib.utils.Grid): The grid used for discretization.
        spinless (bool): Whether the system is spinless.

    Returns:
        A float giving the expectation value.
    """
    expectation_value = 0.0
    # Real-space positions of the four modes of the term (indices 0-3 of
    # dual_basis_action are labelled a, b, c, d below).
    r_a = position_vector(grid_indices(dual_basis_action[0][0],
                                       grid, spinless), grid)
    r_b = position_vector(grid_indices(dual_basis_action[1][0],
                                       grid, spinless), grid)
    r_c = position_vector(grid_indices(dual_basis_action[2][0],
                                       grid, spinless), grid)
    r_d = position_vector(grid_indices(dual_basis_action[3][0],
                                       grid, spinless), grid)
    # Precompute k . (r_x - r_y) for every occupied orbital so the double
    # loop below only needs dictionary lookups.
    kac_dict = {}
    kad_dict = {}
    kbc_dict = {}
    kbd_dict = {}
    for i in plane_wave_occ_orbitals:
        k = momentum_vector(grid_indices(i, grid, spinless), grid)
        kac_dict[i] = k.dot(r_a - r_c)
        kad_dict[i] = k.dot(r_a - r_d)
        kbc_dict[i] = k.dot(r_b - r_c)
        kbd_dict[i] = k.dot(r_b - r_d)
    # Sum over ordered pairs of distinct occupied orbitals; each of the two
    # index pairings below enters with the sign noted in its comment.
    for orbital1 in plane_wave_occ_orbitals:
        k1ac = kac_dict[orbital1]
        k1ad = kad_dict[orbital1]
        for orbital2 in plane_wave_occ_orbitals:
            if orbital1 != orbital2:
                k2bc = kbc_dict[orbital2]
                k2bd = kbd_dict[orbital2]
                # The Fourier transform is spin-conserving. This means that
                # the parity of the orbitals involved in the transition must
                # be the same.
                if spinless or (
                        (dual_basis_action[0][0] % 2 ==
                         dual_basis_action[3][0] % 2 == orbital1 % 2) and
                        (dual_basis_action[1][0] % 2 ==
                         dual_basis_action[2][0] % 2 == orbital2 % 2)):
                    value = numpy.exp(-1j * (k1ad + k2bc))
                    # Add because it came from two anti-commutations.
                    expectation_value += value
                # The Fourier transform is spin-conserving. This means that
                # the parity of the orbitals involved in the transition must
                # be the same.
                if spinless or (
                        (dual_basis_action[0][0] % 2 ==
                         dual_basis_action[2][0] % 2 == orbital1 % 2) and
                        (dual_basis_action[1][0] % 2 ==
                         dual_basis_action[3][0] % 2 == orbital2 % 2)):
                    value = numpy.exp(-1j * (k1ac + k2bd))
                    # Subtract because it came from a single anti-commutation.
                    expectation_value -= value
    return expectation_value
def expectation_three_body_db_operator_computational_basis_state(
        dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
    """Compute expectation value of a 3-body dual-basis operator with a
    plane wave computational basis state.

    Args:
        dual_basis_action: Dual-basis action of FermionOperator to
            evaluate expectation value of.
        plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
        grid (fermilib.utils.Grid): The grid used for discretization.
        spinless (bool): Whether the system is spinless.

    Returns:
        A float giving the expectation value.
    """
    expectation_value = 0.0
    # Real-space positions of the six modes of the term (indices 0-5 of
    # dual_basis_action are labelled a, b, c, d, e, f below).
    r_a = position_vector(grid_indices(dual_basis_action[0][0],
                                       grid, spinless), grid)
    r_b = position_vector(grid_indices(dual_basis_action[1][0],
                                       grid, spinless), grid)
    r_c = position_vector(grid_indices(dual_basis_action[2][0],
                                       grid, spinless), grid)
    r_d = position_vector(grid_indices(dual_basis_action[3][0],
                                       grid, spinless), grid)
    r_e = position_vector(grid_indices(dual_basis_action[4][0],
                                       grid, spinless), grid)
    r_f = position_vector(grid_indices(dual_basis_action[5][0],
                                       grid, spinless), grid)
    # Precompute k . (r_x - r_y) for every occupied orbital so the triple
    # loop below only needs dictionary lookups.
    kad_dict = {}
    kae_dict = {}
    kaf_dict = {}
    kbd_dict = {}
    kbe_dict = {}
    kbf_dict = {}
    kcd_dict = {}
    kce_dict = {}
    kcf_dict = {}
    for i in plane_wave_occ_orbitals:
        k = momentum_vector(grid_indices(i, grid, spinless), grid)
        kad_dict[i] = k.dot(r_a - r_d)
        kae_dict[i] = k.dot(r_a - r_e)
        kaf_dict[i] = k.dot(r_a - r_f)
        kbd_dict[i] = k.dot(r_b - r_d)
        kbe_dict[i] = k.dot(r_b - r_e)
        kbf_dict[i] = k.dot(r_b - r_f)
        kcd_dict[i] = k.dot(r_c - r_d)
        kce_dict[i] = k.dot(r_c - r_e)
        kcf_dict[i] = k.dot(r_c - r_f)
    # Sum over ordered triples of distinct occupied orbitals; each of the
    # six index pairings below enters with the sign shown in its comment.
    for orbital1 in plane_wave_occ_orbitals:
        k1ad = kad_dict[orbital1]
        k1ae = kae_dict[orbital1]
        k1af = kaf_dict[orbital1]
        for orbital2 in plane_wave_occ_orbitals:
            if orbital1 != orbital2:
                k2bd = kbd_dict[orbital2]
                k2be = kbe_dict[orbital2]
                k2bf = kbf_dict[orbital2]
                for orbital3 in plane_wave_occ_orbitals:
                    if orbital1 != orbital3 and orbital2 != orbital3:
                        k3cd = kcd_dict[orbital3]
                        k3ce = kce_dict[orbital3]
                        k3cf = kcf_dict[orbital3]
                        # Handle \delta_{ad} \delta_{bf} \delta_{ce} after FT.
                        # The Fourier transform is spin-conserving.
                        if spinless or (
                                (dual_basis_action[0][0] % 2 ==
                                 dual_basis_action[3][0] % 2 ==
                                 orbital1 % 2) and
                                (dual_basis_action[1][0] % 2 ==
                                 dual_basis_action[5][0] % 2 ==
                                 orbital2 % 2) and
                                (dual_basis_action[2][0] % 2 ==
                                 dual_basis_action[4][0] % 2 ==
                                 orbital3 % 2)):
                            expectation_value += numpy.exp(-1j * (
                                k1ad + k2bf + k3ce))
                        # Handle -\delta_{ad} \delta_{be} \delta_{cf} after FT.
                        # The Fourier transform is spin-conserving.
                        if spinless or (
                                (dual_basis_action[0][0] % 2 ==
                                 dual_basis_action[3][0] % 2 ==
                                 orbital1 % 2) and
                                (dual_basis_action[1][0] % 2 ==
                                 dual_basis_action[4][0] % 2 ==
                                 orbital2 % 2) and
                                (dual_basis_action[2][0] % 2 ==
                                 dual_basis_action[5][0] % 2 ==
                                 orbital3 % 2)):
                            expectation_value -= numpy.exp(-1j * (
                                k1ad + k2be + k3cf))
                        # Handle -\delta_{ae} \delta_{bf} \delta_{cd} after FT.
                        # The Fourier transform is spin-conserving.
                        if spinless or (
                                (dual_basis_action[0][0] % 2 ==
                                 dual_basis_action[4][0] % 2 ==
                                 orbital1 % 2) and
                                (dual_basis_action[1][0] % 2 ==
                                 dual_basis_action[5][0] % 2 ==
                                 orbital2 % 2) and
                                (dual_basis_action[2][0] % 2 ==
                                 dual_basis_action[3][0] % 2 ==
                                 orbital3 % 2)):
                            expectation_value -= numpy.exp(-1j * (
                                k1ae + k2bf + k3cd))
                        # Handle \delta_{ae} \delta_{bd} \delta_{cf} after FT.
                        # The Fourier transform is spin-conserving.
                        if spinless or (
                                (dual_basis_action[0][0] % 2 ==
                                 dual_basis_action[4][0] % 2 ==
                                 orbital1 % 2) and
                                (dual_basis_action[1][0] % 2 ==
                                 dual_basis_action[3][0] % 2 ==
                                 orbital2 % 2) and
                                (dual_basis_action[2][0] % 2 ==
                                 dual_basis_action[5][0] % 2 ==
                                 orbital3 % 2)):
                            expectation_value += numpy.exp(-1j * (
                                k1ae + k2bd + k3cf))
                        # Handle \delta_{af} \delta_{be} \delta_{cd} after FT.
                        # The Fourier transform is spin-conserving.
                        if spinless or (
                                (dual_basis_action[0][0] % 2 ==
                                 dual_basis_action[5][0] % 2 ==
                                 orbital1 % 2) and
                                (dual_basis_action[1][0] % 2 ==
                                 dual_basis_action[4][0] % 2 ==
                                 orbital2 % 2) and
                                (dual_basis_action[2][0] % 2 ==
                                 dual_basis_action[3][0] % 2 ==
                                 orbital3 % 2)):
                            expectation_value += numpy.exp(-1j * (
                                k1af + k2be + k3cd))
                        # Handle -\delta_{af} \delta_{bd} \delta_{ce} after FT.
                        # The Fourier transform is spin-conserving.
                        if spinless or (
                                (dual_basis_action[0][0] % 2 ==
                                 dual_basis_action[5][0] % 2 ==
                                 orbital1 % 2) and
                                (dual_basis_action[1][0] % 2 ==
                                 dual_basis_action[3][0] % 2 ==
                                 orbital2 % 2) and
                                (dual_basis_action[2][0] % 2 ==
                                 dual_basis_action[4][0] % 2 ==
                                 orbital3 % 2)):
                            expectation_value -= numpy.exp(-1j * (
                                k1af + k2bd + k3ce))
    return expectation_value
def get_gap(sparse_operator):
    """Return the gap between the two lowest eigenvalues.

    Args:
        sparse_operator: Hermitian scipy.sparse matrix.

    Returns:
        A real float giving the eigenvalue gap.

    Raises:
        ValueError: If sparse_operator is not Hermitian.
    """
    if not is_hermitian(sparse_operator):
        raise ValueError('sparse_operator must be Hermitian.')
    # 'SA' requests the two smallest-algebraic eigenvalues.
    lowest_two, _ = scipy.sparse.linalg.eigsh(
        sparse_operator, 2, which='SA', maxiter=1e7)
    return abs(lowest_two[1] - lowest_two[0])
| [
"functools.reduce",
"numpy.log2",
"numpy.sort",
"numpy.ix_",
"scipy.sparse.linalg.eigsh",
"scipy.sparse.csr_matrix",
"numpy.linalg.eigvalsh",
"numpy.linalg.eigvals",
"fermilib.utils._jellium.grid_indices",
"numpy.exp",
"fermilib.utils.count_qubits",
"numpy.concatenate",
"scipy.sparse.coo_mat... | [((1272, 1325), 'scipy.sparse.identity', 'scipy.sparse.identity', (['(2)'], {'format': '"""csr"""', 'dtype': 'complex'}), "(2, format='csr', dtype=complex)\n", (1293, 1325), False, 'import scipy\n'), ((1340, 1404), 'scipy.sparse.csc_matrix', 'scipy.sparse.csc_matrix', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'complex'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=complex)\n', (1363, 1404), False, 'import scipy\n'), ((1415, 1482), 'scipy.sparse.csc_matrix', 'scipy.sparse.csc_matrix', (['[[0.0, -1.0j], [1.0j, 0.0]]'], {'dtype': 'complex'}), '([[0.0, -1.0j], [1.0j, 0.0]], dtype=complex)\n', (1438, 1482), False, 'import scipy\n'), ((1493, 1558), 'scipy.sparse.csc_matrix', 'scipy.sparse.csc_matrix', (['[[1.0, 0.0], [0.0, -1.0]]'], {'dtype': 'complex'}), '([[1.0, 0.0], [0.0, -1.0]], dtype=complex)\n', (1516, 1558), False, 'import scipy\n'), ((1909, 1957), 'scipy.sparse.kron', 'scipy.sparse.kron', (['operator_1', 'operator_2', '"""csc"""'], {}), "(operator_1, operator_2, 'csc')\n", (1926, 1957), False, 'import scipy\n'), ((2083, 2115), 'functools.reduce', 'reduce', (['wrapped_kronecker', '*args'], {}), '(wrapped_kronecker, *args)\n', (2089, 2115), False, 'from functools import reduce\n'), ((4694, 4724), 'numpy.concatenate', 'numpy.concatenate', (['values_list'], {}), '(values_list)\n', (4711, 4724), False, 'import numpy\n'), ((4740, 4767), 'numpy.concatenate', 'numpy.concatenate', (['row_list'], {}), '(row_list)\n', (4757, 4767), False, 'import numpy\n'), ((4786, 4816), 'numpy.concatenate', 'numpy.concatenate', (['column_list'], {}), '(column_list)\n', (4803, 4816), False, 'import numpy\n'), ((7133, 7163), 'numpy.concatenate', 'numpy.concatenate', (['values_list'], {}), '(values_list)\n', (7150, 7163), False, 'import numpy\n'), ((7179, 7206), 'numpy.concatenate', 'numpy.concatenate', (['row_list'], {}), '(row_list)\n', (7196, 7206), False, 'import numpy\n'), ((7225, 7255), 'numpy.concatenate', 'numpy.concatenate', (['column_list'], {}), 
'(column_list)\n', (7242, 7255), False, 'import numpy\n'), ((7612, 7660), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['[[0], [1]]'], {'dtype': 'float'}), '([[0], [1]], dtype=float)\n', (7635, 7660), False, 'import scipy\n'), ((7691, 7739), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['[[1], [0]]'], {'dtype': 'float'}), '([[1], [0]], dtype=float)\n', (7714, 7739), False, 'import scipy\n'), ((9743, 9803), 'scipy.sparse.csc_matrix', 'scipy.sparse.csc_matrix', (['(n_qubits, n_qubits)'], {'dtype': 'complex'}), '((n_qubits, n_qubits), dtype=complex)\n', (9766, 9803), False, 'import scipy\n'), ((10626, 10703), 'scipy.sparse.linalg.eigsh', 'scipy.sparse.linalg.eigsh', (['sparse_operator', '(2)'], {'which': '"""SA"""', 'maxiter': '(10000000.0)'}), "(sparse_operator, 2, which='SA', maxiter=10000000.0)\n", (10651, 10703), False, 'import scipy\n'), ((10724, 10762), 'scipy.sparse.csc_matrix', 'scipy.sparse.csc_matrix', (['vectors[:, 0]'], {}), '(vectors[:, 0])\n', (10747, 10762), False, 'import scipy\n'), ((11231, 11256), 'numpy.sort', 'numpy.sort', (['eigenspectrum'], {}), '(eigenspectrum)\n', (11241, 11256), False, 'import numpy\n'), ((14583, 14608), 'future.utils.iteritems', 'iteritems', (['operator.terms'], {}), '(operator.terms)\n', (14592, 14608), False, 'from future.utils import iteritems\n'), ((28349, 28426), 'scipy.sparse.linalg.eigsh', 'scipy.sparse.linalg.eigsh', (['sparse_operator', '(2)'], {'which': '"""SA"""', 'maxiter': '(10000000.0)'}), "(sparse_operator, 2, which='SA', maxiter=10000000.0)\n", (28374, 28426), False, 'import scipy\n'), ((2552, 2622), 'scipy.sparse.identity', 'scipy.sparse.identity', (['(2 ** tensor_factor)'], {'dtype': 'complex', 'format': '"""csc"""'}), "(2 ** tensor_factor, dtype=complex, format='csc')\n", (2573, 2622), False, 'import scipy\n'), ((3323, 3353), 'fermilib.utils.count_qubits', 'count_qubits', (['fermion_operator'], {}), '(fermion_operator)\n', (3335, 3353), False, 'from fermilib.utils import 
count_qubits\n'), ((5431, 5459), 'fermilib.utils.count_qubits', 'count_qubits', (['qubit_operator'], {}), '(qubit_operator)\n', (5443, 5459), False, 'from fermilib.utils import count_qubits\n'), ((5478, 5506), 'fermilib.utils.count_qubits', 'count_qubits', (['qubit_operator'], {}), '(qubit_operator)\n', (5490, 5506), False, 'from fermilib.utils import count_qubits\n'), ((7793, 7832), 'scipy.sparse.kron', 'scipy.sparse.kron', (['psi', 'occupied', '"""csr"""'], {}), "(psi, occupied, 'csr')\n", (7810, 7832), False, 'import scipy\n'), ((7899, 7940), 'scipy.sparse.kron', 'scipy.sparse.kron', (['psi', 'unoccupied', '"""csr"""'], {}), "(psi, unoccupied, 'csr')\n", (7916, 7940), False, 'import scipy\n'), ((9596, 9637), 'numpy.ix_', 'numpy.ix_', (['select_indices', 'select_indices'], {}), '(select_indices, select_indices)\n', (9605, 9637), False, 'import numpy\n'), ((11111, 11148), 'numpy.linalg.eigvalsh', 'numpy.linalg.eigvalsh', (['dense_operator'], {}), '(dense_operator)\n', (11132, 11148), False, 'import numpy\n'), ((11183, 11219), 'numpy.linalg.eigvals', 'numpy.linalg.eigvals', (['dense_operator'], {}), '(dense_operator)\n', (11203, 11219), False, 'import numpy\n'), ((16201, 16254), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[0][0]', 'grid', 'spinless'], {}), '(dual_basis_action[0][0], grid, spinless)\n', (16213, 16254), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((16327, 16380), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[1][0]', 'grid', 'spinless'], {}), '(dual_basis_action[1][0], grid, spinless)\n', (16339, 16380), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((17827, 17880), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[0][0]', 'grid', 'spinless'], {}), '(dual_basis_action[0][0], grid, spinless)\n', (17839, 17880), False, 'from fermilib.utils._jellium import 
momentum_vector, position_vector, grid_indices\n'), ((17953, 18006), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[1][0]', 'grid', 'spinless'], {}), '(dual_basis_action[1][0], grid, spinless)\n', (17965, 18006), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((18079, 18132), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[2][0]', 'grid', 'spinless'], {}), '(dual_basis_action[2][0], grid, spinless)\n', (18091, 18132), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((18205, 18258), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[3][0]', 'grid', 'spinless'], {}), '(dual_basis_action[3][0], grid, spinless)\n', (18217, 18258), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((20982, 21035), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[0][0]', 'grid', 'spinless'], {}), '(dual_basis_action[0][0], grid, spinless)\n', (20994, 21035), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((21108, 21161), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[1][0]', 'grid', 'spinless'], {}), '(dual_basis_action[1][0], grid, spinless)\n', (21120, 21161), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((21234, 21287), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[2][0]', 'grid', 'spinless'], {}), '(dual_basis_action[2][0], grid, spinless)\n', (21246, 21287), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((21360, 21413), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[3][0]', 'grid', 'spinless'], {}), '(dual_basis_action[3][0], grid, spinless)\n', (21372, 21413), False, 'from 
fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((21486, 21539), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[4][0]', 'grid', 'spinless'], {}), '(dual_basis_action[4][0], grid, spinless)\n', (21498, 21539), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((21612, 21665), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['dual_basis_action[5][0]', 'grid', 'spinless'], {}), '(dual_basis_action[5][0], grid, spinless)\n', (21624, 21665), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((4136, 4201), 'scipy.sparse.identity', 'scipy.sparse.identity', (['(2 ** n_qubits)'], {'dtype': 'complex', 'format': '"""csc"""'}), "(2 ** n_qubits, dtype=complex, format='csc')\n", (4157, 4201), False, 'import scipy\n'), ((4839, 4937), 'scipy.sparse.coo_matrix', 'scipy.sparse.coo_matrix', (['(values_list, (row_list, column_list))'], {'shape': '(n_hilbert, n_hilbert)'}), '((values_list, (row_list, column_list)), shape=(\n n_hilbert, n_hilbert))\n', (4862, 4937), False, 'import scipy\n'), ((6666, 6738), 'scipy.sparse.identity', 'scipy.sparse.identity', (['(2 ** identity_qubits)'], {'dtype': 'complex', 'format': '"""csc"""'}), "(2 ** identity_qubits, dtype=complex, format='csc')\n", (6687, 6738), False, 'import scipy\n'), ((7278, 7376), 'scipy.sparse.coo_matrix', 'scipy.sparse.coo_matrix', (['(values_list, (row_list, column_list))'], {'shape': '(n_hilbert, n_hilbert)'}), '((values_list, (row_list, column_list)), shape=(\n n_hilbert, n_hilbert))\n', (7301, 7376), False, 'import scipy\n'), ((9482, 9511), 'numpy.log2', 'numpy.log2', (['operator.shape[0]'], {}), '(operator.shape[0])\n', (9492, 9511), False, 'import numpy\n'), ((16646, 16683), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['orbital', 'grid', 'spinless'], {}), '(orbital, grid, spinless)\n', (16658, 16683), False, 'from fermilib.utils._jellium import 
momentum_vector, position_vector, grid_indices\n'), ((18444, 18475), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['i', 'grid', 'spinless'], {}), '(i, grid, spinless)\n', (18456, 18475), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((21941, 21972), 'fermilib.utils._jellium.grid_indices', 'grid_indices', (['i', 'grid', 'spinless'], {}), '(i, grid, spinless)\n', (21953, 21972), False, 'from fermilib.utils._jellium import momentum_vector, position_vector, grid_indices\n'), ((6147, 6219), 'scipy.sparse.identity', 'scipy.sparse.identity', (['(2 ** identity_qubits)'], {'dtype': 'complex', 'format': '"""csc"""'}), "(2 ** identity_qubits, dtype=complex, format='csc')\n", (6168, 6219), False, 'import scipy\n'), ((19428, 19460), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1ad + k2bc))'], {}), '(-1.0j * (k1ad + k2bc))\n', (19437, 19460), False, 'import numpy\n'), ((20080, 20112), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1ac + k2bd))'], {}), '(-1.0j * (k1ac + k2bd))\n', (20089, 20112), False, 'import numpy\n'), ((23743, 23782), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1ad + k2bf + k3ce))'], {}), '(-1.0j * (k1ad + k2bf + k3ce))\n', (23752, 23782), False, 'import numpy\n'), ((24588, 24627), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1ad + k2be + k3cf))'], {}), '(-1.0j * (k1ad + k2be + k3cf))\n', (24597, 24627), False, 'import numpy\n'), ((25433, 25472), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1ae + k2bf + k3cd))'], {}), '(-1.0j * (k1ae + k2bf + k3cd))\n', (25442, 25472), False, 'import numpy\n'), ((26277, 26316), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1ae + k2bd + k3cf))'], {}), '(-1.0j * (k1ae + k2bd + k3cf))\n', (26286, 26316), False, 'import numpy\n'), ((27121, 27160), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1af + k2be + k3cd))'], {}), '(-1.0j * (k1af + k2be + k3cd))\n', (27130, 27160), False, 'import numpy\n'), ((27966, 28005), 'numpy.exp', 'numpy.exp', (['(-1.0j * (k1af + k2bd + k3ce))'], {}), '(-1.0j * (k1af + k2bd + 
k3ce))\n', (27975, 28005), False, 'import numpy\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: <EMAIL>
@Software: PyCharm
@File: vad_test.py
@Time: 2019/11/16 下午6:52
@Overview:
"""
import pdb
from scipy import signal
import numpy as np
from scipy.io import wavfile
import torch
import torch.nn as nn
import torch.optim as optim
from Process_Data.Compute_Feat.compute_vad import ComputeVadEnergy
from python_speech_features import fbank
import matplotlib.pyplot as plt
torch.manual_seed(0)
# def preemphasis(signal,coeff=0.95):
# """perform preemphasis on the input signal.
#
# :param signal: The signal to filter.
# :param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
# :returns: the filtered signal.
# """
# return np.append(signal[0],signal[1:]-coeff*signal[:-1])
#
# audio_pre = preemphasis(audio, 0.97)
#
# f, t, Zxx = signal.stft(audio_pre,
# fs,
# window=signal.hamming(int(fs*0.025)),
# noverlap=fs * 0.015,
# nperseg=fs * 0.025,
# nfft=512)
# energy = 1.0/512 * np.square(np.absolute(Zxx))
# energy = np.sum(energy,1)
class Vad_layer(nn.Module):
    """Trainable voice-activity-detection layer.

    Maps a batch of filterbank features shaped (batch, 1, frames, feat_dim)
    to one activity score per frame via a small conv / pool / linear stack.
    """

    def __init__(self, feat_dim):
        super(Vad_layer, self).__init__()
        # NOTE(review): linear1 is constructed but not used by either
        # forward pass below.
        self.linear1 = nn.Linear(feat_dim, feat_dim)
        # 5-frame temporal smoothing applied independently to each bin.
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=1,
                               kernel_size=(5, 1),
                               padding=(2, 0))
        # Collapses the feature axis to a single score per frame.
        self.linear2 = nn.Linear(feat_dim, 1)
        # NOTE(review): the adaptive pool hard-codes 26 output bins;
        # presumably feat_dim == 26 is assumed -- confirm at call sites.
        self.avg = nn.AdaptiveAvgPool3d((1, None, 26))
        # Shape-preserving 5x5 max pooling (stride 1, padding 2).
        self.max = nn.MaxPool2d(kernel_size=(5, 5),
                                stride=1,
                                padding=(2,2))
        self.relu2 = nn.ReLU()
        self.acti = nn.ReLU()
        # nn.init.eye(self.linear1.weight.data)
        # nn.init.zeros_(self.linear1.bias.data)
        # nn.init.eye(self.linear2.weight.data)
        # Strongly negative bias so linear2's score, after the ReLU below,
        # presumably zeroes out low-energy frames -- confirm intent.
        nn.init.normal_(self.linear2.bias.data, mean=-16, std=1)
        nn.init.normal_(self.linear2.weight.data, mean=1, std=0.1)
        nn.init.normal_(self.conv1.weight.data, mean=1, std=0.1)

    def forward(self, input):
        """Return non-negative per-frame scores for filterbank input.

        NOTE(review): unlike en_forward, no log is applied to the input
        here -- confirm which variant callers expect.
        """
        input = input.float()
        # vad_fb = self.weight1.mm(torch.log(input))
        vad_fb = self.conv1(input)
        vad_fb = self.max(vad_fb)
        vad_fb = self.avg(vad_fb)
        # vad_fb = self.linear1(vad_fb)
        vad_fb = self.linear2(vad_fb)
        vad_fb = self.relu2(vad_fb)
        #vad_fb = torch.sign(vad_fb)
        return vad_fb

    def en_forward(self, input):
        """Binarize log-input scores into a {0, 1} activity mask.

        After the ReLU all values are >= 0, so torch.sign maps strictly
        positive scores to 1.0 and the rest to 0.0.
        """
        input = torch.log(input.float())
        vad_fb = self.conv1(input)
        vad_fb = self.linear2(vad_fb)
        vad_fb = self.max(vad_fb)
        # vad_fb = self.linear2(input)
        vad_fb = self.acti(vad_fb)
        vad_fb = torch.sign(vad_fb)
        # vad_fb = self.acti(vad_fb)
        return vad_fb
def vad_fb(filename):
    """Load a wav file and return (filterbank tensor, energy-VAD labels, frame energies).

    Returns tensors shaped for Vad_layer: features as (1, 1, frames, 64),
    labels as (frames, 1) floats, energies as (1, frames, 1).
    """
    sample_rate, samples = wavfile.read(filename)
    feats, energies = fbank(samples, samplerate=sample_rate, nfilt=64, winfunc=np.hamming)
    # Replace the first filterbank coefficient with the log frame energy.
    feats[:, 0] = np.log(energies)
    energies = energies.reshape((energies.shape[0], 1))
    feat_tensor = torch.from_numpy(feats).unsqueeze(0).unsqueeze(0)
    labels = []
    ComputeVadEnergy(energies, labels)
    labels = np.array(labels)
    # Report the fraction of frames flagged as speech.
    print(float(np.sum(labels)) / len(labels))
    label_tensor = torch.from_numpy(labels).unsqueeze(1).float()
    return feat_tensor, label_tensor, torch.from_numpy(energies).unsqueeze(0)
def main():
    """Overfit a Vad_layer VAD model on a single utterance and plot loss/accuracy.

    Three wav files are loaded, but only the first (fb1/vad1) is actually used
    for training; the concatenated two-utterance batch built below is
    overwritten before use.
    """
    filename2 = 'Data/voxceleb1/vox1_dev_noise/id10001/1zcIwhmdeo4/00001.wav'
    filename1 = 'Data/Aishell/data_aishell/wav/train/S0002/BAC009S0002W0128.wav'
    filename3 = 'Data/voxceleb1/vox1_dev_noise/id10001/1zcIwhmdeo4/00002.wav'
    fb1, vad1, ener1 = vad_fb(filename1)
    fb2, vad2, ener2 = vad_fb(filename2)
    fb3, vad3, ener3 = vad_fb(filename3)
    # Truncate the second utterance so both share the frame count of the first.
    if fb1.shape[2]<=fb2.shape[2]:
        fb2 = fb2[:,:,:fb1.shape[2],:]
        vad2 = vad2[:fb1.shape[2], :]
        ener2 = ener2[:, :fb1.shape[2], :]
    fb = torch.cat((fb1, fb2), dim=0)
    vad = torch.cat((vad1.unsqueeze(0), vad2.unsqueeze(0)), dim=0)
    ener = torch.cat((ener1.unsqueeze(0), ener2.unsqueeze(0)), dim=0)
    vad1 = vad1.unsqueeze(0)
    # NOTE(review): the batched tensors above are discarded here - training uses
    # only the first utterance. 'input' also shadows the builtin of that name.
    input = fb1
    vad = vad1
    # input = ener.view(ener.shape[0], ener.shape[1], ener.shape[3], ener.shape[2])
    # vad_fb = ten_vad.mul(ten_fb).float()
    model = Vad_layer(input.shape[3])
    optimizer = optim.Adagrad(model.parameters(), lr=0.01,
                              weight_decay=1e-3)
    # optimizer = optim.SGD(model.parameters(), lr=0.001,
    #                       momentum=0.99, dampening=0.9,
    #                       weight_decay=1e-4)
    ce1 = nn.L1Loss(reduction='mean')
    ce2 = nn.MSELoss(reduction='mean')
    ce3 = nn.BCELoss()
    sm = nn.Softmax(dim=1)
    epochs = 501
    loss_va = []
    accuracy = []
    for epoch in range(1, epochs):
        # Forward pass on the log-energy path; MSE against the 0/1 VAD labels.
        vad_out = model.en_forward(input)
        loss = ce2(vad_out, vad) # +ce1(vad_out, vad) # + #+ (vad_out.mean()) #
        #loss = ce3(vad_out.view(-1), vad.view(-1))
        # loss = ce3(vad_out, vad)
        if epoch % 4==3:
            print('Loss is {}'.format(loss.item()))
        loss_va.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # pdb.set_trace()
        # acc_num = torch.max(sm(vad_out.view(-1)), dim=1)[1]
        # Threshold raw outputs: anything above -0.5 counts as speech.
        # NOTE(review): this writes through the view and mutates vad_out in place.
        acc_num = vad_out.view(-1)
        for i in range(len(acc_num)):
            if acc_num[i]>-0.5:
                acc_num[i]=1
        acc = float((acc_num.long() == vad.view(-1).long()).sum())
        accuracy.append(acc/len(vad_out.squeeze().view(-1)))
    vad_out = model.en_forward(input)
    print(vad_out)
    plt.plot(loss_va)
    plt.show()
    plt.plot(accuracy)
    plt.show()
# Script entry point: run the single-utterance VAD training demo.
if __name__ == '__main__':
    main()
| [
"torch.nn.ReLU",
"Process_Data.Compute_Feat.compute_vad.ComputeVadEnergy",
"torch.nn.L1Loss",
"numpy.log",
"torch.from_numpy",
"numpy.array",
"torch.nn.MSELoss",
"matplotlib.pyplot.plot",
"torch.sign",
"python_speech_features.fbank",
"scipy.io.wavfile.read",
"torch.cat",
"matplotlib.pyplot.s... | [((450, 470), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (467, 470), False, 'import torch\n'), ((2957, 2979), 'scipy.io.wavfile.read', 'wavfile.read', (['filename'], {}), '(filename)\n', (2969, 2979), False, 'from scipy.io import wavfile\n'), ((2996, 3053), 'python_speech_features.fbank', 'fbank', (['audio'], {'samplerate': 'fs', 'nfilt': '(64)', 'winfunc': 'np.hamming'}), '(audio, samplerate=fs, nfilt=64, winfunc=np.hamming)\n', (3001, 3053), False, 'from python_speech_features import fbank\n'), ((3069, 3081), 'numpy.log', 'np.log', (['ener'], {}), '(ener)\n', (3075, 3081), True, 'import numpy as np\n'), ((3258, 3289), 'Process_Data.Compute_Feat.compute_vad.ComputeVadEnergy', 'ComputeVadEnergy', (['ener', 'vad_lis'], {}), '(ener, vad_lis)\n', (3274, 3289), False, 'from Process_Data.Compute_Feat.compute_vad import ComputeVadEnergy\n'), ((3300, 3317), 'numpy.array', 'np.array', (['vad_lis'], {}), '(vad_lis)\n', (3308, 3317), True, 'import numpy as np\n'), ((3373, 3394), 'torch.from_numpy', 'torch.from_numpy', (['vad'], {}), '(vad)\n', (3389, 3394), False, 'import torch\n'), ((4043, 4071), 'torch.cat', 'torch.cat', (['(fb1, fb2)'], {'dim': '(0)'}), '((fb1, fb2), dim=0)\n', (4052, 4071), False, 'import torch\n'), ((4713, 4740), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (4722, 4740), True, 'import torch.nn as nn\n'), ((4751, 4779), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (4761, 4779), True, 'import torch.nn as nn\n'), ((4790, 4802), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4800, 4802), True, 'import torch.nn as nn\n'), ((4812, 4829), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (4822, 4829), True, 'import torch.nn as nn\n'), ((5737, 5754), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_va'], {}), '(loss_va)\n', (5745, 5754), True, 'import matplotlib.pyplot as plt\n'), ((5759, 5769), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5767, 5769), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5792), 'matplotlib.pyplot.plot', 'plt.plot', (['accuracy'], {}), '(accuracy)\n', (5782, 5792), True, 'import matplotlib.pyplot as plt\n'), ((5797, 5807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5805, 5807), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1336), 'torch.nn.Linear', 'nn.Linear', (['feat_dim', 'feat_dim'], {}), '(feat_dim, feat_dim)\n', (1316, 1336), True, 'import torch.nn as nn\n'), ((1358, 1434), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(1)', 'kernel_size': '(5, 1)', 'padding': '(2, 0)'}), '(in_channels=1, out_channels=1, kernel_size=(5, 1), padding=(2, 0))\n', (1367, 1434), True, 'import torch.nn as nn\n'), ((1552, 1574), 'torch.nn.Linear', 'nn.Linear', (['feat_dim', '(1)'], {}), '(feat_dim, 1)\n', (1561, 1574), True, 'import torch.nn as nn\n'), ((1594, 1629), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(1, None, 26)'], {}), '((1, None, 26))\n', (1614, 1629), True, 'import torch.nn as nn\n'), ((1649, 1707), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(5, 5)', 'stride': '(1)', 'padding': '(2, 2)'}), '(kernel_size=(5, 5), stride=1, padding=(2, 2))\n', (1661, 1707), True, 'import torch.nn as nn\n'), ((1792, 1801), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1799, 1801), True, 'import torch.nn as nn\n'), ((1822, 1831), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1829, 1831), True, 'import torch.nn as nn\n'), ((1985, 2041), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.linear2.bias.data'], {'mean': '(-16)', 'std': '(1)'}), '(self.linear2.bias.data, mean=-16, std=1)\n', (2000, 2041), True, 'import torch.nn as nn\n'), ((2050, 2108), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.linear2.weight.data'], {'mean': '(1)', 'std': '(0.1)'}), '(self.linear2.weight.data, mean=1, std=0.1)\n', (2065, 2108), True, 'import torch.nn as nn\n'), 
((2118, 2174), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv1.weight.data'], {'mean': '(1)', 'std': '(0.1)'}), '(self.conv1.weight.data, mean=1, std=0.1)\n', (2133, 2174), True, 'import torch.nn as nn\n'), ((2840, 2858), 'torch.sign', 'torch.sign', (['vad_fb'], {}), '(vad_fb)\n', (2850, 2858), False, 'import torch\n'), ((3170, 3190), 'torch.from_numpy', 'torch.from_numpy', (['fb'], {}), '(fb)\n', (3186, 3190), False, 'import torch\n'), ((3334, 3345), 'numpy.sum', 'np.sum', (['vad'], {}), '(vad)\n', (3340, 3345), True, 'import numpy as np\n'), ((3467, 3489), 'torch.from_numpy', 'torch.from_numpy', (['ener'], {}), '(ener)\n', (3483, 3489), False, 'import torch\n')] |
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
import matplotlib.cm as cmx
import os
# Fix the NumPy RNG seed so the PCA/t-SNE embedding is reproducible.
np.random.seed(0)
def do_tsne(feats, labs, cls, show_unlabeled=False, sec='', savefig=True):
    """Project feature vectors to 2-D with PCA + t-SNE and scatter-plot them by class.

    :param feats: np.ndarray of shape (n_samples, n_features)
    :param labs: per-sample integer class labels; 0 is treated as "unlabeled"
    :param cls: class names indexed by (label - 1); an "unlabeled" entry is prepended
    :param show_unlabeled: also plot points with label 0
    :param sec: tag appended to the output filename
    :param savefig: if True, save the figure under results/
    """
    # hyperparameters:
    n_pca = 256
    n_cls = 12
    n_dim = 2
    n_iter = 300
    # prepare data: map class ids to names ("unlabeled" occupies index 0)
    labs = [int(i) for i in labs]
    if not isinstance(cls, np.ndarray):
        cls = np.array(cls)
    cls = np.array(['unlabeled'] + cls.tolist())
    labsc = np.array([cls[labs[i]] for i in range(feats.shape[0])])
    # reduce dimensionality:
    print('applying PCA...')
    featsc = PCA(n_components=n_pca).fit_transform(feats)
    print('applying t-SNE...')
    time_start = time.time()
    tsne = TSNE(n_components=n_dim, verbose=1, perplexity=40, n_iter=n_iter)
    feats_tsne = tsne.fit_transform(featsc)
    print('t-SNE done! Time elapsed: {:.3f} seconds'.format(time.time() - time_start))
    # Visualize the results
    fig, ax = plt.subplots()
    values = range(n_cls) if show_unlabeled else range(1, n_cls)
    cmap = plt.get_cmap('jet')
    cNorm = colors.Normalize(vmin=0, vmax=values[-1])
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
    for c in values:
        colorVal = np.array(scalarMap.to_rgba(c), ndmin=2)
        embed_idx = np.where(labsc == cls[c])[0]
        ax.scatter(feats_tsne[embed_idx, 0], feats_tsne[embed_idx, 1], c=colorVal, label=cls[c])
    plt.axis('off')
    plt.axis('equal')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    fig.tight_layout()
    # BUGFIX: save BEFORE plt.show() - with interactive backends show() destroys
    # the current figure, so saving afterwards produced blank images.
    if savefig:
        out_fname = 'results/tsne_sect-{}.png'.format(sec)
        os.makedirs(os.path.dirname(out_fname), exist_ok=True)
        fig.savefig(out_fname, bbox_inches='tight')
    plt.show()
    print('2-dimensional t-sne plot.')
    plt.close(fig)
# Script entry point: parse CLI options, load pre-computed features, run t-SNE.
if __name__ == "__main__":
    import argparse

    def _str2bool(value):
        """Parse a boolean CLI value explicitly.

        BUGFIX: with ``type=bool`` argparse calls bool() on the raw string, so
        any non-empty value - including the literal "False" - parsed as True.
        """
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    parser = argparse.ArgumentParser(description='Run t-SNE on features representations for retrieval')
    # BUGFIX: choices must be a tuple; the previous ('oxford') was a plain string,
    # and argparse's "in" membership test then matched any substring of it.
    parser.add_argument('--dataset', '-d', type=str, default='oxford', choices=('oxford',), help='selects the dataset')
    parser.add_argument('--backbone', '-b', type=str, default='alexnet', choices=('alexnet', 'resnet18', 'resnet50'), help='selects the backbone architecture')
    parser.add_argument('--trained', '-t', type=str, default='classification', help='selects the task used to train the model')
    parser.add_argument('--finetuned', '-f', type=_str2bool, default=False, help='switchs if the model is finetuned (trained on landmarks) or not (trained on Imagenet)')
    parser.add_argument('--show_unlabeled', '-s', type=_str2bool, default=False, help='show unlabeled data')
    args = parser.parse_args()

    train_db = 'landmark' if args.finetuned else 'imagenet'
    data_f = 'data/'+args.dataset+'_res_224_'+args.backbone+'_'+train_db+'_'+args.trained+'.npz'
    data = np.load(data_f)
    feats = data['feats']
    labs = data['labs']
    cls = ['landmark 1','landmark 2','landmark 3','landmark 4','landmark 5','landmark 6','landmark 7','landmark 8','landmark 9','landmark 10','landmark 11']
    # BUGFIX: forward the --show_unlabeled flag (previously parsed but ignored).
    do_tsne(feats, labs, cls, show_unlabeled=args.show_unlabeled, sec='test')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"numpy.where",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.close",
"numpy.array",
"os.path.dirname",
"matplotlib.cm.ScalarMappable",
"numpy.random.seed",
"matplotlib.colors.Normalize"... | [((227, 244), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (241, 244), True, 'import numpy as np\n'), ((687, 728), 'numpy.array', 'np.array', (['[cls[labs[i]] for i in cls_ind]'], {}), '([cls[labs[i]] for i in cls_ind])\n', (695, 728), True, 'import numpy as np\n'), ((800, 823), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_pca'}), '(n_components=n_pca)\n', (803, 823), False, 'from sklearn.decomposition import PCA\n'), ((915, 926), 'time.time', 'time.time', ([], {}), '()\n', (924, 926), False, 'import time\n'), ((938, 1003), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': 'n_dim', 'verbose': '(1)', 'perplexity': '(40)', 'n_iter': 'n_iter'}), '(n_components=n_dim, verbose=1, perplexity=40, n_iter=n_iter)\n', (942, 1003), False, 'from sklearn.manifold import TSNE\n'), ((1179, 1193), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1191, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1277, 1296), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (1289, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1313, 1354), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0)', 'vmax': 'values[-1]'}), '(vmin=0, vmax=values[-1])\n', (1329, 1354), True, 'import matplotlib.colors as colors\n'), ((1371, 1411), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'jet'}), '(norm=cNorm, cmap=jet)\n', (1389, 1411), True, 'import matplotlib.cm as cmx\n'), ((1703, 1718), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1711, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1740), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1731, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1807), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), 
loc=2, borderaxespad=0.0)\n', (1755, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1834, 1844), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1842, 1844), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2139), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2137, 2139), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2296), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run t-SNE on features representations for retrieval"""'}), "(description=\n 'Run t-SNE on features representations for retrieval')\n", (2224, 2296), False, 'import argparse\n'), ((3170, 3185), 'numpy.load', 'np.load', (['data_f'], {}), '(data_f)\n', (3177, 3185), True, 'import numpy as np\n'), ((570, 583), 'numpy.array', 'np.array', (['cls'], {}), '(cls)\n', (578, 583), True, 'import numpy as np\n'), ((2079, 2122), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fname'], {'bbox_inches': '"""tight"""'}), "(out_fname, bbox_inches='tight')\n", (2090, 2122), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1543), 'numpy.where', 'np.where', (['(labsc == cls[c])'], {}), '(labsc == cls[c])\n', (1526, 1543), True, 'import numpy as np\n'), ((1109, 1120), 'time.time', 'time.time', ([], {}), '()\n', (1118, 1120), False, 'import time\n'), ((1990, 2016), 'os.path.dirname', 'os.path.dirname', (['out_fname'], {}), '(out_fname)\n', (2005, 2016), False, 'import os\n'), ((2043, 2069), 'os.path.dirname', 'os.path.dirname', (['out_fname'], {}), '(out_fname)\n', (2058, 2069), False, 'import os\n')] |
from collections import OrderedDict
import sys
import numpy as np
import onnx
from array import array
from pprint import pprint
def onnx2darknet(onnxfile):
    """Convert an ONNX YOLOv2-style graph into darknet cfg blocks plus a flat weight list.

    Walks the ONNX node list once and translates Conv / BatchNormalization /
    LeakyRelu / Add / MaxPool / Reshape / Concat nodes into darknet layer
    dicts, while flattening the matching initializer tensors (read as little-
    endian float32) into a single weight list.

    :param onnxfile: path to the .onnx model file
    :returns: (blocks, wdata) - blocks is a list of OrderedDicts, one per
              darknet cfg section; wdata is a 1-D numpy array of weights
    """
    # Load the ONNX model
    model = onnx.load(onnxfile)
    # Check that the IR is well formed
    onnx.checker.check_model(model)
    # Print a human readable representation of the graph
    print(onnx.helper.printable_graph(model.graph))
    # onnx -> darknet convert
    # jj: byte cursor into the current initializer; kk: initializer index;
    # i: node index; act_layer: outputs of layers that started a new section.
    jj=0
    kk=0
    i= 0
    end = 0
    layer_num = 0
    act_layer = []
    k=0
    wdata = []
    blocks = []
    # Fixed [net] header for a 416x416 RGB input.
    block = OrderedDict()
    block['type'] = 'net'
    block['batch'] = 1
    block['channels'] = 3
    block['height'] = 416
    block['width'] = 416
    blocks.append(block)
    while i < len(model.graph.node):
        layer = model.graph.node[i]
        if layer.op_type == 'Conv':
            # A Conv whose input is not the previous layer's output needs a
            # [route] layer pointing back to the producing layer.
            if int( layer.input[0]) !=1 and act_layer.index( int(layer.input[0])) - len(act_layer) +1 < 0:
                block = OrderedDict()
                block['type'] = 'route'
                block['layers'] = act_layer.index( int(layer.input[0])) - len(act_layer)
                blocks.append(block)
                act_layer.append(int(layer.output[0]))
            block = OrderedDict()
            block['type'] = 'convolutional'
            # Number of filters comes from the weight tensor's first dimension.
            input_num = layer.input[1]
            block['filters'] = model.graph.input[int(input_num)].type.tensor_type.shape.dim[0].dim_value
            j=0
            while j < len(layer.attribute):
                #kernel_shape => size
                if layer.attribute[j].name == 'kernel_shape':
                    block['size'] = layer.attribute[j].ints[0]
                    j = j+1
                # NOTE(review): stride is hardcoded to '1' and the actual
                # 'strides' attribute is ignored - verify for strided convs.
                elif layer.attribute[j].name == 'strides':
                    block['stride'] = '1'
                    j = j+1
                #pads => pad
                elif layer.attribute[j].name == 'pads':
                    block['pad'] ='1'
                    j = j+1
                else:
                    #blocks.append("<unknown>")
                    j = j+1
            i = i + 1
        elif layer.op_type == 'BatchNormalization':
            # A BN node marks the preceding conv as batch-normalized and
            # consumes 5 initializers (conv weights + 4 BN tensors).
            # NOTE(review): the kk-3/kk-4/kk-2/kk-1/kk-5 order presumably maps
            # to darknet's bias/scale/mean/variance/weights layout - confirm
            # against the exporter's initializer ordering.
            if layer.attribute[0].name == 'is_test':
                block['batch_normalize'] = '1'
                kk = kk + 5
                while jj < len(model.graph.initializer[kk-3].raw_data):
                    wdata += list(array('f',model.graph.initializer[kk-3].raw_data[jj:jj+4]))
                    jj = jj + 4
                jj = 0
                while jj < len(model.graph.initializer[kk-4].raw_data):
                    wdata += list(array('f',model.graph.initializer[kk-4].raw_data[jj:jj+4]))
                    jj = jj + 4
                jj = 0
                while jj < len(model.graph.initializer[kk-2].raw_data):
                    wdata += list(array('f',model.graph.initializer[kk-2].raw_data[jj:jj+4]))
                    jj = jj + 4
                jj = 0
                while jj < len(model.graph.initializer[kk-1].raw_data):
                    wdata += list(array('f',model.graph.initializer[kk-1].raw_data[jj:jj+4]))
                    jj = jj + 4
                jj = 0
                while jj < len(model.graph.initializer[kk-5].raw_data):
                    wdata += list(array('f',model.graph.initializer[kk-5].raw_data[jj:jj+4]))
                    jj = jj + 4
                jj = 0
            i = i + 1
        elif layer.op_type == 'LeakyRelu':
            # LeakyRelu closes the current conv section with activation=leaky.
            block['activation'] = 'leaky'
            blocks.append(block)
            i = i + 1
            act_layer.append(int(layer.output[0]))
        elif layer.op_type == 'Add':
            # A bias Add closes the conv section with a linear activation and
            # consumes two initializers (bias, then conv weights).
            block['activation'] = 'linear '
            blocks.append(block)
            kk = kk + 1
            while jj < len(model.graph.initializer[kk].raw_data):
                wdata += list(array('f',model.graph.initializer[kk].raw_data[jj:jj+4]))
                jj = jj + 4
            jj = 0
            while jj < len(model.graph.initializer[kk-1].raw_data):
                wdata += list(array('f',model.graph.initializer[kk-1].raw_data[jj:jj+4]))
                jj = jj + 4
            jj = 0
            i = i + 1
        ########################################################
        elif layer.op_type == 'MaxPool':
            block = OrderedDict()
            block['type'] = 'maxpool'
            j = 0
            while j < len(layer.attribute):
                #kernel_shape => size
                if layer.attribute[j].name == 'kernel_shape':
                    block['size'] = layer.attribute[j].ints[0]
                    j = j + 1
                #strides => stride
                elif layer.attribute[j].name == 'strides':
                    block['stride'] = layer.attribute[j].ints[0]
                    blocks.append(block)
                    j = j + 1
                else:
                    j = j + 1
            i = i + 1
            act_layer.append(int(layer.output[0]))
        ########################################################
        # The first Reshape maps to darknet's reorg (space-to-depth) layer;
        # later Reshapes are bookkeeping around Transpose nodes.
        elif layer.op_type == 'Reshape':
            if end == 0:
                block = OrderedDict()
                block['type'] = 'reorg'
                block['stride'] = '2'
                blocks.append(block)
                end = 1
            else:
                if(model.graph.node[i+1].op_type) == 'Transpose':
                    end
                else:
                    act_layer.append(int(layer.output[0]))
            i = i + 1
        ########################################################
        # elif layer.op_type == 'Transpose':
        #     if layer['attribute'] == 'perm':
        ########################################################
        # Concat of two earlier outputs becomes a two-source [route] layer.
        elif layer.op_type == 'Concat':
            block = OrderedDict()
            block['type'] = 'route'
            first_num = act_layer.index( int(layer.input[0])) - len(act_layer)
            last_num = act_layer.index( int(layer.input[1])) - len(act_layer)
            block['layers'] = str(first_num) + ',' + str(last_num)
            blocks.append(block)
            i = i + 1
            act_layer.append(int(layer.output[0]))
        ########################################################
        else:
            i = i + 1
    # Fixed YOLOv2 [region] detection head (COCO anchors, 80 classes).
    block = OrderedDict()
    block['type'] = 'region'
    block['anchors'] = '0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828'
    block['bias_match']=1
    block['classes']=80
    block['coords']=4
    block['num']=5
    block['softmax']=1
    block['jitter']=.3
    block['rescore']=1
    block['object_scale']=5
    block['noobject_scale']=1
    block['class_scale']=1
    block['coord_scale']=1
    block['absolute']=1
    block['thresh'] =' .6'
    block['random']=1
    blocks.append(block)
    return blocks, np.array(wdata)
def save_cfg(blocks, cfgfile):
    """Write a list of layer dicts as a darknet .cfg file.

    Each dict becomes a "[type]" section header followed by key=value lines
    (the 'type' entry itself only names the section) and a blank line.
    """
    print ('Save to ', cfgfile)
    with open(cfgfile, 'w') as fp:
        for section in blocks:
            lines = ['[%s]\n' % (section['type'])]
            lines.extend('%s=%s\n' % (k, v) for k, v in section.items() if k != 'type')
            lines.append('\n')
            fp.writelines(lines)
def save_weights(data, weightfile):
    """Serialize darknet weights: a 4-int32 header followed by float32 data.

    Header fields are (major, minor, revision, net.seen) = (0, 1, 0, 0).
    The resulting file is byte-for-byte the darknet .weights layout.
    """
    print ('Save to ', weightfile)
    header = np.array([0, 1, 0, 0], dtype=np.int32)
    with open(weightfile, 'wb') as fp:
        header.tofile(fp)
        np.asarray(data, dtype=np.float32).tofile(fp)
# Script entry point: expects <model.onnx> <out.cfg> <out.weights> on the CLI.
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 4:
        print('try:')
        print('python onnx2darknet.py yolov2.onnx yolov2.cfg yolov2.weights')
        exit()
    onnxfile = sys.argv[1]
    cfgfile = sys.argv[2]
    weightfile = sys.argv[3]
    # Convert once, then emit both the darknet cfg and the binary weight file.
    blocks, data = onnx2darknet(onnxfile)
    save_cfg(blocks, cfgfile)
    save_weights(data, weightfile)
| [
"collections.OrderedDict",
"numpy.fromfile",
"array.array",
"onnx.helper.printable_graph",
"numpy.array",
"numpy.zeros",
"onnx.load",
"onnx.checker.check_model"
] | [((203, 222), 'onnx.load', 'onnx.load', (['onnxfile'], {}), '(onnxfile)\n', (212, 222), False, 'import onnx\n'), ((267, 298), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model'], {}), '(model)\n', (291, 298), False, 'import onnx\n'), ((546, 559), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (557, 559), False, 'from collections import OrderedDict\n'), ((5410, 5423), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5421, 5423), False, 'from collections import OrderedDict\n'), ((6378, 6416), 'numpy.zeros', 'np.zeros', (['(wsize + 4,)'], {'dtype': 'np.int32'}), '((wsize + 4,), dtype=np.int32)\n', (6386, 6416), True, 'import numpy as np\n'), ((6620, 6661), 'numpy.fromfile', 'np.fromfile', (['weightfile'], {'dtype': 'np.float32'}), '(weightfile, dtype=np.float32)\n', (6631, 6661), True, 'import numpy as np\n'), ((367, 407), 'onnx.helper.printable_graph', 'onnx.helper.printable_graph', (['model.graph'], {}), '(model.graph)\n', (394, 407), False, 'import onnx\n'), ((5940, 5955), 'numpy.array', 'np.array', (['wdata'], {}), '(wdata)\n', (5948, 5955), True, 'import numpy as np\n'), ((1163, 1176), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1174, 1176), False, 'from collections import OrderedDict\n'), ((952, 965), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (963, 965), False, 'from collections import OrderedDict\n'), ((2165, 2228), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk - 3].raw_data[jj:jj + 4]'], {}), "('f', model.graph.initializer[kk - 3].raw_data[jj:jj + 4])\n", (2170, 2228), False, 'from array import array\n'), ((2336, 2399), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk - 4].raw_data[jj:jj + 4]'], {}), "('f', model.graph.initializer[kk - 4].raw_data[jj:jj + 4])\n", (2341, 2399), False, 'from array import array\n'), ((2507, 2570), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk - 2].raw_data[jj:jj + 4]'], {}), "('f', 
model.graph.initializer[kk - 2].raw_data[jj:jj + 4])\n", (2512, 2570), False, 'from array import array\n'), ((2678, 2741), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk - 1].raw_data[jj:jj + 4]'], {}), "('f', model.graph.initializer[kk - 1].raw_data[jj:jj + 4])\n", (2683, 2741), False, 'from array import array\n'), ((2849, 2912), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk - 5].raw_data[jj:jj + 4]'], {}), "('f', model.graph.initializer[kk - 5].raw_data[jj:jj + 4])\n", (2854, 2912), False, 'from array import array\n'), ((3768, 3781), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3779, 3781), False, 'from collections import OrderedDict\n'), ((3376, 3435), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk].raw_data[jj:jj + 4]'], {}), "('f', model.graph.initializer[kk].raw_data[jj:jj + 4])\n", (3381, 3435), False, 'from array import array\n'), ((3541, 3604), 'array.array', 'array', (['"""f"""', 'model.graph.initializer[kk - 1].raw_data[jj:jj + 4]'], {}), "('f', model.graph.initializer[kk - 1].raw_data[jj:jj + 4])\n", (3546, 3604), False, 'from array import array\n'), ((4418, 4431), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4429, 4431), False, 'from collections import OrderedDict\n'), ((4969, 4982), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4980, 4982), False, 'from collections import OrderedDict\n')] |
import os
import json
import warnings
import time
import random
import numpy as np
from gym_unity.envs import UnityEnv
from stable_baselines.common.vec_env import SubprocVecEnv
from supervisors.supervisor import Supervisor
def create_vec_env(visibility=2.5, safe_info=True, safe_states=True, supervisor=None, safety_distance_logical=.35,
                   repl=1, worker_start=1000, lidar_num_bins=15, num_envs=20, cbf_quadratic=False,
                   cbf_learn_online=True, rewards=None, log_name=None, num_prior_data_cbf=None, search_space='random',
                   scale_reward=False):
    """Create a SubprocVecEnv of ``num_envs`` forked Unity drone-delivery environments.

    Each worker selects an obstacle movement pattern (by worker number mod 8)
    and a static-obstacle layout (mod 4) from hard-coded tables, writes them
    into the shared env_config.json before the Unity executable starts, and
    wraps the resulting env in the project's Supervisor.

    :param visibility: sensing range passed through to the Supervisor
    :param supervisor: supervisor variant name (also used in the log path)
    :param safety_distance_logical: logical safety distance for the Supervisor
    :param repl: replication index; offsets seeds, Unity worker ids and log dirs
    :param worker_start: base Unity worker/socket id
    :param num_envs: number of parallel environments (forked subprocesses)
    :returns: a SubprocVecEnv wrapping ``num_envs`` Supervisor-wrapped envs

    NOTE(review): workers rewrite the shared env_config.json one after another;
    the time.sleep(number) stagger presumably serialises file access and Unity
    start-up - confirm there is no race for large num_envs.
    """
    n_forks = num_envs
    warnings.filterwarnings("ignore")
    env_list = []
    for i in range(0, n_forks):
        def create_env(number=i):
            # Stagger start-up so workers don't collide on env_config.json / sockets.
            time.sleep(number)
            # set seed - unity engine sets a seed by default
            seed = int(number + (repl + 1) * n_forks + 1)
            random.seed(seed)
            np.random.seed(seed)
            # (Previously-used circular/linear movement variants and obstacle
            # layouts were removed here; see version history.)
            movement = [['chase', 'linear', 'linear'],
                        ['linear', 'linear', 'chase'],
                        ['linear', 'chase', 'linear'],
                        ['chase', 'chase', 'linear'],
                        ['linear', 'chase', 'chase'],
                        ['linear', 'linear', 'linear'],
                        ['chase', 'linear', 'chase'],
                        ['linear', 'linear', 'linear']]
            # Static obstacle (x, z) layouts; one layout per worker number mod 4.
            coordinate_static = [np.array([[1.2, -1.4], [-.9, -0.25], [-1, 3.0], [-1, -2.20],
                                           [-1, -1.80], [-1, -2.0], [-1, -1.75], [-1, -1.55], [1.4, -1.23],
                                           [-1, -1.35], [-1, -0.55],
                                           [0.3, -2.2], [1.9, -0.4], [0.8, -0.99], [1.34, -2.2]
                                           ]),
                                 np.array([[1.75, -0.75], [-1.25, -1.25], [.7, -1.4],
                                           [-1, -2.2],
                                           [-1.3, -0.75], [.3, -1.9], [1.3, -1.22], [-2.2, -1], [2.2, -1],
                                           [-1.7, -0.5], [2.2, -2.7], [2.3, -2.9], [1.7, -2.2]
                                           ]),
                                 np.array([[-.7, -1], [.55, -1.75], [.9, -1.25], [1.7, -1],
                                           [1, -1.75], [1.4, -.5], [-1.1, -.5],
                                           [2, -.95], [-1.05, -1.25], [-1.25, -.95], [-2.7, -1.5]
                                           ]),
                                 np.array([[.55, -2.15], [1.7, -.71], [1.45, -1.15], [-.95, -1.25],
                                           [.7, -2.1], [-1.45, -1.15],
                                           [-2.05, -1.15], [-.25, -.75], [.7, -.9],
                                           [2.3, -2.9], [1.7, -2.2],
                                           [2.2, -2.7]
                                           ])]
            # Spawn positions for the animated (moving) obstacles, per layout.
            coordinate_dynamic = [[{'x': '.25', 'y': '0.46', 'z': '-0.75'},
                                   {'x': '0.25', 'y': '0.46', 'z': '-1.25'},  ##
                                   {'x': '0.7', 'y': '0.46', 'z': '-1.4'}],
                                  [{'x': '-0.13', 'y': '0.46', 'z': '-1'},
                                   {'x': '1.2', 'y': '0.46', 'z': '-1.4'},
                                   {'x': '-0.9', 'y': '0.46', 'z': '-0.45'}],
                                  [{'x': '.45', 'y': '0.46', 'z': '-0.75'},
                                   {'x': '0.25', 'y': '0.46', 'z': '-1.25'},
                                   {'x': '-0.35', 'y': '0.46', 'z': '-.35'}],
                                  [{'x': '.25', 'y': '0.46', 'z': '-0.95'},
                                   {'x': '.05', 'y': '0.46', 'z': '-1.25'},
                                   {'x': '-.7', 'y': '0.46', 'z': '-1'}]]
            # Patch this worker's scenario into the shared Unity config file.
            env_config = open("env_config.json", "r")
            json_object = json.load(env_config)
            env_config.close()
            n_socket = number
            # Worker numbers above 14 wrap back onto the 0-14 range.
            if number > 14:
                number = number - 15
            if number > 7:
                index = number % 8
            else:
                index = number
            # NOTE(review): after the modulo above this guard can never fire.
            if index > 7:
                index = 0
            json_object['movement'] = movement[index]
            if number > 3:
                index = number % 4
            else:
                index = number
            if index > 3:
                index = 0
            # Rebind to this worker's single layout (shadows the list above).
            coordinate_static = coordinate_static[index]
            json_object['animatedPositions'] = coordinate_dynamic[index]
            env_config = open("env_config.json", "w")
            json.dump(json_object, env_config)
            env_config.close()
            # Unique Unity worker/socket id for this subprocess.
            worker = worker_start + repl * n_forks + 1 + n_socket
            file_name = 'drone-delivery-0.1'
            env_name = "env/drone-delivery-0.1/" + file_name
            if log_name is None:
                log_dir = "logs/drone_ppo_" + supervisor + "_" + str(safety_distance_logical) + "_" + str(visibility) +\
                          "/drone_ppo_" + supervisor + "_" + str(safety_distance_logical) + "_" + str(visibility) + \
                          "_" + str(repl) + "/"
            else:
                log_dir = "./logs/" + log_name + "/" + str(repl) + "/"
            os.makedirs(log_dir, exist_ok=True)
            env = UnityEnv(env_name,
                           worker_id=worker,
                           use_visual=False,
                           uint8_visual=False,
                           allow_multiple_visual_obs=False,
                           no_graphics=True)
            # Override the observation shape to the flattened vector size;
            # presumably 3 stacked frames of (lidar_num_bins + 8) values - confirm.
            env.observation_space.shape = (((lidar_num_bins + 8) * 3),)
            env = Supervisor(env, log_dir, safety_distance=safety_distance_logical, visibility=visibility,
                             safe_info=safe_info, safe_states=safe_states, supervisor=supervisor,
                             coordinates_static_obstacles=coordinate_static, lidar_num_bins=lidar_num_bins,
                             which_static=index, record_svm_gp=False, cbf_quadratic=cbf_quadratic,
                             cbf_learn_online=cbf_learn_online, rewards=rewards, num_prior_data_cbf=num_prior_data_cbf,
                             search_space=search_space, scale_reward=scale_reward)
            return env
        env_list.append(create_env)
    env = SubprocVecEnv(env_list, start_method="fork")
    # Allow every staggered worker to finish its delayed start-up.
    time.sleep(num_envs + 3)
    return env
| [
"os.makedirs",
"stable_baselines.common.vec_env.SubprocVecEnv",
"supervisors.supervisor.Supervisor",
"random.seed",
"time.sleep",
"numpy.array",
"gym_unity.envs.UnityEnv",
"numpy.random.seed",
"json.load",
"warnings.filterwarnings",
"json.dump"
] | [((625, 658), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (648, 658), False, 'import warnings\n'), ((8982, 9026), 'stable_baselines.common.vec_env.SubprocVecEnv', 'SubprocVecEnv', (['env_list'], {'start_method': '"""fork"""'}), "(env_list, start_method='fork')\n", (8995, 9026), False, 'from stable_baselines.common.vec_env import SubprocVecEnv\n'), ((9031, 9055), 'time.sleep', 'time.sleep', (['(num_envs + 3)'], {}), '(num_envs + 3)\n', (9041, 9055), False, 'import time\n'), ((755, 773), 'time.sleep', 'time.sleep', (['number'], {}), '(number)\n', (765, 773), False, 'import time\n'), ((905, 922), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (916, 922), False, 'import random\n'), ((935, 955), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (949, 955), True, 'import numpy as np\n'), ((6528, 6549), 'json.load', 'json.load', (['env_config'], {}), '(env_config)\n', (6537, 6549), False, 'import json\n'), ((7252, 7286), 'json.dump', 'json.dump', (['json_object', 'env_config'], {}), '(json_object, env_config)\n', (7261, 7286), False, 'import json\n'), ((7911, 7946), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (7922, 7946), False, 'import os\n'), ((7965, 8094), 'gym_unity.envs.UnityEnv', 'UnityEnv', (['env_name'], {'worker_id': 'worker', 'use_visual': '(False)', 'uint8_visual': '(False)', 'allow_multiple_visual_obs': '(False)', 'no_graphics': '(True)'}), '(env_name, worker_id=worker, use_visual=False, uint8_visual=False,\n allow_multiple_visual_obs=False, no_graphics=True)\n', (7973, 8094), False, 'from gym_unity.envs import UnityEnv\n'), ((8316, 8793), 'supervisors.supervisor.Supervisor', 'Supervisor', (['env', 'log_dir'], {'safety_distance': 'safety_distance_logical', 'visibility': 'visibility', 'safe_info': 'safe_info', 'safe_states': 'safe_states', 'supervisor': 'supervisor', 'coordinates_static_obstacles': 'coordinate_static', 
'lidar_num_bins': 'lidar_num_bins', 'which_static': 'index', 'record_svm_gp': '(False)', 'cbf_quadratic': 'cbf_quadratic', 'cbf_learn_online': 'cbf_learn_online', 'rewards': 'rewards', 'num_prior_data_cbf': 'num_prior_data_cbf', 'search_space': 'search_space', 'scale_reward': 'scale_reward'}), '(env, log_dir, safety_distance=safety_distance_logical,\n visibility=visibility, safe_info=safe_info, safe_states=safe_states,\n supervisor=supervisor, coordinates_static_obstacles=coordinate_static,\n lidar_num_bins=lidar_num_bins, which_static=index, record_svm_gp=False,\n cbf_quadratic=cbf_quadratic, cbf_learn_online=cbf_learn_online, rewards\n =rewards, num_prior_data_cbf=num_prior_data_cbf, search_space=\n search_space, scale_reward=scale_reward)\n', (8326, 8793), False, 'from supervisors.supervisor import Supervisor\n'), ((4016, 4231), 'numpy.array', 'np.array', (['[[1.2, -1.4], [-0.9, -0.25], [-1, 3.0], [-1, -2.2], [-1, -1.8], [-1, -2.0],\n [-1, -1.75], [-1, -1.55], [1.4, -1.23], [-1, -1.35], [-1, -0.55], [0.3,\n -2.2], [1.9, -0.4], [0.8, -0.99], [1.34, -2.2]]'], {}), '([[1.2, -1.4], [-0.9, -0.25], [-1, 3.0], [-1, -2.2], [-1, -1.8], [-\n 1, -2.0], [-1, -1.75], [-1, -1.55], [1.4, -1.23], [-1, -1.35], [-1, -\n 0.55], [0.3, -2.2], [1.9, -0.4], [0.8, -0.99], [1.34, -2.2]])\n', (4024, 4231), True, 'import numpy as np\n'), ((4430, 4623), 'numpy.array', 'np.array', (['[[1.75, -0.75], [-1.25, -1.25], [0.7, -1.4], [-1, -2.2], [-1.3, -0.75], [\n 0.3, -1.9], [1.3, -1.22], [-2.2, -1], [2.2, -1], [-1.7, -0.5], [2.2, -\n 2.7], [2.3, -2.9], [1.7, -2.2]]'], {}), '([[1.75, -0.75], [-1.25, -1.25], [0.7, -1.4], [-1, -2.2], [-1.3, -\n 0.75], [0.3, -1.9], [1.3, -1.22], [-2.2, -1], [2.2, -1], [-1.7, -0.5],\n [2.2, -2.7], [2.3, -2.9], [1.7, -2.2]])\n', (4438, 4623), True, 'import numpy as np\n'), ((4820, 4989), 'numpy.array', 'np.array', (['[[-0.7, -1], [0.55, -1.75], [0.9, -1.25], [1.7, -1], [1, -1.75], [1.4, -0.5\n ], [-1.1, -0.5], [2, -0.95], [-1.05, -1.25], [-1.25, -0.95], [-2.7, 
-1.5]]'], {}), '([[-0.7, -1], [0.55, -1.75], [0.9, -1.25], [1.7, -1], [1, -1.75], [\n 1.4, -0.5], [-1.1, -0.5], [2, -0.95], [-1.05, -1.25], [-1.25, -0.95], [\n -2.7, -1.5]])\n', (4828, 4989), True, 'import numpy as np\n'), ((5137, 5329), 'numpy.array', 'np.array', (['[[0.55, -2.15], [1.7, -0.71], [1.45, -1.15], [-0.95, -1.25], [0.7, -2.1], [\n -1.45, -1.15], [-2.05, -1.15], [-0.25, -0.75], [0.7, -0.9], [2.3, -2.9],\n [1.7, -2.2], [2.2, -2.7]]'], {}), '([[0.55, -2.15], [1.7, -0.71], [1.45, -1.15], [-0.95, -1.25], [0.7,\n -2.1], [-1.45, -1.15], [-2.05, -1.15], [-0.25, -0.75], [0.7, -0.9], [\n 2.3, -2.9], [1.7, -2.2], [2.2, -2.7]])\n', (5145, 5329), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 11:35, 11/07/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.utils.visualize import *
from numpy import sum, mean, sqrt
## Define your own fitness function
# Multi-objective but single fitness/target value. By using weighting method to convert from multiple objectives to single target
def obj_function(solution):
    """Compute three objective values for one candidate solution.

    The optimizer blends these into a single fitness value using the
    `obj_weight` weighting vector passed to BaseGA.

    solution - 1-D numpy array of decision variables.
    Returns a list of three floats [f1, f2, f3].
    """
    n_dims = len(solution)
    squared = solution ** 2
    obj_a = (sum(squared) - mean(solution)) / n_dims
    obj_b = sum(sqrt(abs(solution)))
    obj_c = sum(mean(squared) - solution)
    return [obj_a, obj_b, obj_c]
## Parameter settings
verbose = True
epoch = 100
pop_size = 50
# Per-dimension lower/upper bounds of the 8-dimensional search space.
lb1 = [-10, -5, -15, -20, -10, -15, -10, -30]
ub1 = [10, 5, 15, 20, 50, 30, 100, 85]
# obj_weight combines obj_function's three objective values into one fitness.
optimizer = BaseGA(obj_function, lb1, ub1, "min", verbose, epoch, pop_size, obj_weight=[0.2, 0.5, 0.3])
best_position, best_fitness, g_best_fit_list, c_best_fit_list = optimizer.train()
print(best_position)
## On the exploration and exploitation in popular swarm-based metaheuristic algorithms (the idea comes from this paper)
# The exploration/exploitation chart should be drawn for a single algorithm and a single fitness function.
export_explore_exploit_chart([optimizer.history_list_explore, optimizer.history_list_exploit])  # Draw exploration and exploitation chart
# Parameters for this function:
# data: list of arrays
#   + optimizer.history_list_explore -> list of exploration percentages
#   + optimizer.history_list_exploit -> list of exploitation percentages
# title: title of the figure
# list_legends: list of line names, default = ("Exploration %", "Exploitation %")
# list_styles: matplotlib API, default = ('-', '-')
# list_colors: matplotlib API, default = ('blue', 'orange')
# x_label: string, default = "#Iteration"
# y_label: string, default = "Percentage"
# filename: string, default = "explore_exploit_chart"
# exts: matplotlib API, default = (".png", ".pdf") --> saves the figure as both png and pdf
# verbose: show the figure in the Python IDE, default = True
# The diversity chart should be drawn for multiple algorithms on a single fitness function at the same time, to compare diversity spreading.
export_diversity_chart([optimizer.history_list_div], list_legends=['GA'])  # Draw diversity measurement chart
# Parameters for this function:
# data: list of arrays
#   + optimizer1.history_list_div -> list of diversity spreading for optimizer1
#   + optimizer2.history_list_div -> list of diversity spreading for optimizer2
# title: title of the figure
# list_legends: list, e.g. ("GA", "PSO",..)
# list_styles: matplotlib API, default = None
# list_colors: matplotlib API, default = None
# x_label: string, default = "#Iteration"
# y_label: string, default = "Diversity Measurement"
# filename: string, default = "diversity_chart"
# exts: matplotlib API, default = (".png", ".pdf") --> saves the figure as both png and pdf
# verbose: show the figure in the Python IDE, default = True
| [
"numpy.sum",
"mealpy.evolutionary_based.GA.BaseGA",
"numpy.mean"
] | [((1393, 1489), 'mealpy.evolutionary_based.GA.BaseGA', 'BaseGA', (['obj_function', 'lb1', 'ub1', '"""min"""', 'verbose', 'epoch', 'pop_size'], {'obj_weight': '[0.2, 0.5, 0.3]'}), "(obj_function, lb1, ub1, 'min', verbose, epoch, pop_size, obj_weight=\n [0.2, 0.5, 0.3])\n", (1399, 1489), False, 'from mealpy.evolutionary_based.GA import BaseGA\n'), ((1073, 1091), 'numpy.sum', 'sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (1076, 1091), False, 'from numpy import sum, mean, sqrt\n'), ((1094, 1108), 'numpy.mean', 'mean', (['solution'], {}), '(solution)\n', (1098, 1108), False, 'from numpy import sum, mean, sqrt\n'), ((1173, 1192), 'numpy.mean', 'mean', (['(solution ** 2)'], {}), '(solution ** 2)\n', (1177, 1192), False, 'from numpy import sum, mean, sqrt\n')] |
#!/usr/bin/env python
"""
This module does some of the math for doing ADMM.
All the 3D ADMM math is a Python version of the ideas and code in the
following references:
1. "High density 3D localization microscopy using sparse support recovery",
Ovesny et al., Optics Express, 2014.
2. "Computational methods in single molecule localization microscopy",
<NAME>, Thesis 2016.
3. https://github.com/zitmen/3densestorm
The expectation is that the contents of Cell objects are the FFTs of the PSFs
for different z values. This means that all the math that involves individual
PSF matrices is done element wise.
Hazen 11/19
"""
import numpy
class ADMMMathException(Exception):
    """Raised when Cell grid shapes or matrix shapes are inconsistent."""
    pass
##
## Cell class and cell math.
##
class Cells(object):
    """
    Class for storage and manipulation of cells of matrices
    all of which are the same size.

    Notes:
    1. The matrices are the FFTs of PSFs, or derivatives
       thereof.
    2. The indexing is [row, col] even though internally the
       matrices are stored by column, then by row.
    """
    def __init__(self, n_rows, n_cols, mx = None, my = None, **kwds):
        """
        n_rows - Number of matrices in a row (fast axis).
        n_cols - Number of matrices in a column (slow axis).
        mx - Matrix X size (slow axis).
        my - Matrix Y size (fast axis).

        If mx/my are not given they are inferred from the first
        matrix assigned via __setitem__.
        """
        super(Cells, self).__init__(**kwds)
        self.n_rows = n_rows
        self.n_cols = n_cols
        self.mx = mx
        self.my = my
        # cells is stored column-major: cells[col][row], all entries
        # initialized to None until assigned.
        self.cells = []
        for c in range(self.n_cols):
            row = []
            for r in range(self.n_rows):
                row.append(None)
            self.cells.append(row)
    def __getitem__(self, key):
        # key is a (row, col) pair; storage is cells[col][row].
        return self.cells[key[1]][key[0]]
    def __setitem__(self, key, val):
        # Lazily infer the cell matrix size from the first assignment.
        if (self.mx is None):
            self.mx = val.shape[0]
            if (len(val.shape) == 2):
                self.my = val.shape[1]
        # All cells must have the same shape; reject mismatches.
        if (self.mx != val.shape[0]):
            raise ADMMMathException("Unexpected matrix X size {0:d}, {1:d}!".format(self.mx, val.shape[0]))
        if (self.my is not None) and (self.my != val.shape[1]):
            raise ADMMMathException("Unexpected matrix Y size {0:d}, {1:d}!".format(self.my, val.shape[1]))
        self.cells[key[1]][key[0]] = val
    def getCellsShape(self):
        # Shape of the grid of cells, (n_rows, n_cols).
        return (self.n_rows, self.n_cols)
    def getMatrixShape(self):
        # Shape of a single cell matrix; 1-tuple when cells are 1-D.
        if (self.my is None):
            return (self.mx, )
        else:
            return (self.mx, self.my)
    def getNCols(self):
        return self.n_cols
    def getNRows(self):
        return self.n_rows
def cellToMatrix(A):
    """
    Assemble all the matrices stored in a Cells object into one
    large numpy matrix, cell (r, c) landing at block (c, r).
    """
    n_rows, n_cols = A.getCellsShape()
    mx, my = A.getMatrixShape()
    full = numpy.zeros((n_cols * mx, n_rows * my))
    for col in range(n_cols):
        c_off = col * mx
        for row in range(n_rows):
            r_off = row * my
            full[c_off:c_off + mx, r_off:r_off + my] = numpy.copy(A[row, col])
    return full
def copyCell(A):
    """
    Return a duplicate of a Cells object, with every cell matrix copied.
    """
    duplicate = Cells(*A.getCellsShape())
    for row in range(A.getNRows()):
        for col in range(A.getNCols()):
            duplicate[row, col] = numpy.copy(A[row, col])
    return duplicate
##
## ADMM Math
##
def lduG(G):
    """
    Decompose G into [L, D, U] Cell objects via block LDU (Schur)
    decomposition, working from the bottom-right corner upward.

    G a Cell object containing AtA + rhoI matrices. The A
    matrices are the PSF matrices, I is the identity matrix and
    rho is the ADMM timestep.

    Returns [L, D, U] where L is (block) unit lower triangular, D is
    (block) diagonal and U is (block) unit upper triangular. All cell
    math is element wise since the cells hold FFTs.
    """
    nr, nc = G.getCellsShape()
    mshape = G.getMatrixShape()
    if (nr != nc):
        raise ADMMMathException("G Cell must be square!")
    nmat = nr
    # Create empty M matrix (workspace for the Schur complements).
    M = Cells(nmat, nmat)
    for i in range(nmat):
        for j in range(nmat):
            M[i,j] = numpy.zeros_like(G[0,0])
    # Schur decomposition.
    D = Cells(nmat, nmat)
    L = Cells(nmat, nmat)
    U = Cells(nmat, nmat)
    for r in range(nmat-1,-1,-1):
        for c in range(nmat-1,-1,-1):
            k = max(r,c)
            M[r,c] = G[r,c]
            # Subtract contributions of already-eliminated blocks (s > k).
            for s in range(nmat-1,k,-1):
                M[r,c] = M[r,c] - M[r,s] * M[s,c] / M[s,s]
            if (r == c):
                # Diagonal: pivot goes to D, identity to L and U.
                D[r,c] = M[r,c]
                L[r,c] = identityMatrix(mshape)
                U[r,c] = identityMatrix(mshape)
            elif (r > c):
                # Below diagonal: multiplier goes to L.
                D[r,c] = numpy.zeros(mshape)
                L[r,c] = M[r,c] / M[k,k]
                U[r,c] = numpy.zeros(mshape)
            elif (r < c):
                # Above diagonal: multiplier goes to U.
                D[r,c] = numpy.zeros(mshape)
                L[r,c] = numpy.zeros(mshape)
                U[r,c] = M[r,c] / M[k,k]
    return [L, D, U]
def identityMatrix(mshape, scale = 1.0):
    """
    Returns FFT of the identity matrix (times scale).

    In FFT space the identity kernel is a constant, so this is simply
    an array of ones of shape `mshape`, multiplied by `scale`.

    mshape - Shape tuple for a single cell matrix.
    scale - Scalar multiplier, default 1.0.
    """
    # FIX: `numpy.complex` was deprecated in NumPy 1.20 and removed in
    # 1.24; it was an alias for the builtin `complex`, which is the
    # exact drop-in replacement (dtype complex128).
    return numpy.ones(mshape, dtype = complex)*scale
def invD(D):
    """
    Invert a (block) diagonal D Cell.

    Diagonal cells are inverted element wise (the cells hold FFTs,
    so all math is element wise); off-diagonal cells are zero.
    """
    n_rows, n_cols = D.getCellsShape()
    if (n_rows != n_cols):
        raise ADMMMathException("D Cell must be square!")
    size = n_rows
    result = Cells(size, size)
    for r in range(size):
        for c in range(size):
            result[r, c] = 1.0 / D[r, c] if (r == c) else numpy.zeros_like(D[0, 0])
    return result
def invL(L):
    """
    Calculate inverse of L Cell by forward Gaussian elimination.

    NOTE(review): no division by the pivot is performed, so this
    assumes L is unit (block) lower triangular — which holds for the
    L produced by lduG(), whose diagonal cells are identityMatrix().
    """
    nr, nc = L.getCellsShape()
    mshape = L.getMatrixShape()
    if (nr != nc):
        raise ADMMMathException("L Cell must be square!")
    nmat = nr
    # Work on a copy so L itself is not modified; accumulate the
    # inverse starting from the identity.
    l_tmp = copyCell(L)
    l_inv = Cells(nmat, nmat)
    for i in range(nmat):
        for j in range(nmat):
            if (i == j):
                l_inv[i,j] = identityMatrix(mshape)
            else:
                l_inv[i,j] = numpy.zeros_like(L[0,0])
    # Eliminate below-diagonal entries column by column, applying the
    # same row operations to the identity to build the inverse.
    for j in range(nmat-1):
        for i in range(j+1,nmat):
            tmp = l_tmp[i,j]
            for k in range(nmat):
                l_tmp[i,k] = l_tmp[i,k] - tmp * l_tmp[j,k]
                l_inv[i,k] = l_inv[i,k] - tmp * l_inv[j,k]
    return l_inv
def invU(U):
    """
    Calculate inverse of U Cell by backward Gaussian elimination.

    NOTE(review): no division by the pivot is performed, so this
    assumes U is unit (block) upper triangular — which holds for the
    U produced by lduG(), whose diagonal cells are identityMatrix().
    """
    nr, nc = U.getCellsShape()
    mshape = U.getMatrixShape()
    if (nr != nc):
        raise ADMMMathException("U Cell must be square!")
    nmat = nr
    # Work on a copy so U itself is not modified; accumulate the
    # inverse starting from the identity.
    u_tmp = copyCell(U)
    u_inv = Cells(nmat, nmat)
    for i in range(nmat):
        for j in range(nmat):
            if (i == j):
                u_inv[i,j] = identityMatrix(mshape)
            else:
                u_inv[i,j] = numpy.zeros_like(U[0,0])
    # Eliminate above-diagonal entries column by column (right to
    # left), mirroring the row operations on the identity.
    for j in range(nmat-1,0,-1):
        for i in range(j-1,-1,-1):
            tmp = u_tmp[i,j]
            for k in range(nmat):
                u_tmp[i,k] = u_tmp[i,k] - tmp * u_tmp[j,k]
                u_inv[i,k] = u_inv[i,k] - tmp * u_inv[j,k]
    return u_inv
def multiplyMatMat(A, B):
    """
    Multiply two Cell objects following matrix multiplication rules.

    Cell products are element wise because the cells hold FFTs
    (multiplication in FFT space is convolution in real space).
    NOTE(review): indexing follows the Cells [row, col] convention with
    column-major storage — verify orientation against callers.
    """
    nr_a, nc_a = A.getCellsShape()
    nr_b, nc_b = B.getCellsShape()
    if (nr_b != nc_a):
        raise ADMMMathException("A, B shapes don't match!")
    C = Cells(nr_b, nc_a)
    for r in range(nr_b):
        for c in range(nc_a):
            C[r,c] = numpy.zeros_like(A[0,0])
            # Sum of element-wise products over the shared index k.
            for k in range(nr_a):
                C[r,c] += A[k,c] * B[r,k]
    return C
def multiplyMatVec(A, v):
    """
    Multiply Cell object by a vector.

    The cells of A hold FFTs, so each product is performed by FFT-ing
    the relevant slice of v, multiplying element wise, inverse FFT-ing
    and keeping the real part. Three input layouts are supported,
    dispatched on v.ndim (see branch comments below).
    """
    nr_a, nc_a = A.getCellsShape()
    mshape = A.getMatrixShape()
    # V is a vector.
    if (v.ndim == 1):
        mx = mshape[0]
        if (len(mshape) != 1):
            raise ADMMMathException("A and v shapes don't match!")
        if ((nr_a*mx) != v.size):
            raise ADMMMathException("A and v sizes don't match!")
        b = numpy.zeros((nc_a*mx))
        for r in range(nr_a):
            # FFT each mx-long slice of v once, reuse across columns.
            v_fft = numpy.fft.fft(v[r*mx:(r+1)*mx])
            for c in range(nc_a):
                b[c*mx:(c+1)*mx] += numpy.real(numpy.fft.ifft(A[r,c] * v_fft))
        return b
    # V is a matrix.
    #
    # In this case A must be a vector and V is the size of a single cell.
    #
    elif (v.ndim == 2):
        mx, my = mshape
        if (mx != v.shape[0]):
            raise ADMMMathException("v shape[0] doesn't match A cell size!")
        if (my != v.shape[1]):
            raise ADMMMathException("v shape[1] doesn't match A cell size!")
        if (nr_a != 1):
            raise ADMMMathException("A must be a vector or scalar!")
        b = numpy.zeros((mx, my, nc_a))
        v_fft = numpy.fft.fft2(v)
        for r in range(nr_a):
            for c in range(nc_a):
                b[:,:,c] += numpy.real(numpy.fft.ifft2(A[r,c] * v_fft))
        return b
    # V is a matrix of matrices.
    #
    # This follows the current decon convention where the first two axises
    # are the image and that last axis is the z plane, so [x, y, z].
    #
    # FIXME: When nc_a = 1, maybe we should chop the last axis?
    #
    elif (v.ndim == 3):
        mx, my = mshape
        if (mx != v.shape[0]):
            raise ADMMMathException("v shape[0] doesn't match A cell size!")
        if (my != v.shape[1]):
            raise ADMMMathException("v shape[1] doesn't match A cell size!")
        if (nr_a != v.shape[2]):
            raise ADMMMathException("v shape[2] doesn't match A size!")
        b = numpy.zeros((mx, my, nc_a))
        for r in range(nr_a):
            # FFT each z plane of v once, reuse across columns.
            v_fft = numpy.fft.fft2(v[:,:,r])
            for c in range(nc_a):
                b[:,:,c] += numpy.real(numpy.fft.ifft2(A[r,c] * v_fft))
        return b
    else:
        raise ADMMMathException("v must be a vector or a matrix!")
def printCell(A):
    """
    Debug helper, prints every matrix stored in a Cells object,
    one per line with a blank line after each.
    """
    n_rows = A.getNRows()
    n_cols = A.getNCols()
    for row in range(n_rows):
        for col in range(n_cols):
            print(A[row, col])
            print()
def transpose(A):
    """
    Return the transpose of a Cells object.

    Cell indices are swapped and each cell is conjugated (conjugation
    in FFT space corresponds to transposing the underlying PSF matrix).
    """
    n_rows, n_cols = A.getCellsShape()
    result = Cells(n_cols, n_rows)
    for row in range(n_rows):
        for col in range(n_cols):
            result[col, row] = numpy.conj(A[row, col])
    return result
| [
"numpy.copy",
"numpy.ones",
"numpy.fft.ifft2",
"numpy.conj",
"numpy.fft.fft",
"numpy.fft.fft2",
"numpy.zeros",
"numpy.fft.ifft",
"numpy.zeros_like"
] | [((2841, 2872), 'numpy.zeros', 'numpy.zeros', (['(nc * mx, nr * my)'], {}), '((nc * mx, nr * my))\n', (2852, 2872), False, 'import numpy\n'), ((4757, 4796), 'numpy.ones', 'numpy.ones', (['mshape'], {'dtype': 'numpy.complex'}), '(mshape, dtype=numpy.complex)\n', (4767, 4796), False, 'import numpy\n'), ((7764, 7786), 'numpy.zeros', 'numpy.zeros', (['(nc_a * mx)'], {}), '(nc_a * mx)\n', (7775, 7786), False, 'import numpy\n'), ((2986, 3005), 'numpy.copy', 'numpy.copy', (['A[r, c]'], {}), '(A[r, c])\n', (2996, 3005), False, 'import numpy\n'), ((3221, 3240), 'numpy.copy', 'numpy.copy', (['A[i, j]'], {}), '(A[i, j])\n', (3231, 3240), False, 'import numpy\n'), ((3750, 3775), 'numpy.zeros_like', 'numpy.zeros_like', (['G[0, 0]'], {}), '(G[0, 0])\n', (3766, 3775), False, 'import numpy\n'), ((7223, 7248), 'numpy.zeros_like', 'numpy.zeros_like', (['A[0, 0]'], {}), '(A[0, 0])\n', (7239, 7248), False, 'import numpy\n'), ((7837, 7874), 'numpy.fft.fft', 'numpy.fft.fft', (['v[r * mx:(r + 1) * mx]'], {}), '(v[r * mx:(r + 1) * mx])\n', (7850, 7874), False, 'import numpy\n'), ((8496, 8523), 'numpy.zeros', 'numpy.zeros', (['(mx, my, nc_a)'], {}), '((mx, my, nc_a))\n', (8507, 8523), False, 'import numpy\n'), ((8540, 8557), 'numpy.fft.fft2', 'numpy.fft.fft2', (['v'], {}), '(v)\n', (8554, 8557), False, 'import numpy\n'), ((10079, 10098), 'numpy.conj', 'numpy.conj', (['A[r, c]'], {}), '(A[r, c])\n', (10089, 10098), False, 'import numpy\n'), ((5195, 5220), 'numpy.zeros_like', 'numpy.zeros_like', (['D[0, 0]'], {}), '(D[0, 0])\n', (5211, 5220), False, 'import numpy\n'), ((5728, 5753), 'numpy.zeros_like', 'numpy.zeros_like', (['L[0, 0]'], {}), '(L[0, 0])\n', (5744, 5753), False, 'import numpy\n'), ((6512, 6537), 'numpy.zeros_like', 'numpy.zeros_like', (['U[0, 0]'], {}), '(U[0, 0])\n', (6528, 6537), False, 'import numpy\n'), ((9364, 9391), 'numpy.zeros', 'numpy.zeros', (['(mx, my, nc_a)'], {}), '((mx, my, nc_a))\n', (9375, 9391), False, 'import numpy\n'), ((4337, 4356), 'numpy.zeros', 
'numpy.zeros', (['mshape'], {}), '(mshape)\n', (4348, 4356), False, 'import numpy\n'), ((4423, 4442), 'numpy.zeros', 'numpy.zeros', (['mshape'], {}), '(mshape)\n', (4434, 4442), False, 'import numpy\n'), ((7950, 7981), 'numpy.fft.ifft', 'numpy.fft.ifft', (['(A[r, c] * v_fft)'], {}), '(A[r, c] * v_fft)\n', (7964, 7981), False, 'import numpy\n'), ((9442, 9468), 'numpy.fft.fft2', 'numpy.fft.fft2', (['v[:, :, r]'], {}), '(v[:, :, r])\n', (9456, 9468), False, 'import numpy\n'), ((4507, 4526), 'numpy.zeros', 'numpy.zeros', (['mshape'], {}), '(mshape)\n', (4518, 4526), False, 'import numpy\n'), ((4552, 4571), 'numpy.zeros', 'numpy.zeros', (['mshape'], {}), '(mshape)\n', (4563, 4571), False, 'import numpy\n'), ((8661, 8693), 'numpy.fft.ifft2', 'numpy.fft.ifft2', (['(A[r, c] * v_fft)'], {}), '(A[r, c] * v_fft)\n', (8676, 8693), False, 'import numpy\n'), ((9540, 9572), 'numpy.fft.ifft2', 'numpy.fft.ifft2', (['(A[r, c] * v_fft)'], {}), '(A[r, c] * v_fft)\n', (9555, 9572), False, 'import numpy\n')] |
from sys import platform as sys_pf
# On macOS force the TkAgg backend before any pyplot import.
# NOTE(review): presumably to avoid the default macOS backend failing
# outside a framework build — confirm if this guard is still needed.
if sys_pf == 'darwin':
    import matplotlib
    matplotlib.use("TkAgg")
import unittest
import numpy.testing as np_test
from scripts.algorithms.polynomial_predictor import PolynomialPredictor
class PolynomialPredictorTests(unittest.TestCase):
    """Checks PolynomialPredictor forecasts on simple synthetic series."""

    def _assert_prediction(self, time_series, num_periods, expected, decimal):
        # Fit a predictor on the series and compare its forecast for the
        # next `num_periods` points against the expectation.
        predictor = PolynomialPredictor(time_series, num_periods)
        np_test.assert_almost_equal(
            predictor.predict_counts(), expected, decimal=decimal
        )

    def test_static_sequence(self):
        # A constant series should be forecast as the same constant.
        num_periods = 3
        self._assert_prediction([1.0] * 5, num_periods, [1] * num_periods, decimal=4)

    def test_linearly_increasing_sequence(self):
        # A roughly linear series should continue with the same slope.
        self._assert_prediction(
            [8.9, 11.0, 13.0, 15.1, 17.0, 18.9, 21.0],
            4,
            [23.0, 25.0, 27.0, 29.0],
            decimal=0,
        )

    def test_quadratically_increasing_sequence(self):
        # A quadratic series: hold out the last points and predict them.
        num_periods = 4
        values = [(x ** 2) - (3 * x) + 2 for x in range(15)]
        self._assert_prediction(
            values[:-num_periods], num_periods, values[-num_periods:], decimal=1
        )
| [
"matplotlib.use",
"numpy.testing.assert_almost_equal",
"scripts.algorithms.polynomial_predictor.PolynomialPredictor"
] | [((86, 109), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (100, 109), False, 'import matplotlib\n'), ((481, 536), 'scripts.algorithms.polynomial_predictor.PolynomialPredictor', 'PolynomialPredictor', (['time_series', 'num_predicted_periods'], {}), '(time_series, num_predicted_periods)\n', (500, 536), False, 'from scripts.algorithms.polynomial_predictor import PolynomialPredictor\n'), ((602, 680), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual_prediction', 'expected_prediction'], {'decimal': '(4)'}), '(actual_prediction, expected_prediction, decimal=4)\n', (629, 680), True, 'import numpy.testing as np_test\n'), ((904, 959), 'scripts.algorithms.polynomial_predictor.PolynomialPredictor', 'PolynomialPredictor', (['time_series', 'num_predicted_periods'], {}), '(time_series, num_predicted_periods)\n', (923, 959), False, 'from scripts.algorithms.polynomial_predictor import PolynomialPredictor\n'), ((1025, 1103), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual_prediction', 'expected_prediction'], {'decimal': '(0)'}), '(actual_prediction, expected_prediction, decimal=0)\n', (1052, 1103), True, 'import numpy.testing as np_test\n'), ((1401, 1456), 'scripts.algorithms.polynomial_predictor.PolynomialPredictor', 'PolynomialPredictor', (['time_series', 'num_predicted_periods'], {}), '(time_series, num_predicted_periods)\n', (1420, 1456), False, 'from scripts.algorithms.polynomial_predictor import PolynomialPredictor\n'), ((1522, 1600), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual_prediction', 'expected_prediction'], {'decimal': '(1)'}), '(actual_prediction, expected_prediction, decimal=1)\n', (1549, 1600), True, 'import numpy.testing as np_test\n')] |
"""Tests data processing functionality in src/aposteriori/create_frame_dataset.py"""
from pathlib import Path
import copy
import tempfile
from hypothesis import given, settings
from hypothesis.strategies import integers
import ampal
import ampal.geometry as g
import aposteriori.data_prep.create_frame_data_set as cfds
import h5py
import numpy as np
import numpy.testing as npt
import pytest
TEST_DATA_DIR = Path("tests/testing_files/pdb_files/")
@settings(deadline=1500)
@given(integers(min_value=0, max_value=214))
def test_create_residue_frame_cnocb_encoding(residue_number):
    """Check frame creation with CNOCB encoding for a hypothesis-drawn
    residue of 3qy1: plane alignment, Cb placement, and voxelisation."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    focus_residue = assembly[0][residue_number]
    # Make sure that residue correctly aligns peptide plane to XY
    cfds.align_to_residue_plane(focus_residue)
    cfds.encode_cb_to_ampal_residue(focus_residue)
    assert np.array_equal(
        focus_residue["CA"].array, (0, 0, 0,)
    ), "The CA atom should lie on the origin."
    assert np.isclose(focus_residue["N"].x, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["N"].z, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["C"].z, 0), "The carbon atom should lie on XY."
    # After alignment the Cb must land at a fixed position relative to
    # the peptide plane, independent of the residue.
    assert np.isclose(
        focus_residue["CB"].x, -0.741287356,
    ), f"The Cb has not been encoded at position X = -0.741287356"
    assert np.isclose(
        focus_residue["CB"].y, -0.53937931,
    ), f"The Cb has not been encoded at position Y = -0.53937931"
    assert np.isclose(
        focus_residue["CB"].z, -1.224287356,
    ), f"The Cb has not been encoded at position Z = -1.224287356"
    # Make sure that all relevant atoms are pulled into the frame
    frame_edge_length = 12.0
    voxels_per_side = 21
    centre = voxels_per_side // 2
    # Half the cube diagonal: the largest possible distance from the
    # origin to a point still inside the frame.
    max_dist = np.sqrt(((frame_edge_length / 2) ** 2) * 3)
    for atom in (
        a
        for a in assembly.get_atoms(ligands=False)
        if cfds.within_frame(frame_edge_length, a)
    ):
        assert g.distance(atom, (0, 0, 0)) <= max_dist, (
            "All atoms filtered by `within_frame` should be within "
            "`frame_edge_length/2` of the origin"
        )
    # Obtain atom encoder:
    codec = cfds.Codec.CNOCB()
    # Make sure that aligned residue sits on XY after it is discretized
    single_res_assembly = ampal.Assembly(
        molecules=ampal.Polypeptide(monomers=copy.deepcopy(focus_residue).backbone)
    )
    # Need to reassign the parent so that the residue is the only thing in the assembly
    single_res_assembly[0].parent = single_res_assembly
    single_res_assembly[0][0].parent = single_res_assembly[0]
    array = cfds.create_residue_frame(
        single_res_assembly[0][0], frame_edge_length, voxels_per_side, encode_cb=True, codec=codec)
    np.testing.assert_array_equal(array[centre, centre, centre], [True, False, False, False], err_msg="The central atom should be CA.")
    # N, CA, C, O plus the encoded Cb -> 5 occupied voxels.
    nonzero_indices = list(zip(*np.nonzero(array)))
    assert (
        len(nonzero_indices) == 5
    ), "There should be only 5 backbone atoms in this frame"
    nonzero_on_xy_indices = list(zip(*np.nonzero(array[:, :, centre])))
    assert (
        3 <= len(nonzero_on_xy_indices) <= 4
    ), "N, CA and C should lie on the xy plane."
@settings(deadline=1500)
@given(integers(min_value=0, max_value=214))
def test_create_residue_frame_backbone_only(residue_number):
    """Check frame creation with backbone-only (CNO) encoding for a
    hypothesis-drawn residue of 3qy1: plane alignment and voxelisation."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    focus_residue = assembly[0][residue_number]
    # Make sure that residue correctly aligns peptide plane to XY
    cfds.align_to_residue_plane(focus_residue)
    assert np.array_equal(
        focus_residue["CA"].array, (0, 0, 0,)
    ), "The CA atom should lie on the origin."
    assert np.isclose(focus_residue["N"].x, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["N"].z, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["C"].z, 0), "The carbon atom should lie on XY."
    # Make sure that all relevant atoms are pulled into the frame
    frame_edge_length = 12.0
    voxels_per_side = 21
    centre = voxels_per_side // 2
    # Half the cube diagonal: the largest possible distance from the
    # origin to a point still inside the frame.
    max_dist = np.sqrt(((frame_edge_length / 2) ** 2) * 3)
    for atom in (
        a
        for a in assembly.get_atoms(ligands=False)
        if cfds.within_frame(frame_edge_length, a)
    ):
        assert g.distance(atom, (0, 0, 0)) <= max_dist, (
            "All atoms filtered by `within_frame` should be within "
            "`frame_edge_length/2` of the origin"
        )
    # Make sure that aligned residue sits on XY after it is discretized
    single_res_assembly = ampal.Assembly(
        molecules=ampal.Polypeptide(monomers=copy.deepcopy(focus_residue).backbone)
    )
    # Need to reassign the parent so that the residue is the only thing in the assembly
    single_res_assembly[0].parent = single_res_assembly
    single_res_assembly[0][0].parent = single_res_assembly[0]
    # Obtain atom encoder:
    codec = cfds.Codec.CNO()
    array = cfds.create_residue_frame(
        single_res_assembly[0][0], frame_edge_length, voxels_per_side,
        encode_cb=False, codec=codec
    )
    np.testing.assert_array_equal(array[centre, centre, centre], [True, False, False], err_msg="The central atom should be CA.")
    # N, CA, C, O only (no Cb) -> 4 occupied voxels.
    nonzero_indices = list(zip(*np.nonzero(array)))
    assert (
        len(nonzero_indices) == 4
    ), "There should be only 4 backbone atoms in this frame"
    nonzero_on_xy_indices = list(zip(*np.nonzero(array[:, :, centre])))
    assert (
        3 <= len(nonzero_on_xy_indices) <= 4
    ), "N, CA and C should lie on the xy plane."
@given(integers(min_value=1))
def test_even_voxels_per_side(voxels_per_side):
    """make_frame_dataset must reject an even `voxels_per_side`."""
    frame_edge_length = 18.0
    # Force the hypothesis-drawn value to be even.
    if voxels_per_side % 2:
        voxels_per_side = voxels_per_side + 1
    # Obtain atom encoder:
    codec = cfds.Codec.CNO()
    with pytest.raises(AssertionError, match=r".*must be odd*"):
        _ = cfds.make_frame_dataset(
            structure_files=["eep"],
            output_folder=".",
            name="test_dataset",
            frame_edge_length=frame_edge_length,
            voxels_per_side=voxels_per_side,
            require_confirmation=False,
            encode_cb=True,
            codec=codec,
        )
def test_make_frame_dataset():
    """Tests the creation of a frame data set.

    Builds an HDF5 data set from 1ubq and checks that every stored
    frame matches the frame computed directly from the structure.
    """
    test_file = TEST_DATA_DIR / "1ubq.pdb"
    frame_edge_length = 18.0
    voxels_per_side = 31
    ampal_1ubq = ampal.load_pdb(str(test_file))
    # Strip side-chain atoms so the reference frames are backbone only.
    for atom in ampal_1ubq.get_atoms():
        if not cfds.default_atom_filter(atom):
            del atom.parent.atoms[atom.res_label]
            del atom
    with tempfile.TemporaryDirectory() as tmpdir:
        # Obtain atom encoder:
        codec = cfds.Codec.CNO()
        output_file_path = cfds.make_frame_dataset(
            structure_files=[test_file],
            output_folder=tmpdir,
            name="test_dataset",
            frame_edge_length=frame_edge_length,
            voxels_per_side=voxels_per_side,
            verbosity=1,
            require_confirmation=False,
            codec=codec,
        )
        with h5py.File(output_file_path, "r") as dataset:
            # 1ubq chain A has residues 1..76.
            for n in range(1, 77):
                # check that the frame for all the data frames match between the input
                # arrays and the ones that come out of the HDF5 data set
                residue_number = str(n)
                test_frame = cfds.create_residue_frame(
                    residue=ampal_1ubq["A"][residue_number],
                    frame_edge_length=frame_edge_length,
                    voxels_per_side=voxels_per_side,
                    encode_cb=False,
                    codec=codec,
                )
                hdf5_array = dataset["1ubq"]["A"][residue_number][()]
                npt.assert_array_equal(
                    hdf5_array,
                    test_frame,
                    err_msg=(
                        "The frame in the HDF5 data set should be the same as the "
                        "input frame."
                    ),
                )
def test_convert_atom_to_gaussian_density():
    """The optimized and non-optimized gaussian conversions must agree."""
    # Atom centred in its voxel (no sub-voxel modifiers):
    fast_frame = cfds.convert_atom_to_gaussian_density((0, 0, 0), 0.6, optimized=True)
    slow_frame = cfds.convert_atom_to_gaussian_density((0, 0, 0), 0.6, optimized=False)
    np.testing.assert_array_almost_equal(fast_frame, slow_frame, decimal=2)
    # Total density must match too.
    np.testing.assert_almost_equal(np.sum(slow_frame), np.sum(fast_frame))
    # Atom shifted half a voxel along x:
    fast_frame = cfds.convert_atom_to_gaussian_density((0.5, 0, 0), 0.6, optimized=True)
    slow_frame = cfds.convert_atom_to_gaussian_density((0.5, 0, 0), 0.6, optimized=False)
    np.testing.assert_array_almost_equal(fast_frame, slow_frame, decimal=2)
def test_make_frame_dataset_as_gaussian():
    """Tests the creation of a frame data set with gaussian voxels.

    Same round-trip check as test_make_frame_dataset, but with
    voxels_as_gaussian=True in both the data set and reference frames.
    """
    test_file = TEST_DATA_DIR / "1ubq.pdb"
    frame_edge_length = 18.0
    voxels_per_side = 31
    ampal_1ubq = ampal.load_pdb(str(test_file))
    # Strip side-chain atoms so the reference frames are backbone only.
    for atom in ampal_1ubq.get_atoms():
        if not cfds.default_atom_filter(atom):
            del atom.parent.atoms[atom.res_label]
            del atom
    with tempfile.TemporaryDirectory() as tmpdir:
        # Obtain atom encoder:
        codec = cfds.Codec.CNO()
        output_file_path = cfds.make_frame_dataset(
            structure_files=[test_file],
            output_folder=tmpdir,
            name="test_dataset",
            frame_edge_length=frame_edge_length,
            voxels_per_side=voxels_per_side,
            verbosity=1,
            require_confirmation=False,
            codec=codec,
            voxels_as_gaussian=True,
        )
        with h5py.File(output_file_path, "r") as dataset:
            # 1ubq chain A has residues 1..76.
            for n in range(1, 77):
                # check that the frame for all the data frames match between the input
                # arrays and the ones that come out of the HDF5 data set
                residue_number = str(n)
                test_frame = cfds.create_residue_frame(
                    residue=ampal_1ubq["A"][residue_number],
                    frame_edge_length=frame_edge_length,
                    voxels_per_side=voxels_per_side,
                    encode_cb=False,
                    codec=codec,
                    voxels_as_gaussian=True,
                )
                hdf5_array = dataset["1ubq"]["A"][residue_number][()]
                npt.assert_array_equal(
                    hdf5_array,
                    test_frame,
                    err_msg=(
                        "The frame in the HDF5 data set should be the same as the "
                        "input frame."
                    ),
                )
@settings(deadline=700)
@given(integers(min_value=0, max_value=214))
def test_default_atom_filter(residue_number: int):
    """default_atom_filter must keep exactly the N, CA, C, O atoms."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    focus_residue = assembly[0][residue_number]
    backbone_atoms = ("N", "CA", "C", "O")
    for atom in focus_residue:
        # Expected outcome: membership of the backbone atom set.
        filtered_atom = atom.res_label in backbone_atoms
        filtered_scenario = cfds.default_atom_filter(atom)
        assert filtered_atom == filtered_scenario, f"Expected {atom.res_label} to return {filtered_atom} after filter"
@settings(deadline=700)
@given(integers(min_value=0, max_value=214))
def test_cb_atom_filter(residue_number: int):
    """keep_sidechain_cb_atom_filter must keep N, CA, C, O and CB atoms."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    focus_residue = assembly[0][residue_number]
    backbone_atoms = ("N", "CA", "C", "O", "CB")
    for atom in focus_residue:
        # Expected outcome: membership of the backbone + CB atom set.
        filtered_atom = atom.res_label in backbone_atoms
        filtered_scenario = cfds.keep_sidechain_cb_atom_filter(atom)
        assert filtered_atom == filtered_scenario, f"Expected {atom.res_label} to return {filtered_atom} after filter"
def test_add_gaussian_at_position():
    """Gaussian-encoded atoms should be normalised, symmetric and per-channel.

    Adds Gaussian atoms to a (5, 5, 5, 5) frame (CNOCBCA codec) and checks
    that: the total density grows by ~1 per atom added, the density around an
    atom is symmetric (face neighbours, edge neighbours and corners match),
    atoms near the frame edge still contribute ~1, and each element is written
    into its own channel.
    """
    # Fixed: `np.float` was removed in NumPy >= 1.24; the builtin is equivalent.
    main_matrix = np.zeros((5, 5, 5, 5), dtype=float)
    modifiers_triple = (0, 0, 0)
    codec = cfds.Codec.CNOCBCA()
    secondary_matrix, atom_idx = codec.encode_gaussian_atom(
        "C", modifiers_triple
    )
    atom_coord = (1, 1, 1)
    added_matrix = cfds.add_gaussian_at_position(
        main_matrix, secondary_matrix[:, :, :, atom_idx], atom_coord, atom_idx
    )
    # One atom added, so the overall density should sum to ~1:
    np.testing.assert_array_almost_equal(np.sum(added_matrix), 1.0, decimal=2)
    # Check center:
    assert (
        0 < added_matrix[1, 1, 1][0] < 1
    ), f"The central atom should be 1 but got {added_matrix[1, 1, 1, 0]}."
    # Check middle points (in each direction so 6 total points):
    # +---+---+---+
    # | _ | X | _ |
    # | X | 0 | X |
    # | _ | X | _ |
    # +---+---+---+
    # Where 0 is the central atom; all face neighbours must match (0, 1, 1).
    face_ref = added_matrix[0, 1, 1, 0]
    for x, y, z in ((1, 0, 1), (1, 1, 0), (1, 1, 2), (1, 2, 1), (2, 1, 1)):
        np.testing.assert_array_almost_equal(
            added_matrix[x, y, z, 0],
            face_ref,
            decimal=2,
            err_msg=(
                f"The atom should be {face_ref} "
                f"but got {added_matrix[x, y, z, 0]}."
            ),
        )
    # Check inner corners (in each direction so 12 total points):
    # +---+---+---+
    # | X | _ | X |
    # | _ | 0 | _ |
    # | X | _ | X |
    # +---+---+---+
    # All edge neighbours must match (0, 0, 1).
    edge_ref = added_matrix[0, 0, 1, 0]
    for x, y, z in (
        (0, 1, 0), (0, 1, 2), (0, 2, 1),
        (1, 0, 0), (1, 0, 2), (1, 2, 0), (1, 2, 2),
        (2, 0, 1), (2, 1, 0), (2, 1, 2), (2, 2, 1),
    ):
        np.testing.assert_array_almost_equal(
            added_matrix[x, y, z, 0],
            edge_ref,
            decimal=4,
            err_msg=(
                f"The atom should be {edge_ref} "
                f"but got {added_matrix[x, y, z, 0]}."
            ),
        )
    # Check outer corners (in each direction so 8 total points):
    # +---+---+---+
    # | X | _ | X |
    # | _ | _ | _ |
    # | X | _ | X |
    # +---+---+---+
    # All cube corners must match (0, 0, 2).
    corner_ref = added_matrix[0, 0, 2, 0]
    for x, y, z in (
        (0, 2, 0), (0, 2, 2), (2, 0, 0), (2, 0, 2), (2, 2, 0), (2, 2, 2),
    ):
        np.testing.assert_array_almost_equal(
            added_matrix[x, y, z, 0],
            corner_ref,
            decimal=4,
            err_msg=(
                f"The atom should be {corner_ref} "
                f"but got {added_matrix[x, y, z, 0]}."
            ),
        )
    # Add additional point and check whether the sum is 2:
    atom_coord = (2, 2, 2)
    added_matrix = cfds.add_gaussian_at_position(
        added_matrix, secondary_matrix[:, :, :, atom_idx], atom_coord, atom_idx
    )
    np.testing.assert_array_almost_equal(np.sum(added_matrix), 2.0, decimal=2)
    # Add point in top left corner and check whether the normalization still
    # adds up to 1 per atom (density falling outside the frame is kept):
    atom_coord = (0, 0, 0)
    added_matrix = cfds.add_gaussian_at_position(
        main_matrix, secondary_matrix[:, :, :, atom_idx], atom_coord, atom_idx
    )
    np.testing.assert_array_almost_equal(np.sum(added_matrix), 3.0, decimal=2)
    np.testing.assert_array_less(added_matrix[0, 0, 0][0], 1)
    assert (
        0 < added_matrix[0, 0, 0][0] <= 1
    ), f"The central atom value should be between 0 and 1 but was {added_matrix[0, 0, 0][0]}"
    # Testing N, O, CA, CB atom channels. Adding atoms at (0, 0, 0) in
    # different channels; each addition raises the total density by ~1.
    for element, expected_sum in (("N", 4.0), ("O", 5.0), ("CA", 6.0), ("CB", 7.0)):
        element_matrix, element_idx = codec.encode_gaussian_atom(
            element, modifiers_triple
        )
        added_matrix = cfds.add_gaussian_at_position(
            main_matrix, element_matrix[:, :, :, element_idx], atom_coord, element_idx
        )
        np.testing.assert_array_almost_equal(
            np.sum(added_matrix), expected_sum, decimal=2
        )
        np.testing.assert_array_less(added_matrix[0, 0, 0][element_idx], 1)
        # Fixed: the original CB message indexed the int `CB_atom_idx`, which
        # would raise a TypeError if the assertion ever fired.
        assert 0 < added_matrix[0, 0, 0][element_idx] <= 1, (
            "The central atom value should be between 0 and 1 "
            f"but was {added_matrix[0, 0, 0][element_idx]}"
        )
def test_download_pdb_from_csv_file():
    """PDB structures listed in a CSV file should download to the out path.

    Runs the download twice: once with ``voxelise_all_states=False`` (one
    ``.pdb1`` biological-unit file per entry) and once with
    ``voxelise_all_states=True`` (6ct4 is expanded into ten per-state files
    ``6ct4_0.pdb`` .. ``6ct4_9.pdb`` — presumably one per NMR state; confirm
    against the implementation). Downloaded files are deleted afterwards.
    """
    download_csv = Path("tests/testing_files/csv_pdb_list/pdb_to_test.csv")
    test_file_paths = cfds.download_pdb_from_csv_file(
        download_csv,
        verbosity=1,
        pdb_outpath=TEST_DATA_DIR,
        workers=3,
        voxelise_all_states=False,
    )
    # filename -> code used in the failure message.
    expected_units = {
        "1qys.pdb1": "1QYS",
        "3qy1A.pdb1": "3QYA",
        "6ct4.pdb1": "6CT4",
    }
    for filename, pdb_code in expected_units.items():
        downloaded = TEST_DATA_DIR / filename
        assert (
            downloaded in test_file_paths
        ), f"Expected to find {downloaded} as part of the generated paths."
        assert downloaded.exists(), f"Expected download of {pdb_code} to return PDB file"
    # Delete files (was a side-effect tuple expression; plain statements are
    # clearer and don't build a throwaway tuple):
    for filename in expected_units:
        (TEST_DATA_DIR / filename).unlink()
    test_file_paths = cfds.download_pdb_from_csv_file(
        download_csv,
        verbosity=1,
        pdb_outpath=TEST_DATA_DIR,
        workers=3,
        voxelise_all_states=True,
    )
    for filename, pdb_code in (("1qys.pdb", "1QYS"), ("3qy1A.pdb", "3QYA")):
        downloaded = TEST_DATA_DIR / filename
        assert downloaded.exists(), f"Expected download of {pdb_code} to return PDB file"
        downloaded.unlink()
    for i in range(0, 10):
        pdb_code = f"6ct4_{i}.pdb"
        new_paths = TEST_DATA_DIR / pdb_code
        assert new_paths.exists(), f"Could not find path {new_paths} for {pdb_code}"
        new_paths.unlink()
def test_filter_structures_by_blacklist():
    """Structures whose code appears in the blacklist CSV should be dropped.

    3qy1A is listed in the blacklist, so it must be removed; 1qys and 6ct4
    must survive, and the input list itself must stay untouched.
    """
    blacklist_file = Path("tests/testing_files/filter/pdb_to_filter.csv")
    structure_files = [
        Path(name) for name in ("1qys.pdb1", "3qy1A.pdb1", "6ct4.pdb1")
    ]
    filtered_structures = cfds.filter_structures_by_blacklist(
        structure_files, blacklist_file
    )
    assert len(structure_files) == 3, f"Expected 3 structures to be in the list"
    assert (
        len(filtered_structures) == 2
    ), f"Expected 2 structures to be in the filtered list"
    assert Path("1qys.pdb1") in filtered_structures, f"Expected 1qys to be in the list"
    assert Path("6ct4.pdb1") in filtered_structures, f"Expected 6CT4 to be in the list"
    assert (
        Path("3qy1A.pdb1") not in filtered_structures
    ), f"Expected 3qy1A not to be in the list"
| [
"numpy.sqrt",
"aposteriori.data_prep.create_frame_data_set.Codec.CNOCBCA",
"aposteriori.data_prep.create_frame_data_set.encode_cb_to_ampal_residue",
"copy.deepcopy",
"numpy.testing.assert_array_less",
"numpy.testing.assert_array_almost_equal",
"pathlib.Path",
"hypothesis.settings",
"ampal.geometry.d... | [((410, 448), 'pathlib.Path', 'Path', (['"""tests/testing_files/pdb_files/"""'], {}), "('tests/testing_files/pdb_files/')\n", (414, 448), False, 'from pathlib import Path\n'), ((452, 475), 'hypothesis.settings', 'settings', ([], {'deadline': '(1500)'}), '(deadline=1500)\n', (460, 475), False, 'from hypothesis import given, settings\n'), ((3260, 3283), 'hypothesis.settings', 'settings', ([], {'deadline': '(1500)'}), '(deadline=1500)\n', (3268, 3283), False, 'from hypothesis import given, settings\n'), ((10687, 10709), 'hypothesis.settings', 'settings', ([], {'deadline': '(700)'}), '(deadline=700)\n', (10695, 10709), False, 'from hypothesis import given, settings\n'), ((11249, 11271), 'hypothesis.settings', 'settings', ([], {'deadline': '(700)'}), '(deadline=700)\n', (11257, 11271), False, 'from hypothesis import given, settings\n'), ((765, 807), 'aposteriori.data_prep.create_frame_data_set.align_to_residue_plane', 'cfds.align_to_residue_plane', (['focus_residue'], {}), '(focus_residue)\n', (792, 807), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((812, 858), 'aposteriori.data_prep.create_frame_data_set.encode_cb_to_ampal_residue', 'cfds.encode_cb_to_ampal_residue', (['focus_residue'], {}), '(focus_residue)\n', (843, 858), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((870, 922), 'numpy.array_equal', 'np.array_equal', (["focus_residue['CA'].array", '(0, 0, 0)'], {}), "(focus_residue['CA'].array, (0, 0, 0))\n", (884, 922), True, 'import numpy as np\n'), ((990, 1025), 'numpy.isclose', 'np.isclose', (["focus_residue['N'].x", '(0)'], {}), "(focus_residue['N'].x, 0)\n", (1000, 1025), True, 'import numpy as np\n'), ((1076, 1111), 'numpy.isclose', 'np.isclose', (["focus_residue['N'].z", '(0)'], {}), "(focus_residue['N'].z, 0)\n", (1086, 1111), True, 'import numpy as np\n'), ((1162, 1197), 'numpy.isclose', 'np.isclose', (["focus_residue['C'].z", '(0)'], {}), "(focus_residue['C'].z, 0)\n", (1172, 
1197), True, 'import numpy as np\n'), ((1246, 1293), 'numpy.isclose', 'np.isclose', (["focus_residue['CB'].x", '(-0.741287356)'], {}), "(focus_residue['CB'].x, -0.741287356)\n", (1256, 1293), True, 'import numpy as np\n'), ((1381, 1427), 'numpy.isclose', 'np.isclose', (["focus_residue['CB'].y", '(-0.53937931)'], {}), "(focus_residue['CB'].y, -0.53937931)\n", (1391, 1427), True, 'import numpy as np\n'), ((1514, 1561), 'numpy.isclose', 'np.isclose', (["focus_residue['CB'].z", '(-1.224287356)'], {}), "(focus_residue['CB'].z, -1.224287356)\n", (1524, 1561), True, 'import numpy as np\n'), ((1807, 1848), 'numpy.sqrt', 'np.sqrt', (['((frame_edge_length / 2) ** 2 * 3)'], {}), '((frame_edge_length / 2) ** 2 * 3)\n', (1814, 1848), True, 'import numpy as np\n'), ((2214, 2232), 'aposteriori.data_prep.create_frame_data_set.Codec.CNOCB', 'cfds.Codec.CNOCB', ([], {}), '()\n', (2230, 2232), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((2655, 2776), 'aposteriori.data_prep.create_frame_data_set.create_residue_frame', 'cfds.create_residue_frame', (['single_res_assembly[0][0]', 'frame_edge_length', 'voxels_per_side'], {'encode_cb': '(True)', 'codec': 'codec'}), '(single_res_assembly[0][0], frame_edge_length,\n voxels_per_side, encode_cb=True, codec=codec)\n', (2680, 2776), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((2786, 2922), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['array[centre, centre, centre]', '[True, False, False, False]'], {'err_msg': '"""The central atom should be CA."""'}), "(array[centre, centre, centre], [True, False, \n False, False], err_msg='The central atom should be CA.')\n", (2815, 2922), True, 'import numpy as np\n'), ((483, 519), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(0)', 'max_value': '(214)'}), '(min_value=0, max_value=214)\n', (491, 519), False, 'from hypothesis.strategies import integers\n'), ((3572, 3614), 
'aposteriori.data_prep.create_frame_data_set.align_to_residue_plane', 'cfds.align_to_residue_plane', (['focus_residue'], {}), '(focus_residue)\n', (3599, 3614), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((3626, 3678), 'numpy.array_equal', 'np.array_equal', (["focus_residue['CA'].array", '(0, 0, 0)'], {}), "(focus_residue['CA'].array, (0, 0, 0))\n", (3640, 3678), True, 'import numpy as np\n'), ((3746, 3781), 'numpy.isclose', 'np.isclose', (["focus_residue['N'].x", '(0)'], {}), "(focus_residue['N'].x, 0)\n", (3756, 3781), True, 'import numpy as np\n'), ((3832, 3867), 'numpy.isclose', 'np.isclose', (["focus_residue['N'].z", '(0)'], {}), "(focus_residue['N'].z, 0)\n", (3842, 3867), True, 'import numpy as np\n'), ((3918, 3953), 'numpy.isclose', 'np.isclose', (["focus_residue['C'].z", '(0)'], {}), "(focus_residue['C'].z, 0)\n", (3928, 3953), True, 'import numpy as np\n'), ((4160, 4201), 'numpy.sqrt', 'np.sqrt', (['((frame_edge_length / 2) ** 2 * 3)'], {}), '((frame_edge_length / 2) ** 2 * 3)\n', (4167, 4201), True, 'import numpy as np\n'), ((4978, 4994), 'aposteriori.data_prep.create_frame_data_set.Codec.CNO', 'cfds.Codec.CNO', ([], {}), '()\n', (4992, 4994), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((5007, 5129), 'aposteriori.data_prep.create_frame_data_set.create_residue_frame', 'cfds.create_residue_frame', (['single_res_assembly[0][0]', 'frame_edge_length', 'voxels_per_side'], {'encode_cb': '(False)', 'codec': 'codec'}), '(single_res_assembly[0][0], frame_edge_length,\n voxels_per_side, encode_cb=False, codec=codec)\n', (5032, 5129), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((5152, 5281), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['array[centre, centre, centre]', '[True, False, False]'], {'err_msg': '"""The central atom should be CA."""'}), "(array[centre, centre, centre], [True, False, \n False], err_msg='The central atom should be CA.')\n", (5181, 
5281), True, 'import numpy as np\n'), ((3291, 3327), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(0)', 'max_value': '(214)'}), '(min_value=0, max_value=214)\n', (3299, 3327), False, 'from hypothesis.strategies import integers\n'), ((5821, 5837), 'aposteriori.data_prep.create_frame_data_set.Codec.CNO', 'cfds.Codec.CNO', ([], {}), '()\n', (5835, 5837), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((5625, 5646), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(1)'}), '(min_value=1)\n', (5633, 5646), False, 'from hypothesis.strategies import integers\n'), ((8166, 8235), 'aposteriori.data_prep.create_frame_data_set.convert_atom_to_gaussian_density', 'cfds.convert_atom_to_gaussian_density', (['(0, 0, 0)', '(0.6)'], {'optimized': '(True)'}), '((0, 0, 0), 0.6, optimized=True)\n', (8203, 8235), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((8254, 8324), 'aposteriori.data_prep.create_frame_data_set.convert_atom_to_gaussian_density', 'cfds.convert_atom_to_gaussian_density', (['(0, 0, 0)', '(0.6)'], {'optimized': '(False)'}), '((0, 0, 0), 0.6, optimized=False)\n', (8291, 8324), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((8327, 8400), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['opt_frame', 'non_opt_frame'], {'decimal': '(2)'}), '(opt_frame, non_opt_frame, decimal=2)\n', (8363, 8400), True, 'import numpy as np\n'), ((8516, 8587), 'aposteriori.data_prep.create_frame_data_set.convert_atom_to_gaussian_density', 'cfds.convert_atom_to_gaussian_density', (['(0.5, 0, 0)', '(0.6)'], {'optimized': '(True)'}), '((0.5, 0, 0), 0.6, optimized=True)\n', (8553, 8587), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((8608, 8680), 'aposteriori.data_prep.create_frame_data_set.convert_atom_to_gaussian_density', 'cfds.convert_atom_to_gaussian_density', (['(0.5, 0, 0)', '(0.6)'], {'optimized': '(False)'}), '((0.5, 
0, 0), 0.6, optimized=False)\n', (8645, 8680), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((8685, 8758), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['opt_frame', 'non_opt_frame'], {'decimal': '(2)'}), '(opt_frame, non_opt_frame, decimal=2)\n', (8721, 8758), True, 'import numpy as np\n'), ((10717, 10753), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(0)', 'max_value': '(214)'}), '(min_value=0, max_value=214)\n', (10725, 10753), False, 'from hypothesis.strategies import integers\n'), ((11279, 11315), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(0)', 'max_value': '(214)'}), '(min_value=0, max_value=214)\n', (11287, 11315), False, 'from hypothesis.strategies import integers\n'), ((11876, 11914), 'numpy.zeros', 'np.zeros', (['(5, 5, 5, 5)'], {'dtype': 'np.float'}), '((5, 5, 5, 5), dtype=np.float)\n', (11884, 11914), True, 'import numpy as np\n'), ((11960, 11980), 'aposteriori.data_prep.create_frame_data_set.Codec.CNOCBCA', 'cfds.Codec.CNOCBCA', ([], {}), '()\n', (11978, 11980), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((12126, 12231), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['main_matrix', 'secondary_matrix[:, :, :, atom_idx]', 'atom_coord', 'atom_idx'], {}), '(main_matrix, secondary_matrix[:, :, :,\n atom_idx], atom_coord, atom_idx)\n', (12155, 12231), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((12667, 12874), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 0, 1, 0]', 'added_matrix[0, 1, 1, 0]'], {'decimal': '(2)', 'err_msg': 'f"""The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 0, 1, 0]}."""'}), "(added_matrix[1, 0, 1, 0], added_matrix\n [0, 1, 1, 0], decimal=2, err_msg=\n f'The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 0, 1, 0]}.'\n 
)\n", (12703, 12874), True, 'import numpy as np\n'), ((12864, 13071), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 1, 0, 0]', 'added_matrix[0, 1, 1, 0]'], {'decimal': '(2)', 'err_msg': 'f"""The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 1, 0, 0]}."""'}), "(added_matrix[1, 1, 0, 0], added_matrix\n [0, 1, 1, 0], decimal=2, err_msg=\n f'The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 1, 0, 0]}.'\n )\n", (12900, 13071), True, 'import numpy as np\n'), ((13061, 13268), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 1, 2, 0]', 'added_matrix[0, 1, 1, 0]'], {'decimal': '(2)', 'err_msg': 'f"""The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 1, 2, 0]}."""'}), "(added_matrix[1, 1, 2, 0], added_matrix\n [0, 1, 1, 0], decimal=2, err_msg=\n f'The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 1, 2, 0]}.'\n )\n", (13097, 13268), True, 'import numpy as np\n'), ((13258, 13465), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 2, 1, 0]', 'added_matrix[0, 1, 1, 0]'], {'decimal': '(2)', 'err_msg': 'f"""The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 2, 1, 0]}."""'}), "(added_matrix[1, 2, 1, 0], added_matrix\n [0, 1, 1, 0], decimal=2, err_msg=\n f'The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[1, 2, 1, 0]}.'\n )\n", (13294, 13465), True, 'import numpy as np\n'), ((13455, 13662), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 1, 1, 0]', 'added_matrix[0, 1, 1, 0]'], {'decimal': '(2)', 'err_msg': 'f"""The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[2, 1, 1, 0]}."""'}), "(added_matrix[2, 1, 1, 0], added_matrix\n [0, 1, 1, 0], decimal=2, err_msg=\n f'The atom should be {added_matrix[0, 1, 1, 0]} but got {main_matrix[2, 1, 1, 0]}.'\n 
)\n", (13491, 13662), True, 'import numpy as np\n'), ((13818, 14026), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[0, 1, 0, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[0, 1, 0, 0]}."""'}), "(added_matrix[0, 1, 0, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[0, 1, 0, 0]}.'\n )\n", (13854, 14026), True, 'import numpy as np\n'), ((14016, 14224), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[0, 1, 2, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[0, 1, 2, 0]}."""'}), "(added_matrix[0, 1, 2, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[0, 1, 2, 0]}.'\n )\n", (14052, 14224), True, 'import numpy as np\n'), ((14214, 14422), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[0, 2, 1, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[0, 2, 1, 0]}."""'}), "(added_matrix[0, 2, 1, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[0, 2, 1, 0]}.'\n )\n", (14250, 14422), True, 'import numpy as np\n'), ((14412, 14620), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 0, 0, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 0, 0, 0]}."""'}), "(added_matrix[1, 0, 0, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 0, 0, 
0]}.'\n )\n", (14448, 14620), True, 'import numpy as np\n'), ((14610, 14818), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 0, 2, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 0, 2, 0]}."""'}), "(added_matrix[1, 0, 2, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 0, 2, 0]}.'\n )\n", (14646, 14818), True, 'import numpy as np\n'), ((14808, 15016), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 2, 0, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 2, 0, 0]}."""'}), "(added_matrix[1, 2, 0, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 2, 0, 0]}.'\n )\n", (14844, 15016), True, 'import numpy as np\n'), ((15006, 15214), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[1, 2, 2, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 2, 2, 0]}."""'}), "(added_matrix[1, 2, 2, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[1, 2, 2, 0]}.'\n )\n", (15042, 15214), True, 'import numpy as np\n'), ((15204, 15412), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 0, 1, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 0, 1, 0]}."""'}), "(added_matrix[2, 0, 1, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got 
{added_matrix[2, 0, 1, 0]}.'\n )\n", (15240, 15412), True, 'import numpy as np\n'), ((15402, 15610), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 1, 0, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 1, 0, 0]}."""'}), "(added_matrix[2, 1, 0, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 1, 0, 0]}.'\n )\n", (15438, 15610), True, 'import numpy as np\n'), ((15600, 15808), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 1, 2, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 1, 2, 0]}."""'}), "(added_matrix[2, 1, 2, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 1, 2, 0]}.'\n )\n", (15636, 15808), True, 'import numpy as np\n'), ((15798, 16006), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 2, 1, 0]', 'added_matrix[0, 0, 1, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 2, 1, 0]}."""'}), "(added_matrix[2, 2, 1, 0], added_matrix\n [0, 0, 1, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 1, 0]} but got {added_matrix[2, 2, 1, 0]}.'\n )\n", (15834, 16006), True, 'import numpy as np\n'), ((16160, 16368), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[0, 2, 0, 0]', 'added_matrix[0, 0, 2, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[0, 2, 0, 0]}."""'}), "(added_matrix[0, 2, 0, 0], added_matrix\n [0, 0, 2, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 2, 0]} 
but got {added_matrix[0, 2, 0, 0]}.'\n )\n", (16196, 16368), True, 'import numpy as np\n'), ((16358, 16566), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[0, 2, 2, 0]', 'added_matrix[0, 0, 2, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[0, 2, 2, 0]}."""'}), "(added_matrix[0, 2, 2, 0], added_matrix\n [0, 0, 2, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[0, 2, 2, 0]}.'\n )\n", (16394, 16566), True, 'import numpy as np\n'), ((16556, 16764), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 0, 0, 0]', 'added_matrix[0, 0, 2, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 0, 0, 0]}."""'}), "(added_matrix[2, 0, 0, 0], added_matrix\n [0, 0, 2, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 0, 0, 0]}.'\n )\n", (16592, 16764), True, 'import numpy as np\n'), ((16754, 16962), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 0, 2, 0]', 'added_matrix[0, 0, 2, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 0, 2, 0]}."""'}), "(added_matrix[2, 0, 2, 0], added_matrix\n [0, 0, 2, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 0, 2, 0]}.'\n )\n", (16790, 16962), True, 'import numpy as np\n'), ((16952, 17160), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 2, 0, 0]', 'added_matrix[0, 0, 2, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 2, 0, 0]}."""'}), "(added_matrix[2, 2, 0, 0], added_matrix\n [0, 0, 2, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 
0, 2, 0]} but got {added_matrix[2, 2, 0, 0]}.'\n )\n", (16988, 17160), True, 'import numpy as np\n'), ((17150, 17358), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['added_matrix[2, 2, 2, 0]', 'added_matrix[0, 0, 2, 0]'], {'decimal': '(4)', 'err_msg': 'f"""The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 2, 2, 0]}."""'}), "(added_matrix[2, 2, 2, 0], added_matrix\n [0, 0, 2, 0], decimal=4, err_msg=\n f'The atom should be {added_matrix[0, 0, 2, 0]} but got {added_matrix[2, 2, 2, 0]}.'\n )\n", (17186, 17358), True, 'import numpy as np\n'), ((17449, 17555), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['added_matrix', 'secondary_matrix[:, :, :, atom_idx]', 'atom_coord', 'atom_idx'], {}), '(added_matrix, secondary_matrix[:, :, :,\n atom_idx], atom_coord, atom_idx)\n', (17478, 17555), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((17903, 18008), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['main_matrix', 'secondary_matrix[:, :, :, atom_idx]', 'atom_coord', 'atom_idx'], {}), '(main_matrix, secondary_matrix[:, :, :,\n atom_idx], atom_coord, atom_idx)\n', (17932, 18008), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((18086, 18143), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['added_matrix[0, 0, 0][0]', '(1)'], {}), '(added_matrix[0, 0, 0][0], 1)\n', (18114, 18143), True, 'import numpy as np\n'), ((18490, 18601), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['main_matrix', 'N_secondary_matrix[:, :, :, N_atom_idx]', 'atom_coord', 'N_atom_idx'], {}), '(main_matrix, N_secondary_matrix[:, :, :,\n N_atom_idx], atom_coord, N_atom_idx)\n', (18519, 18601), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((18679, 18745), 
'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['added_matrix[0, 0, 0][N_atom_idx]', '(1)'], {}), '(added_matrix[0, 0, 0][N_atom_idx], 1)\n', (18707, 18745), True, 'import numpy as np\n'), ((19019, 19130), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['main_matrix', 'O_secondary_matrix[:, :, :, O_atom_idx]', 'atom_coord', 'O_atom_idx'], {}), '(main_matrix, O_secondary_matrix[:, :, :,\n O_atom_idx], atom_coord, O_atom_idx)\n', (19048, 19130), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((19208, 19274), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['added_matrix[0, 0, 0][O_atom_idx]', '(1)'], {}), '(added_matrix[0, 0, 0][O_atom_idx], 1)\n', (19236, 19274), True, 'import numpy as np\n'), ((19551, 19665), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['main_matrix', 'CA_secondary_matrix[:, :, :, CA_atom_idx]', 'atom_coord', 'CA_atom_idx'], {}), '(main_matrix, CA_secondary_matrix[:, :, :,\n CA_atom_idx], atom_coord, CA_atom_idx)\n', (19580, 19665), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((19743, 19810), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['added_matrix[0, 0, 0][CA_atom_idx]', '(1)'], {}), '(added_matrix[0, 0, 0][CA_atom_idx], 1)\n', (19771, 19810), True, 'import numpy as np\n'), ((20089, 20203), 'aposteriori.data_prep.create_frame_data_set.add_gaussian_at_position', 'cfds.add_gaussian_at_position', (['main_matrix', 'CB_secondary_matrix[:, :, :, CB_atom_idx]', 'atom_coord', 'CB_atom_idx'], {}), '(main_matrix, CB_secondary_matrix[:, :, :,\n CB_atom_idx], atom_coord, CB_atom_idx)\n', (20118, 20203), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((20281, 20348), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['added_matrix[0, 0, 0][CB_atom_idx]', '(1)'], {}), '(added_matrix[0, 
0, 0][CB_atom_idx], 1)\n', (20309, 20348), True, 'import numpy as np\n'), ((20563, 20619), 'pathlib.Path', 'Path', (['"""tests/testing_files/csv_pdb_list/pdb_to_test.csv"""'], {}), "('tests/testing_files/csv_pdb_list/pdb_to_test.csv')\n", (20567, 20619), False, 'from pathlib import Path\n'), ((20642, 20770), 'aposteriori.data_prep.create_frame_data_set.download_pdb_from_csv_file', 'cfds.download_pdb_from_csv_file', (['download_csv'], {'verbosity': '(1)', 'pdb_outpath': 'TEST_DATA_DIR', 'workers': '(3)', 'voxelise_all_states': '(False)'}), '(download_csv, verbosity=1, pdb_outpath=\n TEST_DATA_DIR, workers=3, voxelise_all_states=False)\n', (20673, 20770), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((21806, 21933), 'aposteriori.data_prep.create_frame_data_set.download_pdb_from_csv_file', 'cfds.download_pdb_from_csv_file', (['download_csv'], {'verbosity': '(1)', 'pdb_outpath': 'TEST_DATA_DIR', 'workers': '(3)', 'voxelise_all_states': '(True)'}), '(download_csv, verbosity=1, pdb_outpath=\n TEST_DATA_DIR, workers=3, voxelise_all_states=True)\n', (21837, 21933), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((22576, 22628), 'pathlib.Path', 'Path', (['"""tests/testing_files/filter/pdb_to_filter.csv"""'], {}), "('tests/testing_files/filter/pdb_to_filter.csv')\n", (22580, 22628), False, 'from pathlib import Path\n'), ((22779, 22847), 'aposteriori.data_prep.create_frame_data_set.filter_structures_by_blacklist', 'cfds.filter_structures_by_blacklist', (['structure_files', 'blacklist_file'], {}), '(structure_files, blacklist_file)\n', (22814, 22847), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((5847, 5900), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '""".*must be odd*"""'}), "(AssertionError, match='.*must be odd*')\n", (5860, 5900), False, 'import pytest\n'), ((5930, 6155), 'aposteriori.data_prep.create_frame_data_set.make_frame_dataset', 'cfds.make_frame_dataset', ([], 
{'structure_files': "['eep']", 'output_folder': '"""."""', 'name': '"""test_dataset"""', 'frame_edge_length': 'frame_edge_length', 'voxels_per_side': 'voxels_per_side', 'require_confirmation': '(False)', 'encode_cb': '(True)', 'codec': 'codec'}), "(structure_files=['eep'], output_folder='.', name=\n 'test_dataset', frame_edge_length=frame_edge_length, voxels_per_side=\n voxels_per_side, require_confirmation=False, encode_cb=True, codec=codec)\n", (5953, 6155), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((6648, 6677), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6675, 6677), False, 'import tempfile\n'), ((6736, 6752), 'aposteriori.data_prep.create_frame_data_set.Codec.CNO', 'cfds.Codec.CNO', ([], {}), '()\n', (6750, 6752), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((6780, 7012), 'aposteriori.data_prep.create_frame_data_set.make_frame_dataset', 'cfds.make_frame_dataset', ([], {'structure_files': '[test_file]', 'output_folder': 'tmpdir', 'name': '"""test_dataset"""', 'frame_edge_length': 'frame_edge_length', 'voxels_per_side': 'voxels_per_side', 'verbosity': '(1)', 'require_confirmation': '(False)', 'codec': 'codec'}), "(structure_files=[test_file], output_folder=tmpdir,\n name='test_dataset', frame_edge_length=frame_edge_length,\n voxels_per_side=voxels_per_side, verbosity=1, require_confirmation=\n False, codec=codec)\n", (6803, 7012), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((8436, 8457), 'numpy.sum', 'np.sum', (['non_opt_frame'], {}), '(non_opt_frame)\n', (8442, 8457), True, 'import numpy as np\n'), ((8459, 8476), 'numpy.sum', 'np.sum', (['opt_frame'], {}), '(opt_frame)\n', (8465, 8476), True, 'import numpy as np\n'), ((9167, 9196), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9194, 9196), False, 'import tempfile\n'), ((9255, 9271), 'aposteriori.data_prep.create_frame_data_set.Codec.CNO', 'cfds.Codec.CNO', 
([], {}), '()\n', (9269, 9271), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((9299, 9556), 'aposteriori.data_prep.create_frame_data_set.make_frame_dataset', 'cfds.make_frame_dataset', ([], {'structure_files': '[test_file]', 'output_folder': 'tmpdir', 'name': '"""test_dataset"""', 'frame_edge_length': 'frame_edge_length', 'voxels_per_side': 'voxels_per_side', 'verbosity': '(1)', 'require_confirmation': '(False)', 'codec': 'codec', 'voxels_as_gaussian': '(True)'}), "(structure_files=[test_file], output_folder=tmpdir,\n name='test_dataset', frame_edge_length=frame_edge_length,\n voxels_per_side=voxels_per_side, verbosity=1, require_confirmation=\n False, codec=codec, voxels_as_gaussian=True)\n", (9322, 9556), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((11096, 11126), 'aposteriori.data_prep.create_frame_data_set.default_atom_filter', 'cfds.default_atom_filter', (['atom'], {}), '(atom)\n', (11120, 11126), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((11659, 11699), 'aposteriori.data_prep.create_frame_data_set.keep_sidechain_cb_atom_filter', 'cfds.keep_sidechain_cb_atom_filter', (['atom'], {}), '(atom)\n', (11693, 11699), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((12292, 12312), 'numpy.sum', 'np.sum', (['added_matrix'], {}), '(added_matrix)\n', (12298, 12312), True, 'import numpy as np\n'), ((17591, 17611), 'numpy.sum', 'np.sum', (['added_matrix'], {}), '(added_matrix)\n', (17597, 17611), True, 'import numpy as np\n'), ((18044, 18064), 'numpy.sum', 'np.sum', (['added_matrix'], {}), '(added_matrix)\n', (18050, 18064), True, 'import numpy as np\n'), ((18637, 18657), 'numpy.sum', 'np.sum', (['added_matrix'], {}), '(added_matrix)\n', (18643, 18657), True, 'import numpy as np\n'), ((19166, 19186), 'numpy.sum', 'np.sum', (['added_matrix'], {}), '(added_matrix)\n', (19172, 19186), True, 'import numpy as np\n'), ((19701, 19721), 'numpy.sum', 'np.sum', 
(['added_matrix'], {}), '(added_matrix)\n', (19707, 19721), True, 'import numpy as np\n'), ((20239, 20259), 'numpy.sum', 'np.sum', (['added_matrix'], {}), '(added_matrix)\n', (20245, 20259), True, 'import numpy as np\n'), ((23064, 23081), 'pathlib.Path', 'Path', (['"""1qys.pdb1"""'], {}), "('1qys.pdb1')\n", (23068, 23081), False, 'from pathlib import Path\n'), ((23152, 23169), 'pathlib.Path', 'Path', (['"""6ct4.pdb1"""'], {}), "('6ct4.pdb1')\n", (23156, 23169), False, 'from pathlib import Path\n'), ((23250, 23268), 'pathlib.Path', 'Path', (['"""3qy1A.pdb1"""'], {}), "('3qy1A.pdb1')\n", (23254, 23268), False, 'from pathlib import Path\n'), ((1941, 1980), 'aposteriori.data_prep.create_frame_data_set.within_frame', 'cfds.within_frame', (['frame_edge_length', 'a'], {}), '(frame_edge_length, a)\n', (1958, 1980), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((2003, 2030), 'ampal.geometry.distance', 'g.distance', (['atom', '(0, 0, 0)'], {}), '(atom, (0, 0, 0))\n', (2013, 2030), True, 'import ampal.geometry as g\n'), ((4294, 4333), 'aposteriori.data_prep.create_frame_data_set.within_frame', 'cfds.within_frame', (['frame_edge_length', 'a'], {}), '(frame_edge_length, a)\n', (4311, 4333), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((4356, 4383), 'ampal.geometry.distance', 'g.distance', (['atom', '(0, 0, 0)'], {}), '(atom, (0, 0, 0))\n', (4366, 4383), True, 'import ampal.geometry as g\n'), ((6536, 6566), 'aposteriori.data_prep.create_frame_data_set.default_atom_filter', 'cfds.default_atom_filter', (['atom'], {}), '(atom)\n', (6560, 6566), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((7120, 7152), 'h5py.File', 'h5py.File', (['output_file_path', '"""r"""'], {}), "(output_file_path, 'r')\n", (7129, 7152), False, 'import h5py\n'), ((9055, 9085), 'aposteriori.data_prep.create_frame_data_set.default_atom_filter', 'cfds.default_atom_filter', (['atom'], {}), '(atom)\n', (9079, 9085), True, 'import 
aposteriori.data_prep.create_frame_data_set as cfds\n'), ((9676, 9708), 'h5py.File', 'h5py.File', (['output_file_path', '"""r"""'], {}), "(output_file_path, 'r')\n", (9685, 9708), False, 'import h5py\n'), ((22742, 22751), 'pathlib.Path', 'Path', (['pdb'], {}), '(pdb)\n', (22746, 22751), False, 'from pathlib import Path\n'), ((2950, 2967), 'numpy.nonzero', 'np.nonzero', (['array'], {}), '(array)\n', (2960, 2967), True, 'import numpy as np\n'), ((3116, 3147), 'numpy.nonzero', 'np.nonzero', (['array[:, :, centre]'], {}), '(array[:, :, centre])\n', (3126, 3147), True, 'import numpy as np\n'), ((5309, 5326), 'numpy.nonzero', 'np.nonzero', (['array'], {}), '(array)\n', (5319, 5326), True, 'import numpy as np\n'), ((5475, 5506), 'numpy.nonzero', 'np.nonzero', (['array[:, :, centre]'], {}), '(array[:, :, centre])\n', (5485, 5506), True, 'import numpy as np\n'), ((7429, 7603), 'aposteriori.data_prep.create_frame_data_set.create_residue_frame', 'cfds.create_residue_frame', ([], {'residue': "ampal_1ubq['A'][residue_number]", 'frame_edge_length': 'frame_edge_length', 'voxels_per_side': 'voxels_per_side', 'encode_cb': '(False)', 'codec': 'codec'}), "(residue=ampal_1ubq['A'][residue_number],\n frame_edge_length=frame_edge_length, voxels_per_side=voxels_per_side,\n encode_cb=False, codec=codec)\n", (7454, 7603), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((7801, 7933), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hdf5_array', 'test_frame'], {'err_msg': '"""The frame in the HDF5 data set should be the same as the input frame."""'}), "(hdf5_array, test_frame, err_msg=\n 'The frame in the HDF5 data set should be the same as the input frame.')\n", (7823, 7933), True, 'import numpy.testing as npt\n'), ((9985, 10184), 'aposteriori.data_prep.create_frame_data_set.create_residue_frame', 'cfds.create_residue_frame', ([], {'residue': "ampal_1ubq['A'][residue_number]", 'frame_edge_length': 'frame_edge_length', 'voxels_per_side': 
'voxels_per_side', 'encode_cb': '(False)', 'codec': 'codec', 'voxels_as_gaussian': '(True)'}), "(residue=ampal_1ubq['A'][residue_number],\n frame_edge_length=frame_edge_length, voxels_per_side=voxels_per_side,\n encode_cb=False, codec=codec, voxels_as_gaussian=True)\n", (10010, 10184), True, 'import aposteriori.data_prep.create_frame_data_set as cfds\n'), ((10402, 10534), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['hdf5_array', 'test_frame'], {'err_msg': '"""The frame in the HDF5 data set should be the same as the input frame."""'}), "(hdf5_array, test_frame, err_msg=\n 'The frame in the HDF5 data set should be the same as the input frame.')\n", (10424, 10534), True, 'import numpy.testing as npt\n'), ((2392, 2420), 'copy.deepcopy', 'copy.deepcopy', (['focus_residue'], {}), '(focus_residue)\n', (2405, 2420), False, 'import copy\n'), ((4688, 4716), 'copy.deepcopy', 'copy.deepcopy', (['focus_residue'], {}), '(focus_residue)\n', (4701, 4716), False, 'import copy\n')] |
import time
import numpy
def getNotes():
    """Return a hard-coded mapping of note id -> note record.

    Each record carries its own id, the owning user, a stringified
    numpy array as content, and a creation timestamp in epoch seconds.
    """
    def make_note(note_id, user_id, values, age_seconds):
        # age_seconds shifts createdAt into the past (0 = "now").
        return {
            "noteId": note_id,
            "userId": user_id,
            "content": str(numpy.array(values)),
            "createdAt": int(time.time() - age_seconds),
        }

    return {
        "id1": make_note("id1", "user1", [1, 2, 3, 4], 0),
        "id2": make_note("id2", "user2", [5, 6, 7, 8], 1000),
    }
| [
"numpy.array",
"time.time"
] | [((157, 182), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (168, 182), False, 'import numpy\n'), ((209, 220), 'time.time', 'time.time', ([], {}), '()\n', (218, 220), False, 'import time\n'), ((336, 361), 'numpy.array', 'numpy.array', (['[5, 6, 7, 8]'], {}), '([5, 6, 7, 8])\n', (347, 361), False, 'import numpy\n'), ((388, 399), 'time.time', 'time.time', ([], {}), '()\n', (397, 399), False, 'import time\n')] |
import os
import shutil
import numpy as np
import pandas as pd
import scipy.integrate, scipy.stats, scipy.optimize, scipy.signal
from scipy.stats import mannwhitneyu
import statsmodels.formula.api as smf
import pystan
def clean_folder(folder):
    """Create a new folder, or if the folder already exists,
    delete all containing files.

    Args:
        folder (string): Path to folder
    """
    if os.path.isdir(folder):
        shutil.rmtree(folder)
    # exist_ok=True tolerates the folder reappearing between the rmtree
    # above and this call.  (The original code compared e.errno against
    # errno.EEXIST but never imported errno, so any OSError here raised
    # NameError instead of being handled.)
    os.makedirs(folder, exist_ok=True)
def get_data_for_stan(y):
    """Package the series *y* into the dictionary layout expected by
    the Stan estimation routines, separating observed from missing
    entries.

    Args:
        y (np vector): Data series for Bayesian filtering

    Returns:
        dict: Data for Stan estimation
    """
    assert y.ndim == 1, \
        "y must be a vector"
    assert len(y) > 0, \
        "y must have positive length"
    assert isinstance(y, np.ndarray), \
        "y must be a numpy array"

    missing_mask = np.isnan(y)
    N_mis = np.sum(missing_mask)
    N_obs = len(pd.Series(y).dropna())
    # Stan uses 1-based indexing, hence the "+ 1" shift below.
    ii_mis = [int(pos) + 1 for pos in np.flatnonzero(missing_mask)]
    ii_obs = [int(pos) + 1 for pos in np.flatnonzero(~missing_mask)]
    return {'N_obs': N_obs,
            'N_mis': N_mis,
            'ii_obs': ii_obs,
            'ii_mis': ii_mis,
            'y_obs': pd.Series(y).dropna()}
def estimate_R(y, gamma, stm_missing, stm_no_missing, num_iter, num_chains, num_warmup, rng, sig_levels, full_output = False):
    """Estimate the reproduction number R via Bayesian Kalman smoothing
    of the growth-rate series of infected individuals.

    Args:
        y (np array): Data series for the growth rate of infected individuals
        gamma (double): Inverse of average infectiousness duration
        stm_missing (pickle): Compiled Stan model (for case with missing data)
        stm_no_missing (pickle): Compiled Stan model (for case without missing data)
        num_iter (int): Number of MCMC iterations
        num_chains (int): Number of MCMC chains
        num_warmup (int): Number of warmup periods
        rng (obj): Passed as ``seed`` to the Stan sampler (presumably a
            seed or numpy random state -- confirm against pystan docs)
        sig_levels (list): Significance levels (integers, in percent)
            for the credible bounds
        full_output (bool, optional): If True, also include the full
            Stan fit object under key 'stan_fit'

    Returns:
        dict: point estimate 'R'; per-level bounds 'ub_{100-a}' /
        'lb_{100-a}'; 'signal_to_noise'; 'var_irregular'; convergence
        diagnostics 'n_eff_pct' and 'Rhat_diff'; optionally 'stan_fit'.
    """
    assert y.ndim == 1, \
        "y must be a vector"
    assert len(y) > 0, \
        "y must have positive length"
    assert isinstance(y, np.ndarray), \
        "y must be a numpy array"
    assert isinstance(num_chains, int) and isinstance(num_iter, int) and isinstance(num_warmup, int), \
        "num_chains, num_iter, and num_warmup must be integers"
    assert num_chains > 0 and num_iter > 0 and num_warmup > 0, \
        "num_chains, num_iter, and num_warmup must be positive"
    assert len(sig_levels) >= 1 and all(isinstance(x, int) for x in sig_levels), \
        "sig_levels must be a list with only integers"
    # Get data in Stan format
    s_data = get_data_for_stan(y)
    # Estimate model -- pick the Stan program matching whether the
    # series contains missing observations.
    if np.sum(np.isnan(y)) > 0:
        fit = stm_missing.sampling(data = s_data,
                                   iter = num_iter,
                                   chains = num_chains,
                                   warmup = num_warmup,
                                   verbose = False,
                                   seed = rng)
    else:
        fit = stm_no_missing.sampling(data = s_data,
                                      iter = num_iter,
                                      chains = num_chains,
                                      warmup = num_warmup,
                                      verbose = False,
                                      seed = rng)
    fit_res = fit.extract(permuted = True)
    # Collect results
    res = {}
    # Map the smoothed growth rate mu into R via R = 1 + mu / gamma.
    res['R'] = 1 + 1 / gamma * fit_res['mu'].mean(axis = 0)
    for aa in sig_levels:
        # (100 - aa)% credible interval from the posterior draws of mu.
        ub = 1 + 1 / gamma * np.percentile(fit_res['mu'], axis = 0, q = 100 - aa / 2.0)
        # Lower bound is floored at zero.
        lb = np.maximum(1 + 1 / gamma * np.percentile(fit_res['mu'], axis = 0, q = aa / 2.0), 0.0)
        res['ub_{}'.format(100 - aa)] = ub
        res['lb_{}'.format(100 - aa)] = lb
    res['signal_to_noise'] = fit_res['signal_to_noise'].mean()
    res['var_irregular'] = (1 / fit_res['precision_irregular']).mean()
    # Extract convergence statistics, restricted to the 'mu' variables.
    fit_summary = fit.summary()
    df_conv_stats = pd.DataFrame(fit_summary['summary'])
    df_conv_stats.columns = fit_summary['summary_colnames']
    df_conv_stats['var_name'] = fit_summary['summary_rownames']
    mask = df_conv_stats['var_name'].apply(lambda x: 'mu' in x)
    df_conv_stats = df_conv_stats.loc[mask, ]
    # Worst-case effective sample size as a share of post-warmup draws.
    res['n_eff_pct'] = df_conv_stats['n_eff'].min() / float(num_chains * (num_iter - num_warmup))
    # Largest deviation of the Gelman-Rubin Rhat statistic from 1.
    res['Rhat_diff'] = (df_conv_stats['Rhat'] - 1).abs().max()
    # If requested, extract full Stan fit
    if full_output:
        res['stan_fit'] = fit
    return res
def mean_se(x, robust = True):
    """Pandas aggregation helper: standard error of the mean of *x*,
    obtained as the intercept's standard error in a constant-only OLS
    regression.

    Args:
        x (series): pandas Series
        robust (bool, optional): if True, use heteroskedasticity-robust
            (HC2) standard errors

    Returns:
        float: standard error of the mean
    """
    frame = pd.DataFrame(x)
    frame.columns = ['x']
    # 'nonrobust' is statsmodels' default covariance type, so the
    # non-robust branch is unchanged in behavior.
    cov = 'HC2' if robust else 'nonrobust'
    fitted = smf.ols('x ~ 1', data = frame).fit(cov_type = cov)
    return fitted.bse['Intercept']
def simulate_AR1(rho, sigma, T, shocks = None):
    """Simulate T periods of the AR(1) process

        x_{t+1} = rho * x_t + eps_{t+1},   eps ~ N(0, sigma^2),

    where the initial condition is drawn from the stationary
    distribution N(0, sigma^2 / (1 - rho^2)) whenever the shocks are
    generated internally.

    Args:
        rho (float): AR(1) persistence parameter, must lie in (-1, 1)
        sigma (float): Standard deviation of shocks
        T (int): Length of simulated time series
        shocks (array, optional): If provided, these disturbances are
            used as-is (including the initial condition in shocks[0])

    Returns:
        dict: with keys
            shocks: the disturbances eps
            x: the simulated time series
    """
    assert rho > - 1 and rho < 1, \
        'Persistence parameter should be in (-1, 1).'
    if shocks is None:
        shocks = sigma * np.random.randn(1, T).flatten()
        # Initial condition from the stationary distribution.
        shocks[0] = np.random.randn(1, 1) * sigma / np.sqrt(1 - rho ** 2)
    return {'shocks': shocks,
            'x': scipy.signal.lfilter([1], [1, -rho], shocks)}
"pandas.Series",
"numpy.sqrt",
"os.makedirs",
"os.path.isdir",
"numpy.isnan",
"statsmodels.formula.api.ols",
"shutil.rmtree",
"pandas.DataFrame",
"numpy.percentile",
"numpy.random.randn"
] | [((410, 431), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (423, 431), False, 'import os\n'), ((4128, 4164), 'pandas.DataFrame', 'pd.DataFrame', (["fit_summary['summary']"], {}), "(fit_summary['summary'])\n", (4140, 4164), True, 'import pandas as pd\n'), ((5015, 5030), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (5027, 5030), True, 'import pandas as pd\n'), ((441, 462), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (454, 462), False, 'import shutil\n'), ((480, 499), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (491, 499), False, 'import os\n'), ((1060, 1071), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1068, 1071), True, 'import numpy as np\n'), ((1184, 1195), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1192, 1195), True, 'import numpy as np\n'), ((3039, 3050), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (3047, 3050), True, 'import numpy as np\n'), ((6248, 6269), 'numpy.sqrt', 'np.sqrt', (['(1 - rho ** 2)'], {}), '(1 - rho ** 2)\n', (6255, 6269), True, 'import numpy as np\n'), ((1017, 1029), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1026, 1029), True, 'import pandas as pd\n'), ((1409, 1421), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1418, 1421), True, 'import pandas as pd\n'), ((3660, 3714), 'numpy.percentile', 'np.percentile', (["fit_res['mu']"], {'axis': '(0)', 'q': '(100 - aa / 2.0)'}), "(fit_res['mu'], axis=0, q=100 - aa / 2.0)\n", (3673, 3714), True, 'import numpy as np\n'), ((5082, 5106), 'statsmodels.formula.api.ols', 'smf.ols', (['"""x ~ 1"""'], {'data': 'x'}), "('x ~ 1', data=x)\n", (5089, 5106), True, 'import statsmodels.formula.api as smf\n'), ((5155, 5179), 'statsmodels.formula.api.ols', 'smf.ols', (['"""x ~ 1"""'], {'data': 'x'}), "('x ~ 1', data=x)\n", (5162, 5179), True, 'import statsmodels.formula.api as smf\n'), ((6216, 6237), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (6231, 6237), True, 'import numpy 
as np\n'), ((3759, 3807), 'numpy.percentile', 'np.percentile', (["fit_res['mu']"], {'axis': '(0)', 'q': '(aa / 2.0)'}), "(fit_res['mu'], axis=0, q=aa / 2.0)\n", (3772, 3807), True, 'import numpy as np\n'), ((6156, 6177), 'numpy.random.randn', 'np.random.randn', (['(1)', 'T'], {}), '(1, T)\n', (6171, 6177), True, 'import numpy as np\n')] |
import os.path
import numpy as np
# All TCGA-style cancer-type pairs that can be compared against each
# other in the experiments below.
cancer_type_pairs = [
    ["lung squamous cell carcinoma", "head & neck squamous cell carcinoma"],
    ["bladder urothelial carcinoma", "cervical & endocervical cancer"],
    ["colon adenocarcinoma", "rectum adenocarcinoma"],
    ["stomach adenocarcinoma", "esophageal carcinoma"],
    ["kidney clear cell carcinoma", "kidney papillary cell carcinoma"],
    ["glioblastoma multiforme", "sarcoma"],
    ["adrenocortical cancer", "uveal melanoma"],
    ["testicular germ cell tumor", "uterine carcinosarcoma"],
    ["lung adenocarcinoma", "pancreatic adenocarcinoma"],
    ["ovarian serous cystadenocarcinoma", "uterine corpus endometrioid carcinoma"],
    ["brain lower grade glioma", "pheochromocytoma & paraganglioma"],
    ["skin cutaneous melanoma", "mesothelioma"],
    ["liver hepatocellular carcinoma", "kidney chromophobe"],
    ["breast invasive carcinoma", "prostate adenocarcinoma"],
    ["acute myeloid leukemia", "diffuse large B-cell lymphoma"],
    ["thyroid carcinoma", "cholangiocarcinoma"],
]

# Subset of the pairs above ("priv" pairs) actually evaluated by the
# result-scanning loop below.
priv_pairs = [
    cancer_type_pairs[0],
    cancer_type_pairs[1],
    cancer_type_pairs[2],
    cancer_type_pairs[3],
    cancer_type_pairs[4],
    cancer_type_pairs[5],
    cancer_type_pairs[9],
    cancer_type_pairs[13],
]

# (representation-learning algorithm name, extra style kwargs) to scan
# results for.  The kwargs dicts are empty here but kept for interface
# symmetry with whatever produced the result files.
algs = [
    ('rand_proj',{}),
    ('PCA',{}),
    ('VAE',{}),
    ('VAE_hyper',{}),
]

# Optional prefix for result-file names (empty by default).
test_id = ""
def np_loadtxt_or(filename, fallback):
    """Load a numeric text file with ``np.loadtxt``; if the file is
    missing or empty, print a warning and return *fallback* instead."""
    missing_or_empty = (not os.path.isfile(filename)) or os.path.getsize(filename) == 0
    if missing_or_empty:
        print(" Warning: File not found or empty: %s" % (filename))
        return fallback
    return np.loadtxt(filename)
# For every "priv" cancer-type pair and every algorithm, look up the
# hyper-parameter-optimization result files and report the best
# parameter combination found.
for pv, priv in enumerate(priv_pairs):
    print("priv = %s" % priv)
    # Sanitize the pair into a file-name-safe dataset identifier,
    # e.g. "priv-lung_squamous_..._carcinoma-head___neck_...".
    data_name = (('-'.join(['priv',] + priv)).replace(' ', '_').replace('&', '_'))
    for a, (repr_alg, style_args) in enumerate(algs):
        test_name = "%s%s-%s" % (test_id, data_name, repr_alg)
        params_filename = "param_opt/opt_params-%s.npy" % (test_name)
        results_filename = "param_opt/opt_results-%s.npy" % (test_name)
        # Only report when both files exist and are non-empty.
        if (os.path.isfile(params_filename) and os.path.getsize(params_filename) > 0 and
                os.path.isfile(results_filename) and os.path.getsize(results_filename) > 0):
            params = np.load(params_filename)
            results = np.load(results_filename)
            # argmax over the flattened results picks the best score;
            # params[i] / results[i, 0] index the corresponding row.
            i = np.argmax(results)
            print("%s: %d tested, best: %s -> %s" %
                  (repr_alg, len(results), params[i], results[i,0]))
            #print(np.hstack((params, results)))
        else:
            print("%s: Error: param and/or result files not found" % (repr_alg))
    print()
| [
"numpy.loadtxt",
"numpy.load",
"numpy.argmax"
] | [((1435, 1455), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (1445, 1455), True, 'import numpy as np\n'), ((2130, 2154), 'numpy.load', 'np.load', (['params_filename'], {}), '(params_filename)\n', (2137, 2154), True, 'import numpy as np\n'), ((2171, 2196), 'numpy.load', 'np.load', (['results_filename'], {}), '(results_filename)\n', (2178, 2196), True, 'import numpy as np\n'), ((2207, 2225), 'numpy.argmax', 'np.argmax', (['results'], {}), '(results)\n', (2216, 2225), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import torch
import pytrol.util.argsparser as parser
from pytrol.control.agent.HPAgent import HPAgent
from pytrol.control.agent.MAPTrainerModelAgent import MAPTrainerModelAgent
from pytrol.model.knowledge.EnvironmentKnowledge import EnvironmentKnowledge
from pytrol.util.net.Connection import Connection
# Heuristic Pathfinder ReLU Predictor: an HPAgent whose idleness
# knowledge is complemented by estimates from a trained model
# (an "MLP"/"ReLU" variant by default).
class HPREstimator(HPAgent, MAPTrainerModelAgent):
    def __init__(self,
                 id_: int,
                 original_id: str,
                 env_knl: EnvironmentKnowledge,
                 connection: Connection,
                 agts_addrs: list,
                 datasrc: str = None,
                 variant: str = '',
                 gpu: bool = False,
                 depth: float = 3.0,
                 model_type: str = "MLP",
                 model_variant: str = "ReLU",
                 interaction: bool = True):
        r"""Initialise both parent classes; when *datasrc* is None it is
        read from the command-line arguments (``argsparser``).

        Args:
            id_ (int): Numeric agent identifier
            original_id (str): Original (string) agent identifier
            env_knl (EnvironmentKnowledge): Knowledge of the patrolled
                environment (idlenesses etc.)
            connection (Connection): Communication channel of the agent
            agts_addrs (list): Addresses of the other agents
            datasrc (str): Data source path; defaults to the
                ``datasrc`` command-line argument when None
            variant (str): Agent variant name
            gpu (bool): Whether the model runs on GPU
            depth (float): Depth parameter forwarded to both parents
                (presumably the pathfinder search depth -- TODO confirm)
            model_type (str): The type of the model used to make predictions
            model_variant (str): The variant of the model used to make
                predictions
            interaction (bool): Interaction flag forwarded to both parents
        """
        HPAgent.__init__(self, id_=id_, original_id=original_id,
                         env_knl=env_knl,
                         connection=connection, agts_addrs=agts_addrs,
                         variant=variant, depth=depth, interaction=interaction)
        # Fall back to the command-line argument for the data source.
        if datasrc is None:
            args = parser.parse_args()
            datasrc = args.datasrc
        MAPTrainerModelAgent.__init__(self, id_=id_, original_id=original_id,
                                      env_knl=env_knl, connection=connection,
                                      agts_addrs=agts_addrs, variant=variant,
                                      depth=depth, gpu=gpu,
                                      model_type=model_type,
                                      model_variant=model_variant,
                                      interaction=interaction, datasrc=datasrc)

    def run_model(self, input_) -> torch.Tensor:
        r"""Run the learned model on *input_* and cache the result in
        ``self.model_estm_idls``.

        Args:
            input_: Raw model input (the idleness vector in
                ``estimate_idls``); converted via ``prepare_input``
        """
        input_ = self.prepare_input(input_)
        output = self.model(input_)
        self.model_estm_idls = output
        return self.model_estm_idls

    def estimate_idls(self) -> np.ndarray:
        r"""Predictor function: returns the model's estimation of idlenesses.

        Negative model outputs are clipped to 0, then each node keeps the
        smallest of: clipped estimate, shared idleness, and previous
        estimate + 1 (one time step of aging).
        """
        estimated_idls = self.run_model(
            self.env_knl.idls).detach().cpu().numpy()
        # TODO: changing the model, meanwhile any negative idleness is
        # frozen (set to 0)
        # Positive estimated idlenesses
        positive_estm_idls = np.maximum(estimated_idls,
                                        np.zeros(
                                            np.array(estimated_idls).shape)
                                        )
        # For each node the best idleness between the estimated,
        # the individual and the previous estimated incremented of 1 is
        # selected
        best_iidl_estm = \
            np.minimum(np.minimum(positive_estm_idls,
                                  self.env_knl.shared_idls),
                       np.array(self.prev_estimated_idls, dtype=np.int16) + 1)
        return best_iidl_estm
| [
"numpy.minimum",
"pytrol.control.agent.HPAgent.HPAgent.__init__",
"numpy.array",
"pytrol.control.agent.MAPTrainerModelAgent.MAPTrainerModelAgent.__init__",
"pytrol.util.argsparser.parse_args"
] | [((1456, 1639), 'pytrol.control.agent.HPAgent.HPAgent.__init__', 'HPAgent.__init__', (['self'], {'id_': 'id_', 'original_id': 'original_id', 'env_knl': 'env_knl', 'connection': 'connection', 'agts_addrs': 'agts_addrs', 'variant': 'variant', 'depth': 'depth', 'interaction': 'interaction'}), '(self, id_=id_, original_id=original_id, env_knl=env_knl,\n connection=connection, agts_addrs=agts_addrs, variant=variant, depth=\n depth, interaction=interaction)\n', (1472, 1639), False, 'from pytrol.control.agent.HPAgent import HPAgent\n'), ((1818, 2097), 'pytrol.control.agent.MAPTrainerModelAgent.MAPTrainerModelAgent.__init__', 'MAPTrainerModelAgent.__init__', (['self'], {'id_': 'id_', 'original_id': 'original_id', 'env_knl': 'env_knl', 'connection': 'connection', 'agts_addrs': 'agts_addrs', 'variant': 'variant', 'depth': 'depth', 'gpu': 'gpu', 'model_type': 'model_type', 'model_variant': 'model_variant', 'interaction': 'interaction', 'datasrc': 'datasrc'}), '(self, id_=id_, original_id=original_id,\n env_knl=env_knl, connection=connection, agts_addrs=agts_addrs, variant=\n variant, depth=depth, gpu=gpu, model_type=model_type, model_variant=\n model_variant, interaction=interaction, datasrc=datasrc)\n', (1847, 2097), False, 'from pytrol.control.agent.MAPTrainerModelAgent import MAPTrainerModelAgent\n'), ((1754, 1773), 'pytrol.util.argsparser.parse_args', 'parser.parse_args', ([], {}), '()\n', (1771, 1773), True, 'import pytrol.util.argsparser as parser\n'), ((3371, 3427), 'numpy.minimum', 'np.minimum', (['positive_estm_idls', 'self.env_knl.shared_idls'], {}), '(positive_estm_idls, self.env_knl.shared_idls)\n', (3381, 3427), True, 'import numpy as np\n'), ((3486, 3536), 'numpy.array', 'np.array', (['self.prev_estimated_idls'], {'dtype': 'np.int16'}), '(self.prev_estimated_idls, dtype=np.int16)\n', (3494, 3536), True, 'import numpy as np\n'), ((3090, 3114), 'numpy.array', 'np.array', (['estimated_idls'], {}), '(estimated_idls)\n', (3098, 3114), True, 'import numpy as 
np\n')] |
import numpy as np
from optimization.basic_neuralnet_lib import neuralnet
from optimization.basic_neuralnet_lib import tensors
from optimization.basic_neuralnet_lib import loss
from typing import Iterator, NamedTuple
DEFAULT_BATCH_SIZE = 32
def train(
    network: neuralnet.NeuralNet,
    inputs: tensors.Tensor,
    targets: tensors.Tensor,
    num_epochs: int,
    loss: loss.Loss = None,
    optimizer: neuralnet.Optimizer = None,
):
    """Train *network* on (inputs, targets) for *num_epochs* epochs.

    Every 10 epochs the accumulated epoch loss is printed.

    Args:
        network: the model to train (mutated in place by the optimizer)
        inputs: training inputs, batched by BatchIterator
        targets: training targets, aligned with inputs
        num_epochs: number of passes over the data
        loss: loss function; a fresh TotalSquaredError when None
        optimizer: update rule; a fresh StochasticGradientDescent when None

    Note: the original signature evaluated the default loss/optimizer
    once at import time, so a single (possibly stateful) optimizer
    instance was silently shared by every call; defaults are now
    constructed per call.
    """
    if loss is None:
        # Local aliased import: the `loss` parameter shadows the module name.
        from optimization.basic_neuralnet_lib import loss as _loss_mod
        loss = _loss_mod.TotalSquaredError()
    if optimizer is None:
        optimizer = neuralnet.StochasticGradientDescent()
    iterator = BatchIterator(batch_size=DEFAULT_BATCH_SIZE)
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            predicted = network.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            gradient = loss.gradient(predicted, batch.targets)
            network.backward(gradient)
            optimizer.step(network)
        if epoch % 10 == 0:
            print(f"epoch: {epoch}, epoch_loss: {epoch_loss}")
class Batch(NamedTuple):
    """One minibatch: aligned slices of the inputs and their targets."""
    inputs: tensors.Tensor
    targets: tensors.Tensor
class BatchIterator:
    """Yield successive Batch objects of *batch_size* rows from a pair
    of aligned tensors, optionally visiting the batches in random order.
    """

    def __init__(self, batch_size: int, shuffle: bool = True):
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __call__(self, inputs: tensors.Tensor, targets: tensors.Tensor) -> Iterator[Batch]:
        # TODO(Jonathon): Add full shuffling, not just shuffling around batches of `batch_size`
        starts = np.arange(0, len(inputs), self.batch_size)
        if self.shuffle:
            # Randomize batch order only; rows within a batch stay contiguous.
            np.random.shuffle(starts)
        for begin in starts:
            stop = begin + self.batch_size
            yield Batch(
                inputs=inputs[begin:stop],
                targets=targets[begin:stop],
            )
| [
"optimization.basic_neuralnet_lib.loss.loss",
"optimization.basic_neuralnet_lib.neuralnet.StochasticGradientDescent",
"optimization.basic_neuralnet_lib.loss.gradient",
"optimization.basic_neuralnet_lib.loss.TotalSquaredError",
"numpy.random.shuffle"
] | [((391, 415), 'optimization.basic_neuralnet_lib.loss.TotalSquaredError', 'loss.TotalSquaredError', ([], {}), '()\n', (413, 415), False, 'from optimization.basic_neuralnet_lib import loss\n'), ((454, 491), 'optimization.basic_neuralnet_lib.neuralnet.StochasticGradientDescent', 'neuralnet.StochasticGradientDescent', ([], {}), '()\n', (489, 491), False, 'from optimization.basic_neuralnet_lib import neuralnet\n'), ((745, 780), 'optimization.basic_neuralnet_lib.loss.loss', 'loss.loss', (['predicted', 'batch.targets'], {}), '(predicted, batch.targets)\n', (754, 780), False, 'from optimization.basic_neuralnet_lib import loss\n'), ((804, 843), 'optimization.basic_neuralnet_lib.loss.gradient', 'loss.gradient', (['predicted', 'batch.targets'], {}), '(predicted, batch.targets)\n', (817, 843), False, 'from optimization.basic_neuralnet_lib import loss\n'), ((1538, 1569), 'numpy.random.shuffle', 'np.random.shuffle', (['batch_starts'], {}), '(batch_starts)\n', (1555, 1569), True, 'import numpy as np\n')] |
#import cv2
import pickle
import numpy as np
import PIL
from PIL import Image
import os.path
import sys
# import cv2
def get_train_data(chunk, img_row, img_col,
                   pickle_path="/home/amit/Desktop/vignesh/allmerge2.pickle",
                   img_dir="/home/amit/Desktop/vignesh/allmerge/"):
    """Load the training images named in `chunk` together with their labels.

    Args:
        chunk: iterable of image base names (without the '.jpg' suffix).
        img_row: target width (pixels) passed to PIL's resize.
        img_col: target height (pixels) passed to PIL's resize.
        pickle_path: pickled dict mapping image name -> label. New optional
            parameter; the default preserves the original hard-coded path.
        img_dir: directory holding the '<name>.jpg' files. Optional, same
            default as before.

    Returns:
        (X_train, Y_train) numpy arrays -- X_train is float32 in
        channels-first (C, H, W) layout -- or (None, None) on failure.
    """
    X_train = []
    Y_train = []
    with open(pickle_path, 'rb') as f1:
        spatial_train_data = pickle.load(f1)
    try:
        for imgname in chunk:
            filename = img_dir + str(imgname) + '.jpg'
            # Images missing on disk are silently skipped.
            if os.path.exists(filename):
                a = Image.open(filename)
                # NOTE: PIL.Image.ANTIALIAS is a deprecated alias of
                # LANCZOS in newer Pillow releases.
                a = a.resize((img_row, img_col), PIL.Image.ANTIALIAS)
                # HWC uint8 -> CHW float32 for the downstream model.
                img = np.rollaxis(np.asarray(a).astype(np.float32), 2)
                X_train.append(img)
                Y_train.append(spatial_train_data[imgname])
        return np.asarray(X_train), np.asarray(Y_train)
    except Exception:
        # Preserve the original best-effort contract (return (None, None)),
        # but narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        print(" \n get train data exception- finished")
        return None, None
def get_train_data_11_04(chunk, img_row, img_col,
                         pickle_path="/home/amit/Desktop/vignesh/11_04_backsub.pickle",
                         img_dir="/home/amit/Desktop/vignesh/11_04_merge/"):
    """Load the 11_04 background-subtracted training images and labels.

    Args:
        chunk: iterable of image base names (without the '.jpg' suffix).
        img_row: target width (pixels) passed to PIL's resize.
        img_col: target height (pixels) passed to PIL's resize.
        pickle_path: pickled dict mapping image name -> label. New optional
            parameter; the default preserves the original hard-coded path.
        img_dir: directory holding the '<name>.jpg' files. Optional.

    Returns:
        (X_train, Y_train) numpy arrays -- X_train is float32 in
        channels-first (C, H, W) layout -- or (None, None) on failure.
    """
    print(" \n get train data - running")
    X_train = []
    Y_train = []
    with open(pickle_path, 'rb') as f1:
        spatial_train_data = pickle.load(f1)
    try:
        for imgname in chunk:
            filename = img_dir + str(imgname) + '.jpg'
            # Images missing on disk are silently skipped.
            if os.path.exists(filename):
                print(filename)
                a = Image.open(filename)
                # NOTE: PIL.Image.ANTIALIAS is a deprecated alias of
                # LANCZOS in newer Pillow releases.
                a = a.resize((img_row, img_col), PIL.Image.ANTIALIAS)
                # HWC uint8 -> CHW float32 (per-step debug prints removed).
                img = np.rollaxis(np.asarray(a).astype(np.float32), 2)
                X_train.append(img)
                Y_train.append(spatial_train_data[imgname])
        X_train = np.asarray(X_train)
        Y_train = np.asarray(Y_train)
        print(Y_train.shape)
        print(" \n get train data - finished")
        return X_train, Y_train
    except Exception:
        # Preserve the original best-effort contract (return (None, None)),
        # but narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        print(" \n get train data exception- finished")
        return None, None
def get_train_data_11_03(chunk, img_row, img_col,
                         pickle_path="/home/amit/Desktop/vignesh/11_03_backsub.pickle",
                         img_dir="/home/amit/Desktop/vignesh/11_03_merge/"):
    """Load the 11_03 background-subtracted training images and labels.

    Args:
        chunk: iterable of image base names (without the '.jpg' suffix).
        img_row: target width (pixels) passed to PIL's resize.
        img_col: target height (pixels) passed to PIL's resize.
        pickle_path: pickled dict mapping image name -> label. New optional
            parameter; the default preserves the original hard-coded path.
        img_dir: directory holding the '<name>.jpg' files. Optional.

    Returns:
        (X_train, Y_train) numpy arrays -- X_train is float32 in
        channels-first (C, H, W) layout -- or (None, None) on failure.
    """
    print(" \n get train data - running")
    X_train = []
    Y_train = []
    with open(pickle_path, 'rb') as f1:
        spatial_train_data = pickle.load(f1)
    try:
        for imgname in chunk:
            filename = img_dir + str(imgname) + '.jpg'
            # Images missing on disk are silently skipped.
            if os.path.exists(filename):
                print(filename)
                a = Image.open(filename)
                # NOTE: PIL.Image.ANTIALIAS is a deprecated alias of
                # LANCZOS in newer Pillow releases.
                a = a.resize((img_row, img_col), PIL.Image.ANTIALIAS)
                # HWC uint8 -> CHW float32 (per-step debug prints removed).
                img = np.rollaxis(np.asarray(a).astype(np.float32), 2)
                X_train.append(img)
                Y_train.append(spatial_train_data[imgname])
        X_train = np.asarray(X_train)
        Y_train = np.asarray(Y_train)
        print(Y_train.shape)
        print(" \n get train data - finished")
        return X_train, Y_train
    except Exception:
        # Preserve the original best-effort contract (return (None, None)),
        # but narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        print(" \n get train data exception- finished")
        return None, None
def get_train_data_08_02(chunk, img_row, img_col,
                         pickle_path="/home/amit/Desktop/vignesh/08_02_backsub.pickle",
                         img_dir="/home/amit/Desktop/vignesh/08_02_merge/"):
    """Load the 08_02 background-subtracted training images and labels.

    Args:
        chunk: iterable of image base names (without the '.jpg' suffix).
        img_row: target width (pixels) passed to PIL's resize.
        img_col: target height (pixels) passed to PIL's resize.
        pickle_path: pickled dict mapping image name -> label. New optional
            parameter; the default preserves the original hard-coded path.
        img_dir: directory holding the '<name>.jpg' files. Optional.

    Returns:
        (X_train, Y_train) numpy arrays -- X_train is float32 in
        channels-first (C, H, W) layout -- or (None, None) on failure.
    """
    print(" \n get train data - running")
    X_train = []
    Y_train = []
    with open(pickle_path, 'rb') as f1:
        spatial_train_data = pickle.load(f1)
    try:
        for imgname in chunk:
            filename = img_dir + str(imgname) + '.jpg'
            # Images missing on disk are silently skipped.
            if os.path.exists(filename):
                print(filename)
                a = Image.open(filename)
                # NOTE: PIL.Image.ANTIALIAS is a deprecated alias of
                # LANCZOS in newer Pillow releases.
                a = a.resize((img_row, img_col), PIL.Image.ANTIALIAS)
                # HWC uint8 -> CHW float32 (per-step debug prints removed).
                img = np.rollaxis(np.asarray(a).astype(np.float32), 2)
                X_train.append(img)
                Y_train.append(spatial_train_data[imgname])
        X_train = np.asarray(X_train)
        Y_train = np.asarray(Y_train)
        print(Y_train.shape)
        print(" \n get train data - finished")
        return X_train, Y_train
    except Exception:
        # Preserve the original best-effort contract (return (None, None)),
        # but narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        print(" \n get train data exception- finished")
        return None, None
def get_test_data(chunk, img_row, img_col,
                  pickle_path="/home/amit/Desktop/vignesh/allmerge2.pickle",
                  img_dir="/home/amit/Desktop/vignesh/allmerge/"):
    """Load the test images named in `chunk` together with their labels.

    Args:
        chunk: iterable of image base names (without the '.jpg' suffix).
        img_row: target width (pixels) passed to PIL's resize.
        img_col: target height (pixels) passed to PIL's resize.
        pickle_path: pickled dict mapping image name -> label. New optional
            parameter; the default preserves the original hard-coded path.
        img_dir: directory holding the '<name>.jpg' files. Optional.

    Returns:
        (X_test, Y_test) numpy arrays -- X_test is float32 in
        channels-first (C, H, W) layout -- or (None, None) on failure.
    """
    X_test = []
    Y_test = []
    with open(pickle_path, 'rb') as f1:
        spatial_test_data = pickle.load(f1)
    try:
        for imgname in chunk:
            filename = img_dir + str(imgname) + '.jpg'
            # Images missing on disk are silently skipped.
            if os.path.exists(filename):
                a = Image.open(filename)
                # NOTE: PIL.Image.ANTIALIAS is a deprecated alias of
                # LANCZOS in newer Pillow releases.
                a = a.resize((img_row, img_col), PIL.Image.ANTIALIAS)
                # HWC uint8 -> CHW float32 for the downstream model.
                img = np.rollaxis(np.asarray(a).astype(np.float32), 2)
                X_test.append(img)
                Y_test.append(spatial_test_data[imgname])
        return np.asarray(X_test), np.asarray(Y_test)
    except Exception:
        # Preserve the original best-effort contract (return (None, None)),
        # but narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        print(" \n get test data exception - finished")
        return None, None
if __name__ == '__main__':
    # BUG FIX: `gc` was never imported at module level, so running this
    # file as a script raised NameError. Import locally in the guard.
    import gc
    gc.collect()
| [
"numpy.asarray",
"PIL.Image.open",
"pickle.load"
] | [((342, 357), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (353, 357), False, 'import pickle\n'), ((1352, 1371), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (1362, 1371), True, 'import numpy as np\n'), ((1390, 1409), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (1400, 1409), True, 'import numpy as np\n'), ((1894, 1909), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (1905, 1909), False, 'import pickle\n'), ((2895, 2914), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (2905, 2914), True, 'import numpy as np\n'), ((2933, 2952), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (2943, 2952), True, 'import numpy as np\n'), ((3435, 3450), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (3446, 3450), False, 'import pickle\n'), ((4436, 4455), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (4446, 4455), True, 'import numpy as np\n'), ((4474, 4493), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (4484, 4493), True, 'import numpy as np\n'), ((4974, 4989), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (4985, 4989), False, 'import pickle\n'), ((5975, 5994), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (5985, 5994), True, 'import numpy as np\n'), ((6013, 6032), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (6023, 6032), True, 'import numpy as np\n'), ((6524, 6539), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (6535, 6539), False, 'import pickle\n'), ((7221, 7239), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (7231, 7239), True, 'import numpy as np\n'), ((7257, 7275), 'numpy.asarray', 'np.asarray', (['Y_test'], {}), '(Y_test)\n', (7267, 7275), True, 'import numpy as np\n'), ((740, 760), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (750, 760), False, 'from PIL import Image\n'), ((919, 932), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (929, 
932), True, 'import numpy as np\n'), ((2293, 2313), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (2303, 2313), False, 'from PIL import Image\n'), ((2468, 2481), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2478, 2481), True, 'import numpy as np\n'), ((3834, 3854), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3844, 3854), False, 'from PIL import Image\n'), ((4009, 4022), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (4019, 4022), True, 'import numpy as np\n'), ((5373, 5393), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (5383, 5393), False, 'from PIL import Image\n'), ((5548, 5561), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (5558, 5561), True, 'import numpy as np\n'), ((6917, 6937), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (6927, 6937), False, 'from PIL import Image\n'), ((7026, 7039), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (7036, 7039), True, 'import numpy as np\n')] |
import numpy as np
def braille():
    """Return a mapping from characters to 3x2 boolean braille cells.

    Letters 'a'-'z' map to a single 3x2 numpy bool array (True = raised
    dot). Digits map to a two-element list [number_sign, letter_cell]:
    in braille a digit is the number sign followed by the cell of the
    corresponding letter 'a'-'j' (1->a, 2->b, ..., 9->i, 0->j).
    """
    letters = {
        'a': np.array([[1, 0], [0, 0], [0, 0]], dtype=bool),
        'b': np.array([[1, 0], [1, 0], [0, 0]], dtype=bool),
        'c': np.array([[1, 1], [0, 0], [0, 0]], dtype=bool),
        'd': np.array([[1, 1], [0, 1], [0, 0]], dtype=bool),
        'e': np.array([[1, 0], [0, 1], [0, 0]], dtype=bool),
        'f': np.array([[1, 1], [1, 0], [0, 0]], dtype=bool),
        'g': np.array([[1, 1], [1, 1], [0, 0]], dtype=bool),
        'h': np.array([[1, 0], [1, 1], [0, 0]], dtype=bool),
        'i': np.array([[0, 1], [1, 0], [0, 0]], dtype=bool),
        'j': np.array([[0, 1], [1, 1], [0, 0]], dtype=bool),
        'k': np.array([[1, 0], [0, 0], [1, 0]], dtype=bool),
        'l': np.array([[1, 0], [1, 0], [1, 0]], dtype=bool),
        'm': np.array([[1, 1], [0, 0], [1, 0]], dtype=bool),
        'n': np.array([[1, 1], [0, 1], [1, 0]], dtype=bool),
        'o': np.array([[1, 0], [0, 1], [1, 0]], dtype=bool),
        'p': np.array([[1, 1], [1, 0], [1, 0]], dtype=bool),
        'q': np.array([[1, 1], [1, 1], [1, 0]], dtype=bool),
        'r': np.array([[1, 0], [1, 1], [1, 0]], dtype=bool),
        's': np.array([[0, 1], [1, 0], [1, 0]], dtype=bool),
        't': np.array([[0, 1], [1, 1], [1, 0]], dtype=bool),
        'u': np.array([[1, 0], [0, 0], [1, 1]], dtype=bool),
        'v': np.array([[1, 0], [1, 0], [1, 1]], dtype=bool),
        'w': np.array([[0, 1], [1, 1], [0, 1]], dtype=bool),
        'x': np.array([[1, 1], [0, 0], [1, 1]], dtype=bool),
        'y': np.array([[1, 1], [0, 1], [1, 1]], dtype=bool),
        'z': np.array([[1, 0], [0, 1], [1, 1]], dtype=bool),
    }
    # BUG FIX: the original table gave '4' the cell of 'f'
    # ([[1,1],[1,0],[0,0]]) instead of 'd' ([[1,1],[0,1],[0,0]]).
    # Deriving the digits from the letter table (1-9,0 -> a-j) makes
    # that mistake impossible to reintroduce.
    number_sign = np.array([[0, 1], [0, 1], [1, 1]], dtype=bool)
    digits = {
        digit: [number_sign.copy(), letters[letter].copy()]
        for digit, letter in zip('1234567890', 'abcdefghij')
    }
    return {**letters, **digits}
"numpy.array"
] | [((58, 104), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [0, 0], [0, 0]], dtype=bool)\n', (66, 104), True, 'import numpy as np\n'), ((118, 164), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [1, 0], [0, 0]], dtype=bool)\n', (126, 164), True, 'import numpy as np\n'), ((178, 224), 'numpy.array', 'np.array', (['[[1, 1], [0, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 0], [0, 0]], dtype=bool)\n', (186, 224), True, 'import numpy as np\n'), ((238, 284), 'numpy.array', 'np.array', (['[[1, 1], [0, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 1], [0, 0]], dtype=bool)\n', (246, 284), True, 'import numpy as np\n'), ((298, 344), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [0, 1], [0, 0]], dtype=bool)\n', (306, 344), True, 'import numpy as np\n'), ((358, 404), 'numpy.array', 'np.array', (['[[1, 1], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 0], [0, 0]], dtype=bool)\n', (366, 404), True, 'import numpy as np\n'), ((418, 464), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 1], [0, 0]], dtype=bool)\n', (426, 464), True, 'import numpy as np\n'), ((478, 524), 'numpy.array', 'np.array', (['[[1, 0], [1, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [1, 1], [0, 0]], dtype=bool)\n', (486, 524), True, 'import numpy as np\n'), ((538, 584), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[0, 1], [1, 0], [0, 0]], dtype=bool)\n', (546, 584), True, 'import numpy as np\n'), ((598, 644), 'numpy.array', 'np.array', (['[[0, 1], [1, 1], [0, 0]]'], {'dtype': 'bool'}), '([[0, 1], [1, 1], [0, 0]], dtype=bool)\n', (606, 644), True, 'import numpy as np\n'), ((658, 704), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [1, 0]]'], {'dtype': 'bool'}), '([[1, 0], [0, 0], [1, 0]], dtype=bool)\n', (666, 704), True, 'import numpy as np\n'), ((718, 764), 'numpy.array', 
'np.array', (['[[1, 0], [1, 0], [1, 0]]'], {'dtype': 'bool'}), '([[1, 0], [1, 0], [1, 0]], dtype=bool)\n', (726, 764), True, 'import numpy as np\n'), ((778, 824), 'numpy.array', 'np.array', (['[[1, 1], [0, 0], [1, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 0], [1, 0]], dtype=bool)\n', (786, 824), True, 'import numpy as np\n'), ((838, 884), 'numpy.array', 'np.array', (['[[1, 1], [0, 1], [1, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 1], [1, 0]], dtype=bool)\n', (846, 884), True, 'import numpy as np\n'), ((898, 944), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 0]]'], {'dtype': 'bool'}), '([[1, 0], [0, 1], [1, 0]], dtype=bool)\n', (906, 944), True, 'import numpy as np\n'), ((958, 1004), 'numpy.array', 'np.array', (['[[1, 1], [1, 0], [1, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 0], [1, 0]], dtype=bool)\n', (966, 1004), True, 'import numpy as np\n'), ((1018, 1064), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [1, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 1], [1, 0]], dtype=bool)\n', (1026, 1064), True, 'import numpy as np\n'), ((1078, 1124), 'numpy.array', 'np.array', (['[[1, 0], [1, 1], [1, 0]]'], {'dtype': 'bool'}), '([[1, 0], [1, 1], [1, 0]], dtype=bool)\n', (1086, 1124), True, 'import numpy as np\n'), ((1138, 1184), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [1, 0]]'], {'dtype': 'bool'}), '([[0, 1], [1, 0], [1, 0]], dtype=bool)\n', (1146, 1184), True, 'import numpy as np\n'), ((1198, 1244), 'numpy.array', 'np.array', (['[[0, 1], [1, 1], [1, 0]]'], {'dtype': 'bool'}), '([[0, 1], [1, 1], [1, 0]], dtype=bool)\n', (1206, 1244), True, 'import numpy as np\n'), ((1258, 1304), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [1, 1]]'], {'dtype': 'bool'}), '([[1, 0], [0, 0], [1, 1]], dtype=bool)\n', (1266, 1304), True, 'import numpy as np\n'), ((1318, 1364), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [1, 1]]'], {'dtype': 'bool'}), '([[1, 0], [1, 0], [1, 1]], dtype=bool)\n', (1326, 1364), True, 'import numpy as np\n'), ((1378, 1424), 'numpy.array', 'np.array', 
(['[[0, 1], [1, 1], [0, 1]]'], {'dtype': 'bool'}), '([[0, 1], [1, 1], [0, 1]], dtype=bool)\n', (1386, 1424), True, 'import numpy as np\n'), ((1438, 1484), 'numpy.array', 'np.array', (['[[1, 1], [0, 0], [1, 1]]'], {'dtype': 'bool'}), '([[1, 1], [0, 0], [1, 1]], dtype=bool)\n', (1446, 1484), True, 'import numpy as np\n'), ((1498, 1544), 'numpy.array', 'np.array', (['[[1, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[1, 1], [0, 1], [1, 1]], dtype=bool)\n', (1506, 1544), True, 'import numpy as np\n'), ((1558, 1604), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[1, 0], [0, 1], [1, 1]], dtype=bool)\n', (1566, 1604), True, 'import numpy as np\n'), ((1626, 1672), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (1634, 1672), True, 'import numpy as np\n'), ((1674, 1720), 'numpy.array', 'np.array', (['[[1, 0], [0, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [0, 0], [0, 0]], dtype=bool)\n', (1682, 1720), True, 'import numpy as np\n'), ((1736, 1782), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (1744, 1782), True, 'import numpy as np\n'), ((1784, 1830), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [1, 0], [0, 0]], dtype=bool)\n', (1792, 1830), True, 'import numpy as np\n'), ((1846, 1892), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (1854, 1892), True, 'import numpy as np\n'), ((1894, 1940), 'numpy.array', 'np.array', (['[[1, 1], [0, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 0], [0, 0]], dtype=bool)\n', (1902, 1940), True, 'import numpy as np\n'), ((1956, 2002), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (1964, 2002), True, 'import numpy as np\n'), ((2004, 2050), 'numpy.array', 
'np.array', (['[[1, 1], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 0], [0, 0]], dtype=bool)\n', (2012, 2050), True, 'import numpy as np\n'), ((2066, 2112), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (2074, 2112), True, 'import numpy as np\n'), ((2114, 2160), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [0, 1], [0, 0]], dtype=bool)\n', (2122, 2160), True, 'import numpy as np\n'), ((2176, 2222), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (2184, 2222), True, 'import numpy as np\n'), ((2224, 2270), 'numpy.array', 'np.array', (['[[1, 1], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 0], [0, 0]], dtype=bool)\n', (2232, 2270), True, 'import numpy as np\n'), ((2286, 2332), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (2294, 2332), True, 'import numpy as np\n'), ((2334, 2380), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [1, 1], [0, 0]], dtype=bool)\n', (2342, 2380), True, 'import numpy as np\n'), ((2396, 2442), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (2404, 2442), True, 'import numpy as np\n'), ((2444, 2490), 'numpy.array', 'np.array', (['[[1, 0], [1, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 0], [1, 1], [0, 0]], dtype=bool)\n', (2452, 2490), True, 'import numpy as np\n'), ((2506, 2552), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (2514, 2552), True, 'import numpy as np\n'), ((2554, 2600), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0]]'], {'dtype': 'bool'}), '([[0, 1], [1, 0], [0, 0]], dtype=bool)\n', (2562, 2600), True, 'import numpy as np\n'), ((2616, 2662), 
'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 1]]'], {'dtype': 'bool'}), '([[0, 1], [0, 1], [1, 1]], dtype=bool)\n', (2624, 2662), True, 'import numpy as np\n'), ((2664, 2710), 'numpy.array', 'np.array', (['[[0, 1], [1, 1], [0, 0]]'], {'dtype': 'bool'}), '([[0, 1], [1, 1], [0, 0]], dtype=bool)\n', (2672, 2710), True, 'import numpy as np\n')] |
"""
Plotly - Sparklines
===================
"""
# -------------------
# Main
# -------------------
# https://chart-studio.plotly.com/~empet/13748/sparklines/#/code
# https://omnipotent.net/jquery.sparkline/#s-about
# https://chart-studio.plotly.com/create/?fid=Dreamshot:8025#/
# Libraries
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from pandas.tseries.offsets import DateOffset
from plotly.subplots import make_subplots
# Constants
colors = px.colors.sequential.Viridis_r

# Size
S = 100  # observations per series
N = 7    # number of series (one sparkline each)

# Create data: random integers in [1, 100)
y = np.random.randint(low=1, high=100, size=(S, N))

# Create DataFrame with columns c0..c6
data = pd.DataFrame(y)
data.columns = ['c%s' % i for i in data.columns]

# Create a daily datetime index starting at today's midnight.
# (Dead code removed: the original also built `x = np.arange(S)` which was
# overwritten before use, and a 'timedelta' column that was immediately
# dropped without being read.)
today = pd.to_datetime('today').normalize()
data['dates'] = pd.to_datetime(today)
data['dates'] += pd.to_timedelta(data.index / 1, unit='D')

# Set dates as index
data = data.set_index('dates')

# Show data
print("\nData:")
print(data)

# ----------------
# Visualize
# ----------------
# Base layout shared by all subplots.
layout = {
    "font": {"family": "Georgia, serif"},
    "title": "Sparklines",
    "width": 500,
    "height": 500,
    "margin": {"t": 80},
    "paper_bgcolor": 'rgba(0,0,0,0)',  # transparent
    "plot_bgcolor": 'rgba(0,0,0,0)',   # transparent
    "autosize": False,
    "hovermode": "closest",
    "showlegend": False,
}

# One subplot row per series.
fig = make_subplots(rows=N, cols=1,
    subplot_titles=None)

# Add one filled line trace per column, each on its own axis pair.
for i, column in enumerate(data.columns):
    c = colors[i]  # one Viridis color per series
    x = data.index
    y = data[column]
    fig.add_trace(go.Scatter(x=x, y=y,
        name=column.upper(),
        mode='lines', fill='tozeroy',
        line=dict(color=c, width=0.5),
        xaxis='x%s' % (i+1), yaxis='y%s' % (i+1)),
        row=i+1, col=1)
    fig.update_yaxes(title_text=column, row=i+1, col=1)
    # Strip ticks/grid/labels so each subplot reads as a bare sparkline.
    layout["xaxis%s" % (i+1)] = {
        "ticks": "",
        "anchor": 'y%s' % (i+1),
        "domain": [0.0, 1.0],
        "mirror": False,
        "showgrid": False,
        "showline": False,
        "zeroline": False,
        "showticklabels": False
    }
    layout["yaxis%s" % (i+1)] = {
        "ticks": "",
        "anchor": 'x%s' % (i+1),
        "mirror": False,
        "showgrid": False,
        "showline": False,
        "zeroline": False,
        "showticklabels": False
    }

# Apply the accumulated per-axis settings.
fig.update_layout(layout)

# Show
#fig.show()
fig
"pandas.to_timedelta",
"plotly.subplots.make_subplots",
"numpy.arange",
"numpy.random.randint",
"pandas.DataFrame",
"pandas.to_datetime"
] | [((575, 587), 'numpy.arange', 'np.arange', (['S'], {}), '(S)\n', (584, 587), True, 'import numpy as np\n'), ((592, 639), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(100)', 'size': '(S, N)'}), '(low=1, high=100, size=(S, N))\n', (609, 639), True, 'import numpy as np\n'), ((667, 682), 'pandas.DataFrame', 'pd.DataFrame', (['y'], {}), '(y)\n', (679, 682), True, 'import pandas as pd\n'), ((776, 817), 'pandas.to_timedelta', 'pd.to_timedelta', (['(data.index / 1)'], {'unit': '"""D"""'}), "(data.index / 1, unit='D')\n", (791, 817), True, 'import pandas as pd\n'), ((910, 931), 'pandas.to_datetime', 'pd.to_datetime', (['today'], {}), '(today)\n', (924, 931), True, 'import pandas as pd\n'), ((949, 990), 'pandas.to_timedelta', 'pd.to_timedelta', (['(data.index / 1)'], {'unit': '"""D"""'}), "(data.index / 1, unit='D')\n", (964, 990), True, 'import pandas as pd\n'), ((1539, 1589), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'N', 'cols': '(1)', 'subplot_titles': 'None'}), '(rows=N, cols=1, subplot_titles=None)\n', (1552, 1589), False, 'from plotly.subplots import make_subplots\n'), ((858, 881), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (872, 881), True, 'import pandas as pd\n')] |
import os.path
import re
from numpy.distutils.core import setup, Extension
from numpy.distutils.system_info import get_info
def find_version(*paths):
    """Extract the `__version__` string from the file at *paths*.

    The path components are joined relative to this file's directory
    (an absolute component overrides that, per `os.path.join`).

    Raises:
        RuntimeError: if no `__version__ = "..."` assignment is found.
    """
    version_file = os.path.join(os.path.dirname(__file__), *paths)
    with open(version_file) as fh:
        contents = fh.read()
    found = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not found:
        raise RuntimeError("Unable to find version string.")
    return found.group(1)
# Earlier variants of the extensions that linked gfortran explicitly;
# kept for reference.
# ext_cpwt = Extension(name='plateflex.cpwt',
#                     sources=['src/cpwt/cpwt.f90', 'src/cpwt/cpwt_sub.f90'],
#                     libraries=['gfortran'],
#                     library_dirs=get_info('gfortran').get('library_dirs'))
# ext_flex = Extension(name='plateflex.flex',
#                     sources=['src/flex/flex.f90'],
#                     libraries=['gfortran'],
#                     library_dirs=get_info('gfortran').get('library_dirs'))
# Fortran extension modules built with numpy.distutils.
# NOTE(review): numpy.distutils is deprecated (removed for Python >= 3.12);
# a migration to meson-python or scikit-build will be needed on upgrade.
ext_cpwt = Extension(name='plateflex.cpwt',
                    sources=['src/cpwt/cpwt.f90', 'src/cpwt/cpwt_sub.f90'])
ext_flex = Extension(name='plateflex.flex',
                    sources=['src/flex/flex.f90'])
# Package metadata and build configuration.
setup(
    name='plateflex',
    version=find_version('plateflex', '__init__.py'),
    description='Python package for estimating lithospheric elastic thickness',
    author='<NAME>',
    maintainer='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/paudetseis/PlateFlex',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Fortran',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8'],
    install_requires=['numpy>=1.15', 'pymc3', 'matplotlib', 'seaborn'],
    python_requires='>=3.5',
    tests_require=['pytest'],
    ext_modules=[ext_cpwt, ext_flex],
    packages=['plateflex'],
    package_data={
        'plateflex': [
            'examples/data.zip',
            'examples/Notebooks/*.ipynb']
    }
)
| [
"numpy.distutils.core.Extension",
"re.search"
] | [((926, 1018), 'numpy.distutils.core.Extension', 'Extension', ([], {'name': '"""plateflex.cpwt"""', 'sources': "['src/cpwt/cpwt.f90', 'src/cpwt/cpwt_sub.f90']"}), "(name='plateflex.cpwt', sources=['src/cpwt/cpwt.f90',\n 'src/cpwt/cpwt_sub.f90'])\n", (935, 1018), False, 'from numpy.distutils.core import setup, Extension\n'), ((1047, 1110), 'numpy.distutils.core.Extension', 'Extension', ([], {'name': '"""plateflex.flex"""', 'sources': "['src/flex/flex.f90']"}), "(name='plateflex.flex', sources=['src/flex/flex.f90'])\n", (1056, 1110), False, 'from numpy.distutils.core import setup, Extension\n'), ((276, 341), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'code', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', code, re.M)\n', (285, 341), False, 'import re\n')] |
import numpy as np
from PIL import Image
import tensorflow as tf
import re
#ref: https://github.com/tensorflow/models/blob/1af55e018eebce03fb61bba9959a04672536107d/tutorials/image/imagenet/classify_image.py
class NodeLookup(object):
    """Converts integer node ID's to human readable labels."""

    def __init__(self, label_lookup_path=None, uid_lookup_path=None):
        if not label_lookup_path:
            label_lookup_path = 'models/imagenet_2012_challenge_label_map_proto.pbtxt'
        if not uid_lookup_path:
            uid_lookup_path = 'models/imagenet_synset_to_human_label_map.txt'
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        """Loads a human readable English name for each softmax node.
        Args:
          label_lookup_path: string UID to integer node ID.
          uid_lookup_path: string UID to human-readable string.
        Returns:
          dict from integer node ID to human-readable string.
        """
        if not tf.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)

        # First mapping: string UID -> human readable label.
        pattern = re.compile(r'[n\d]*[ \S,]*')
        uid_to_human = {}
        for raw_line in tf.gfile.GFile(uid_lookup_path).readlines():
            fields = pattern.findall(raw_line)
            uid_to_human[fields[0]] = fields[2]

        # Second mapping: integer node ID -> string UID, parsed from the
        # protobuf text format (target_class / target_class_string pairs).
        node_id_to_uid = {}
        for raw_line in tf.gfile.GFile(label_lookup_path).readlines():
            if raw_line.startswith('  target_class:'):
                current_id = int(raw_line.split(': ')[1])
            if raw_line.startswith('  target_class_string:'):
                uid_field = raw_line.split(': ')[1]
                # Strip the surrounding quotes and trailing newline.
                node_id_to_uid[current_id] = uid_field[1:-2]

        # Final mapping: integer node ID -> human readable label.
        node_id_to_name = {}
        for node_id, uid in node_id_to_uid.items():
            if uid not in uid_to_human:
                tf.logging.fatal('Failed to locate: %s', uid)
            node_id_to_name[node_id] = uid_to_human[uid]
        return node_id_to_name

    def id_to_string(self, node_id):
        """Return the label for `node_id`, or '' when unknown."""
        return self.node_lookup.get(node_id, '')
# --- TF1 targeted adversarial-example script ------------------------------
# Optimizes a trainable input variable `adv` so the pretrained
# classification graph labels it as `target_label`.
session=tf.Session()
# Trainable input image: batch of one 100x100 RGB tensor, initialized to zeros.
adv = tf.get_variable(name="adv", shape=[1,100,100,3], dtype=tf.float32, initializer=tf.zeros_initializer)
#x = tf.placeholder(tf.float32, shape=[1,100,100,3])
# Placeholder for the class id the attack steers the prediction toward.
target = tf.placeholder(tf.int32)
#assign_op=tf.assign(adv, x)
def create_graph(dirname):
    # Load the frozen GraphDef from `dirname` and import it with `adv`
    # spliced in place of the graph's "ExpandDims:0" input tensor.
    with tf.gfile.FastGFile(dirname, 'rb') as f:
        graph_def = session.graph_def
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='adv',
                            input_map={"ExpandDims:0":adv} )
create_graph("models/classify_image_graph_def.pb")
session.run(tf.global_variables_initializer())
tensorlist=[n.name for n in session.graph_def.node]
#print(tensorlist)
# Output tensors of the imported graph (imported under the 'adv' name scope;
# TF de-duplicated it to 'adv_1' -- presumably because the variable already
# claimed 'adv'; confirm if the graph structure changes).
softmax_tensor = session.graph.get_tensor_by_name('adv_1/softmax:0')
#input_tensor=session.graph.get_tensor_by_name('ExpandDims:0')
logits_tensor=session.graph.get_tensor_by_name('adv_1/softmax/logits:0')
#imagename="panda.jpg"
imagename="imagen/n07768694_513_pomegranate.jpg"
# Load the source image as 100x100 RGB float32.
image=np.array(Image.open(imagename).convert('RGB').resize((100, 100), Image.BILINEAR)).astype(np.float32)
#[100,100,3]->[1,100,100,3]
image=np.expand_dims(image, axis=0)
# Baseline prediction: feed the clean image in place of the `adv` variable.
predictions = session.run(softmax_tensor,
                           {adv: image})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
#top 3
top_k = predictions.argsort()[-3:][::-1]
for node_id in top_k:
    human_string = node_lookup.id_to_string(node_id)
    score = predictions[node_id]
    print('%s (score = %.5f)(id = %d)' % (human_string, score,node_id))
# Attack hyper-parameters.
epochs=500
lr=0.1
target_label=123
# Loss: cross-entropy between the graph's logits and the target class.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_tensor, labels=[target])
#optimizer = tf.train.GradientDescentOptimizer(lr)
optimizer = tf.train.AdamOptimizer(lr)
# Only the input variable `adv` is optimized; graph weights stay frozen.
train_step=optimizer.minimize(loss=cross_entropy,var_list=[adv])
session.run(tf.global_variables_initializer())
# Start the attack from the clean image rather than from zeros.
session.run(tf.assign(adv, image))
for epoch in range(epochs):
    loss,_,adv_img,predictions=session.run([cross_entropy,train_step,adv,softmax_tensor],{target:target_label})
    predictions = np.squeeze(predictions)
    label=np.argmax(predictions)
    print("epoch={} loss={} label={}".format(epoch,loss,label))
    if label == target_label:
        # Success: the perturbed image is classified as the target.
        # Report the top-3 predictions and stop early.
        top_k = predictions.argsort()[-3:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            print('%s (score = %.5f)(id = %d)' % (human_string, score,node_id))
        break
"PIL.Image.open",
"tensorflow.gfile.Exists",
"tensorflow.get_variable",
"re.compile",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.argmax",
"tensorflow.gfile.FastGFile",
"numpy.squeeze",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.global_variables_initializer"... | [((2500, 2512), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2510, 2512), True, 'import tensorflow as tf\n'), ((2521, 2628), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""adv"""', 'shape': '[1, 100, 100, 3]', 'dtype': 'tf.float32', 'initializer': 'tf.zeros_initializer'}), "(name='adv', shape=[1, 100, 100, 3], dtype=tf.float32,\n initializer=tf.zeros_initializer)\n", (2536, 2628), True, 'import tensorflow as tf\n'), ((2686, 2710), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (2700, 2710), True, 'import tensorflow as tf\n'), ((3617, 3646), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3631, 3646), True, 'import numpy as np\n'), ((3745, 3768), 'numpy.squeeze', 'np.squeeze', (['predictions'], {}), '(predictions)\n', (3755, 3768), True, 'import numpy as np\n'), ((4124, 4214), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits_tensor', 'labels': '[target]'}), '(logits=logits_tensor, labels\n =[target])\n', (4170, 4214), True, 'import tensorflow as tf\n'), ((4273, 4299), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (4295, 4299), True, 'import tensorflow as tf\n'), ((3087, 3120), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3118, 3120), True, 'import tensorflow as tf\n'), ((4379, 4412), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4410, 4412), True, 'import tensorflow as tf\n'), ((4428, 4449), 'tensorflow.assign', 'tf.assign', (['adv', 'image'], {}), '(adv, image)\n', (4437, 4449), True, 'import tensorflow as tf\n'), ((4620, 4643), 'numpy.squeeze', 'np.squeeze', (['predictions'], {}), '(predictions)\n', (4630, 4643), True, 'import numpy as np\n'), ((4654, 4676), 'numpy.argmax', 'np.argmax', 
(['predictions'], {}), '(predictions)\n', (4663, 4676), True, 'import numpy as np\n'), ((1389, 1418), 're.compile', 're.compile', (['"""[n\\\\d]*[ \\\\S,]*"""'], {}), "('[n\\\\d]*[ \\\\S,]*')\n", (1399, 1418), False, 'import re\n'), ((2777, 2810), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['dirname', '"""rb"""'], {}), "(dirname, 'rb')\n", (2795, 2810), True, 'import tensorflow as tf\n'), ((2912, 2987), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '"""adv"""', 'input_map': "{'ExpandDims:0': adv}"}), "(graph_def, name='adv', input_map={'ExpandDims:0': adv})\n", (2931, 2987), True, 'import tensorflow as tf\n'), ((1011, 1043), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['uid_lookup_path'], {}), '(uid_lookup_path)\n', (1026, 1043), True, 'import tensorflow as tf\n'), ((1051, 1110), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""File does not exist %s"""', 'uid_lookup_path'], {}), "('File does not exist %s', uid_lookup_path)\n", (1067, 1110), True, 'import tensorflow as tf\n'), ((1122, 1156), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['label_lookup_path'], {}), '(label_lookup_path)\n', (1137, 1156), True, 'import tensorflow as tf\n'), ((1164, 1225), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""File does not exist %s"""', 'label_lookup_path'], {}), "('File does not exist %s', label_lookup_path)\n", (1180, 1225), True, 'import tensorflow as tf\n'), ((1315, 1346), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['uid_lookup_path'], {}), '(uid_lookup_path)\n', (1329, 1346), True, 'import tensorflow as tf\n'), ((1699, 1732), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['label_lookup_path'], {}), '(label_lookup_path)\n', (1713, 1732), True, 'import tensorflow as tf\n'), ((2223, 2268), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""Failed to locate: %s"""', 'val'], {}), "('Failed to locate: %s', val)\n", (2239, 2268), True, 'import tensorflow as tf\n'), ((3491, 3512), 'PIL.Image.open', 'Image.open', 
(['imagename'], {}), '(imagename)\n', (3501, 3512), False, 'from PIL import Image\n')] |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: wtte-dev
# language: python
# name: wtte-dev
# ---
# %% [markdown]
# # WTTE-RNN in PyTorch
#
# <NAME>
#
# Based on original Keras version written by <NAME>:
# https://github.com/ragulpr/wtte-rnn/blob/master/examples/keras/simple_example.ipynb
# MIT license
#
# For details, check out
# https://ragulpr.github.io/2016/12/22/WTTE-RNN-Hackless-churn-modeling/
# https://github.com/ragulpr/wtte-rnn
# %%
# %matplotlib inline
import sys
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt
sys.path.append("..")
from torch_wtte import losses
np.random.seed(11)
torch.manual_seed(11)
# %%
def get_data(n_timesteps, every_nth, n_repeats, noise_level, n_features, use_censored=True):
    """Build a toy censored time-to-event dataset of evenly spaced events.

    One sequence is generated per phase offset of the event cycle, so there
    are `every_nth` base sequences of length `n_timesteps`.  Training targets
    use right-censored TTE; test targets use the true (uncensored) TTE.

    Args:
        n_timesteps: length of each sequence.
        every_nth: event period; also the number of base sequences.
        n_repeats: how many times the training set is tiled.
        noise_level: probability of flipping a training feature bit on.
        n_features: the event-indicator feature is copied to this many channels.
        use_censored: if False, train on the uncensored targets instead.

    Returns:
        (y_train, x_train, y_test, x_test, events) with
        y_* of shape (n_sequences, n_timesteps, 2) holding (tte, u) and
        x_* of shape (n_sequences, n_timesteps, n_features).
    """
    # One sequence per possible phase offset.  The original code read
    # `n_sequences` from a module-level global that happened to equal
    # `every_nth`; deriving it locally removes that hidden coupling.
    n_sequences = every_nth

    def get_equal_spaced(n, every_nth):
        # Create simple data of evenly spaced events recurring every_nth step.
        # Each array is on (time, batch)-format after the final transposes.
        events = np.array([np.array(range(n)) for _ in range(every_nth)])
        # Shift row j by j+1 so each sequence starts at a different phase.
        events = events + np.array(range(every_nth)).reshape(every_nth, 1) + 1

        tte_actual = every_nth - 1 - events % every_nth

        was_event = (events % every_nth == 0) * 1.0
        was_event[:, 0] = 0.0  # no "previous event" signal at the first step

        events = tte_actual == 0

        # A step is censored when no event occurs at or after it.
        is_censored = (events[:, ::-1].cumsum(1)[:, ::-1] == 0) * 1
        tte_censored = is_censored[:, ::-1].cumsum(1)[:, ::-1] * is_censored
        tte_censored = tte_censored + (1 - is_censored) * tte_actual

        events = np.copy(events.T * 1.0)
        tte_actual = np.copy(tte_actual.T * 1.0)
        tte_censored = np.copy(tte_censored.T * 1.0)
        was_event = np.copy(was_event.T * 1.0)
        not_censored = 1 - np.copy(is_censored.T * 1.0)

        return tte_censored, not_censored, was_event, events, tte_actual

    tte_censored, not_censored, was_event, events, tte_actual = get_equal_spaced(
        n=n_timesteps, every_nth=every_nth
    )

    # (time, batch) -> (batch, time, 1): the (nb_samples, timesteps, input_dim)
    # layout expected by recurrent layers (cf. https://keras.io/layers/recurrent/).
    u_train = not_censored.T.reshape(n_sequences, n_timesteps, 1)
    x_train = was_event.T.reshape(n_sequences, n_timesteps, 1)
    tte_censored = tte_censored.T.reshape(n_sequences, n_timesteps, 1)
    y_train = np.append(tte_censored, u_train, axis=2)  # (n_sequences,n_timesteps,2)

    u_test = np.ones(shape=(n_sequences, n_timesteps, 1))
    x_test = np.copy(x_train)
    tte_actual = tte_actual.T.reshape(n_sequences, n_timesteps, 1)
    y_test = np.append(tte_actual, u_test, axis=2)  # (n_sequences,n_timesteps,2)

    if not use_censored:
        x_train = np.copy(x_test)
        y_train = np.copy(y_test)

    # Since the above is deterministic, a perfect fit would be feasible;
    # repeat the training data and add feature noise to make it interesting.
    x_train = np.tile(x_train.T, n_repeats).T
    y_train = np.tile(y_train.T, n_repeats).T

    # Copy the single event-indicator feature across n_features channels.
    x_train_new = np.zeros([x_train.shape[0], x_train.shape[1], n_features])
    x_test_new = np.zeros([x_test.shape[0], x_test.shape[1], n_features])
    for f in range(n_features):
        x_train_new[:, :, f] = x_train[:, :, 0]
        x_test_new[:, :, f] = x_test[:, :, 0]
    x_train = x_train_new
    x_test = x_test_new

    # x_train is signal XOR noise, each noise bit on with prob. noise_level.
    noise = np.random.binomial(1, noise_level, size=x_train.shape)
    x_train = x_train + noise - x_train * noise

    return y_train, x_train, y_test, x_test, events
# %% [markdown]
# ### Generate some data
#
# * The true event-sequence is evenly spaced points (but we start anywhere in the sequence)
# * The true feature is (binary) if there was an event in last step
# * In the training data the feature has added noise
# * Training TTE is censored. Testing TTE is uncensored.
# %%
n_timesteps = 200            # length of each sequence
n_sequences = every_nth = 80  # event period == number of base sequences
n_features = 1               # channels carrying the (noisy) event indicator
n_repeats = 1000             # training set is tiled this many times
noise_level = 0.005          # probability of flipping a training feature bit
use_censored = True          # train on censored TTE targets
y_train, x_train, y_test, x_test, events = get_data(
    n_timesteps, every_nth, n_repeats, noise_level, n_features, use_censored
)
# %%
#### Plots
# Heatmaps below show one sequence per row, one timestep per column.
print("test shape", x_test.shape, y_test.shape)
plt.imshow(x_test[:, :, :].sum(axis=2) > 0, interpolation="none", cmap="Accent", aspect="auto")
plt.title("x_test (lagged/deterministic event indicator)")
plt.show()
plt.imshow(y_test[:, :, 0], interpolation="none", cmap="jet", aspect="auto")
plt.title("y_test[:,:,0] actual tte")
plt.show()
print("train shape", x_train.shape, y_train.shape)
# Only the first `every_nth` training rows are shown (one tile of the data).
plt.imshow(
    x_train[:every_nth, :, :].mean(axis=2), interpolation="none", cmap="Accent", aspect="auto"
)
plt.title("x_train[:every_nth,:,0] (lagged/noisy event indicator)")
plt.show()
plt.imshow(y_train[:every_nth, :, 0], interpolation="none", cmap="jet", aspect="auto")
plt.title("y_train[:every_nth,:,0] censored tte")
plt.show()
plt.imshow(y_train[:every_nth, :, 1], interpolation="none", cmap="Accent", aspect="auto")
plt.title("y_train[:every_nth,:,1] u (non-censoring indicator)")
plt.show()
## Example TTE:
# Overlay censored (training) vs actual (test) TTE for one sequence.
print("Example TTEs")
plt.plot(
    y_train[every_nth // 4, :, 0],
    label="censored tte (train)",
    color="black",
    linestyle="dashed",
    linewidth=2,
    drawstyle="steps-post",
)
plt.plot(
    y_test[every_nth // 4, :, 0],
    label="actual tte (test)",
    color="black",
    linestyle="solid",
    linewidth=2,
    drawstyle="steps-post",
)
plt.xlim(0, n_timesteps)
plt.xlabel("time")
plt.ylabel("time to event")
plt.title("Example TTEs")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# %% [markdown]
# # Train a WTTE-RNN
# ### Things to try out:
# * have fun with data paramaters:
# * every_nth to control event frequency
# * noise_level to make it more noisy
# * n_timesteps
# * n_features to get more noisy input
# * Generate more interesting temporal relationships
# * Here we use the smallest possible GRU. Try different learning rates, network architectures, initializations.
# * Try Implementing multivariate distributions, other distributions, data pipelines etc.
# * Invent better output activation layer
# * Invent ways to overcome instability with lots of censoring
# * ETC and have fun!
# %%
# Parameters for output activation layer initialization.
# Start at the naive geometric (beta=1) MLE: for a geometric distribution the
# MLE of alpha follows from the mean observed TTE, then is corrected by the
# fraction of uncensored observations.
tte_mean_train = np.nanmean(y_train[:, :, 0])
init_alpha = -1.0 / np.log(1.0 - 1.0 / (tte_mean_train + 1.0))
mean_u = np.nanmean(y_train[:, :, 1])  # fraction of non-censored steps
init_alpha = init_alpha / mean_u
print("init_alpha: ", init_alpha, "mean uncensored: ", mean_u)
### Uncomment if you have varying-length sequences that are nan-padded to the right:
# mask_value = -1.3371337 # Use some improbable but not nan-causing telltale value
# x_train[:,:,:][np.isnan(x_train)] = mask_value
# y_train[:,:,0][np.isnan(y_train[:,:,0])] = tte_mean_train
# y_train[:,:,1][np.isnan(y_train[:,:,1])] = 0.5
# sample_weights = (x_train[:,:,0]!=mask_value)*1.
# %%
class WTTERNN(nn.Module):
    """Minimal WTTE-RNN: a 2-unit GRU whose outputs are mapped to the
    (alpha, beta) parameters of a Weibull distribution over time-to-event.

    Reads the module-level globals `n_features` and `init_alpha`.
    """
    def __init__(self, discrete):
        super().__init__()
        self.epoch = 0  # running epoch counter, advanced by fit()
        self.layers = nn.ModuleList(
            [
                nn.GRU(input_size=n_features, hidden_size=2, batch_first=True),
                nn.Tanh(),
                losses.WeibullActivation(init_alpha=init_alpha, max_beta=4.0),
            ]
        )
        self.criterion = losses.WeibullCensoredNLLLoss(discrete=discrete)
    def forward(self, x):
        """Map (batch, time, n_features) inputs to (batch, time, 2) Weibull
        (alpha, beta) parameters."""
        x, _ = self.layers[0](x)  # discard GRU hidden state output
        x = self.layers[1](x)
        x = self.layers[2](x)
        return x
    def fit(self, optimizer, train_loader, device="cpu"):
        """Run one training epoch; returns the average loss per sample."""
        num_batches = (
            len(train_loader.dataset) + train_loader.batch_size - 1
        ) // train_loader.batch_size
        self.to(device)
        self.train()
        train_losses = []
        for batch_idx, (data, labels) in enumerate(train_loader):
            data, labels = data.to(device), labels.to(device)
            optimizer.zero_grad()
            output = self(data).squeeze()
            loss = self.criterion(output, labels).sum()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
            avg_loss = loss / len(data)
            print(
                f"Epoch: {self.epoch} [batch {batch_idx+1}/{num_batches}]\tLoss: {loss.item():.6f} Avg Loss: {avg_loss:.6f}",
                end="\r",
            )
        avg_train_loss = np.sum(train_losses) / len(train_loader.dataset)
        print()
        self.epoch += 1
        return avg_train_loss
    def score(self, valid_loader, device="cpu"):
        """Evaluate on `valid_loader`; returns the average validation loss."""
        self.to(device)
        self.eval()
        valid_loss = 0
        correct = 0
        with torch.no_grad():
            for data, labels in valid_loader:
                data, labels = data.to(device), labels.to(device)
                output = self(data).squeeze()
                tte = labels[..., 0]
                uncensored = labels[..., 1]
                alpha = output[..., 0]
                beta = output[..., 1]
                # NOTE(review): `=` overwrites the running total each batch,
                # so only the last batch's loss survives — likely should be +=.
                # NOTE(review): the criterion is called with 4 args here but
                # with (output, labels) in fit(); one of the two call
                # signatures looks wrong — confirm against torch_wtte.losses.
                valid_loss = self.criterion(tte, uncensored, alpha, beta).sum().item()
                pred = output.data.round()
                # `correct` is accumulated but never returned or printed.
                correct += pred.eq(labels.data.view_as(pred)).sum()
        n = len(valid_loader.dataset)
        valid_loss /= n
        print(f"Validation: avg loss: {valid_loss:.4f}")
        return valid_loss
# %%
def run(x_train, y_train, x_test, y_test, epochs, device):
    """Train a WTTERNN on the given arrays for `epochs` epochs.

    Converts the numpy arrays to float32 tensors on `device`, builds
    train/test loaders, and records per-epoch losses.

    Returns:
        (model, train_loss_history, valid_loss_history)
    """
    print(f"Using device {device}")

    def _to_device_tensor(arr):
        # float32 tensor placed on the training device
        return torch.from_numpy(arr.astype("float32")).to(device)

    x_train, y_train = _to_device_tensor(x_train), _to_device_tensor(y_train)
    x_test, y_test = _to_device_tensor(x_test), _to_device_tensor(y_test)

    batch_size = x_train.shape[0] // 10
    train_loader = DataLoader(
        TensorDataset(x_train, y_train), batch_size=batch_size, shuffle=True
    )
    test_loader = DataLoader(
        TensorDataset(x_test, y_test), batch_size=batch_size, shuffle=False
    )

    model = WTTERNN(discrete=True)
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    train_loss_history, valid_loss_history = [], []
    for _ in range(epochs):
        train_loss_history.append(model.fit(optimizer, train_loader, device=device))
        valid_loss_history.append(model.score(test_loader, device=device))
    return model, train_loss_history, valid_loss_history
# %%
# Train on GPU when available; fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model, train_losses, valid_losses = run(x_train, y_train, x_test, y_test, epochs=60, device=device)
# %%
# Learning curves: per-epoch average training vs validation loss.
plt.plot(train_losses, label="training")
plt.plot(valid_losses, label="validation")
plt.title("loss")
plt.legend()
# %% [markdown]
# # Predictions
# Try out training the model with different levels of noise. With more noise confidence gets lower (smaller beta). With less noise beta goes to maximum value and the predicted mode/peak probability is centered around the actual TTE.
# %%
# Make some parametric predictions: visualize the predicted Weibull
# (alpha, beta) per sequence/timestep on clean (test) and noisy (train) input.
print("TESTING (no noise in features)")
print("(each horizontal line is a sequence)")
predicted = model(torch.from_numpy(x_test.astype("float32")).to(device)).detach().cpu().numpy()
print(predicted.shape)
plt.imshow(predicted[:, :, 0], interpolation="none", cmap="jet", aspect="auto")
plt.title("predicted[:,:,0] (alpha)")
plt.colorbar(orientation="horizontal")
plt.show()
plt.imshow(predicted[:, :, 1], interpolation="none", cmap="jet", aspect="auto")
plt.title("predicted[:,:,1] (beta)")
plt.colorbar(orientation="horizontal")
plt.show()
print("TRAINING (Noisy features)")
# Only one tile of the (repeated) training data is shown.
predicted = (
    model(torch.from_numpy(x_train[:every_nth, :, :].astype("float32")).to(device))
    .detach()
    .cpu()
    .numpy()
)
print(predicted.shape)
plt.imshow(predicted[:, :, 0], interpolation="none", cmap="jet", aspect="auto")
plt.title("predicted[:,:,0] (alpha)")
plt.colorbar(orientation="horizontal")
plt.show()
plt.imshow(predicted[:, :, 1], interpolation="none", cmap="jet", aspect="auto")
plt.title("predicted[:,:,1] (beta)")
plt.colorbar(orientation="horizontal")
plt.show()
| [
"torch_wtte.losses.WeibullCensoredNLLLoss",
"torch.nn.Tanh",
"matplotlib.pyplot.ylabel",
"torch_wtte.losses.WeibullActivation",
"numpy.log",
"numpy.nanmean",
"torch.cuda.is_available",
"sys.path.append",
"numpy.random.binomial",
"torch.nn.GRU",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xl... | [((799, 820), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (814, 820), False, 'import sys\n'), ((852, 870), 'numpy.random.seed', 'np.random.seed', (['(11)'], {}), '(11)\n', (866, 870), True, 'import numpy as np\n'), ((871, 892), 'torch.manual_seed', 'torch.manual_seed', (['(11)'], {}), '(11)\n', (888, 892), False, 'import torch\n'), ((4459, 4517), 'matplotlib.pyplot.title', 'plt.title', (['"""x_test (lagged/deterministic event indicator)"""'], {}), "('x_test (lagged/deterministic event indicator)')\n", (4468, 4517), True, 'import matplotlib.pyplot as plt\n'), ((4518, 4528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4526, 4528), True, 'import matplotlib.pyplot as plt\n'), ((4529, 4605), 'matplotlib.pyplot.imshow', 'plt.imshow', (['y_test[:, :, 0]'], {'interpolation': '"""none"""', 'cmap': '"""jet"""', 'aspect': '"""auto"""'}), "(y_test[:, :, 0], interpolation='none', cmap='jet', aspect='auto')\n", (4539, 4605), True, 'import matplotlib.pyplot as plt\n'), ((4606, 4643), 'matplotlib.pyplot.title', 'plt.title', (['"""y_test[:,:,0] actual tte"""'], {}), "('y_test[:,:,0] actual tte')\n", (4615, 4643), True, 'import matplotlib.pyplot as plt\n'), ((4644, 4654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4652, 4654), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4883), 'matplotlib.pyplot.title', 'plt.title', (['"""x_train[:every_nth,:,0] (lagged/noisy event indicator)"""'], {}), "('x_train[:every_nth,:,0] (lagged/noisy event indicator)')\n", (4825, 4883), True, 'import matplotlib.pyplot as plt\n'), ((4884, 4894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4892, 4894), True, 'import matplotlib.pyplot as plt\n'), ((4895, 4985), 'matplotlib.pyplot.imshow', 'plt.imshow', (['y_train[:every_nth, :, 0]'], {'interpolation': '"""none"""', 'cmap': '"""jet"""', 'aspect': '"""auto"""'}), "(y_train[:every_nth, :, 0], interpolation='none', cmap='jet',\n aspect='auto')\n", (4905, 4985), 
True, 'import matplotlib.pyplot as plt\n'), ((4982, 5031), 'matplotlib.pyplot.title', 'plt.title', (['"""y_train[:every_nth,:,0] censored tte"""'], {}), "('y_train[:every_nth,:,0] censored tte')\n", (4991, 5031), True, 'import matplotlib.pyplot as plt\n'), ((5032, 5042), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5040, 5042), True, 'import matplotlib.pyplot as plt\n'), ((5043, 5136), 'matplotlib.pyplot.imshow', 'plt.imshow', (['y_train[:every_nth, :, 1]'], {'interpolation': '"""none"""', 'cmap': '"""Accent"""', 'aspect': '"""auto"""'}), "(y_train[:every_nth, :, 1], interpolation='none', cmap='Accent',\n aspect='auto')\n", (5053, 5136), True, 'import matplotlib.pyplot as plt\n'), ((5133, 5197), 'matplotlib.pyplot.title', 'plt.title', (['"""y_train[:every_nth,:,1] u (non-censoring indicator)"""'], {}), "('y_train[:every_nth,:,1] u (non-censoring indicator)')\n", (5142, 5197), True, 'import matplotlib.pyplot as plt\n'), ((5198, 5208), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5206, 5208), True, 'import matplotlib.pyplot as plt\n'), ((5248, 5394), 'matplotlib.pyplot.plot', 'plt.plot', (['y_train[every_nth // 4, :, 0]'], {'label': '"""censored tte (train)"""', 'color': '"""black"""', 'linestyle': '"""dashed"""', 'linewidth': '(2)', 'drawstyle': '"""steps-post"""'}), "(y_train[every_nth // 4, :, 0], label='censored tte (train)', color\n ='black', linestyle='dashed', linewidth=2, drawstyle='steps-post')\n", (5256, 5394), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5558), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test[every_nth // 4, :, 0]'], {'label': '"""actual tte (test)"""', 'color': '"""black"""', 'linestyle': '"""solid"""', 'linewidth': '(2)', 'drawstyle': '"""steps-post"""'}), "(y_test[every_nth // 4, :, 0], label='actual tte (test)', color=\n 'black', linestyle='solid', linewidth=2, drawstyle='steps-post')\n", (5425, 5558), True, 'import matplotlib.pyplot as plt\n'), ((5582, 5606), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 
'n_timesteps'], {}), '(0, n_timesteps)\n', (5590, 5606), True, 'import matplotlib.pyplot as plt\n'), ((5607, 5625), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (5617, 5625), True, 'import matplotlib.pyplot as plt\n'), ((5626, 5653), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""time to event"""'], {}), "('time to event')\n", (5636, 5653), True, 'import matplotlib.pyplot as plt\n'), ((5654, 5679), 'matplotlib.pyplot.title', 'plt.title', (['"""Example TTEs"""'], {}), "('Example TTEs')\n", (5663, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5680, 5742), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (5690, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5751, 5753), True, 'import matplotlib.pyplot as plt\n'), ((6552, 6580), 'numpy.nanmean', 'np.nanmean', (['y_train[:, :, 0]'], {}), '(y_train[:, :, 0])\n', (6562, 6580), True, 'import numpy as np\n'), ((6653, 6681), 'numpy.nanmean', 'np.nanmean', (['y_train[:, :, 1]'], {}), '(y_train[:, :, 1])\n', (6663, 6681), True, 'import numpy as np\n'), ((10888, 10928), 'matplotlib.pyplot.plot', 'plt.plot', (['train_losses'], {'label': '"""training"""'}), "(train_losses, label='training')\n", (10896, 10928), True, 'import matplotlib.pyplot as plt\n'), ((10929, 10971), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_losses'], {'label': '"""validation"""'}), "(valid_losses, label='validation')\n", (10937, 10971), True, 'import matplotlib.pyplot as plt\n'), ((10972, 10989), 'matplotlib.pyplot.title', 'plt.title', (['"""loss"""'], {}), "('loss')\n", (10981, 10989), True, 'import matplotlib.pyplot as plt\n'), ((10990, 11002), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11000, 11002), True, 'import matplotlib.pyplot as plt\n'), ((11516, 11595), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['predicted[:, :, 0]'], {'interpolation': '"""none"""', 'cmap': '"""jet"""', 'aspect': '"""auto"""'}), "(predicted[:, :, 0], interpolation='none', cmap='jet', aspect='auto')\n", (11526, 11595), True, 'import matplotlib.pyplot as plt\n'), ((11596, 11633), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted[:,:,0] (alpha)"""'], {}), "('predicted[:,:,0] (alpha)')\n", (11605, 11633), True, 'import matplotlib.pyplot as plt\n'), ((11634, 11672), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""'}), "(orientation='horizontal')\n", (11646, 11672), True, 'import matplotlib.pyplot as plt\n'), ((11673, 11683), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11681, 11683), True, 'import matplotlib.pyplot as plt\n'), ((11684, 11763), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predicted[:, :, 1]'], {'interpolation': '"""none"""', 'cmap': '"""jet"""', 'aspect': '"""auto"""'}), "(predicted[:, :, 1], interpolation='none', cmap='jet', aspect='auto')\n", (11694, 11763), True, 'import matplotlib.pyplot as plt\n'), ((11764, 11800), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted[:,:,1] (beta)"""'], {}), "('predicted[:,:,1] (beta)')\n", (11773, 11800), True, 'import matplotlib.pyplot as plt\n'), ((11801, 11839), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""'}), "(orientation='horizontal')\n", (11813, 11839), True, 'import matplotlib.pyplot as plt\n'), ((11840, 11850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11848, 11850), True, 'import matplotlib.pyplot as plt\n'), ((12049, 12128), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predicted[:, :, 0]'], {'interpolation': '"""none"""', 'cmap': '"""jet"""', 'aspect': '"""auto"""'}), "(predicted[:, :, 0], interpolation='none', cmap='jet', aspect='auto')\n", (12059, 12128), True, 'import matplotlib.pyplot as plt\n'), ((12129, 12166), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted[:,:,0] (alpha)"""'], {}), 
"('predicted[:,:,0] (alpha)')\n", (12138, 12166), True, 'import matplotlib.pyplot as plt\n'), ((12167, 12205), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""'}), "(orientation='horizontal')\n", (12179, 12205), True, 'import matplotlib.pyplot as plt\n'), ((12206, 12216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12214, 12216), True, 'import matplotlib.pyplot as plt\n'), ((12217, 12296), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predicted[:, :, 1]'], {'interpolation': '"""none"""', 'cmap': '"""jet"""', 'aspect': '"""auto"""'}), "(predicted[:, :, 1], interpolation='none', cmap='jet', aspect='auto')\n", (12227, 12296), True, 'import matplotlib.pyplot as plt\n'), ((12297, 12333), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted[:,:,1] (beta)"""'], {}), "('predicted[:,:,1] (beta)')\n", (12306, 12333), True, 'import matplotlib.pyplot as plt\n'), ((12334, 12372), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""'}), "(orientation='horizontal')\n", (12346, 12372), True, 'import matplotlib.pyplot as plt\n'), ((12373, 12383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12381, 12383), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2541), 'numpy.append', 'np.append', (['tte_censored', 'u_train'], {'axis': '(2)'}), '(tte_censored, u_train, axis=2)\n', (2510, 2541), True, 'import numpy as np\n'), ((2587, 2631), 'numpy.ones', 'np.ones', ([], {'shape': '(n_sequences, n_timesteps, 1)'}), '(shape=(n_sequences, n_timesteps, 1))\n', (2594, 2631), True, 'import numpy as np\n'), ((2645, 2661), 'numpy.copy', 'np.copy', (['x_train'], {}), '(x_train)\n', (2652, 2661), True, 'import numpy as np\n'), ((2742, 2779), 'numpy.append', 'np.append', (['tte_actual', 'u_test'], {'axis': '(2)'}), '(tte_actual, u_test, axis=2)\n', (2751, 2779), True, 'import numpy as np\n'), ((3185, 3243), 'numpy.zeros', 'np.zeros', (['[x_train.shape[0], x_train.shape[1], n_features]'], {}), 
'([x_train.shape[0], x_train.shape[1], n_features])\n', (3193, 3243), True, 'import numpy as np\n'), ((3261, 3317), 'numpy.zeros', 'np.zeros', (['[x_test.shape[0], x_test.shape[1], n_features]'], {}), '([x_test.shape[0], x_test.shape[1], n_features])\n', (3269, 3317), True, 'import numpy as np\n'), ((3570, 3624), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'noise_level'], {'size': 'x_train.shape'}), '(1, noise_level, size=x_train.shape)\n', (3588, 3624), True, 'import numpy as np\n'), ((6601, 6643), 'numpy.log', 'np.log', (['(1.0 - 1.0 / (tte_mean_train + 1.0))'], {}), '(1.0 - 1.0 / (tte_mean_train + 1.0))\n', (6607, 6643), True, 'import numpy as np\n'), ((9985, 10016), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (9998, 10016), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((10033, 10062), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (10046, 10062), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((10122, 10181), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=batch_size, shuffle=True)\n', (10132, 10181), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((10200, 10259), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(test_data, batch_size=batch_size, shuffle=False)\n', (10210, 10259), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((10744, 10769), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10767, 10769), False, 'import torch\n'), ((1716, 1739), 'numpy.copy', 'np.copy', (['(events.T * 1.0)'], {}), '(events.T * 1.0)\n', (1723, 1739), True, 'import numpy as np\n'), ((1761, 1788), 'numpy.copy', 'np.copy', (['(tte_actual.T * 1.0)'], {}), '(tte_actual.T * 1.0)\n', (1768, 1788), 
True, 'import numpy as np\n'), ((1812, 1841), 'numpy.copy', 'np.copy', (['(tte_censored.T * 1.0)'], {}), '(tte_censored.T * 1.0)\n', (1819, 1841), True, 'import numpy as np\n'), ((1862, 1888), 'numpy.copy', 'np.copy', (['(was_event.T * 1.0)'], {}), '(was_event.T * 1.0)\n', (1869, 1888), True, 'import numpy as np\n'), ((2855, 2870), 'numpy.copy', 'np.copy', (['x_test'], {}), '(x_test)\n', (2862, 2870), True, 'import numpy as np\n'), ((2889, 2904), 'numpy.copy', 'np.copy', (['y_test'], {}), '(y_test)\n', (2896, 2904), True, 'import numpy as np\n'), ((3046, 3075), 'numpy.tile', 'np.tile', (['x_train.T', 'n_repeats'], {}), '(x_train.T, n_repeats)\n', (3053, 3075), True, 'import numpy as np\n'), ((3092, 3121), 'numpy.tile', 'np.tile', (['y_train.T', 'n_repeats'], {}), '(y_train.T, n_repeats)\n', (3099, 3121), True, 'import numpy as np\n'), ((7556, 7604), 'torch_wtte.losses.WeibullCensoredNLLLoss', 'losses.WeibullCensoredNLLLoss', ([], {'discrete': 'discrete'}), '(discrete=discrete)\n', (7585, 7604), False, 'from torch_wtte import losses\n'), ((1916, 1944), 'numpy.copy', 'np.copy', (['(is_censored.T * 1.0)'], {}), '(is_censored.T * 1.0)\n', (1923, 1944), True, 'import numpy as np\n'), ((8648, 8668), 'numpy.sum', 'np.sum', (['train_losses'], {}), '(train_losses)\n', (8654, 8668), True, 'import numpy as np\n'), ((8917, 8932), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8930, 8932), False, 'import torch\n'), ((7337, 7399), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'n_features', 'hidden_size': '(2)', 'batch_first': '(True)'}), '(input_size=n_features, hidden_size=2, batch_first=True)\n', (7343, 7399), False, 'from torch import nn, optim\n'), ((7417, 7426), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7424, 7426), False, 'from torch import nn, optim\n'), ((7444, 7505), 'torch_wtte.losses.WeibullActivation', 'losses.WeibullActivation', ([], {'init_alpha': 'init_alpha', 'max_beta': '(4.0)'}), '(init_alpha=init_alpha, max_beta=4.0)\n', (7468, 7505), False, 'from 
torch_wtte import losses\n')] |
import numpy as np
from points import Points
from dataloader import loader
def distance(p1, p2):
    """Return the squared Euclidean distance between points p1 and p2."""
    delta = p1 - p2
    return np.sum(delta ** 2)
def initial_cluster(data, k):
    """Pick k initial centers with the k-means++ seeding strategy.

    The first center is drawn uniformly at random; each subsequent center is
    drawn with probability proportional to its squared distance from the
    nearest center chosen so far.

    Args:
        data: numpy array of shape (n_points, n_features).
        k: number of centers to seed.

    Returns:
        (centers, centers_indices): the chosen center rows and their row
        indices into `data`.
    """
    size = data.shape[0]
    indices = np.arange(size)

    first_center_id = np.random.choice(indices, 1)[0]
    centers = [data[first_center_id]]
    centers_indices = [first_center_id]

    # Running minimum squared distance from each point to its nearest chosen
    # center.  Updating it incrementally (O(n) per new center) replaces the
    # previous O(n * k) rescan over all centers on every iteration, while
    # producing the same values and hence the same random draws.
    dist = np.array([distance(data[first_center_id], data[i]) for i in range(size)])

    for _ in range(k - 1):
        # Sample the next center proportionally to D(x)^2 (the k-means++
        # rule); points already chosen have distance 0, hence probability 0.
        weights = dist / dist.sum()
        next_center_id = np.random.choice(indices, 1, p=weights)[0]
        centers_indices.append(next_center_id)
        centers.append(data[next_center_id])
        new_dist = np.array(
            [distance(data[next_center_id], data[i]) for i in range(size)]
        )
        np.minimum(dist, new_dist, out=dist)

    return centers, centers_indices
def _compute_sigma_x(points, centers):
    """Compute per-point sensitivity scores for coreset sampling.

    Each point's score combines its share of the total squared distance to
    its nearest center with the reciprocal size of its assigned cluster.

    Args:
        points: numpy array of shape (n_points, n_features).
        centers: sequence of center points (same feature dimension).

    Returns:
        numpy array of shape (n_points,) with the sensitivity of each point.
    """
    size = points.shape[0]
    dist = np.zeros(size)
    # which center each point is assigned to; use builtin `int` — the alias
    # `np.int` was removed in NumPy 1.24.
    assign = np.zeros(size, dtype=int)
    cluster_size = np.zeros(len(centers))  # points per center
    for i in range(size):
        cur_dis = np.array([distance(c, points[i]) for c in centers])
        center_id = np.argmin(cur_dis)  # nearest center for this point
        dist[i] = cur_dis[center_id]
        assign[i] = center_id
        cluster_size[center_id] += 1
    # Size of the cluster each point belongs to.
    c_apx_x = np.array([cluster_size[c] for c in assign])
    total_sum = dist.sum()
    sigma_x = dist / total_sum + 1 / c_apx_x
    return sigma_x
def compute_coreset(points, k, N):
    '''
    Build a weighted coreset of size N out of the input points.

    :param points: weighted points (numpy array, one point per row)
    :param k: the amount of initialized centers, caculated by k-means++ method
    :param N: size of coreset
    :return: coreset that generated from points
    '''
    data_size, dimension = points.shape
    assert data_size > N, ('Setting size of coreset is greater or equal to '
                           'the original data size, please alter it')
    # seed centers with k-means++ and derive each point's sensitivity
    centers, _ = initial_cluster(points, k)
    sigma_x = _compute_sigma_x(points, centers)
    # importance-sampling distribution and the compensating weights
    prob_x = sigma_x / sigma_x.sum()
    weights_x = 1.0 / (N * prob_x)
    chosen = np.random.choice(np.arange(data_size), N, p=prob_x)
    sampled_points = np.take(points, chosen, axis=0)
    sampled_weights = np.take(weights_x, chosen, axis=0)
    coreset = Points(N, dimension)
    coreset.fill_points(sampled_points, sampled_weights)
    return coreset
if __name__ == '__main__':
    # smoke-test the pipeline on the hayes-roth sample dataset
    data = loader(filename='hayes-roth.csv')
    seed_centers, seed_ids = initial_cluster(data, 5)
    sampled_coreset = compute_coreset(data, 5, 50)
    print(seed_centers)
    print(seed_ids)
    print(sampled_coreset.get_values())
    print(sampled_coreset.get_weights())
| [
"numpy.random.choice",
"numpy.take",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"dataloader.loader",
"numpy.argmin",
"points.Points",
"numpy.arange"
] | [((109, 131), 'numpy.sum', 'np.sum', (['((p1 - p2) ** 2)'], {}), '((p1 - p2) ** 2)\n', (115, 131), True, 'import numpy as np\n'), ((370, 384), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (378, 384), True, 'import numpy as np\n'), ((399, 414), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (408, 414), True, 'import numpy as np\n'), ((1250, 1264), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1258, 1264), True, 'import numpy as np\n'), ((1278, 1306), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.int'}), '(size, dtype=np.int)\n', (1286, 1306), True, 'import numpy as np\n'), ((1759, 1802), 'numpy.array', 'np.array', (['[cluster_size[c] for c in assign]'], {}), '([cluster_size[c] for c in assign])\n', (1767, 1802), True, 'import numpy as np\n'), ((2601, 2637), 'numpy.take', 'np.take', (['points', 'samples_idx'], {'axis': '(0)'}), '(points, samples_idx, axis=0)\n', (2608, 2637), True, 'import numpy as np\n'), ((2652, 2691), 'numpy.take', 'np.take', (['weights_x', 'samples_idx'], {'axis': '(0)'}), '(weights_x, samples_idx, axis=0)\n', (2659, 2691), True, 'import numpy as np\n'), ((2706, 2726), 'points.Points', 'Points', (['N', 'dimension'], {}), '(N, dimension)\n', (2712, 2726), False, 'from points import Points\n'), ((2879, 2912), 'dataloader.loader', 'loader', ([], {'filename': '"""hayes-roth.csv"""'}), "(filename='hayes-roth.csv')\n", (2885, 2912), False, 'from dataloader import loader\n'), ((474, 502), 'numpy.random.choice', 'np.random.choice', (['indices', '(1)'], {}), '(indices, 1)\n', (490, 502), True, 'import numpy as np\n'), ((1584, 1602), 'numpy.argmin', 'np.argmin', (['cur_dis'], {}), '(cur_dis)\n', (1593, 1602), True, 'import numpy as np\n'), ((2552, 2572), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (2561, 2572), True, 'import numpy as np\n'), ((924, 963), 'numpy.random.choice', 'np.random.choice', (['indices', '(1)'], {'p': 'weights'}), '(indices, 1, p=weights)\n', (940, 963), True, 'import 
numpy as np\n')] |
import itertools
import pytest
import numpy as np
import mmu
from mmu.commons._testing import generate_test_labels
from mmu.commons._testing import compute_reference_metrics
# dtypes exercised for the ground-truth label array ``y``
Y_DTYPES = [
    bool,
    np.bool_,
    int,
    np.int32,
    np.int64,
    float,
    np.float32,
    np.float64,
]
# dtypes exercised for the estimated label array ``yhat``
YHAT_DTYPES = [
    bool,
    np.bool_,
    int,
    np.int32,
    np.int64,
    float,
    np.float32,
    np.float64,
]
# dtypes exercised for the probability/score array (floating point only)
PROBA_DTYPES = [
    float,
    np.float32,
    np.float64,
]
def test_binary_metrics_yhat():
    """binary_metrics from hard labels agrees with the reference metrics."""
    for dtype_y, dtype_yhat in itertools.product(Y_DTYPES, YHAT_DTYPES):
        _, yhat, y = generate_test_labels(
            N=1000, y_dtype=dtype_y, yhat_dtype=dtype_yhat
        )
        ref_conf_mat, ref_metrics = compute_reference_metrics(y, yhat=yhat)
        conf_mat, metrics = mmu.binary_metrics(y, yhat)
        msg = f"test failed for dtypes: {dtype_y}, {dtype_yhat}"
        assert np.array_equal(conf_mat, ref_conf_mat), msg
        assert np.allclose(metrics, ref_metrics), msg
def test_binary_metrics_yhat_shapes():
    """Check if different shapes are handled correctly."""
    _, yhat, y = generate_test_labels(1000)
    sk_conf_mat, sk_metrics = compute_reference_metrics(y, yhat=yhat)
    # flat, single-row and single-column layouts must all be accepted
    y_variants = (y, y[None, :], y[:, None])
    yhat_variants = (yhat, yhat[None, :], yhat[:, None])
    for y_, yhat_ in itertools.product(y_variants, yhat_variants):
        conf_mat, metrics = mmu.binary_metrics(y_, yhat_)
        msg = f"test failed for dtypes: {y_.shape}, {yhat_.shape}"
        assert np.array_equal(conf_mat, sk_conf_mat), msg
        assert np.allclose(metrics, sk_metrics), msg
    # unequal length inputs must be rejected
    with pytest.raises(ValueError):
        mmu.binary_metrics(y, yhat[:100])
    with pytest.raises(ValueError):
        mmu.binary_metrics(y[:100], yhat)
    # 2d with more than one row/column for the second dimension must be rejected
    bad_y = (np.tile(y[:, None], 2), np.tile(y[None, :], (2, 1)))
    bad_yhat = (np.tile(yhat[:, None], 2), np.tile(yhat[None, :], (2, 1)))
    for y_, yhat_ in itertools.product(bad_y, bad_yhat):
        with pytest.raises(ValueError):
            mmu.binary_metrics(y_, yhat_)
def test_binary_metrics_order():
    """Check that different orders and shapes are handled correctly."""
    _, yhat, y = generate_test_labels(1000)
    sk_conf_mat, sk_metrics = compute_reference_metrics(y, yhat=yhat)

    def _layouts(arr):
        # every combination of memory order (C/Fortran) and orientation
        return [
            arr.copy(order='C'),
            arr.copy(order='F'),
            arr[None, :].copy(order='C'),
            arr[:, None].copy(order='C'),
            arr[None, :].copy(order='F'),
            arr[:, None].copy(order='F'),
        ]

    for y_, yhat_ in itertools.product(_layouts(y), _layouts(yhat)):
        conf_mat, metrics = mmu.binary_metrics(y_, yhat_)
        msg = f"test failed for dtypes: {y_.shape}, {yhat_.shape}"
        assert np.array_equal(conf_mat, sk_conf_mat), msg
        assert np.allclose(metrics, sk_metrics), msg
def test_binary_metrics_proba():
    """Test binary_metrics from probability scores against the reference."""
    thresholds = np.random.uniform(0, 1, 10)
    for y_dtype, proba_dtype, threshold in itertools.product(
        Y_DTYPES, PROBA_DTYPES, thresholds
    ):
        proba, _, y = generate_test_labels(
            N=1000,
            y_dtype=y_dtype,
            proba_dtype=proba_dtype
        )
        sk_conf_mat, sk_metrics = compute_reference_metrics(
            y, proba=proba, threshold=threshold
        )
        conf_mat, metrics = mmu.binary_metrics(y, scores=proba, threshold=threshold)
        assert np.array_equal(conf_mat, sk_conf_mat), (
            f"test failed for dtypes: {y_dtype}, {proba_dtype}"
            f" and threshold: {threshold}"
        )
        assert np.allclose(metrics, sk_metrics), (
            f"test failed for dtypes: {y_dtype}, {proba_dtype}"
            f" and threshold: {threshold}"
        )
    # test fill settings: thresholds at the extremes of [0, 1] empty out
    # confusion-matrix cells so the fill value is exercised.
    # BUGFIX: the thresholds were [1e7, 1. - 1e7], which are not valid
    # probabilities (1e7 and -9999999.0); the dead ``threshold = 1e-7``
    # assignment below them showed the intent was 1e-7 and 1 - 1e-7.
    proba, _, y = generate_test_labels(N=1000)
    thresholds = [1e-7, 1. - 1e-7]
    fills = [0.0, 1.0]
    for threshold, fill in itertools.product(thresholds, fills):
        conf_mat, metrics = mmu.binary_metrics(
            y, scores=proba, threshold=threshold, fill=fill
        )
        sk_conf_mat, sk_metrics = compute_reference_metrics(
            y, proba=proba, threshold=threshold, fill=fill
        )
        assert np.array_equal(conf_mat, sk_conf_mat), (
            f"test failed for threshold: {threshold}, fill: {fill}"
        )
        assert np.allclose(metrics, sk_metrics), (
            f"test failed for threshold: {threshold}, fill: {fill}"
        )
def test_binary_metrics_proba_shapes():
    """Check if different shapes are handled correctly."""
    proba, _, y = generate_test_labels(1000)
    sk_conf_mat, sk_metrics = compute_reference_metrics(
        y, proba=proba, threshold=0.5
    )
    # flat, single-row and single-column layouts must all be accepted
    y_variants = (y, y[None, :], y[:, None])
    proba_variants = (proba, proba[None, :], proba[:, None])
    for y_, proba_ in itertools.product(y_variants, proba_variants):
        conf_mat, metrics = mmu.binary_metrics(y_, scores=proba_, threshold=0.5)
        msg = f"test failed for shapes: {y_.shape}, {proba_.shape}"
        assert np.array_equal(conf_mat, sk_conf_mat), msg
        assert np.allclose(metrics, sk_metrics), msg
    # unequal length inputs must be rejected
    with pytest.raises(ValueError):
        mmu.binary_metrics(y, scores=proba[:100], threshold=0.5)
    with pytest.raises(ValueError):
        mmu.binary_metrics(y[:100], scores=proba, threshold=0.5)
    # 2d with more than one row/column for the second dimension must be rejected
    bad_y = (np.tile(y[:, None], 2), np.tile(y[None, :], (2, 1)))
    bad_proba = (np.tile(proba[:, None], 2), np.tile(proba[None, :], (2, 1)))
    for y_, proba_ in itertools.product(bad_y, bad_proba):
        with pytest.raises(ValueError):
            mmu.binary_metrics(y_, scores=proba_, threshold=0.5)
def test_binary_metrics_proba_order():
    """Check that different orders and shapes are handled correctly."""
    proba, _, y = generate_test_labels(1000)
    sk_conf_mat, sk_metrics = compute_reference_metrics(
        y, proba=proba, threshold=0.5
    )

    def _layouts(arr):
        # every combination of memory order (C/Fortran) and orientation
        return [
            arr.copy(order='C'),
            arr.copy(order='F'),
            arr[None, :].copy(order='C'),
            arr[:, None].copy(order='C'),
            arr[None, :].copy(order='F'),
            arr[:, None].copy(order='F'),
        ]

    for y_, proba_ in itertools.product(_layouts(y), _layouts(proba)):
        conf_mat, metrics = mmu.binary_metrics(y_, scores=proba_, threshold=0.5)
        msg = f"test failed for shapes: {y_.shape}, {proba_.shape}"
        assert np.array_equal(conf_mat, sk_conf_mat), msg
        assert np.allclose(metrics, sk_metrics), msg
| [
"numpy.tile",
"numpy.allclose",
"mmu.commons._testing.compute_reference_metrics",
"itertools.product",
"numpy.random.uniform",
"numpy.array_equal",
"pytest.raises",
"mmu.binary_metrics",
"mmu.commons._testing.generate_test_labels"
] | [((584, 624), 'itertools.product', 'itertools.product', (['Y_DTYPES', 'YHAT_DTYPES'], {}), '(Y_DTYPES, YHAT_DTYPES)\n', (601, 624), False, 'import itertools\n'), ((1262, 1288), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', (['(1000)'], {}), '(1000)\n', (1282, 1288), False, 'from mmu.commons._testing import generate_test_labels\n'), ((1319, 1358), 'mmu.commons._testing.compute_reference_metrics', 'compute_reference_metrics', (['y'], {'yhat': 'yhat'}), '(y, yhat=yhat)\n', (1344, 1358), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((1480, 1520), 'itertools.product', 'itertools.product', (['y_shapes', 'yhat_shapes'], {}), '(y_shapes, yhat_shapes)\n', (1497, 1520), False, 'import itertools\n'), ((2301, 2341), 'itertools.product', 'itertools.product', (['y_shapes', 'yhat_shapes'], {}), '(y_shapes, yhat_shapes)\n', (2318, 2341), False, 'import itertools\n'), ((2549, 2575), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', (['(1000)'], {}), '(1000)\n', (2569, 2575), False, 'from mmu.commons._testing import generate_test_labels\n'), ((2606, 2645), 'mmu.commons._testing.compute_reference_metrics', 'compute_reference_metrics', (['y'], {'yhat': 'yhat'}), '(y, yhat=yhat)\n', (2631, 2645), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((3133, 3173), 'itertools.product', 'itertools.product', (['y_orders', 'yhat_orders'], {}), '(y_orders, yhat_orders)\n', (3150, 3173), False, 'import itertools\n'), ((3580, 3607), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (3597, 3607), True, 'import numpy as np\n'), ((3651, 3704), 'itertools.product', 'itertools.product', (['Y_DTYPES', 'PROBA_DTYPES', 'thresholds'], {}), '(Y_DTYPES, PROBA_DTYPES, thresholds)\n', (3668, 3704), False, 'import itertools\n'), ((4449, 4477), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', ([], {'N': '(1000)'}), '(N=1000)\n', (4469, 4477), False, 'from 
mmu.commons._testing import generate_test_labels\n'), ((4584, 4620), 'itertools.product', 'itertools.product', (['thresholds', 'fills'], {}), '(thresholds, fills)\n', (4601, 4620), False, 'import itertools\n'), ((5251, 5277), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', (['(1000)'], {}), '(1000)\n', (5271, 5277), False, 'from mmu.commons._testing import generate_test_labels\n'), ((5411, 5467), 'mmu.commons._testing.compute_reference_metrics', 'compute_reference_metrics', (['y'], {'proba': 'proba', 'threshold': '(0.5)'}), '(y, proba=proba, threshold=0.5)\n', (5436, 5467), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((5505, 5546), 'itertools.product', 'itertools.product', (['y_shapes', 'proba_shapes'], {}), '(y_shapes, proba_shapes)\n', (5522, 5546), False, 'import itertools\n'), ((6402, 6443), 'itertools.product', 'itertools.product', (['y_shapes', 'proba_shapes'], {}), '(y_shapes, proba_shapes)\n', (6419, 6443), False, 'import itertools\n'), ((6681, 6707), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', (['(1000)'], {}), '(1000)\n', (6701, 6707), False, 'from mmu.commons._testing import generate_test_labels\n'), ((7210, 7266), 'mmu.commons._testing.compute_reference_metrics', 'compute_reference_metrics', (['y'], {'proba': 'proba', 'threshold': '(0.5)'}), '(y, proba=proba, threshold=0.5)\n', (7235, 7266), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((7304, 7345), 'itertools.product', 'itertools.product', (['y_orders', 'proba_orders'], {}), '(y_orders, proba_orders)\n', (7321, 7345), False, 'import itertools\n'), ((647, 715), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', ([], {'N': '(1000)', 'y_dtype': 'y_dtype', 'yhat_dtype': 'yhat_dtype'}), '(N=1000, y_dtype=y_dtype, yhat_dtype=yhat_dtype)\n', (667, 715), False, 'from mmu.commons._testing import generate_test_labels\n'), ((796, 835), 'mmu.commons._testing.compute_reference_metrics', 
'compute_reference_metrics', (['y'], {'yhat': 'yhat'}), '(y, yhat=yhat)\n', (821, 835), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((865, 892), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y', 'yhat'], {}), '(y, yhat)\n', (883, 892), False, 'import mmu\n'), ((908, 945), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (922, 945), True, 'import numpy as np\n'), ((1037, 1069), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (1048, 1069), True, 'import numpy as np\n'), ((1550, 1579), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y_', 'yhat_'], {}), '(y_, yhat_)\n', (1568, 1579), False, 'import mmu\n'), ((1595, 1632), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (1609, 1632), True, 'import numpy as np\n'), ((1726, 1758), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (1737, 1758), True, 'import numpy as np\n'), ((1868, 1893), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1881, 1893), False, 'import pytest\n'), ((1903, 1936), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y', 'yhat[:100]'], {}), '(y, yhat[:100])\n', (1921, 1936), False, 'import mmu\n'), ((1946, 1971), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1959, 1971), False, 'import pytest\n'), ((1981, 2014), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y[:100]', 'yhat'], {}), '(y[:100], yhat)\n', (1999, 2014), False, 'import mmu\n'), ((2111, 2133), 'numpy.tile', 'np.tile', (['y[:, None]', '(2)'], {}), '(y[:, None], 2)\n', (2118, 2133), True, 'import numpy as np\n'), ((2143, 2170), 'numpy.tile', 'np.tile', (['y[None, :]', '(2, 1)'], {}), '(y[None, :], (2, 1))\n', (2150, 2170), True, 'import numpy as np\n'), ((2207, 2232), 'numpy.tile', 'np.tile', (['yhat[:, None]', '(2)'], {}), '(yhat[:, None], 2)\n', (2214, 2232), True, 
'import numpy as np\n'), ((2242, 2272), 'numpy.tile', 'np.tile', (['yhat[None, :]', '(2, 1)'], {}), '(yhat[None, :], (2, 1))\n', (2249, 2272), True, 'import numpy as np\n'), ((3203, 3232), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y_', 'yhat_'], {}), '(y_, yhat_)\n', (3221, 3232), False, 'import mmu\n'), ((3248, 3285), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (3262, 3285), True, 'import numpy as np\n'), ((3379, 3411), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (3390, 3411), True, 'import numpy as np\n'), ((3742, 3812), 'mmu.commons._testing.generate_test_labels', 'generate_test_labels', ([], {'N': '(1000)', 'y_dtype': 'y_dtype', 'proba_dtype': 'proba_dtype'}), '(N=1000, y_dtype=y_dtype, proba_dtype=proba_dtype)\n', (3762, 3812), False, 'from mmu.commons._testing import generate_test_labels\n'), ((3893, 3955), 'mmu.commons._testing.compute_reference_metrics', 'compute_reference_metrics', (['y'], {'proba': 'proba', 'threshold': 'threshold'}), '(y, proba=proba, threshold=threshold)\n', (3918, 3955), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((4007, 4063), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y'], {'scores': 'proba', 'threshold': 'threshold'}), '(y, scores=proba, threshold=threshold)\n', (4025, 4063), False, 'import mmu\n'), ((4079, 4116), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (4093, 4116), True, 'import numpy as np\n'), ((4252, 4284), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (4263, 4284), True, 'import numpy as np\n'), ((4650, 4717), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y'], {'scores': 'proba', 'threshold': 'threshold', 'fill': 'fill'}), '(y, scores=proba, threshold=threshold, fill=fill)\n', (4668, 4717), False, 'import mmu\n'), ((4774, 4847), 
'mmu.commons._testing.compute_reference_metrics', 'compute_reference_metrics', (['y'], {'proba': 'proba', 'threshold': 'threshold', 'fill': 'fill'}), '(y, proba=proba, threshold=threshold, fill=fill)\n', (4799, 4847), False, 'from mmu.commons._testing import compute_reference_metrics\n'), ((4885, 4922), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (4899, 4922), True, 'import numpy as np\n'), ((5019, 5051), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (5030, 5051), True, 'import numpy as np\n'), ((5576, 5628), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y_'], {'scores': 'proba_', 'threshold': '(0.5)'}), '(y_, scores=proba_, threshold=0.5)\n', (5594, 5628), False, 'import mmu\n'), ((5644, 5681), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (5658, 5681), True, 'import numpy as np\n'), ((5776, 5808), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (5787, 5808), True, 'import numpy as np\n'), ((5919, 5944), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5932, 5944), False, 'import pytest\n'), ((5954, 6010), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y'], {'scores': 'proba[:100]', 'threshold': '(0.5)'}), '(y, scores=proba[:100], threshold=0.5)\n', (5972, 6010), False, 'import mmu\n'), ((6020, 6045), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6033, 6045), False, 'import pytest\n'), ((6055, 6111), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y[:100]'], {'scores': 'proba', 'threshold': '(0.5)'}), '(y[:100], scores=proba, threshold=0.5)\n', (6073, 6111), False, 'import mmu\n'), ((6208, 6230), 'numpy.tile', 'np.tile', (['y[:, None]', '(2)'], {}), '(y[:, None], 2)\n', (6215, 6230), True, 'import numpy as np\n'), ((6240, 6267), 'numpy.tile', 'np.tile', (['y[None, :]', '(2, 1)'], {}), '(y[None, :], (2, 
1))\n', (6247, 6267), True, 'import numpy as np\n'), ((6305, 6331), 'numpy.tile', 'np.tile', (['proba[:, None]', '(2)'], {}), '(proba[:, None], 2)\n', (6312, 6331), True, 'import numpy as np\n'), ((6341, 6372), 'numpy.tile', 'np.tile', (['proba[None, :]', '(2, 1)'], {}), '(proba[None, :], (2, 1))\n', (6348, 6372), True, 'import numpy as np\n'), ((7375, 7427), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y_'], {'scores': 'proba_', 'threshold': '(0.5)'}), '(y_, scores=proba_, threshold=0.5)\n', (7393, 7427), False, 'import mmu\n'), ((7443, 7480), 'numpy.array_equal', 'np.array_equal', (['conf_mat', 'sk_conf_mat'], {}), '(conf_mat, sk_conf_mat)\n', (7457, 7480), True, 'import numpy as np\n'), ((7575, 7607), 'numpy.allclose', 'np.allclose', (['metrics', 'sk_metrics'], {}), '(metrics, sk_metrics)\n', (7586, 7607), True, 'import numpy as np\n'), ((2356, 2381), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2369, 2381), False, 'import pytest\n'), ((2395, 2424), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y_', 'yhat_'], {}), '(y_, yhat_)\n', (2413, 2424), False, 'import mmu\n'), ((6458, 6483), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6471, 6483), False, 'import pytest\n'), ((6497, 6549), 'mmu.binary_metrics', 'mmu.binary_metrics', (['y_'], {'scores': 'proba_', 'threshold': '(0.5)'}), '(y_, scores=proba_, threshold=0.5)\n', (6515, 6549), False, 'import mmu\n')] |
# import libraries
import os
import numpy as np
import pandas as pd
import xarray as xray
import ftplib, wget, urllib
import dask as da
from dask.diagnostics import ProgressBar
from multiprocessing.pool import ThreadPool
import matplotlib.pyplot as plt
import shapely.ops
from shapely.geometry import box, Polygon
from mpl_toolkits.basemap import Basemap
import geopandas as gpd
import ogh
import landlab.grid.raster as r
def compile_x_wrfnnrp_raw_Salathe2014_locations(time_increments):
    """
    Compile a list of file URLs for Salathe et al., 2014 raw WRF NNRP data

    time_increments: (list) a list of dates ('YYYYMM') that identify each netcdf file

    Returns a list of HTTP URLs, one per time increment.
    """
    domain = 'http://cses.washington.edu'
    subdomain = 'rocinante/WRF/NNRP/vic_16d/PNW_1970_1999/WRF_NNRP_noBC/netcdf_daily'
    # BUGFIX: build URLs with an explicit '/' separator; os.path.join uses the
    # platform separator and would produce backslashes on Windows, which are
    # invalid in a URL
    return ['/'.join([domain, subdomain, 'WRF_NNRP_noBC.{0}.nc'.format(yearmo)])
            for yearmo in time_increments]
def compile_x_dailymet_Livneh2013_raw_locations(time_increments):
    """
    Compile a list of file URLs for Livneh et al., 2013 raw MET data

    time_increments: (list) a list of dates ('YYYYMM') that identify each netcdf file

    Returns a list of FTP URLs, one per time increment.
    """
    locations = []
    domain = 'ftp://livnehpublicstorage.colorado.edu'
    subdomain = 'public/Livneh.2013.CONUS.Dataset/Meteorology.nc.v.1.2.1915.2011.bz2'
    for yearmo in time_increments:
        # files through January 1916 were published uncompressed; the rest
        # are bz2-compressed
        if yearmo.startswith('1915') or (yearmo == '191601'):
            basename = 'Meteorology_Livneh_CONUSExt_v.1.2_2013.{0}.nc'.format(yearmo)
        else:
            basename = 'Meteorology_Livneh_CONUSExt_v.1.2_2013.{0}.nc.bz2'.format(yearmo)
        # BUGFIX: join with an explicit '/'; os.path.join uses the platform
        # separator and would yield backslashes on Windows, invalid in a URL
        locations.append('/'.join([domain, subdomain, basename]))
    return locations
def wget_x_download_spSubset(fileurl,
                             spatialbounds,
                             file_prefix='sp_',
                             rename_timelatlong_names={'LAT': 'LAT', 'LON': 'LON'},
                             replace_file=True):
    """
    Download a netcdf file over HTTP and save a spatial subset of it.

    fileurl: (str) a url to request a netcdf file
    spatialbounds: (dict) dictionary providing the minx, miny, maxx, and maxy of the spatial region
    file_prefix: (str) a string to mark the output file as a spatial subset
    rename_timelatlong_names: (dict) a dictionary to standardize latitude/longitude synonyms to LAT/LON, respectively
        NOTE(review): mutable default argument; shared across calls, but never mutated here
    replace_file: (logic) If True, the existing file will be replaced; if False, the file download is skipped

    Returns the absolute path of the subsetted file (in the current working
    directory), or None if the download failed or the URL returned 404.
    """
    # check if the file path already exists; if so, apply replace_file logic
    basename = os.path.basename(fileurl)
    # a stale parent download is always removed before re-downloading
    if os.path.isfile(basename):
        os.remove(basename)
    if os.path.isfile(file_prefix+basename) and replace_file:
        os.remove(file_prefix+basename)
    elif os.path.isfile(file_prefix+basename) and not replace_file:
        # replace_file is False; return file path and skip
        return(os.path.join(os.getcwd(), file_prefix+basename))
    # try the file connection
    try:
        ping = urllib.request.urlopen(fileurl)
        # if the file exists, download it
        if ping.getcode() != 404:
            ping.close()
            # wget saves the file into the current working directory
            wget.download(fileurl)
            # open the parent netcdf file
            ds = xray.open_dataset(basename, engine='netcdf4')
            # rename latlong if they are not LAT and LON, respectively
            if not isinstance(rename_timelatlong_names, type(None)):
                ds = ds.rename(rename_timelatlong_names)
            # slice by the bounding box
            spSubset = ds.sel(LON=slice(spatialbounds['minx'], spatialbounds['maxx']),
                              LAT=slice(spatialbounds['miny'], spatialbounds['maxy']))
            # write the spatial subset next to where the parent was saved
            spSubset.to_netcdf(file_prefix+basename)
            # remove the parent
            ds.close()
            os.remove(basename)
            return(os.path.join(os.getcwd(), file_prefix+basename))
        else:
            ping.close()
    except:
        # NOTE(review): bare except hides the real failure (including
        # KeyboardInterrupt); consider catching urllib.error.URLError/OSError
        print('File does not exist at this URL: ' + basename)
def ftp_x_download_spSubset(fileurl,
                            spatialbounds,
                            file_prefix='sp_',
                            rename_timelatlong_names={'LAT': 'LAT', 'LON': 'LON', 'TIME': 'TIME'},
                            replace_file=True):
    """
    Download a netcdf file from an FTP server and save a spatial subset of it.

    fileurl: (str) a url to request a netcdf file (ftp:// scheme)
    spatialbounds: (dict) dictionary providing the minx, miny, maxx, and maxy of the spatial region
    file_prefix: (str) a string to mark the output file as a spatial subset
    rename_timelatlong_names: (dict) a dictionary to standardize latitude/longitude/time synonyms to LAT/LON/TIME
        NOTE(review): mutable default argument; shared across calls, but never mutated here
    replace_file: (logic) If True, the existing file will be replaced; if False, the file download is skipped

    Returns the absolute path of the subsetted file (in the current working
    directory), or None if the transfer or subsetting failed.
    """
    # establish path info
    fileurl = fileurl.replace('ftp://', '')  # fileurl is url with the domain appended
    ipaddress = fileurl.split('/', 1)[0]  # ip address
    path = os.path.dirname(fileurl.split('/', 1)[1])  # folder path
    filename = os.path.basename(fileurl)
    # check if the file path already exists; if so, apply replace_file logic
    if os.path.isfile(filename):
        os.remove(filename)
    if os.path.isfile(file_prefix+filename) and replace_file:
        os.remove(file_prefix+filename)
    elif os.path.isfile(file_prefix+filename) and not replace_file:
        # replace_file is False; return file path and skip
        return(os.path.join(os.getcwd(), file_prefix+filename))
    # download the file from the ftp server
    ftp = ftplib.FTP(ipaddress)
    ftp.login()
    ftp.cwd(path)
    try:
        # try the file connection; the retrieved bytes are written to disk
        ftp.retrbinary('RETR ' + filename, open(filename, 'wb').write)
        ftp.close()
        # decompress the file
        if filename.endswith('.bz2'):
            ogh.decompbz2(filename)
            filename = filename.replace('.bz2', '')
        # open the parent netcdf file
        ds = xray.open_dataset(filename, engine='netcdf4')
        # rename lat/long/time if they are not LAT, LON and TIME, respectively
        if not isinstance(rename_timelatlong_names, type(None)):
            ds = ds.rename(rename_timelatlong_names)
        # slice by the bounding box
        spSubset = ds.sel(LON=slice(spatialbounds['minx'], spatialbounds['maxx']),
                          LAT=slice(spatialbounds['miny'], spatialbounds['maxy']))
        # write the spatial subset
        spSubset.to_netcdf(file_prefix+filename)
        # remove the parent
        ds.close()
        os.remove(filename)
        return(os.path.join(os.getcwd(), file_prefix+filename))
    except:
        # os.remove(filename)
        # NOTE(review): bare except hides the real failure (including
        # KeyboardInterrupt); consider catching ftplib.error_perm/OSError
        print('File does not exist at this URL: '+fileurl)
def get_x_dailywrf_Salathe2014(homedir,
                               spatialbounds,
                               subdir='salathe2014/Daily_WRF_1970_1999/noBC',
                               nworkers=4,
                               start_date='1970-01-01',
                               end_date='1989-12-31',
                               rename_timelatlong_names={'LAT': 'LAT', 'LON': 'LON', 'TIME': 'TIME'},
                               file_prefix='sp_',
                               replace_file=True):
    """
    get Daily WRF data from Salathe et al. (2014) using xarray on netcdf files

    homedir: (str) home directory; the working directory is reset here on completion
    spatialbounds: (dict) minx, miny, maxx, and maxy of the spatial region of interest
    subdir: (str) subdirectory (under homedir) for the downloaded files
    nworkers: (int) number of threads in the dask download pool
    start_date, end_date: (str) 'YYYY-MM-DD' range of monthly files to fetch
    rename_timelatlong_names: (dict) mapping to standardize lat/long/time names to LAT/LON/TIME
    file_prefix: (str) prefix marking each output file as a spatial subset
    replace_file: (logic) If True, existing files are replaced; if False, downloads are skipped

    Returns the list of results from wget_x_download_spSubset, one per monthly file.
    """
    # check and generate the Daily WRF Salathe 2014 data directory
    filedir = os.path.join(homedir, subdir)
    ogh.ensure_dir(filedir)
    # NOTE(review): filedir is created but never explicitly chdir'd to here;
    # presumably ogh.ensure_dir switches the working directory -- confirm
    # modify each month between start_date and end_date to year-month
    dates = [x.strftime('%Y%m') for x in pd.date_range(start=start_date, end=end_date, freq='M')]
    # initialize parallel workers
    da.set_options(pool=ThreadPool(nworkers))
    ProgressBar().register()
    # generate the list of files to download
    filelist = compile_x_wrfnnrp_raw_Salathe2014_locations(dates)
    # download files of interest (lazily; executed below by da.compute)
    NetCDFs = []
    for url in filelist:
        NetCDFs.append(da.delayed(wget_x_download_spSubset)(fileurl=url,
                                                             spatialbounds=spatialbounds,
                                                             file_prefix=file_prefix,
                                                             rename_timelatlong_names=rename_timelatlong_names,
                                                             replace_file=replace_file))
    # run operations
    outputfiles = da.compute(NetCDFs)[0]
    # reset working directory
    os.chdir(homedir)
    return(outputfiles)
def get_x_dailymet_Livneh2013_raw(homedir,
                                  spatialbounds,
                                  subdir='livneh2013/Daily_MET_1915_2011/raw_netcdf',
                                  nworkers=4,
                                  start_date='1915-01-01',
                                  end_date='2011-12-31',
                                  rename_timelatlong_names={'lat': 'LAT', 'lon': 'LON', 'time': 'TIME'},
                                  file_prefix='sp_',
                                  replace_file=True):
    """
    get Daily MET data from Livneh et al. (2013) using xarray on netcdf files

    homedir: (str) home directory; the working directory is reset here on completion
    spatialbounds: (dict) minx, miny, maxx, and maxy of the spatial region of interest
    subdir: (str) subdirectory (under homedir) for the downloaded files
    nworkers: (int) number of threads in the dask download pool
    start_date, end_date: (str) 'YYYY-MM-DD' range of monthly files to fetch
    rename_timelatlong_names: (dict) mapping to standardize lat/lon/time names to LAT/LON/TIME
    file_prefix: (str) prefix marking each output file as a spatial subset
    replace_file: (logic) If True, existing files are replaced; if False, downloads are skipped

    Returns the list of results from ftp_x_download_spSubset, one per monthly file.
    """
    # check and generate DailyMET livneh 2013 data directory
    filedir = os.path.join(homedir, subdir)
    ogh.ensure_dir(filedir)
    # NOTE(review): filedir is created but never explicitly chdir'd to here;
    # presumably ogh.ensure_dir switches the working directory -- confirm
    # modify each month between start_date and end_date to year-month
    dates = [x.strftime('%Y%m') for x in pd.date_range(start=start_date, end=end_date, freq='M')]
    # initialize parallel workers
    da.set_options(pool=ThreadPool(nworkers))
    ProgressBar().register()
    # generate the list of files to download
    filelist = compile_x_dailymet_Livneh2013_raw_locations(dates)
    # download files of interest (lazily; executed below by da.compute)
    NetCDFs = []
    for url in filelist:
        NetCDFs.append(da.delayed(ftp_x_download_spSubset)(fileurl=url,
                                                            spatialbounds=spatialbounds,
                                                            file_prefix=file_prefix,
                                                            rename_timelatlong_names=rename_timelatlong_names,
                                                            replace_file=replace_file))
    # run operations
    outputfiles = da.compute(NetCDFs)[0]
    # reset working directory
    os.chdir(homedir)
    return(outputfiles)
def netcdf_to_ascii(homedir, subdir, source_directory, mappingfile, catalog_label, meta_file,
                    temporal_resolution='D', netcdfs=None, variable_list=None):
    """
    Convert a collection of netcdf files into per-gridcell ASCII time-series files.

    homedir: (str) home directory; the working directory is reset here on completion
    subdir: (str) subdirectory (under homedir) for the ASCII output files
    source_directory: (str) directory holding the source netcdf files
    mappingfile: (str) path to the mapping file of gridded cell centroids
    catalog_label: (str) label under which outputs are cataloged in the mapping file and metadata
    meta_file: (dict) metadata dictionary annotated in place with the catalog entry
    temporal_resolution: (str) pandas offset alias recorded in the metadata (default 'D', daily)
    netcdfs: (list) netcdf file paths; if None, all '.nc' files in source_directory are used
    variable_list: (list) variables to extract; if None, all non-coordinate variables are used

    Returns the list of ASCII file paths written.
    """
    # initialize list of dataframe outputs
    outfiledict = {}
    # generate destination folder
    filedir = os.path.join(homedir, subdir)
    ogh.ensure_dir(filedir)
    # connect with collection of netcdfs
    if isinstance(netcdfs, type(None)):
        netcdfs = [os.path.join(source_directory, file) for file in os.listdir(source_directory) if file.endswith('.nc')]
    ds_mf = xray.open_mfdataset(netcdfs, engine='netcdf4').sortby('TIME')
    # generate list of variables, excluding the coordinate/time columns
    if not isinstance(variable_list, type(None)):
        ds_vars = variable_list.copy()
    else:
        ds_vars = [ds_var for ds_var in dict(ds_mf.variables).keys()
                   if ds_var not in ['YEAR', 'MONTH', 'DAY', 'TIME', 'LAT', 'LON']]
    # convert netcdfs to a multi-indexed dataframe of the selected variables
    ds_pan = ds_mf.to_dataframe()[ds_vars]
    # read in gridded cells of interest
    maptable, nstation = ogh.mappingfileToDF(mappingfile, colvar=None, summary=False)
    # at each latlong of interest
    for ind, eachrow in maptable.iterrows():
        # generate ASCII time-series by slicing this cell's LAT/LONG_ rows
        ds_df = ds_pan.loc[eachrow['LAT'], eachrow['LONG_'], :].reset_index(drop=True, level=[0, 1])
        # create file name
        outfilename = os.path.join(filedir, 'data_{0}_{1}'.format(eachrow['LAT'], eachrow['LONG_']))
        # save ds_df lazily; all writes are executed together below
        outfiledict[outfilename] = da.delayed(ds_df.to_csv)(path_or_buf=outfilename, sep='\t', header=False, index=False)
    # compute ASCII time-series files
    ProgressBar().register()
    outfiledict = da.compute(outfiledict)[0]
    # annotate metadata file
    meta_file[catalog_label] = dict(ds_mf.attrs)
    meta_file[catalog_label]['variable_list'] = list(np.array(ds_vars))
    meta_file[catalog_label]['delimiter'] = '\t'
    meta_file[catalog_label]['start_date'] = pd.Series(ds_mf.TIME).sort_values().iloc[0].strftime('%Y-%m-%d %H:%M:%S')
    meta_file[catalog_label]['end_date'] = pd.Series(ds_mf.TIME).sort_values().iloc[-1].strftime('%Y-%m-%d %H:%M:%S')
    meta_file[catalog_label]['temporal_resolution'] = temporal_resolution
    meta_file[catalog_label]['variable_info'] = dict(ds_mf.variables)
    # catalog the output files
    ogh.addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
    os.chdir(homedir)
    return(list(outfiledict.keys()))
def calculateUTMbounds(mappingfile, mappingfile_crs={'init': 'epsg:4326'}, spatial_resolution=0.06250):
    """Compute the UTM (transverse mercator) bounding box of the mapping-file grid.

    mappingfile: (str) path to the mapping file read via ogh.mappingfileToDF
    mappingfile_crs: (dict) the CRS of the mapping file coordinates (default WGS84)
    spatial_resolution: (float) grid cell width/height in degrees

    Returns (minx2, miny2, maxx2, maxy2): the dissolved grid extent in the
    UTM basemap projection.
    """
    # read in the mappingfile
    map_df, nstation = ogh.mappingfileToDF(mappingfile)
    # expand each LAT/LONG_ centroid by +/- half the spatial resolution into a gridded cell
    geom = []
    midpt = spatial_resolution/2
    for ind in map_df.index:
        mid = map_df.loc[ind]
        geom.append(box(mid.LONG_-midpt, mid.LAT-midpt, mid.LONG_+midpt, mid.LAT+midpt, ccw=True))
    # generate the GeoDataFrame
    test = gpd.GeoDataFrame(map_df, crs=mappingfile_crs, geometry=geom)
    # dissolve all gridded cells into one shape to extract the bounding box
    test['shapeName'] = 1
    newShape = test.dissolve(by='shapeName').reset_index()
    print(newShape.bounds)
    # take the minx and miny, and centroid_x and centroid_y
    minx, miny, maxx, maxy = newShape.bounds.loc[0]
    lon0, lat0 = np.array(newShape.centroid[0])
    # generate the basemap raster centered on the dissolved shape
    fig = plt.figure(figsize=(10, 10), dpi=500)
    ax1 = plt.subplot2grid((1, 1), (0, 0))
    m = Basemap(projection='tmerc', resolution='h', ax=ax1, lat_0=lat0, lon_0=lon0,
                llcrnrlon=minx, llcrnrlat=miny, urcrnrlon=maxx, urcrnrlat=maxy)
    # transform each polygon to the UTM basemap projection
    # BUGFIX: the original additionally ran a per-row loop assigning
    # newShape.loc[ind, 'g2'] whose results were immediately overwritten by
    # this apply() call; the redundant loop has been removed.
    newShape['g2'] = newShape.apply(lambda x: shapely.ops.transform(m, x['geometry']), axis=1)
    # remove the plot used only to build the projection
    plt.gcf().clear()
    # establish the UTM basemap bounding box dimensions
    minx2, miny2, maxx2, maxy2 = newShape['g2'].iloc[0].bounds
    return (minx2, miny2, maxx2, maxy2)
def calculateUTMcells(mappingfile, mappingfile_crs={'init': 'epsg:4326'}, spatial_resolution=0.06250):
    """Project the mapping-file grid cells into a UTM (transverse mercator) projection.

    mappingfile: (str) path to the mapping file read via ogh.mappingfileToDF
    mappingfile_crs: (dict) the CRS of the mapping file coordinates (default WGS84)
    spatial_resolution: (float) grid cell width/height in degrees

    Returns (utm_gdf, m): the GeoDataFrame of UTM-projected cells and the
    WGS84-to-UTM basemap transformation.
    """
    # read in the mappingfile
    grid_df, nstation = ogh.mappingfileToDF(mappingfile)
    # expand each LAT/LONG_ centroid by +/- half the spatial resolution into a gridded cell
    half_cell = spatial_resolution/2
    cell_polygons = [box(grid_df.loc[ind].LONG_ - half_cell, grid_df.loc[ind].LAT - half_cell,
                         grid_df.loc[ind].LONG_ + half_cell, grid_df.loc[ind].LAT + half_cell, ccw=True)
                     for ind in grid_df.index]
    # generate the GeoDataFrame
    utm_gdf = gpd.GeoDataFrame(grid_df, crs=mappingfile_crs, geometry=cell_polygons)
    # dissolve all gridded cells into a single shape to obtain the overall bounding box
    utm_gdf['shapeName'] = 1
    dissolved = utm_gdf.dissolve(by='shapeName').reset_index()
    minx, miny, maxx, maxy = dissolved.bounds.loc[0]
    lon0, lat0 = np.array(dissolved.centroid[0])
    # set up a basemap raster centered on the shape centroid
    fig = plt.figure(figsize=(10, 10), dpi=500)
    ax1 = plt.subplot2grid((1, 1), (0, 0))
    m = Basemap(projection='tmerc', resolution='h', ax=ax1, lat_0=lat0, lon_0=lon0,
                llcrnrlon=minx, llcrnrlat=miny, urcrnrlon=maxx, urcrnrlat=maxy)
    # reproject every cell polygon into the UTM basemap projection
    utm_gdf['geometry'] = utm_gdf.apply(lambda x: shapely.ops.transform(m, x['geometry']), axis=1)
    utm_gdf = utm_gdf.drop('shapeName', axis=1)
    # discard the plot used only to build the projection
    plt.gcf().clear()
    # return the geodataframe and the spatial transformation from WGS84
    return (utm_gdf, m)
def rasterDimensions(maxx, maxy, minx=0, miny=0, dy=100, dx=100):
    """Construct a landlab RasterModelGrid spanning [minx, maxx] x [miny, maxy].

    maxx, maxy: (num) upper bounds of the grid extent
    minx, miny: (num) lower bounds of the grid extent (default 0)
    dy, dx: (int) row/column spacing between raster nodes

    Returns (raster, row_list, col_list): the raster grid plus the ordered
    y (row) and x (column) coordinates of its nodes.
    """
    # construct the full integer coordinate ranges
    x = pd.Series(range(int(minx), int(maxx)+1, 1))
    y = pd.Series(range(int(miny), int(maxy)+1, 1))
    # keep values at each dx/dy increment, plus the first and last positions
    cols = pd.Series(x.index).apply(lambda x1: x[x1] if x1 % dx == 0 or x1 == x[0] or x1 == x.index[-1] else None)
    rows = pd.Series(y.index).apply(lambda y1: y[y1] if y1 % dy == 0 or y1 == y[0] or y1 == y.index[-1] else None)
    # drop the unselected (None) positions to form the node coordinate lists
    row_list = np.array(rows.loc[pd.notnull(rows)])
    col_list = np.array(cols.loc[pd.notnull(cols)])
    # construct the raster grid
    raster = r.RasterModelGrid((len(row_list), len(col_list)), spacing=(dy, dx))
    # BUGFIX: removed the no-op statement `raster.add_zeros` -- it referenced
    # the bound method without calling it, so it had no effect.
    return(raster, row_list, col_list)
def mappingfileToRaster(mappingfile, maxx, maxy, minx=0, miny=0, dx=100, dy=100,
                        spatial_resolution=0.06250, mappingfile_crs={'init': 'epsg:4326'}, raster_crs={'init': 'epsg:3857'}):
    """Cross-map mapping-file grid cells onto a landlab raster grid.

    Builds a raster spanning [minx, maxx] x [miny, maxy], draws a dx-by-dy cell
    around every raster node, and spatially joins those cells with the
    UTM-projected mapping-file cells.

    Returns (raster_df, raster, m): the node-to-FID cross-map GeoDataFrame,
    the raster grid, and the WGS84-to-UTM transformation.
    """
    # project the mapping file cells into UTM coordinates
    UTMmappingfile, m = calculateUTMcells(mappingfile=mappingfile,
                                          mappingfile_crs=mappingfile_crs,
                                          spatial_resolution=spatial_resolution)
    # construct the raster grid and its node coordinates
    raster, row_list, col_list = rasterDimensions(maxx, maxy, minx=minx, miny=miny, dy=dy, dx=dx)
    # build one record per raster node: id, cell polygon, and cell centroid
    records = []
    # raster.nodes yields bottom-to-top rows; row_list is ordered the same way
    for row_index, node_row in enumerate(raster.nodes):
        lat = row_list[row_index]
        # nodes run left-to-right in step with the ordered column coordinates
        for nodeid, long_ in zip(node_row, col_list):
            cell = box(long_, lat, long_+dx, lat+dy, ccw=True)
            records.append([nodeid, cell, cell.centroid])
    # assemble the records into a GeoDataFrame keyed on the cell centroids
    node_df = pd.DataFrame.from_records(records).rename(columns={0: 'nodeid', 1: 'raster_geom', 2: 'raster_centroid'})
    raster_map = gpd.GeoDataFrame(node_df, geometry='raster_centroid', crs=raster_crs)
    # identify raster nodeid and equivalent mappingfile FID
    raster_df = gpd.sjoin(raster_map, UTMmappingfile, how='left', op='intersects')
    raster_df = raster_df.drop('raster_centroid', axis=1).set_geometry('raster_geom')
    # return the raster node to mappingfile FID cross-map, and the rastermodelgrid
    return (raster_df, raster, m)
def temporalSlice(vardf, vardf_dateindex):
    """Extract one time step from a wide time-series table as a long table.

    vardf: (DataFrame) time-indexed rows; level 0 of the column MultiIndex
        carries the FID (grid cell id) -- presumed from the rename below
    vardf_dateindex: (label) the row (time step) to extract

    Returns a DataFrame with columns ['FID', 'value'].
    """
    snapshot = vardf.loc[vardf_dateindex, :].reset_index(level=0)
    renamed = snapshot.rename(columns={'level_0': 'FID', vardf_dateindex: 'value'})
    return renamed.reset_index(drop=True)
def rasterVector(vardf, vardf_dateindex, crossmap, nodata=-9999):
    """Produce the per-raster-node value vector for one time step.

    vardf: (DataFrame) wide time-series table, passed through to temporalSlice
    vardf_dateindex: (label) the time step to extract
    crossmap: (DataFrame) raster-node-to-FID cross-map with an 'FID' column
    nodata: (num) fill value for raster nodes with no matching FID

    Returns the 'value' column aligned to the crossmap rows.
    """
    step_values = temporalSlice(vardf=vardf, vardf_dateindex=vardf_dateindex)
    joined = crossmap.merge(step_values, on='FID', how='left')
    return joined.fillna(nodata)['value']
def valueRange(listOfDf):
    """Concatenate a list of Series/DataFrames column-wise and return the
    combined values as a numpy array (e.g. to establish a global value range).

    listOfDf: (list) pandas objects with a shared row index
    """
    # BUGFIX: `.values` replaces DataFrame.as_matrix(), which was deprecated in
    # pandas 0.23 and removed in pandas 1.0; `.values` works on all versions.
    all_values = pd.concat(listOfDf, axis=1).values
    return(all_values)
def rasterVectorToWGS(value_vector, nodeXmap, UTM_transformer):
    """Map a per-node value vector back to WGS84 polygons.

    value_vector: (Series) values indexed by raster node position
    nodeXmap: (GeoDataFrame) raster-node-to-FID cross-map with 'nodeid', 'FID'
        and 'raster_geom' (UTM polygon) columns
    UTM_transformer: (callable) projection supporting f(x, y, inverse=True)
        to convert UTM coordinates back to (lon, lat) -- e.g. a Basemap instance

    Returns (t4, t5): the merged table indexed by (nodeid, LAT, LON) with a
    'wgs_raster' geometry column, and a one-row wide table of the values.
    """
    # name the vector column so it can be merged on 'nodeid'
    t1 = value_vector.reset_index().rename(columns={'index': 'nodeid'})
    # keep only cross-map rows that matched a mapping-file FID
    t2 = nodeXmap[pd.notnull(nodeXmap.FID)]
    # merge the node vector information with the crossmap
    t3 = pd.merge(t1, t2, how='right', on='nodeid')
    # transform each raster_geom polygon back into WGS84
    ids = []
    newpol = []
    for ind, eachpoly in t3.iterrows():
        # reverse the polygon centroid mapping to WGS84
        ras_x, ras_y = np.array(eachpoly['raster_geom'].centroid)
        newcent = UTM_transformer(ras_x, ras_y, inverse=True)
        # collect (nodeid, LAT, LON); inverse transform returns (lon, lat),
        # hence the swapped newcent[1]/newcent[0] order
        ids.append(tuple([eachpoly['nodeid'], newcent[1], newcent[0]]))
        # reverse the polygon exterior ring mapping to WGS84
        newpol.append(Polygon([UTM_transformer(x, y, inverse=True)
                               for x, y in eachpoly['raster_geom'].__geo_interface__['coordinates'][0]]))
    # index each raster node by (nodeid, LAT, LON)
    t4 = t3.set_index(pd.MultiIndex.from_tuples(ids, names=['', '', '']))
    t4['wgs_raster'] = newpol
    t4 = t4.set_geometry('wgs_raster')
    # assimilate t5 as a wide (single-row) table of the values
    t5 = t4[['value']].T.reset_index(drop=True)
    return(t4, t5)
def compile_x_wrfpnnl2018_raw_locations(time_increments,
                                        domain='http://cses.washington.edu',
                                        subdomain='rocinante/WRF/PNNL_NARR_6km'):
    """
    Compile a list of file URLs for PNNL 2018 raw WRF data.

    time_increments: (list) a list of dates that identify each netcdf file
    domain: (str) the host serving the files
    subdomain: (str) the path under the host containing yearly subfolders

    Returns a list of 'domain/subdomain/YYYY/data.YYYY-MM-DD.nc' URLs,
    one per date.
    """
    return [os.path.join(domain,
                         subdomain,
                         ymd.strftime('%Y'),
                         'data.{0}.nc'.format(ymd.strftime('%Y-%m-%d')))
            for ymd in time_increments]
def wget_x_download_spSubset_PNNL(fileurl,
                                  filedate,
                                  spatialbounds,
                                  time_resolution='H',
                                  time_steps=24,
                                  file_prefix='sp_',
                                  rename_timelatlong_names={'south_north': 'SN', 'west_east': 'WE'},
                                  replace_file=True):
    """
    Download files from an http domain and save a spatial subset

    fileurl: (str) a url to request a netcdf file
    filedate: (str/date) the starting date used to build the hourly time index
    spatialbounds: (dict) dict providing the minx, miny, maxx, and maxy of the spatial region
    time_resolution: (str) pandas offset alias for the time steps in the file
    time_steps: (int) the number of time steps contained in each file
    file_prefix: (str) a string to mark the output file as a spatial subset
    rename_timelatlong_names: (dict) a dict to standardize dimension name synonyms to SN/WE
    replace_file: (logic) If True, the existing file will be replaced; if False, the file download is skipped

    Returns the absolute path of the subsetted netcdf, or None on failure.

    NOTE(review): the dict default for rename_timelatlong_names is mutable and
    shared across calls; it is never mutated here, but callers should not
    modify it in place.
    """
    # check if the file path already exists; if so, apply replace_file logic
    basename = os.path.basename(fileurl)
    if os.path.isfile(basename):
        os.remove(basename)
    if os.path.isfile(file_prefix+basename) and replace_file:
        os.remove(file_prefix+basename)
    elif os.path.isfile(file_prefix+basename) and not replace_file:
        # replace_file is False; return the existing file path and skip the download
        return(os.path.join(os.getcwd(), file_prefix+basename))
    # try the file connection
    try:
        ping = urllib.request.urlopen(fileurl)
        # if the file exists, download it
        if ping.getcode() != 404:
            ping.close()
            wget.download(fileurl)
            # open the parent netcdf file
            ds = xray.open_dataset(basename, engine='netcdf4')
            # rename dimensions if they are not SN and WE, respectively
            if not isinstance(rename_timelatlong_names, type(None)):
                ds = ds.rename(rename_timelatlong_names)
            # slice by the bounding box NOTE:dataframe slice includes last index
            ds = ds.assign_coords(SN=ds.SN, WE=ds.WE)
            spSubset = ds.sel(WE=slice(spatialbounds['minx'], spatialbounds['maxx']),
                              SN=slice(spatialbounds['miny'], spatialbounds['maxy']))
            # change time to datetimeindex starting at filedate
            hour = [x.strftime('%Y-%m-%d %H:%M:%S') for x in pd.date_range(start=filedate,
                                                                            periods=time_steps,
                                                                            freq=time_resolution)]
            spSubset['TIME'] = pd.DatetimeIndex(hour)
            # write out the spatial subset
            spSubset.to_netcdf(file_prefix+basename)
            print('downloaded: spatial subset of '+basename)
            # remove the parent file and release the dataset handle
            ds.close()
            os.remove(basename)
            return(os.path.join(os.getcwd(), file_prefix+basename))
        else:
            ping.close()
    # BUGFIX: narrowed from a bare `except:` clause, which also swallowed
    # KeyboardInterrupt/SystemExit; any download/read failure lands here
    except Exception:
        print('File does not exist at this URL: ' + basename)
def get_x_hourlywrf_PNNL2018(homedir,
                             spatialbounds,
                             subdir='PNNL2018/Hourly_WRF_1981_2015/SaukSpatialBounds',
                             nworkers=4,
                             start_date='2005-01-01',
                             end_date='2007-12-31',
                             time_resolution='H',
                             time_steps=24,
                             file_prefix='sp_',
                             rename_timelatlong_names={'south_north': 'SN', 'west_east': 'WE', 'time': 'TIME'},
                             replace_file=True):
    """
    Get hourly WRF data from a 2018 PNNL WRF run using xarray on netcdf files.

    homedir: (str) the home directory; the working directory is reset here on exit
    spatialbounds: (dict) minx, miny, maxx, maxy of the spatial region of interest
    subdir: (str) subdirectory under homedir where the subsetted files are stored
    nworkers: (int) number of parallel download workers
    start_date, end_date: (str) inclusive daily date range of files to fetch
    time_resolution, time_steps: passed through to wget_x_download_spSubset_PNNL
    file_prefix: (str) marker prefix for the spatially subsetted output files
    rename_timelatlong_names: (dict) dimension-name standardization map
    replace_file: (logic) If True, existing subset files are replaced

    Returns the list of paths of the downloaded spatial-subset files.
    """
    # check and generate data directory
    filedir = os.path.join(homedir, subdir)
    ogh.ensure_dir(filedir)
    # generate one YYYYMMDD stamp per day between start_date and end_date
    dates = [x.strftime('%Y%m%d') for x in pd.date_range(start=start_date, end=end_date, freq='D')]
    # initialize parallel workers
    da.set_options(pool=ThreadPool(nworkers))
    ProgressBar().register()
    # generate the list of files to download
    filelist = compile_x_wrfpnnl2018_raw_locations(dates)
    # queue a delayed download-and-subset task for each file of interest
    NetCDFs = []
    for url, date in zip(filelist, dates):
        NetCDFs.append(da.delayed(wget_x_download_spSubset_PNNL)(fileurl=url,
                                                                 filedate=date,
                                                                 time_resolution=time_resolution,
                                                                 time_steps=time_steps,
                                                                 spatialbounds=spatialbounds,
                                                                 file_prefix=file_prefix,
                                                                 rename_timelatlong_names=rename_timelatlong_names,
                                                                 replace_file=replace_file))
    # run operations
    outputfiles = da.compute(NetCDFs)[0]
    # reset working directory
    os.chdir(homedir)
    # NOTE(review): the trailing '| [' below looks like extraction/corruption
    # residue, not code -- verify against the upstream source
    return(outputfiles) | [
"wget.download",
"shapely.geometry.box",
"numpy.array",
"pandas.MultiIndex.from_tuples",
"pandas.notnull",
"xarray.open_mfdataset",
"pandas.date_range",
"os.remove",
"ogh.ensure_dir",
"ftplib.FTP",
"os.listdir",
"multiprocessing.pool.ThreadPool",
"geopandas.GeoDataFrame",
"urllib.request.u... | [((2690, 2715), 'os.path.basename', 'os.path.basename', (['fileurl'], {}), '(fileurl)\n', (2706, 2715), False, 'import os\n'), ((2723, 2747), 'os.path.isfile', 'os.path.isfile', (['basename'], {}), '(basename)\n', (2737, 2747), False, 'import os\n'), ((5215, 5240), 'os.path.basename', 'os.path.basename', (['fileurl'], {}), '(fileurl)\n', (5231, 5240), False, 'import os\n'), ((5326, 5350), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (5340, 5350), False, 'import os\n'), ((5729, 5750), 'ftplib.FTP', 'ftplib.FTP', (['ipaddress'], {}), '(ipaddress)\n', (5739, 5750), False, 'import ftplib, wget, urllib\n'), ((7514, 7543), 'os.path.join', 'os.path.join', (['homedir', 'subdir'], {}), '(homedir, subdir)\n', (7526, 7543), False, 'import os\n'), ((7548, 7571), 'ogh.ensure_dir', 'ogh.ensure_dir', (['filedir'], {}), '(filedir)\n', (7562, 7571), False, 'import ogh\n'), ((8583, 8600), 'os.chdir', 'os.chdir', (['homedir'], {}), '(homedir)\n', (8591, 8600), False, 'import os\n'), ((9348, 9377), 'os.path.join', 'os.path.join', (['homedir', 'subdir'], {}), '(homedir, subdir)\n', (9360, 9377), False, 'import os\n'), ((9382, 9405), 'ogh.ensure_dir', 'ogh.ensure_dir', (['filedir'], {}), '(filedir)\n', (9396, 9405), False, 'import ogh\n'), ((10412, 10429), 'os.chdir', 'os.chdir', (['homedir'], {}), '(homedir)\n', (10420, 10429), False, 'import os\n'), ((10743, 10772), 'os.path.join', 'os.path.join', (['homedir', 'subdir'], {}), '(homedir, subdir)\n', (10755, 10772), False, 'import os\n'), ((10777, 10800), 'ogh.ensure_dir', 'ogh.ensure_dir', (['filedir'], {}), '(filedir)\n', (10791, 10800), False, 'import ogh\n'), ((11517, 11577), 'ogh.mappingfileToDF', 'ogh.mappingfileToDF', (['mappingfile'], {'colvar': 'None', 'summary': '(False)'}), '(mappingfile, colvar=None, summary=False)\n', (11536, 11577), False, 'import ogh\n'), ((12800, 12917), 'ogh.addCatalogToMap', 'ogh.addCatalogToMap', ([], {'outfilepath': 'mappingfile', 'maptable': 
'maptable', 'folderpath': 'filedir', 'catalog_label': 'catalog_label'}), '(outfilepath=mappingfile, maptable=maptable, folderpath=\n filedir, catalog_label=catalog_label)\n', (12819, 12917), False, 'import ogh\n'), ((12917, 12934), 'os.chdir', 'os.chdir', (['homedir'], {}), '(homedir)\n', (12925, 12934), False, 'import os\n'), ((13131, 13163), 'ogh.mappingfileToDF', 'ogh.mappingfileToDF', (['mappingfile'], {}), '(mappingfile)\n', (13150, 13163), False, 'import ogh\n'), ((13486, 13546), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['map_df'], {'crs': 'mappingfile_crs', 'geometry': 'geom'}), '(map_df, crs=mappingfile_crs, geometry=geom)\n', (13502, 13546), True, 'import geopandas as gpd\n'), ((13883, 13913), 'numpy.array', 'np.array', (['newShape.centroid[0]'], {}), '(newShape.centroid[0])\n', (13891, 13913), True, 'import numpy as np\n'), ((13959, 13996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(500)'}), '(figsize=(10, 10), dpi=500)\n', (13969, 13996), True, 'import matplotlib.pyplot as plt\n'), ((14007, 14039), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 1)', '(0, 0)'], {}), '((1, 1), (0, 0))\n', (14023, 14039), True, 'import matplotlib.pyplot as plt\n'), ((14048, 14191), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""tmerc"""', 'resolution': '"""h"""', 'ax': 'ax1', 'lat_0': 'lat0', 'lon_0': 'lon0', 'llcrnrlon': 'minx', 'llcrnrlat': 'miny', 'urcrnrlon': 'maxx', 'urcrnrlat': 'maxy'}), "(projection='tmerc', resolution='h', ax=ax1, lat_0=lat0, lon_0=lon0,\n llcrnrlon=minx, llcrnrlat=miny, urcrnrlon=maxx, urcrnrlat=maxy)\n", (14055, 14191), False, 'from mpl_toolkits.basemap import Basemap\n'), ((14928, 14960), 'ogh.mappingfileToDF', 'ogh.mappingfileToDF', (['mappingfile'], {}), '(mappingfile)\n', (14947, 14960), False, 'import ogh\n'), ((15283, 15343), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['map_df'], {'crs': 'mappingfile_crs', 'geometry': 'geom'}), '(map_df, crs=mappingfile_crs, 
geometry=geom)\n', (15299, 15343), True, 'import geopandas as gpd\n'), ((15653, 15683), 'numpy.array', 'np.array', (['newShape.centroid[0]'], {}), '(newShape.centroid[0])\n', (15661, 15683), True, 'import numpy as np\n'), ((15729, 15766), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(500)'}), '(figsize=(10, 10), dpi=500)\n', (15739, 15766), True, 'import matplotlib.pyplot as plt\n'), ((15777, 15809), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 1)', '(0, 0)'], {}), '((1, 1), (0, 0))\n', (15793, 15809), True, 'import matplotlib.pyplot as plt\n'), ((15818, 15961), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""tmerc"""', 'resolution': '"""h"""', 'ax': 'ax1', 'lat_0': 'lat0', 'lon_0': 'lon0', 'llcrnrlon': 'minx', 'llcrnrlat': 'miny', 'urcrnrlon': 'maxx', 'urcrnrlat': 'maxy'}), "(projection='tmerc', resolution='h', ax=ax1, lat_0=lat0, lon_0=lon0,\n llcrnrlon=minx, llcrnrlat=miny, urcrnrlon=maxx, urcrnrlat=maxy)\n", (15825, 15961), False, 'from mpl_toolkits.basemap import Basemap\n'), ((18426, 18490), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['df'], {'geometry': '"""raster_centroid"""', 'crs': 'raster_crs'}), "(df, geometry='raster_centroid', crs=raster_crs)\n", (18442, 18490), True, 'import geopandas as gpd\n'), ((18568, 18634), 'geopandas.sjoin', 'gpd.sjoin', (['raster_map', 'UTMmappingfile'], {'how': '"""left"""', 'op': '"""intersects"""'}), "(raster_map, UTMmappingfile, how='left', op='intersects')\n", (18577, 18634), True, 'import geopandas as gpd\n'), ((19726, 19768), 'pandas.merge', 'pd.merge', (['t1', 't2'], {'how': '"""right"""', 'on': '"""nodeid"""'}), "(t1, t2, how='right', on='nodeid')\n", (19734, 19768), True, 'import pandas as pd\n'), ((22434, 22459), 'os.path.basename', 'os.path.basename', (['fileurl'], {}), '(fileurl)\n', (22450, 22459), False, 'import os\n'), ((22471, 22495), 'os.path.isfile', 'os.path.isfile', (['basename'], {}), '(basename)\n', (22485, 22495), False, 'import 
os\n'), ((25583, 25612), 'os.path.join', 'os.path.join', (['homedir', 'subdir'], {}), '(homedir, subdir)\n', (25595, 25612), False, 'import os\n'), ((25617, 25640), 'ogh.ensure_dir', 'ogh.ensure_dir', (['filedir'], {}), '(filedir)\n', (25631, 25640), False, 'import ogh\n'), ((26955, 26972), 'os.chdir', 'os.chdir', (['homedir'], {}), '(homedir)\n', (26963, 26972), False, 'import os\n'), ((927, 968), 'os.path.join', 'os.path.join', (['domain', 'subdomain', 'basename'], {}), '(domain, subdomain, basename)\n', (939, 968), False, 'import os\n'), ((1727, 1768), 'os.path.join', 'os.path.join', (['domain', 'subdomain', 'basename'], {}), '(domain, subdomain, basename)\n', (1739, 1768), False, 'import os\n'), ((2757, 2776), 'os.remove', 'os.remove', (['basename'], {}), '(basename)\n', (2766, 2776), False, 'import os\n'), ((2785, 2823), 'os.path.isfile', 'os.path.isfile', (['(file_prefix + basename)'], {}), '(file_prefix + basename)\n', (2799, 2823), False, 'import os\n'), ((2848, 2881), 'os.remove', 'os.remove', (['(file_prefix + basename)'], {}), '(file_prefix + basename)\n', (2857, 2881), False, 'import os\n'), ((3126, 3157), 'urllib.request.urlopen', 'urllib.request.urlopen', (['fileurl'], {}), '(fileurl)\n', (3148, 3157), False, 'import ftplib, wget, urllib\n'), ((5360, 5379), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (5369, 5379), False, 'import os\n'), ((5388, 5426), 'os.path.isfile', 'os.path.isfile', (['(file_prefix + filename)'], {}), '(file_prefix + filename)\n', (5402, 5426), False, 'import os\n'), ((5451, 5484), 'os.remove', 'os.remove', (['(file_prefix + filename)'], {}), '(file_prefix + filename)\n', (5460, 5484), False, 'import os\n'), ((6128, 6173), 'xarray.open_dataset', 'xray.open_dataset', (['filename'], {'engine': '"""netcdf4"""'}), "(filename, engine='netcdf4')\n", (6145, 6173), True, 'import xarray as xray\n'), ((6637, 6656), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (6646, 6656), False, 'import os\n'), ((8525, 
8544), 'dask.compute', 'da.compute', (['NetCDFs'], {}), '(NetCDFs)\n', (8535, 8544), True, 'import dask as da\n'), ((10354, 10373), 'dask.compute', 'da.compute', (['NetCDFs'], {}), '(NetCDFs)\n', (10364, 10373), True, 'import dask as da\n'), ((12156, 12179), 'dask.compute', 'da.compute', (['outfiledict'], {}), '(outfiledict)\n', (12166, 12179), True, 'import dask as da\n'), ((12315, 12332), 'numpy.array', 'np.array', (['ds_vars'], {}), '(ds_vars)\n', (12323, 12332), True, 'import numpy as np\n'), ((19632, 19656), 'pandas.notnull', 'pd.notnull', (['nodeXmap.FID'], {}), '(nodeXmap.FID)\n', (19642, 19656), True, 'import pandas as pd\n'), ((19954, 19996), 'numpy.array', 'np.array', (["eachpoly['raster_geom'].centroid"], {}), "(eachpoly['raster_geom'].centroid)\n", (19962, 19996), True, 'import numpy as np\n'), ((20459, 20509), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['ids'], {'names': "['', '', '']"}), "(ids, names=['', '', ''])\n", (20484, 20509), True, 'import pandas as pd\n'), ((21251, 21303), 'os.path.join', 'os.path.join', (['domain', 'subdomain', 'subfolder', 'basename'], {}), '(domain, subdomain, subfolder, basename)\n', (21263, 21303), False, 'import os\n'), ((22509, 22528), 'os.remove', 'os.remove', (['basename'], {}), '(basename)\n', (22518, 22528), False, 'import os\n'), ((22541, 22579), 'os.path.isfile', 'os.path.isfile', (['(file_prefix + basename)'], {}), '(file_prefix + basename)\n', (22555, 22579), False, 'import os\n'), ((22608, 22641), 'os.remove', 'os.remove', (['(file_prefix + basename)'], {}), '(file_prefix + basename)\n', (22617, 22641), False, 'import os\n'), ((22955, 22986), 'urllib.request.urlopen', 'urllib.request.urlopen', (['fileurl'], {}), '(fileurl)\n', (22977, 22986), False, 'import ftplib, wget, urllib\n'), ((26897, 26916), 'dask.compute', 'da.compute', (['NetCDFs'], {}), '(NetCDFs)\n', (26907, 26916), True, 'import dask as da\n'), ((2889, 2927), 'os.path.isfile', 'os.path.isfile', (['(file_prefix + basename)'], 
{}), '(file_prefix + basename)\n', (2903, 2927), False, 'import os\n'), ((3272, 3294), 'wget.download', 'wget.download', (['fileurl'], {}), '(fileurl)\n', (3285, 3294), False, 'import ftplib, wget, urllib\n'), ((3355, 3400), 'xarray.open_dataset', 'xray.open_dataset', (['basename'], {'engine': '"""netcdf4"""'}), "(basename, engine='netcdf4')\n", (3372, 3400), True, 'import xarray as xray\n'), ((3975, 3994), 'os.remove', 'os.remove', (['basename'], {}), '(basename)\n', (3984, 3994), False, 'import os\n'), ((5492, 5530), 'os.path.isfile', 'os.path.isfile', (['(file_prefix + filename)'], {}), '(file_prefix + filename)\n', (5506, 5530), False, 'import os\n'), ((6000, 6023), 'ogh.decompbz2', 'ogh.decompbz2', (['filename'], {}), '(filename)\n', (6013, 6023), False, 'import ogh\n'), ((6685, 6696), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6694, 6696), False, 'import os\n'), ((7684, 7739), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': '"""M"""'}), "(start=start_date, end=end_date, freq='M')\n", (7697, 7739), True, 'import pandas as pd\n'), ((7800, 7820), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['nworkers'], {}), '(nworkers)\n', (7810, 7820), False, 'from multiprocessing.pool import ThreadPool\n'), ((7826, 7839), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (7837, 7839), False, 'from dask.diagnostics import ProgressBar\n'), ((9518, 9573), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': '"""M"""'}), "(start=start_date, end=end_date, freq='M')\n", (9531, 9573), True, 'import pandas as pd\n'), ((9634, 9654), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['nworkers'], {}), '(nworkers)\n', (9644, 9654), False, 'from multiprocessing.pool import ThreadPool\n'), ((9660, 9673), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (9671, 9673), False, 'from dask.diagnostics import ProgressBar\n'), ((10902, 10938), 'os.path.join', 
'os.path.join', (['source_directory', 'file'], {}), '(source_directory, file)\n', (10914, 10938), False, 'import os\n'), ((11017, 11063), 'xarray.open_mfdataset', 'xray.open_mfdataset', (['netcdfs'], {'engine': '"""netcdf4"""'}), "(netcdfs, engine='netcdf4')\n", (11036, 11063), True, 'import xarray as xray\n'), ((11983, 12007), 'dask.delayed', 'da.delayed', (['ds_df.to_csv'], {}), '(ds_df.to_csv)\n', (11993, 12007), True, 'import dask as da\n'), ((12113, 12126), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (12124, 12126), False, 'from dask.diagnostics import ProgressBar\n'), ((13363, 13452), 'shapely.geometry.box', 'box', (['(mid.LONG_ - midpt)', '(mid.LAT - midpt)', '(mid.LONG_ + midpt)', '(mid.LAT + midpt)'], {'ccw': '(True)'}), '(mid.LONG_ - midpt, mid.LAT - midpt, mid.LONG_ + midpt, mid.LAT + midpt,\n ccw=True)\n', (13366, 13452), False, 'from shapely.geometry import box, Polygon\n'), ((14593, 14602), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14600, 14602), True, 'import matplotlib.pyplot as plt\n'), ((15160, 15249), 'shapely.geometry.box', 'box', (['(mid.LONG_ - midpt)', '(mid.LAT - midpt)', '(mid.LONG_ + midpt)', '(mid.LAT + midpt)'], {'ccw': '(True)'}), '(mid.LONG_ - midpt, mid.LAT - midpt, mid.LONG_ + midpt, mid.LAT + midpt,\n ccw=True)\n', (15163, 15249), False, 'from shapely.geometry import box, Polygon\n'), ((16196, 16205), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16203, 16205), True, 'import matplotlib.pyplot as plt\n'), ((16586, 16604), 'pandas.Series', 'pd.Series', (['x.index'], {}), '(x.index)\n', (16595, 16604), True, 'import pandas as pd\n'), ((16701, 16719), 'pandas.Series', 'pd.Series', (['y.index'], {}), '(y.index)\n', (16710, 16719), True, 'import pandas as pd\n'), ((16867, 16883), 'pandas.notnull', 'pd.notnull', (['rows'], {}), '(rows)\n', (16877, 16883), True, 'import pandas as pd\n'), ((16919, 16935), 'pandas.notnull', 'pd.notnull', (['cols'], {}), '(cols)\n', (16929, 16935), True, 'import 
pandas as pd\n'), ((18304, 18338), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['df_list'], {}), '(df_list)\n', (18329, 18338), True, 'import pandas as pd\n'), ((19357, 19384), 'pandas.concat', 'pd.concat', (['listOfDf'], {'axis': '(1)'}), '(listOfDf, axis=1)\n', (19366, 19384), True, 'import pandas as pd\n'), ((22653, 22691), 'os.path.isfile', 'os.path.isfile', (['(file_prefix + basename)'], {}), '(file_prefix + basename)\n', (22667, 22691), False, 'import os\n'), ((23117, 23139), 'wget.download', 'wget.download', (['fileurl'], {}), '(fileurl)\n', (23130, 23139), False, 'import ftplib, wget, urllib\n'), ((23208, 23253), 'xarray.open_dataset', 'xray.open_dataset', (['basename'], {'engine': '"""netcdf4"""'}), "(basename, engine='netcdf4')\n", (23225, 23253), True, 'import xarray as xray\n'), ((24289, 24311), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['hour'], {}), '(hour)\n', (24305, 24311), True, 'import pandas as pd\n'), ((24558, 24577), 'os.remove', 'os.remove', (['basename'], {}), '(basename)\n', (24567, 24577), False, 'import os\n'), ((25755, 25810), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': '"""D"""'}), "(start=start_date, end=end_date, freq='D')\n", (25768, 25810), True, 'import pandas as pd\n'), ((25871, 25891), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['nworkers'], {}), '(nworkers)\n', (25881, 25891), False, 'from multiprocessing.pool import ThreadPool\n'), ((25897, 25910), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (25908, 25910), False, 'from dask.diagnostics import ProgressBar\n'), ((3035, 3046), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3044, 3046), False, 'import os\n'), ((4027, 4038), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4036, 4038), False, 'import os\n'), ((5638, 5649), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5647, 5649), False, 'import os\n'), ((8062, 8098), 'dask.delayed', 'da.delayed', (['wget_x_download_spSubset'], {}), 
'(wget_x_download_spSubset)\n', (8072, 8098), True, 'import dask as da\n'), ((9896, 9931), 'dask.delayed', 'da.delayed', (['ftp_x_download_spSubset'], {}), '(ftp_x_download_spSubset)\n', (9906, 9931), True, 'import dask as da\n'), ((10951, 10979), 'os.listdir', 'os.listdir', (['source_directory'], {}), '(source_directory)\n', (10961, 10979), False, 'import os\n'), ((22807, 22818), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22816, 22818), False, 'import os\n'), ((24648, 24659), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (24657, 24659), False, 'import os\n'), ((26143, 26184), 'dask.delayed', 'da.delayed', (['wget_x_download_spSubset_PNNL'], {}), '(wget_x_download_spSubset_PNNL)\n', (26153, 26184), True, 'import dask as da\n'), ((18139, 18186), 'shapely.geometry.box', 'box', (['long_', 'lat', '(long_ + dx)', '(lat + dy)'], {'ccw': '(True)'}), '(long_, lat, long_ + dx, lat + dy, ccw=True)\n', (18142, 18186), False, 'from shapely.geometry import box, Polygon\n'), ((24023, 24094), 'pandas.date_range', 'pd.date_range', ([], {'start': 'filedate', 'periods': 'time_steps', 'freq': 'time_resolution'}), '(start=filedate, periods=time_steps, freq=time_resolution)\n', (24036, 24094), True, 'import pandas as pd\n'), ((18212, 18259), 'shapely.geometry.box', 'box', (['long_', 'lat', '(long_ + dx)', '(lat + dy)'], {'ccw': '(True)'}), '(long_, lat, long_ + dx, lat + dy, ccw=True)\n', (18215, 18259), False, 'from shapely.geometry import box, Polygon\n'), ((12428, 12449), 'pandas.Series', 'pd.Series', (['ds_mf.TIME'], {}), '(ds_mf.TIME)\n', (12437, 12449), True, 'import pandas as pd\n'), ((12545, 12566), 'pandas.Series', 'pd.Series', (['ds_mf.TIME'], {}), '(ds_mf.TIME)\n', (12554, 12566), True, 'import pandas as pd\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import shutil
import tempfile
import unittest
import numpy as np
from mleap.sklearn.preprocessing.data import LabelEncoder
from mleap.sklearn.preprocessing.data import OneHotEncoder
class TestOneHotEncoder(unittest.TestCase):
    """Bundle (de)serialization tests for mleap's sklearn OneHotEncoder wrapper."""

    def setUp(self):
        """Build a fitted LabelEncoder and a single-column integer feature array."""
        labels = ['a', 'b', 'c', 'a', 'b', 'b']
        self.le = LabelEncoder(input_features=['label'], output_features='label_le_encoded')
        self.oh_data = self.le.fit_transform(labels).reshape(-1, 1)
        self.tmp_dir = tempfile.mkdtemp(prefix="mleap.python.tests")

    def tearDown(self):
        # Remove the per-test bundle directory so tests stay isolated.
        shutil.rmtree(self.tmp_dir)

    def _fitted_encoder(self, **kwargs):
        """Return a OneHotEncoder(**kwargs) mlinit'ed against self.le and fitted on self.oh_data."""
        ohe = OneHotEncoder(**kwargs)
        ohe.mlinit(prior_tf=self.le, output_features='{}_one_hot_encoded'.format(self.le.output_features))
        ohe.fit(self.oh_data)
        return ohe

    def test_one_hot_encoder_serialization_fails_on_multiple_feature_columns(self):
        """Serialization only supports a single input column."""
        self.oh_data = np.hstack((self.oh_data, self.oh_data))  # make two feature columns
        ohe = self._fitted_encoder(handle_unknown='error')
        with self.assertRaises(NotImplementedError):
            ohe.serialize_to_bundle(self.tmp_dir, ohe.name)

    def test_one_hot_encoder_serialization_fails_on_an_invalid_category_range(self):
        """Serialization requires categories to form a contiguous 0..n-1 range."""
        self.oh_data[2][0] = 3  # make invalid category range
        ohe = self._fitted_encoder(handle_unknown='error')
        with self.assertRaises(ValueError):
            ohe.serialize_to_bundle(self.tmp_dir, ohe.name)

    def test_one_hot_encoder_serialization_fails_when_using_the_drop_param(self):
        """The sklearn `drop` parameter has no bundle equivalent."""
        ohe = self._fitted_encoder(handle_unknown='error', drop='first')  # try to use `drop` parameter
        with self.assertRaises(NotImplementedError):
            ohe.serialize_to_bundle(self.tmp_dir, ohe.name)

    def test_one_hot_encoder_serialization_fails_when_using_the_dtype_param(self):
        """A non-default `dtype` has no bundle equivalent."""
        ohe = self._fitted_encoder(handle_unknown='error', dtype=int)  # try to use `dtype` parameter
        with self.assertRaises(NotImplementedError):
            ohe.serialize_to_bundle(self.tmp_dir, ohe.name)

    def test_one_hot_encoder_serialization_succeeds_when_handle_unknown_is_set_to_error(self):
        """handle_unknown='error' maps to handle_invalid='error' with drop_last=False."""
        ohe = self._fitted_encoder(handle_unknown='error')
        ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
        with open("{}/{}.node/model.json".format(self.tmp_dir, ohe.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual('one_hot_encoder', model['op'])
        self.assertEqual(3, model['attributes']['size']['long'])
        self.assertEqual('error', model['attributes']['handle_invalid']['string'])
        self.assertEqual(False, model['attributes']['drop_last']['boolean'])

    def test_one_hot_encoder_deserialization_succeeds_when_handle_unknown_is_set_to_error(self):
        """A round-tripped 'error' encoder still rejects unknown categories."""
        ohe = self._fitted_encoder(handle_unknown='error')
        ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
        node_name = "{}.node".format(ohe.name)
        ohe_ds = OneHotEncoder()
        ohe_ds.deserialize_from_bundle(self.tmp_dir, node_name)
        self.oh_data[2][0] = 3  # Add an unknown category
        with self.assertRaises(ValueError):
            ohe_ds.transform(self.oh_data)

    def test_one_hot_encoder_serialization_succeeds_when_handle_unknown_is_set_to_ignore(self):
        """handle_unknown='ignore' maps to handle_invalid='keep' with drop_last=True."""
        ohe = self._fitted_encoder(handle_unknown='ignore')
        ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
        with open("{}/{}.node/model.json".format(self.tmp_dir, ohe.name)) as json_data:
            model = json.load(json_data)
        self.assertEqual('one_hot_encoder', model['op'])
        self.assertEqual(3, model['attributes']['size']['long'])
        self.assertEqual('keep', model['attributes']['handle_invalid']['string'])
        self.assertEqual(True, model['attributes']['drop_last']['boolean'])

    def test_one_hot_encoder_deserialization_succeeds_when_handle_unknown_is_set_to_ignore(self):
        """A round-tripped 'ignore' encoder transforms unknown categories like the original."""
        ohe = self._fitted_encoder(handle_unknown='ignore')
        ohe.serialize_to_bundle(self.tmp_dir, ohe.name)
        node_name = "{}.node".format(ohe.name)
        ohe_ds = OneHotEncoder()
        ohe_ds.deserialize_from_bundle(self.tmp_dir, node_name)
        self.oh_data[2][0] = 3  # Add an unknown category
        expected = ohe.transform(self.oh_data).todense()
        actual = ohe_ds.transform(self.oh_data)
        np.testing.assert_array_equal(expected, actual)
| [
"numpy.hstack",
"mleap.sklearn.preprocessing.data.LabelEncoder",
"mleap.sklearn.preprocessing.data.OneHotEncoder",
"tempfile.mkdtemp",
"shutil.rmtree",
"json.load",
"numpy.testing.assert_array_equal"
] | [((1114, 1188), 'mleap.sklearn.preprocessing.data.LabelEncoder', 'LabelEncoder', ([], {'input_features': "['label']", 'output_features': '"""label_le_encoded"""'}), "(input_features=['label'], output_features='label_le_encoded')\n", (1126, 1188), False, 'from mleap.sklearn.preprocessing.data import LabelEncoder\n'), ((1280, 1325), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""mleap.python.tests"""'}), "(prefix='mleap.python.tests')\n", (1296, 1325), False, 'import tempfile\n'), ((1359, 1386), 'shutil.rmtree', 'shutil.rmtree', (['self.tmp_dir'], {}), '(self.tmp_dir)\n', (1372, 1386), False, 'import shutil\n'), ((1495, 1534), 'numpy.hstack', 'np.hstack', (['(self.oh_data, self.oh_data)'], {}), '((self.oh_data, self.oh_data))\n', (1504, 1534), True, 'import numpy as np\n'), ((1578, 1615), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (1591, 1615), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((2030, 2067), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (2043, 2067), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((2407, 2458), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""', 'drop': '"""first"""'}), "(handle_unknown='error', drop='first')\n", (2420, 2458), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((2839, 2887), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""', 'dtype': 'int'}), "(handle_unknown='error', dtype=int)\n", (2852, 2887), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((3281, 3318), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (3294, 3318), False, 'from 
mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((4037, 4074), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (4050, 4074), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((4334, 4349), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (4347, 4349), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((4720, 4794), 'mleap.sklearn.preprocessing.data.LabelEncoder', 'LabelEncoder', ([], {'input_features': "['label']", 'output_features': '"""label_le_encoded"""'}), "(input_features=['label'], output_features='label_le_encoded')\n", (4732, 4794), False, 'from mleap.sklearn.preprocessing.data import LabelEncoder\n'), ((4868, 4906), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (4881, 4906), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((5609, 5647), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5622, 5647), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((5907, 5922), 'mleap.sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (5920, 5922), False, 'from mleap.sklearn.preprocessing.data import OneHotEncoder\n'), ((6161, 6208), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (6190, 6208), True, 'import numpy as np\n'), ((3621, 3641), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (3630, 3641), False, 'import json\n'), ((5194, 5214), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (5203, 5214), False, 'import json\n')] |
import glob
import torch
import random
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from src.models.utils import FragmentDataset, ScratchGAN, loss_fn_scaled_mse
def retrain(scratchgan, dataset, N, batch_size=1):
    """Overfit `scratchgan` on a single (fragment, vase) batch for 100 steps.

    Args:
        scratchgan: model exposing an `.optim` optimizer attribute; expected to
            already live on CUDA (caller moves it there).
        dataset: provides `.take(n, batch_size)` yielding (input, target) pairs.
        N: unused by this single-batch loop; kept for interface compatibility
           (it sized the full-dataset iteration in an earlier version).
        batch_size: batch size of the single sampled batch.
    """
    scratchgan.train()
    # NOTE(review): deliberately trains on ONE batch only — an overfit sanity
    # check, not a full training loop.
    x, y = next(dataset.take(1, batch_size))
    # Hoisted out of the loop: x and y never change, so transfer once.
    x = x.to('cuda')
    y = y.to('cuda')
    criterion = nn.MSELoss()  # construct the loss module once, not per step
    for n in range(100):
        scratchgan.optim.zero_grad()
        loss = criterion(scratchgan(x), y)
        loss.backward()
        scratchgan.optim.step()
        print('step', n, 'loss', loss)
def vase_generate(scratchgan, data_gen):
    """Generate a vase from one fragment batch and plot it beside the ground truth."""
    for fragment, target in data_gen.take(1, 1):
        fragment = fragment.to('cuda')
        with torch.no_grad():
            generated = scratchgan(fragment)
        generated = generated.to('cpu').numpy()
        print('max', np.max(generated), 'min', np.min(generated))
        target = target.numpy()
        # Side-by-side comparison: prediction on the left, ground truth on the right.
        plt.subplot(121)
        plt.imshow(generated[0].transpose((1, 2, 0)))
        plt.subplot(122)
        plt.imshow(target[0].transpose((1, 2, 0)))
        plt.show()
def main():
    """Train ScratchGAN on a sampled batch, then repeatedly visualize generations."""
    model = ScratchGAN()
    model.to('cuda')
    dataset = FragmentDataset()
    samples = 1000
    batch = 2
    retrain(model, dataset, samples, batch)
    # Keep showing fresh comparisons until interrupted.
    while True:
        vase_generate(model, dataset)


if __name__ == '__main__':
    main()
| [
"src.models.utils.ScratchGAN",
"numpy.max",
"torch.nn.MSELoss",
"src.models.utils.FragmentDataset",
"numpy.min",
"torch.no_grad",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1249, 1261), 'src.models.utils.ScratchGAN', 'ScratchGAN', ([], {}), '()\n', (1259, 1261), False, 'from src.models.utils import FragmentDataset, ScratchGAN, loss_fn_scaled_mse\n'), ((1304, 1321), 'src.models.utils.FragmentDataset', 'FragmentDataset', ([], {}), '()\n', (1319, 1321), False, 'from src.models.utils import FragmentDataset, ScratchGAN, loss_fn_scaled_mse\n'), ((1058, 1074), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1069, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1135, 1151), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1146, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1217), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1215, 1217), True, 'import matplotlib.pyplot as plt\n'), ((502, 514), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (512, 514), True, 'import torch.nn as nn\n'), ((811, 826), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (824, 826), False, 'import torch\n'), ((977, 994), 'numpy.max', 'np.max', (['test_vase'], {}), '(test_vase)\n', (983, 994), True, 'import numpy as np\n'), ((1003, 1020), 'numpy.min', 'np.min', (['test_vase'], {}), '(test_vase)\n', (1009, 1020), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.