code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
from __future__ import print_function
import rospy
import yaml
import numpy as np #np.dot
import os.path
from math import cos, sin
from sensor_msgs.msg import JointState
from integ_gkd_models.srv import Dynamic_inverse,Dynamic_inverseResponse
# Load the 2-link manipulator's physical parameters from a YAML file that
# lives next to this script.
path = os.path.dirname(__file__)
with open(os.path.join(path, 'RobotParam.yml')) as f:
    yaml_dict = yaml.safe_load(f)
# Link lengths [m].
l1 = yaml_dict.get("l1")
l2 = yaml_dict.get("l2")
# Link masses [kg].
m1 = yaml_dict.get("m1")
m2 = yaml_dict.get("m2")
# Link inertias about the z axis.
Iz1 = yaml_dict.get("Iz1")
Iz2 = yaml_dict.get("Iz2")
# Gravitational acceleration.
g = yaml_dict.get("g")
# Distances to each link's center of mass.
c1 = yaml_dict.get("c1")
c2 = yaml_dict.get("c2")
def handle_Dynamic_inverse(req):
    """Service handler: compute joint accelerations of the 2-link arm.

    Solves the manipulator dynamics
        D(theta) * Gamma = efforts - C(theta, theta_d) * theta_d - G(theta)
    for Gamma, using the parameters loaded from RobotParam.yml.

    Args:
        req: service request whose ``input`` is a JointState carrying
            ``position`` (theta), ``velocity`` (theta_d) and ``effort``.

    Returns:
        Dynamic_inverseResponse whose output JointState holds the
        accelerations in its ``effort`` field.
    """
    theta = req.input.position
    theta_d = req.input.velocity
    efforts = np.asarray(req.input.effort)
    # Inertia-matrix entries.
    Z1 = m1*c1**2 + m2*(l1**2 + c2**2 + 2*l1*c2*cos(theta[1])) + Iz1 + Iz2
    Z2 = m2*(c2**2 + l1*c2*cos(theta[1])) + Iz2
    Z3 = m2*c2**2 + Iz2
    # Gravity-vector entries.
    Z4 = m2*c2*g*cos(theta[0]+theta[1]) + (m1*c1 + m2*l1)*g*cos(theta[0])
    Z5 = m2*c2*g*cos(theta[0]+theta[1])
    # Coriolis/centrifugal coupling coefficient.
    h = -m2*l1*c2*sin(theta[1])
    D = np.array([[Z1, Z2], [Z2, Z3]])
    C = np.array([[h*theta_d[1], h*(theta_d[0]+theta_d[1])],
                  [-h*theta_d[0], 0.0]])
    G = np.array([Z4, Z5])
    output = JointState()
    # Bug fix: the original used element-wise '*' between np.linalg.inv(D) and
    # the right-hand side, which is not a matrix-vector product.  Solving the
    # linear system gives the intended D^{-1}(tau - C*theta_d - G).
    Gamma = np.linalg.solve(D, efforts - np.dot(C, theta_d) - G)
    output.effort = Gamma
    return Dynamic_inverseResponse(output)
def Dynamic_inverse_server():
    """Start the ROS node and expose the 'Dynamic' service.

    Bug fix: the original registered the service with the undefined name
    ``Dynamic``; the service type imported at the top of the file is
    ``Dynamic_inverse``.
    """
    rospy.init_node('Dynamic_inverse_server')
    s = rospy.Service('Dynamic', Dynamic_inverse, handle_Dynamic_inverse)
    print("Dynamic Model Indirect")
    rospy.spin()
if __name__ == "__main__":
    # Run the service node when executed as a script.
    Dynamic_inverse_server()
| [
"integ_gkd_models.srv.Dynamic_inverseResponse",
"rospy.init_node",
"sensor_msgs.msg.JointState",
"rospy.Service",
"math.cos",
"yaml.safe_load",
"numpy.dot",
"numpy.linalg.inv",
"rospy.spin",
"math.sin"
] | [((367, 384), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (381, 384), False, 'import yaml\n'), ((1142, 1154), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (1152, 1154), False, 'from sensor_msgs.msg import JointState\n'), ((1244, 1275), 'integ_gkd_models.srv.Dynamic_inverseResponse', 'Dynamic_inverseResponse', (['output'], {}), '(output)\n', (1267, 1275), False, 'from integ_gkd_models.srv import Dynamic_inverse, Dynamic_inverseResponse\n'), ((1318, 1359), 'rospy.init_node', 'rospy.init_node', (['"""Dynamic_inverse_server"""'], {}), "('Dynamic_inverse_server')\n", (1333, 1359), False, 'import rospy\n'), ((1368, 1425), 'rospy.Service', 'rospy.Service', (['"""Dynamic"""', 'Dynamic', 'handle_Dynamic_inverse'], {}), "('Dynamic', Dynamic, handle_Dynamic_inverse)\n", (1381, 1425), False, 'import rospy\n'), ((1466, 1478), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1476, 1478), False, 'import rospy\n'), ((956, 980), 'math.cos', 'cos', (['(theta[0] + theta[1])'], {}), '(theta[0] + theta[1])\n', (959, 980), False, 'from math import cos, sin\n'), ((994, 1007), 'math.sin', 'sin', (['theta[1]'], {}), '(theta[1])\n', (997, 1007), False, 'from math import cos, sin\n'), ((1164, 1180), 'numpy.linalg.inv', 'np.linalg.inv', (['D'], {}), '(D)\n', (1177, 1180), True, 'import numpy as np\n'), ((889, 913), 'math.cos', 'cos', (['(theta[0] + theta[1])'], {}), '(theta[0] + theta[1])\n', (892, 913), False, 'from math import cos, sin\n'), ((928, 941), 'math.cos', 'cos', (['theta[0]'], {}), '(theta[0])\n', (931, 941), False, 'from math import cos, sin\n'), ((1192, 1210), 'numpy.dot', 'np.dot', (['C', 'theta_d'], {}), '(C, theta_d)\n', (1198, 1210), True, 'import numpy as np\n'), ((833, 846), 'math.cos', 'cos', (['theta[1]'], {}), '(theta[1])\n', (836, 846), False, 'from math import cos, sin\n'), ((784, 797), 'math.cos', 'cos', (['theta[1]'], {}), '(theta[1])\n', (787, 797), False, 'from math import cos, sin\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 29 18:13:24 2021
@author: tae-jun_yoon
"""
import numpy as np
from scipy.signal import savgol_filter
from scipy.optimize import newton
from PyOECP import References
def ListReferences():
    """Print the names of the reference-spectrum functions available in the
    References module (names containing a single underscore)."""
    names = [name for name in dir(References)
             if '_' in name and '__' not in name]
    for name in names:
        print(name)
def ReferenceDetail(ReferenceName):
    """Print and plot detailed information about a reference spectrum.

    Args:
        ReferenceName (str): Name of a reference function defined in the
            References module.

    Returns:
        None
    """
    from pprint import pprint
    import matplotlib.pyplot as plt
    # Look the reference function up by name instead of eval()-ing a string:
    # identical behavior, without the arbitrary-code-execution risk.
    reference = getattr(References, ReferenceName)
    plt.figure(figsize=(5, 5), dpi=250)
    frequency = np.array([1e9])
    # First call only to obtain the valid frequency range of this reference.
    data = reference(frequency)
    minimum_frequency = data['minFREQ']
    maximum_frequency = data['maxFREQ']
    frequency = np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency), 100)
    data = reference(frequency)
    epsilon = data['epsilon']
    # Real part in red, (positive) dielectric loss in blue.
    plt.semilogx(frequency, np.real(epsilon), 'r')
    plt.semilogx(frequency, -np.imag(epsilon), 'b')
    plt.title(ReferenceName)
    plt.xlabel('frequency [Hz]')
    plt.ylabel('Complex permittivity')
    # Drop the arrays so pprint only shows the reference's metadata.
    data.pop('epsilon')
    data.pop('frequency')
    pprint(data)
    plt.show()
    return None
class Capacitance:
    ''' Capacitance model based on Marsland & Evans's work. It is also denoted as M&E simple in some literature. '''

    def __init__(self, frequency, S11m, S11r1, S11r2, S11r3,
                 m1='Short', m2=None, m3=None, temperature=25,
                 Window=None, concentrations=None):
        # The S11 inputs are N x 3 arrays; columns 1 and 2 hold the real and
        # imaginary parts of the reflection coefficient, combined here into
        # complex spectra.
        self.frequency = frequency
        self.S11m = S11m[:, 1] + 1j*S11m[:, 2]     # measured sample
        self.S11r1 = S11r1[:, 1] + 1j*S11r1[:, 2]  # reference 1 (m1, default 'Short')
        self.S11r2 = S11r2[:, 1] + 1j*S11r2[:, 2]  # reference 2 (m2)
        self.S11r3 = S11r3[:, 1] + 1j*S11r3[:, 2]  # reference 3 (m3)
        # m1..m3 name functions in the References module that model the
        # reference materials; m1 is stored but not used by Calculate().
        self.m1 = m1
        self.m2 = m2
        self.m3 = m3
        self.temperature = temperature
        # Window: odd window length for optional Savitzky-Golay smoothing of
        # the resulting permittivity (None disables smoothing).
        self.Window = Window
        # Per-reference concentrations; -1 marks a pure liquid (see Calculate).
        self.concentrations = concentrations

    def Calculate(self):
        """Return the complex permittivity of the measured sample using the
        bilinear (capacitance) model with three reference measurements."""
        if self.concentrations is None:
            # Default: treat all references as pure liquids (-1 sentinel).
            self.concentrations = -1 * np.ones((4,))
        # Reference permittivities evaluated from the References module.
        func = getattr(References, self.m2)
        eps2 = func(self.frequency, self.temperature, self.concentrations[1])['epsilon']
        func = getattr(References, self.m3)
        eps3 = func(self.frequency, self.temperature, self.concentrations[2])['epsilon']
        # Pairwise differences between the reference reflection coefficients.
        d13 = self.S11r1 - self.S11r3
        d21 = self.S11r2 - self.S11r1
        d32 = self.S11r3 - self.S11r2
        # Differences between the measurement and each reference.
        dm1 = self.S11m - self.S11r1
        dm2 = self.S11m - self.S11r2
        dm3 = self.S11m - self.S11r3
        # Bilinear capacitance-model estimate of the sample permittivity.
        epsilon = -(dm2*d13)/(dm1*d32)*eps3 - (dm3*d21)/(dm1*d32)*eps2
        if self.Window is not None:
            # Smooth real and imaginary parts separately (3rd-order polynomial).
            e1 = savgol_filter(np.real(epsilon), self.Window, 3)
            e2 = savgol_filter(np.imag(epsilon), self.Window, 3)
            epsilon = e1 + 1j*e2
        return epsilon
class Antenna:
    ''' Antenna model
    Since this model is more robust than capacitance model and faster than Komarov model,
    this model is recommended to be a default model.
    Citation Information
    <NAME>., & <NAME>. (1987, August). Dielectric measurements with an open-ended coaxial probe. In IEE Proceedings H (Microwaves, Antennas and Propagation) (Vol. 134, No. 4, pp. 341-349). IET Digital Library.
    To use the reference solutions (either binary or pure) conveniently, "concentrations" are always used as basic inputs.
    If a function called does not require concentration (e.g., pure liquids), assign -1 for them. This is not required when all reference liquids are pure.
    '''

    def __init__(self, frequency, S11m, S11r1, S11r2, S11r3, S11r4,
                 m1='Short', m2=None, m3=None, m4=None, temperature=25,
                 Window=None, concentrations=None):
        # The S11 inputs are N x 3 arrays; columns 1 and 2 hold the real and
        # imaginary parts of the reflection coefficient, combined here into
        # complex spectra.  Four references are required by this model.
        self.frequency = frequency
        self.S11m = S11m[:, 1] + 1j*S11m[:, 2]
        self.S11r1 = S11r1[:, 1] + 1j*S11r1[:, 2]
        self.S11r2 = S11r2[:, 1] + 1j*S11r2[:, 2]
        self.S11r3 = S11r3[:, 1] + 1j*S11r3[:, 2]
        self.S11r4 = S11r4[:, 1] + 1j*S11r4[:, 2]
        # m1..m4 name functions in the References module that model the
        # reference materials.
        self.m1 = m1
        self.m2 = m2
        self.m3 = m3
        self.m4 = m4
        self.temperature = temperature
        # Window: odd window length for optional Savitzky-Golay smoothing.
        self.Window = Window
        self.concentrations = concentrations

    def Mother(self, x, a, b):
        # Antenna-model residual: a*x^(5/2) + x + b = 0 at the true permittivity.
        return a*x**(5/2) + x + b

    def Son(self, x, a, b):
        # Analytic derivative of Mother w.r.t. x.  NOTE(review): not currently
        # passed to newton() below, which therefore uses the secant method.
        return a*(5/2)*x**(3/2) + 1

    def Calculate(self):
        """Return the complex permittivity of the measured sample by solving
        the antenna-model equation per frequency point with Newton iteration,
        starting from the capacitance-model estimate."""
        if self.concentrations is None:
            # -1 marks "no concentration" for pure reference liquids.
            self.concentrations = -1 * np.ones((4,))
        ''' We don't need eps1 if we stick to Marsland-Evans model. (Short)
        func = getattr(References,self.m1)
        eps1 = func(self.temperature,self.concentrations[0])['epsilon']
        '''
        # Reference permittivities evaluated from the References module.
        func = getattr(References, self.m2)
        e2 = func(self.frequency, self.temperature, self.concentrations[1])['epsilon']
        func = getattr(References, self.m3)
        e3 = func(self.frequency, self.temperature, self.concentrations[2])['epsilon']
        func = getattr(References, self.m4)
        e4 = func(self.frequency, self.temperature, self.concentrations[3])['epsilon']
        # Calculate Gn from four references
        d13 = -self.S11r1 + self.S11r3
        d21 = -self.S11r2 + self.S11r1
        d32 = -self.S11r3 + self.S11r2
        d41 = -self.S11r4 + self.S11r1
        d42 = -self.S11r4 + self.S11r2
        d43 = -self.S11r4 + self.S11r3
        Gn = -(d41*d32*e4 + d42*d13*e3 + d43*d21*e2)/(d41*d32*e4**(5/2) + d42*d13*e3**(5/2) + d43*d21*e2**(5/2))
        # yy values: left-hand side of the antenna equation for two references.
        yy2 = e2 + Gn*e2**(5/2)
        yy3 = e3 + Gn*e3**(5/2)
        # Initial guess (Capacitance model)
        dm1 = self.S11m - self.S11r1
        dm2 = self.S11m - self.S11r2
        dm3 = self.S11m - self.S11r3
        em = -(dm2*d13)/(dm1*d32)*e3 - (dm3*d21)/(dm1*d32)*e2
        b = dm2*d13/(dm1*d32)*yy3 + dm3*d21/(dm1*d32)*yy2
        epsilon = np.copy(em)
        # Refine each frequency point independently with Newton iteration.
        for i in range(len(em)):
            epsilon[i] = newton(self.Mother, em[i], args=(Gn[i], b[i]), maxiter=10000)
        if self.Window is not None:
            # Smooth real and imaginary parts separately (3rd-order polynomial).
            e1 = savgol_filter(np.real(epsilon), self.Window, 3)
            e2 = savgol_filter(np.imag(epsilon), self.Window, 3)
            epsilon = e1 + 1j*e2
        return epsilon
| [
"numpy.copy",
"numpy.log10",
"numpy.ones",
"numpy.imag",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.optimize.newton",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.real",
"matplotlib.pyplot.title",
"pprint.pprint",
"matplotlib.pyplot.show"
] | [((688, 723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)', 'dpi': '(250)'}), '(figsize=(5, 5), dpi=250)\n', (698, 723), True, 'import matplotlib.pyplot as plt\n'), ((738, 762), 'numpy.array', 'np.array', (['[1000000000.0]'], {}), '([1000000000.0])\n', (746, 762), True, 'import numpy as np\n'), ((1171, 1195), 'matplotlib.pyplot.title', 'plt.title', (['ReferenceName'], {}), '(ReferenceName)\n', (1180, 1195), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency [Hz]"""'], {}), "('frequency [Hz]')\n", (1210, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Complex permittivity"""'], {}), "('Complex permittivity')\n", (1243, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1334), 'pprint.pprint', 'pprint', (['data'], {}), '(data)\n', (1328, 1334), False, 'from pprint import pprint\n'), ((1339, 1349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1347, 1349), True, 'import matplotlib.pyplot as plt\n'), ((918, 945), 'numpy.log10', 'np.log10', (['minimum_frequency'], {}), '(minimum_frequency)\n', (926, 945), True, 'import numpy as np\n'), ((946, 973), 'numpy.log10', 'np.log10', (['maximum_frequency'], {}), '(maximum_frequency)\n', (954, 973), True, 'import numpy as np\n'), ((1095, 1111), 'numpy.real', 'np.real', (['epsilon'], {}), '(epsilon)\n', (1102, 1111), True, 'import numpy as np\n'), ((6227, 6238), 'numpy.copy', 'np.copy', (['em'], {}), '(em)\n', (6234, 6238), True, 'import numpy as np\n'), ((1145, 1161), 'numpy.imag', 'np.imag', (['epsilon'], {}), '(epsilon)\n', (1152, 1161), True, 'import numpy as np\n'), ((6297, 6358), 'scipy.optimize.newton', 'newton', (['self.Mother', 'em[i]'], {'args': '(Gn[i], b[i])', 'maxiter': '(10000)'}), '(self.Mother, em[i], args=(Gn[i], b[i]), maxiter=10000)\n', (6303, 6358), False, 'from scipy.optimize import newton\n'), ((2200, 2213), 'numpy.ones', 'np.ones', 
(['(4,)'], {}), '((4,))\n', (2207, 2213), True, 'import numpy as np\n'), ((3013, 3029), 'numpy.real', 'np.real', (['epsilon'], {}), '(epsilon)\n', (3020, 3029), True, 'import numpy as np\n'), ((3076, 3092), 'numpy.imag', 'np.imag', (['epsilon'], {}), '(epsilon)\n', (3083, 3092), True, 'import numpy as np\n'), ((4804, 4817), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (4811, 4817), True, 'import numpy as np\n'), ((6446, 6462), 'numpy.real', 'np.real', (['epsilon'], {}), '(epsilon)\n', (6453, 6462), True, 'import numpy as np\n'), ((6509, 6525), 'numpy.imag', 'np.imag', (['epsilon'], {}), '(epsilon)\n', (6516, 6525), True, 'import numpy as np\n')] |
"""
Script for training the TempDPSOM model
Tensorboard instructions:
- from command line run: tensorboard --logdir="logs/{EXPERIMENT_NAME}/train" --port 8011
- go to: http://localhost:8011/
"""
import uuid
import sys
import timeit
from datetime import date
import numpy as np
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except:
import tensorflow as tf
from tqdm import tqdm
import sacred
from sacred.stflow import LogFileWriter
import math
import h5py
from sklearn import metrics
from TempDPSOM_model import TDPSOM
from utils import compute_finance_labels, print_trainable_vars, get_gradients, find_nearest, compute_metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
import sklearn
import random
import pickle
# Sacred experiment setup: every run is recorded by the FileStorageObserver
# under ../sacred_runs_finance.
ex = sacred.Experiment("hyperopt")
ex.observers.append(sacred.observers.FileStorageObserver("../sacred_runs_finance"))
# Strip backspaces/carriage returns so tqdm progress bars don't bloat the
# captured stdout stored by sacred.
ex.captured_out_filter = sacred.utils.apply_backspaces_and_linefeeds
@ex.config
def ex_config():
    """Sacred configuration for the experiment.
    Params:
        input_size (int): Length of the input vector.
        num_epochs (int): Number of training epochs.
        batch_size (int): Batch size for the training.
        latent_dim (int): Dimensionality of the T-DPSOM's latent space.
        som_dim (list): Dimensionality of the self-organizing map.
        learning_rate (float): Learning rate for the optimization.
        alpha (float): Student's t-distribution parameter.
        gamma (float): Weight for the KL term of the T-DPSOM clustering loss.
        beta (float): Weight for the SOM loss.
        kappa (float): Weight for the smoothness loss.
        theta (float): Weight for the VAE loss.
        eta (float): Weight for the prediction loss.
        epochs_pretrain (int): Number of VAE pretraining epochs.
        decay_factor (float): Factor for the learning rate decay.
        name (string): Name of the experiment.
        ex_name (string): Unique name of this particular run.
        logdir (path): Directory for the experiment logs.
        modelpath (path): Path for the model checkpoints.
        validation (bool): If "True" validation set is used for evaluation, otherwise test set is used.
        dropout (float): Dropout factor for the feed-forward layers of the VAE.
        prior (float): Weight of the regularization term of the ELBO.
        val_epochs (bool): If "True" clustering results are saved every 10 epochs on default output files.
        more_runs (bool): Indicator whether to run the job once (False) or multiple times (True) outputting mean and
        variance.
        annealtime (int): Number of steps over which the prior weight is annealed to 1.
        lstm_dim (int): Hidden-state size of the prediction LSTM.
        epochs_finetuning_pred (int): Number of prediction-head finetuning epochs.
        finance_data_path (str): Pickle file with per-company price features.
        N_companies_train (int): Number of companies sampled for the training set.
        T_finance_data (int): Length of each financial time series.
        scale_fin_data: sklearn scaler applied per company (falsy to disable).
    """
    # NOTE: sacred captures these local variable names as config keys, so the
    # names (and assignments) below must not be changed.
    input_size = 7  # 98
    num_epochs = 50
    batch_size = 40  # 300
    latent_dim = 5  # 50
    som_dim = [2, 2]  # [16,16]
    learning_rate = 0.0001  # 0.001
    alpha = 10.
    beta = 0.1  # 10.
    gamma = 2.5
    kappa = 10.  # 1.
    theta = 1.
    eta = 1.
    epochs_pretrain = 10  # 50
    epochs_finetuning_pred = 10
    decay_factor = 0.99
    name = ex.get_experiment_info()["name"]
    ex_name = "{}_LSTM_{}_{}-{}_{}_{}".format(name, latent_dim, som_dim[0], som_dim[1], str(date.today()),
                                              uuid.uuid4().hex[:5])
    logdir = "../logs/{}".format(ex_name)
    modelpath = "../models/{}/{}".format(ex_name, ex_name)
    validation = False
    dropout = 0.5
    prior = 0.00001
    annealtime = 200
    lstm_dim = 20  # 200
    val_epochs = False
    more_runs = False
    save_pretrain = False
    use_saved_pretrain = False
    benchmark = False  # Benchmark train time per epoch and return
    train_ratio = 1.0  # If changed, use a subset of the training data
    vae_nn_dim_1 = 50
    vae_nn_dim_2 = 200
    # finance TDPSOM params below
    finance_data_path = "../data/yf_basic_price_features.p"
    N_companies_train = 400
    T_finance_data = 144
    # TODO: implement rolling window scaling of time-series
    scale_fin_data = StandardScaler()  # [StandardScaler(), RobustScaler(), MinMaxScaler()]
    # scale_fin_data = MinMaxScaler()
    hyperparam_sweep_results = "fin_data_results_{}.txt".format(som_dim[0])
@ex.capture
def get_data(validation):
    """Load the saved eICU data and split it into train and val/test sets.

    Args:
        validation (bool): If True, evaluate on the held-out validation split;
            otherwise the final 15% of the data is used as the test set.

    Returns:
        tuple: (train data, val/test data, train labels, val/test labels).
    """
    # TO DOWNLOAD THE DATA FIRST
    hf = h5py.File('../data/eICU_data.csv', 'r')
    data_total = np.array(hf.get('x'))
    endpoints_total = np.array(hf.get('y'))
    hf.close()
    # The last 15% of the samples are reserved as the test set; the remaining
    # 85% are split 80/20 into train and validation.
    cutoff = int(len(data_total) * 0.85)
    data_train, data_val, y_train, endpoints_total_val = train_test_split(
        data_total[:cutoff],
        endpoints_total[:cutoff],
        test_size=0.20,
        random_state=42)
    if not validation:
        data_val = data_total[cutoff:]
        endpoints_total_val = endpoints_total[cutoff:]
    return data_train, data_val, y_train, endpoints_total_val
@ex.capture
def get_data_finance(finance_data_path, N_companies_train, T_finance_data, scale_fin_data):
    """Load the pickled per-company finance data and build train/eval arrays.

    Args:
        finance_data_path (str): Path to the pickled {ticker: DataFrame} dict.
        N_companies_train (int): Number of companies sampled (seeded) for training.
        T_finance_data (int): Number of trailing time steps kept per company.
        scale_fin_data: sklearn scaler fitted per company, or falsy to disable.

    Returns:
        tuple: (train_data, eval_data, train_labels, eval_labels), each a
        stacked array of shape (companies, T_finance_data, features/labels).
    """
    # Bug fix: the original `pickle.load(open(...))` never closed the file
    # handle; use a context manager instead.
    with open(finance_data_path, 'rb') as fh:
        data = pickle.load(fh)
    # Deterministic company split.
    random.seed(42)
    train_companies = random.sample(list(data.keys()), N_companies_train)
    eval_companies = [x for x in list(data.keys()) if x not in train_companies]
    # [16.3.] excluding BIIB for now in order to avoid nan loss
    train_companies.remove("BIIB")

    def _company_arrays(companies):
        # Build (features, labels) arrays for a list of tickers; shared by the
        # train and eval branches, which were previously duplicated.
        features, labels = [], []
        for comp in companies:
            data_comp, nr_labels = compute_finance_labels(data[comp])
            assert not data_comp.isnull().values.any(), "Sanity check for input data."
            window = data_comp.iloc[-T_finance_data:, :-nr_labels].values
            if scale_fin_data:
                window = scale_fin_data.fit_transform(window)
            features.append(window)
            labels.append(data_comp.iloc[-T_finance_data:, -nr_labels:].values)
        return np.stack(features), np.stack(labels)

    train_data, train_labels = _company_arrays(train_companies)
    eval_data, eval_labels = _company_arrays(eval_companies)
    return train_data, eval_data, train_labels, eval_labels
def get_normalized_data(data, patientid, mins, scales):
    """Return normalized feature values for a single patient.

    Selects the rows of ``data`` belonging to ``patientid``, applies the
    column-wise affine normalization ``(value - mins) / scales``, drops the
    identifier columns and replaces NaNs with 0.
    """
    patient_rows = data[data['patientunitstayid'] == patientid]
    normalized = (patient_rows - mins) / scales
    features = normalized.drop(["patientunitstayid", "ts"], axis=1)
    return features.fillna(0).values
@ex.capture
def get_data_synthetic(N_companies_train, T_finance_data, input_size):
    """Generate random synthetic data with the same layout as the finance
    loader: the last 100 'companies' form the evaluation split."""
    n_total = N_companies_train + 100
    data = np.random.normal(loc=0, scale=2, size=(n_total, T_finance_data, input_size))
    labels = np.random.normal(size=(n_total, T_finance_data, 2))
    # Min-max normalize each (time, feature) cell across companies.
    lo = data.min(axis=0)
    hi = data.max(axis=0)
    data = (data - lo) / (hi - lo)
    return data[:-100], data[-100:], labels[:-100], labels[-100:]
@ex.capture
def batch_generator(data_train, data_val, endpoints_total_val, batch_size, mode="train"):
    """Infinite generator over data batches.

    Args:
        data_train: training set.
        data_val: validation/test set.
        endpoints_total_val: labels of the validation/test set.
        batch_size (int): Batch size for the training.
        mode (str): 'train' yields (batch, index); 'val' yields
            (batch, label batch, index).

    Yields:
        See ``mode`` above; a trailing partial batch is never emitted.

    Raises:
        ValueError: if ``mode`` is neither 'train' nor 'val'.
    """
    while True:
        if mode == "train":
            n_batches = len(data_train) // batch_size
            for idx in range(n_batches):
                start = idx * batch_size
                yield data_train[start:start + batch_size], idx
        elif mode == "val":
            n_batches = len(data_val) // batch_size
            for idx in range(n_batches):
                start = idx * batch_size
                yield (data_val[start:start + batch_size],
                       endpoints_total_val[start:start + batch_size],
                       idx)
        else:
            raise ValueError("The mode has to be in {train, val}")
@ex.capture
def train_model(model, data_train, data_val, endpoints_total_val, lr_val, prior_val, num_epochs, batch_size, latent_dim,
                som_dim, learning_rate, epochs_pretrain, ex_name, logdir, modelpath, val_epochs, save_pretrain,
                use_saved_pretrain, benchmark, train_ratio, annealtime, lstm_dim, T_finance_data, epochs_finetuning_pred):
    """Trains the T-DPSOM model.
    Params:
        model (T-DPSOM): T-DPSOM model to train.
        data_train (np.array): Training set.
        data_val (np.array): Validation/test set.
        endpoints_total_val (np.array): Validation/test labels.
        lr_val (tf.Tensor): Placeholder for the learning rate value.
        num_epochs (int): Number of training epochs.
        batch_size (int): Batch size for the training.
        latent_dim (int): Dimensionality of the T-DPSOM's latent space.
        som_dim (list): Dimensionality of the self-organizing map.
        learning_rate (float): Learning rate for the optimization.
        epochs_pretrain (int): Number of VAE pretraining epochs.
        ex_name (string): Unique name of this particular run.
        logdir (path): Directory for the experiment logs.
        modelpath (path): Path for the model checkpoints.
        val_epochs (bool): If "True" clustering results are saved every 10 epochs on default output files.
        T_finance_data (int): length of financial time series
    """
    max_n_step = T_finance_data  # time-series length per sample
    epochs = 0
    iterations = 0
    pretrainpath = "../models/pretrain/LSTM"
    len_data_train = len(data_train)
    len_data_val = len(data_val)
    num_batches = len_data_train // batch_size
    train_gen = batch_generator(data_train, data_val, endpoints_total_val, mode="train")
    val_gen = batch_generator(data_train, data_val, endpoints_total_val, mode="val")
    saver = tf.train.Saver(max_to_keep=5)
    summaries = tf.summary.merge_all()
    # print trainable variables
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    print_trainable_vars(train_vars)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        test_losses = []
        test_losses_mean = []
        with LogFileWriter(ex):
            train_writer = tf.summary.FileWriter(logdir + "/train", sess.graph)
            test_writer = tf.summary.FileWriter(logdir + "/test", sess.graph)
        train_step_SOMVAE, train_step_ae, train_step_som, train_step_prob = model.optimize
        x = model.inputs
        p = model.p
        is_training = model.is_training
        graph = tf.get_default_graph()
        # Placeholders of the prediction LSTM and reconstruction decoder that
        # must be fed with zero dummies when those sub-graphs are not used.
        init_1 = graph.get_tensor_by_name("prediction/next_state/init_state:0")
        z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0")
        z_e_rec = graph.get_tensor_by_name('reconstruction_e/decoder/z_e:0')
        training_dic = {is_training: True, z_e_p: np.zeros((max_n_step * batch_size, latent_dim)),
                        init_1: np.zeros((2, batch_size, lstm_dim)), z_e_rec: np.zeros((max_n_step * batch_size, latent_dim))}
        pbar = tqdm(total=(num_epochs + epochs_pretrain * 3) * (num_batches))
        print("\n**********Starting job {}********* \n".format(ex_name))
        # All-zero dummy target distribution p used during pretraining.
        a = np.zeros((batch_size * max_n_step, som_dim[0] * som_dim[1]))
        dp = {p: a}
        dp.update(training_dic)
        if benchmark:
            ttime_per_epoch = []
            ttime_ae_per_epoch = []
            ttime_som_per_epoch = []
            ttime_pred_per_epoch = []
        if use_saved_pretrain:
            print("\n\nUsing Saved Pretraining...\n")
            saver.restore(sess, pretrainpath)
        else:
            # ---- Phase 1: VAE pretraining (reconstruction loss only). ----
            step_ = sess.run(model.global_step)
            print("\n\nAutoencoder Pretraining (step: {})...\n".format(step_))
            if benchmark:
                t_begin_all = timeit.default_timer()
            prior = 0
            for epoch in range(epochs_pretrain):
                if epoch > 10:
                    # Linearly anneal the prior/KL weight towards 1 after epoch 10.
                    prior = min(prior + (1. / annealtime), 1.)
                if benchmark:
                    t_begin = timeit.default_timer()
                for i in range(num_batches):
                    batch_data, ii = next(train_gen)
                    f_dic = {x: batch_data, lr_val: learning_rate, prior_val: prior}
                    f_dic.update(dp)
                    train_step_ae.run(feed_dict=f_dic)
                    if i % 3 == 0:
                        # Periodically log train/val reconstruction loss.
                        batch_val, _, ii = next(val_gen)
                        f_dic = {x: batch_val}
                        f_dic.update(dp)
                        test_loss, summary = sess.run([model.loss_reconstruction_ze, summaries], feed_dict=f_dic)
                        test_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                        f_dic = {x: batch_data}
                        f_dic.update(dp)
                        train_loss, summary = sess.run([model.loss_reconstruction_ze, summaries], feed_dict=f_dic)
                        train_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                    pbar.set_postfix(epoch=epoch, train_loss=train_loss, test_loss=test_loss, refresh=False)
                    pbar.update(1)
                if benchmark:
                    t_end = timeit.default_timer()
                    ttime_ae_per_epoch.append(t_end - t_begin)
            if benchmark:
                t_end_all = timeit.default_timer()
                ttime_ae_pretrain = t_end_all - t_begin_all
            # ---- Phase 2: SOM initialization in three stages with a
            # decreasing SOM learning rate (0.1, then 0.01 twice). ----
            step_ = sess.run(model.global_step)
            print("\n\nSOM initialization (step: {})...\n".format(step_))
            if benchmark:
                t_begin_all = timeit.default_timer()
            for epoch in range(epochs_pretrain // 3):
                if benchmark:
                    t_begin = timeit.default_timer()
                for i in range(num_batches):
                    batch_data, ii = next(train_gen)
                    f_dic = {x: batch_data, lr_val: 0.1}
                    f_dic.update(dp)
                    train_step_som.run(feed_dict=f_dic)
                    if i % 3 == 0:
                        batch_val, _, ii = next(val_gen)
                        f_dic = {x: batch_val}
                        f_dic.update(dp)
                        test_loss, summary = sess.run([model.loss_a, summaries], feed_dict=f_dic)
                        test_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                        f_dic = {x: batch_data}
                        f_dic.update(dp)
                        train_loss, summary = sess.run([model.loss_a, summaries], feed_dict=f_dic)
                        train_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                    pbar.set_postfix(epoch=epoch, train_loss=train_loss, test_loss=test_loss, refresh=False)
                    pbar.update(1)
                if benchmark:
                    t_end = timeit.default_timer()
                    ttime_som_per_epoch.append(t_end - t_begin)
            for epoch in range(epochs_pretrain // 3):
                if benchmark:
                    t_begin = timeit.default_timer()
                for i in range(num_batches):
                    batch_data, ii = next(train_gen)
                    f_dic = {x: batch_data, lr_val: 0.01}
                    f_dic.update(dp)
                    train_step_som.run(feed_dict=f_dic)
                    if i % 3 == 0:
                        batch_val, _, ii = next(val_gen)
                        f_dic = {x: batch_val}
                        f_dic.update(dp)
                        test_loss, summary = sess.run([model.loss_a, summaries], feed_dict=f_dic)
                        test_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                        f_dic = {x: batch_data}
                        f_dic.update(dp)
                        train_loss, summary = sess.run([model.loss_a, summaries], feed_dict=f_dic)
                        train_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                    pbar.set_postfix(epoch=epoch, train_loss=train_loss, test_loss=test_loss, refresh=False)
                    pbar.update(1)
                if benchmark:
                    t_end = timeit.default_timer()
                    ttime_som_per_epoch.append(t_end - t_begin)
            # NOTE(review): this third stage is identical to the second
            # (lr 0.01) — confirm the duplication is intentional.
            for epoch in range(epochs_pretrain // 3):
                if benchmark:
                    t_begin = timeit.default_timer()
                for i in range(num_batches):
                    batch_data, ii = next(train_gen)
                    f_dic = {x: batch_data, lr_val: 0.01}
                    f_dic.update(dp)
                    train_step_som.run(feed_dict=f_dic)
                    if i % 3 == 0:
                        batch_val, _, ii = next(val_gen)
                        f_dic = {x: batch_val}
                        f_dic.update(dp)
                        test_loss, summary = sess.run([model.loss_a, summaries], feed_dict=f_dic)
                        test_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                        f_dic = {x: batch_data}
                        f_dic.update(dp)
                        train_loss, summary = sess.run([model.loss_a, summaries], feed_dict=f_dic)
                        train_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                    pbar.set_postfix(epoch=epoch, train_loss=train_loss, test_loss=test_loss, refresh=False)
                    pbar.update(1)
                if benchmark:
                    t_end = timeit.default_timer()
                    ttime_som_per_epoch.append(t_end - t_begin)
            if benchmark:
                t_end_all = timeit.default_timer()
                ttime_som = t_end_all - t_begin_all
            if save_pretrain:
                saver.save(sess, pretrainpath)
        # ---- Phase 3: joint T-DPSOM training. ----
        step_ = sess.run(model.global_step)
        print("\n\nTraining... (step: {})\n".format(step_))
        if benchmark:
            t_begin_all = timeit.default_timer()
        prior = 0
        for epoch in range(num_epochs):
            if epoch > 10:
                prior = min(prior + (1. / annealtime), 1.)
            if benchmark:
                t_begin = timeit.default_timer()
            epochs += 1
            # print("\n", epochs)
            # Recompute the target distribution p over the full train and val
            # sets (in chunks to bound memory) once per epoch.
            # NOTE(review): f_dic built here is not passed to the sess.run
            # calls below — confirm the placeholder defaults are intended.
            f_dic = {x: data_train}
            f_dic.update(training_dic)
            q = []
            for t in range(19):
                q.extend(sess.run(model.q, feed_dict={
                    x: data_train[int(len(data_train) / 20) * t: int(len(data_train) / 20) * (t + 1)]}))
            q.extend(sess.run(model.q, feed_dict={x: data_train[int(len(data_train) / 20) * 19:]}))
            q = np.array(q)
            ppt = model.target_distribution(q)
            q = []
            f_dic = {x: data_val}
            f_dic.update(training_dic)
            for t in range(9):
                q.extend(sess.run(model.q, feed_dict={
                    x: data_val[int(len(data_val) / 10) * t: int(len(data_val) / 10) * (t + 1)]}))
            q.extend(sess.run(model.q, feed_dict={x: data_val[int(len(data_val) / 10) * 9:]}))
            q = np.array(q)
            ppv = model.target_distribution(q)
            for i in range(num_batches):
                iterations += 1
                batch_data, ii = next(train_gen)
                # Slice the per-sample target distribution for this batch.
                ftrain = {p: ppt[ii * batch_size * max_n_step: (ii + 1) * batch_size * max_n_step]}
                f_dic = {x: batch_data, lr_val: learning_rate, prior_val: prior}
                f_dic.update(ftrain)
                f_dic.update(training_dic)
                train_step_SOMVAE.run(feed_dict=f_dic)
                train_step_prob.run(feed_dict=f_dic)
                batch_val, _, ii = next(val_gen)
                fval = {p: ppv[ii * batch_size * max_n_step: (ii + 1) * batch_size * max_n_step]}
                f_dic = {x: batch_val}
                f_dic.update(fval)
                f_dic.update(training_dic)
                test_loss, summary = sess.run([model.loss, summaries], feed_dict=f_dic)
                test_losses.append(test_loss)
                if i % 3 == 0:
                    test_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                    f_dic = {x: batch_data}
                    f_dic.update(ftrain)
                    f_dic.update(training_dic)
                    train_loss, summary = sess.run([model.loss, summaries], feed_dict=f_dic)
                    if math.isnan(train_loss):
                        # Abort the whole run on divergence.
                        return None
                    train_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                if i % 1000 == 0:
                    test_loss_mean = np.mean(test_losses)
                    test_losses_mean.append(test_loss_mean)
                    test_losses = []
                if len(test_losses_mean) > 0:
                    test_s = test_losses_mean[-1]
                else:
                    test_s = test_losses_mean
                pbar.set_postfix(epoch=epoch, train_loss=train_loss, test_loss=test_s, refresh=False)
                pbar.update(1)
            if val_epochs == True and epoch % 5 == 0:
                path = "../models/exp/exp" + str(epoch) + "/LSTM"
                saver.save(sess, path)
                #results = evaluate_model(model, x, val_gen, len_data_val, modelpath, epochs)
            if benchmark:
                t_end = timeit.default_timer()
                ttime_per_epoch.append(t_end - t_begin)
        if benchmark:
            t_end_all = timeit.default_timer()
            ttime_training = t_end_all - t_begin_all
        # ---- Phase 4: prediction-head finetuning. ----
        step_ = sess.run(model.global_step)
        print("\n\nPrediction Finetuning... (step: {})\n".format(step_))
        if benchmark:
            t_begin_all = timeit.default_timer()
        for epoch in range(epochs_finetuning_pred):
            if benchmark:
                t_begin = timeit.default_timer()
            for i in range(num_batches):
                batch_data, ii = next(train_gen)
                f_dic = {x: batch_data, lr_val: learning_rate, prior_val: prior}
                f_dic.update(dp)
                train_step_prob.run(feed_dict=f_dic)
                if i % 3 == 0:
                    batch_val, _, ii = next(val_gen)
                    f_dic = {x: batch_val}
                    f_dic.update(dp)
                    test_loss, summary = sess.run([model.loss_prediction, summaries], feed_dict=f_dic)
                    test_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                    f_dic = {x: batch_data}
                    f_dic.update(dp)
                    train_loss, summary = sess.run([model.loss_prediction, summaries], feed_dict=f_dic)
                    train_writer.add_summary(summary, tf.train.global_step(sess, model.global_step))
                pbar.set_postfix(epoch=epoch, train_loss=train_loss, test_loss=test_loss, refresh=False)
                pbar.update(1)
            if benchmark:
                t_end = timeit.default_timer()
                ttime_pred_per_epoch.append(t_end - t_begin)
        if benchmark:
            t_end_all = timeit.default_timer()
            ttime_pred = t_end_all - t_begin_all
        saver.save(sess, modelpath)
        # results = evaluate_model(model, x, val_gen, len_data_val, modelpath, epochs)
        pbar.close()
        if benchmark:
            # In benchmark mode print the timing report and exit immediately.
            print("\nNumber of time series in train: {} %, {}".format(train_ratio, len(data_train)))
            print("SOM init time: {:.3f}".format(ttime_som))
            print("SOM init time per epoch: {:.3f}".format(np.mean(ttime_som_per_epoch)))
            print("AE pretrain time: {:.3f}".format(ttime_ae_pretrain))
            print("AE pretrain time per epoch: {:.3f}".format(np.mean(ttime_ae_per_epoch)))
            print("Training time: {:.3f}".format(ttime_training))
            print("Training time per epoch: {:.3f}".format(np.mean(ttime_per_epoch)))
            print("Pred finetuning time: {:.3f}".format(ttime_pred))
            print("Pred finetuning time per epoch: {:.3f}".format(np.mean(ttime_pred_per_epoch)))
            sys.exit(0)
# return results
@ex.capture
def evaluate_model(model, x, val_gen, len_data_val, modelpath, epochs, batch_size, som_dim, learning_rate, alpha, gamma,
                   beta , theta, epochs_pretrain, ex_name, kappa, dropout, prior, latent_dim, eta, lstm_dim, T_finance_data):
    """Evaluates the performance of the trained model in terms of normalized
    mutual information, adjusted mutual information and spatial autocorrelation
    (Moran's I) of the cluster assignments over the SOM grid.

    Args:
        model (T-DPSOM): Trained T-DPSOM model to evaluate.
        x (tf.Tensor): Input tensor or placeholder.
        val_gen (generator): Val/Test generator for the batches.
        len_data_val (int): Length of validation set.
        modelpath (path): Path from which to restore the model.
        epochs (int): number of epochs of training.
        batch_size (int): Batch size for the training.
        som_dim (list): Dimensionality of the self-organizing map.
        learning_rate (float): Learning rate for the optimization.
        alpha (float): Student's t-distribution parameter.
        gamma (float): Weight for the KL term of the PSOM clustering loss.
        beta (float): Weight for the SOM loss.
        theta (float): Weight for the VAE loss.
        epochs_pretrain (int): Number of VAE pretraining epochs.
        ex_name (string): Unique name of this particular run.
        kappa (float): Weight for the smoothness loss.
        dropout (float): Dropout factor for the feed-forward layers of the VAE.
        prior (float): Weight of the regularization term of the ELBO.
        latent_dim (int): Dimensionality of the T-DPSOM's latent space.
        eta (float): Weight for the prediction loss.
        lstm_dim (int): Dimensionality of the LSTM state fed to the prediction head.
        T_finance_data (int): Length (number of time steps) of each time series.
    Returns:
        dict: Evaluation results with keys "NMI_12", "NMI_6", "NMI_1",
            "AMI_1" and "MI" (Moran's I over the SOM grid).
    """
    max_n_step = T_finance_data  # length of the time-series
    saver = tf.train.Saver(keep_checkpoint_every_n_hours=2.)
    num_batches = len_data_val // batch_size
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, modelpath)
        is_training = model.is_training
        graph = tf.get_default_graph()
        # Look up the prediction/reconstruction placeholders so they can be fed
        # (with zero arrays) while fetching the evaluation tensors below.
        init_1 = graph.get_tensor_by_name("prediction/next_state/init_state:0")
        z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0")
        z_e_rec = graph.get_tensor_by_name('reconstruction_e/decoder/z_e:0')
        training_dic = {is_training: True, z_e_p: np.zeros((max_n_step * batch_size, latent_dim)),
                        init_1: np.zeros((2, batch_size, lstm_dim)), z_e_rec: np.zeros((max_n_step * batch_size, latent_dim))}
        test_k_all = []
        labels_val_all = []
        z_q_all = []
        z_e_all = []
        print("Evaluation...")
        # Accumulate hard assignments (k), ground-truth labels, soft assignments
        # (z_q) and sampled encodings (z_e_sample) over all validation batches.
        for i in range(num_batches):
            batch_data, batch_labels, ii = next(val_gen)
            f_dic = {x: batch_data}
            f_dic.update(training_dic)
            test_k_all.extend(sess.run(model.k, feed_dict=f_dic))
            labels_val_all.extend(batch_labels)
            z_q_all.extend(sess.run(model.z_q, feed_dict=f_dic))
            z_e_all.extend(sess.run(model.z_e_sample, feed_dict=f_dic))
        labels_val_all = np.array(labels_val_all)
        test_k_all = np.array(test_k_all)
        labels_val_all = np.reshape(labels_val_all, (-1, labels_val_all.shape[-1]))
        # print("Mean: {:.3f}, Std: {:.3f}".format(np.mean(labels_val_all[:,3]), np.std(labels_val_all[:,3])))
        # NMI_24 = metrics.normalized_mutual_info_score(labels_val_all[:, 3], test_k_all, average_method='geometric')
        # NMI/AMI between cluster assignments and the first three label columns.
        # (Column semantics — presumably different label horizons, cf. the
        # _12/_6/_1 suffixes — are defined by the data pipeline; not visible here.)
        NMI_12 = metrics.normalized_mutual_info_score(labels_val_all[:, 2], test_k_all, average_method='geometric')
        NMI_6 = metrics.normalized_mutual_info_score(labels_val_all[:, 1], test_k_all, average_method='geometric')
        NMI_1 = metrics.normalized_mutual_info_score(labels_val_all[:, 0], test_k_all, average_method='geometric')
        AMI_1 = metrics.adjusted_mutual_info_score(test_k_all, labels_val_all[:, 0])
        # Global mean of label column 0; used as the reference mean in Moran's I below.
        mean = np.sum(labels_val_all[:, 0]) / len(labels_val_all[:, 0])
        ones = np.ones((len(np.reshape(test_k_all, (-1)))))
        clust_matr1 = np.zeros(som_dim[0] * som_dim[1])
        labels = labels_val_all[:, 0]
        # Mean of label column 0 within each SOM node (0 for empty nodes).
        for i in range(som_dim[0] * som_dim[1]):
            dd = np.sum(ones[np.where(np.reshape(test_k_all, (-1)) == i)])
            if dd == 0:
                s1 = 0
            else:
                s1 = np.sum(labels[np.where(np.reshape(test_k_all, (-1)) == i)]) / np.sum(
                    ones[np.where(np.reshape(test_k_all, (-1)) == i)])
            clust_matr1[i] = s1
        sd = som_dim[0]*som_dim[1]
        k = np.arange(0, sd)
        k1 = k // som_dim[0]
        k2 = k % som_dim[1]
        # Node-to-node weights exp(-(d1 + d2)) using wrap-around (toroidal)
        # grid distances along each SOM axis.
        W = np.zeros((sd, sd))
        for i in range(sd):
            for j in range(sd):
                d1 = np.abs((k1[i] - k1[j]))
                d2 = np.abs((k2[i] - k2[j]))
                d1 = min(som_dim[0] - d1, d1)
                d2 = min(som_dim[1] - d2, d2)
                W[i, j] = np.exp(-(d1 + d2))
        # Moran's I: I = (N / sum(W)) * sum_ij w_ij (x_i - x̄)(x_j - x̄) / sum_i (x_i - x̄)^2
        # NOTE(review): x̄ here is the per-sample label mean ("mean" above), not the
        # mean of the per-node values in clust_matr1 — confirm this is intended.
        M = 0
        N_n = 0
        for i in range(sd):
            for j in range(sd):
                M += (clust_matr1[i] - mean) * (clust_matr1[j] - mean) * W[i, j]
        for i in range(sd):
            N_n += (clust_matr1[i] - mean) ** 2
        W_n = np.sum(W)
        I = M * sd / (N_n * W_n)
    results = {}
    # results["NMI_24"] = NMI_24
    results["NMI_12"] = NMI_12
    results["NMI_6"] = NMI_6
    results["NMI_1"] = NMI_1
    results["AMI_1"] = AMI_1
    results["MI"] = I
    # Append one result line per run with the full hyper-parameter configuration.
    f = open("results_eICU.txt", "a+")
    f.write("Epochs= %d, som_dim=[%d,%d], latent_dim= %d, batch_size= %d, learning_rate= %f, "
            "theta= %f, eta= %f, beta= %f, alpha=%f, gamma=%f, epochs_pretrain=%d, dropout= %f, prior= %f"
            % (epochs, som_dim[0], som_dim[1], latent_dim, batch_size, learning_rate, theta, eta, beta,
               alpha, gamma, epochs_pretrain, dropout, prior))
    f.write(", kappa= %f, NMI12: %f, NMI6: %f, NMI1: %f, AMI1: %f, I: %f.Name: %r \n"
            % (kappa, results["NMI_12"], results["NMI_6"], results["NMI_1"], results["AMI_1"],
               results["MI"], ex_name))
    f.close()
    return results
@ex.capture
def z_dist_flat(z_e, embeddings, som_dim, latent_dim):
    """Squared Euclidean distance from every encoding to every SOM embedding.

    Args:
        z_e: Encodings, shape (n_samples, latent_dim).
        embeddings: SOM embedding grid; flattened here to (som_dim[0]*som_dim[1], -1).
        som_dim: Two-element SOM grid size.
        latent_dim: Dimensionality of the latent space.

    Returns:
        Array of shape (n_samples, som_dim[0]*som_dim[1]) with squared distances.
    """
    n_nodes = som_dim[0] * som_dim[1]
    grid = np.reshape(embeddings, (n_nodes, -1))
    codes = np.reshape(z_e, (z_e.shape[0], 1, latent_dim))
    # Broadcasting (n, 1, d) against (n_nodes, d) replaces an explicit tile.
    return np.sum(np.square(codes - grid), axis=-1)
@ex.automain
def main(input_size, latent_dim, som_dim, learning_rate, decay_factor, alpha, beta, gamma, theta, ex_name, kappa, prior,
         more_runs, dropout, eta, epochs_pretrain, batch_size, num_epochs, train_ratio, annealtime, modelpath, lstm_dim,
         T_finance_data, vae_nn_dim_1, vae_nn_dim_2, scale_fin_data, epochs_finetuning_pred, hyperparam_sweep_results):
    """Entry point: train T-DPSOM on the finance data, then dump evaluation artifacts.

    Builds the model, trains it via train_model(), then restores the saved
    checkpoint into a fresh graph and collects — for both the validation and
    the training set — hard/soft cluster assignments, latent encodings, input
    reconstructions and one-step-ahead predictions. The collected arrays are
    pickled to ../logs/<ex_name>/output.p, summary metrics are computed via
    compute_metrics(), and a result line is appended to the
    hyperparam_sweep_results file. All parameters are injected by sacred's
    @ex.automain from the experiment configuration.
    """
    input_channels = input_size
    # Learning rate and prior weight are fed through placeholders-with-default,
    # so they can be overridden per session.run call during training.
    lr_val = tf.placeholder_with_default(learning_rate, [])
    prior_val = tf.placeholder_with_default(prior, [])
    model = TDPSOM(input_size=input_size, latent_dim=latent_dim, som_dim=som_dim, learning_rate=lr_val,
                   decay_factor=decay_factor, dropout=dropout, input_channels=input_channels, alpha=alpha, beta=beta,
                   eta=eta, kappa=kappa, theta=theta, gamma=gamma, prior=prior, lstm_dim=lstm_dim,
                   vae_nn_dim_1=vae_nn_dim_1, vae_nn_dim_2=vae_nn_dim_2)
    # data_train, data_val, _, endpoints_total_val = get_data()
    data_train, data_val, _, endpoints_total_val = get_data_finance()
    # data_train, data_val, _, endpoints_total_val = get_data_synthetic()
    # Optionally train on only a fraction of the training series.
    if train_ratio<1.0:
        data_train=data_train[:int(len(data_train)*train_ratio)]
    # results = train_model(model, data_train, data_val, endpoints_total_val, lr_val, prior_val)
    train_model(model, data_train, data_val, endpoints_total_val, lr_val, prior_val)
    #################################################################################################################################################
    # Post-training evaluation: rebuild a clean graph and restore the checkpoint.
    tf.reset_default_graph()
    val_gen = batch_generator(data_train, data_val, endpoints_total_val, mode="val")
    train_gen = batch_generator(data_train, data_val, endpoints_total_val, mode="train")
    num_batches = len(data_val) // batch_size
    num_batches_train = len(data_train) // batch_size
    # NOTE(review): num_pred and som are only used by the commented-out
    # multi-step prediction section below.
    num_pred = 6
    som = som_dim[0] * som_dim[1]
    max_n_step = T_finance_data  # length of the time-series
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.import_meta_graph(modelpath + ".meta")
        saver.restore(sess, modelpath)
        # Resolve the tensors of interest from the restored graph by name.
        graph = tf.get_default_graph()
        k = graph.get_tensor_by_name("k/k:0")
        z_e = graph.get_tensor_by_name("z_e_sample/z_e:0")
        next_z_e = graph.get_tensor_by_name("prediction/next_z_e:0")
        x = graph.get_tensor_by_name("inputs/x:0")
        is_training = graph.get_tensor_by_name("is_training/is_training:0")
        graph = tf.get_default_graph()
        init_1 = graph.get_tensor_by_name("prediction/next_state/init_state:0")
        z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0")
        state1 = graph.get_tensor_by_name("prediction/next_state/next_state:0")
        q = graph.get_tensor_by_name("q/distribution/q:0")
        embeddings = graph.get_tensor_by_name("embeddings/embeddings:0")
        z_p = graph.get_tensor_by_name('reconstruction_e/decoder/z_e:0')
        reconstruction = graph.get_tensor_by_name("reconstruction_e/x_hat:0")
        # NOTE(review): this assignment shadows the module-level z_dist_flat()
        # helper for the rest of main (the commented-out section below appears
        # to call the helper, which would break if re-enabled as-is).
        z_dist_flat = graph.get_tensor_by_name("k/z_dist_flat/z_dist_flat:0")
        print("Evaluation...")
        # Zero feeds for the prediction-path placeholders. NOTE(review): these
        # are sized by len(data_val) while the batches fed to x hold batch_size
        # series — verify the shapes are actually consumed as intended.
        training_dic = {is_training: True, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)),
                        init_1: np.zeros((2, batch_size, lstm_dim)),
                        z_p: np.zeros((max_n_step * len(data_val), latent_dim))}
        save_dict = {}
        # ============== save eval clusters/recons/preds ===========================
        k_all = []
        z_e_all = []
        z_q_all = []
        qq = []
        x_rec = []
        z_dist_flat_all = []
        x_hat_all = []
        # Collect assignments/encodings/reconstructions over the validation set.
        for i in range(num_batches):
            batch_data, batch_labels, ii = next(val_gen)
            f_dic = {x: batch_data}
            k_all.extend(sess.run(k, feed_dict=f_dic))
            # NOTE(review): q is fetched twice per batch (z_q_all and qq below).
            z_q_all.extend(sess.run(q, feed_dict=f_dic))
            z_e_all.extend(sess.run(z_e, feed_dict=f_dic))
            z_dist_flat_all.extend(sess.run(z_dist_flat, feed_dict=f_dic))
            qq.extend(sess.run(q, feed_dict=f_dic))
            f_dic.update(training_dic)
            assert f_dic[is_training] is True
            x_rec.extend(sess.run(reconstruction, feed_dict=f_dic))
            # predictions
            # One-step-ahead prediction: fetch the predicted latents, then feed
            # them back through the decoder to obtain predicted inputs.
            next_z_e_ = sess.run(next_z_e, feed_dict=f_dic)
            f_dic.update({is_training: False, z_p: np.reshape(next_z_e_, (-1, latent_dim))})
            x_hat_all.extend(sess.run(reconstruction, feed_dict=f_dic))
        z_e_all = np.array(z_e_all)
        k_all = np.array(k_all)
        qq = np.array(qq)
        x_rec = np.array(x_rec)
        z_e_all = z_e_all.reshape((-1, max_n_step, latent_dim))
        z_dist_flat_all = np.array(z_dist_flat_all)
        x_hat_all = np.array(x_hat_all)
        # k_all = k_all.reshape((-1, max_n_step))
        save_dict["x_rec_eval"] = x_rec
        save_dict["k_eval"] = k_all
        save_dict["k_dist_eval"] = z_dist_flat_all
        save_dict["x_preds_eval"] = x_hat_all
        # =============================================================================
        # ============== save train clusters/recons/preds ===========================
        k_all_train = []
        z_e_all_train = []
        z_q_all_train = []
        qq_train = []
        x_rec_train = []
        z_dist_flat_all_train = []
        x_hat_all_train = []
        # Same collection pass, this time over the training set.
        for i in range(num_batches_train):
            batch_data, ii = next(train_gen)
            f_dic = {x: batch_data}
            k_all_train.extend(sess.run(k, feed_dict=f_dic))
            z_q_all_train.extend(sess.run(q, feed_dict=f_dic))
            z_e_all_train.extend(sess.run(z_e, feed_dict=f_dic))
            z_dist_flat_all_train.extend(sess.run(z_dist_flat, feed_dict=f_dic))
            qq_train.extend(sess.run(q, feed_dict=f_dic))
            f_dic.update(training_dic)
            assert f_dic[is_training] is True
            x_rec_train.extend(sess.run(reconstruction, feed_dict=f_dic))
            # predictions
            next_z_e_ = sess.run(next_z_e, feed_dict=f_dic)
            f_dic.update({is_training: False, z_p: np.reshape(next_z_e_, (-1, latent_dim))})
            x_hat_all_train.extend(sess.run(reconstruction, feed_dict=f_dic))
        z_e_all_train = np.array(z_e_all_train)
        k_all_train = np.array(k_all_train)
        qq_train = np.array(qq_train)
        x_rec_train = np.array(x_rec_train)
        z_e_all_train = z_e_all_train.reshape((-1, max_n_step, latent_dim))
        z_dist_flat_all_train = np.array(z_dist_flat_all_train)
        # k_all_train = k_all_train.reshape((-1, max_n_step))
        x_hat_all_train = np.array(x_hat_all_train)
        save_dict["x_rec_train"] = x_rec_train
        save_dict["k_train"] = k_all_train
        save_dict["k_dist_train"] = z_dist_flat_all_train
        save_dict["x_preds_train"] = x_hat_all_train
        # Summary metrics plus one appended result line for the hyper-parameter sweep.
        results_dict = compute_metrics(data_train, data_val, save_dict, T=T_finance_data, som_grid=som_dim)
        f = open(hyperparam_sweep_results, "a+")
        f.write("Epochs= %d, som_dim=[%d,%d], latent_dim= %d, batch_size= %d, learning_rate= %f, "
                "theta= %f, eta= %f, beta= %f, alpha=%f, gamma=%f, epochs_pretrain=%d, dropout= %f, prior= %f, kapa= %f,"
                "vae_dim_1=%f, vae_dim_2=%f, lstm_dim=%f, T=%f, epochs_finetuning_pred=%f, "
                % (num_epochs, som_dim[0], som_dim[1], latent_dim, batch_size, learning_rate, theta, eta, beta,
                   alpha, gamma, epochs_pretrain, dropout, prior, kappa, vae_nn_dim_1, vae_nn_dim_2, lstm_dim,
                   T_finance_data, epochs_finetuning_pred))
        f.write("scale_fin_data={}, results={}, Name={} \n".format(str(scale_fin_data), str(results_dict), ex_name))
        # save recons/preds/clusters
        with open('../logs/{}/output.p'.format(ex_name), 'wb') as file:
            pickle.dump(save_dict, file)
        # =============================================================================
        # t = max_n_step - num_pred
        #
        # embeddings = sess.run(embeddings, feed_dict={x: data_val[:, :t, :]})
        # embeddings = np.reshape(embeddings, (-1, latent_dim))
        #
        # z_e_o = z_e_all[:, :t, :]
        # k_o = k_all[:, :t]
        # k_eval = []
        # next_z_e_o = []
        # state1_o = []
        # for i in range(num_batches):
        #     batch_data, batch_labels, ii = next(val_gen)
        #     batch_data = batch_data[:, :t, :]
        #     f_dic = {x: batch_data}
        #     f_dic.update(training_dic)
        #     next_z_e_o.extend(sess.run(next_z_e, feed_dict=f_dic))
        #     if i == 0:
        #         state1_o = sess.run(state1, feed_dict=f_dic)
        #     else:
        #         state1_o = np.concatenate([state1_o, sess.run(state1, feed_dict=f_dic)], axis=1)
        # next_z_e_o = np.array(next_z_e_o)
        # state1_o = np.array(state1_o)
        #
        # next_z_e_o_all = np.reshape(next_z_e_o[:, -1, :], (-1, 1, latent_dim))
        # next_z_e_o = next_z_e_o[:, -1, :]
        # k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1)
        # k_o = np.concatenate([k_o, np.expand_dims(k_next, 1)], axis=1)
        # z_e_o = np.concatenate([z_e_o, np.expand_dims(next_z_e_o, 1)], axis=1)
        # f_dic = {x: np.zeros((len(data_val), 1, input_size)), is_training: False,
        #          z_e_p: np.zeros((1 * len(data_val), latent_dim)),
        #          z_p: next_z_e_o, init_1: np.zeros((2, batch_size, lstm_dim))}
        # x_pred_hat = np.reshape(sess.run(reconstruction, feed_dict=f_dic), (-1, 1, input_size))
        #
        # n_val = len(data_val)
        # for i in range(num_pred - 1):
        #     print(i)
        #     inp = data_val[:n_val, (t + i), :]
        #     f_dic = {x: np.reshape(inp, (inp.shape[0], 1, inp.shape[1]))}
        #     val_dic = {is_training: False, z_e_p: next_z_e_o, init_1: state1_o,
        #                z_p: np.zeros((max_n_step * len(inp), latent_dim))}
        #     f_dic.update(val_dic)
        #     next_z_e_o = sess.run(next_z_e, feed_dict=f_dic)
        #     state1_o = sess.run(state1, feed_dict=f_dic)
        #     next_z_e_o_all = np.concatenate([next_z_e_o_all, next_z_e_o], axis=1)
        #     k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1)
        #     k_o = np.concatenate([k_o, np.expand_dims(k_next, 1)], axis=1)
        #     z_e_o = np.concatenate([z_e_o, next_z_e_o], axis=1)
        #     next_z_e_o = np.reshape(next_z_e_o, (-1, latent_dim))
        #     f_dic = {x: np.zeros((len(data_val), 1, input_size)), is_training: False,
        #              z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)),
        #              z_p: next_z_e_o, init_1: np.zeros((2, batch_size, lstm_dim))}
        #     final_x = sess.run(reconstruction, feed_dict=f_dic)
        #     x_pred_hat = np.concatenate([x_pred_hat, np.reshape(final_x, (-1, 1, input_size))], axis=1)
        #
        # f_dic = {x: np.zeros((n_val, 1, input_size)), is_training: False, z_e_p: np.zeros((max_n_step * n_val, latent_dim)),
        #          z_p: z_e_all[:, t - 1, :], init_1: np.zeros((2, batch_size, lstm_dim))}
        # final_x = sess.run(reconstruction, feed_dict=f_dic)
        #
        # pred_ze = sklearn.metrics.mean_squared_error(np.reshape(next_z_e_o_all[:, :], (-1, latent_dim)),
        #                                              np.reshape(z_e_all[:, -num_pred:], (-1, latent_dim)))
        # pred_rec = sklearn.metrics.mean_squared_error(np.reshape(x_rec, (-1, input_size)),
        #                                               np.reshape(data_val[:n_val, :], (-1, input_size)))
        # pred_xhat = sklearn.metrics.mean_squared_error(np.reshape(x_pred_hat, (-1, input_size)),
        #                                                np.reshape(data_val[:n_val, -num_pred:], (-1, input_size)))
        #
        # f = open("results_eICU_pred.txt", "a+")
        # f.write("Epochs= %d, som_dim=[%d,%d], latent_dim= %d, batch_size= %d, learning_rate= %f, "
        #         "theta= %f, eta= %f, beta= %f, alpha=%f, gamma=%f, epochs_pretrain=%d, dropout= %f, annealtime= %d, "
        #         % (num_epochs, som_dim[0], som_dim[1], latent_dim, batch_size, learning_rate, theta, eta, beta,
        #            alpha, gamma, epochs_pretrain, dropout, annealtime))
        # f.write(", kappa= %f, pred_ze: %f, pred_rec: %f, pred_xhat: %f.Name: %r \n"
        #         % (kappa, pred_ze, pred_rec, pred_xhat, ex_name))
        # f.close()
    #################################################################################################################################################
| [
"TempDPSOM_model.TDPSOM",
"numpy.array",
"sacred.stflow.LogFileWriter",
"sys.exit",
"sklearn.metrics.normalized_mutual_info_score",
"numpy.arange",
"numpy.mean",
"numpy.reshape",
"tensorflow.Session",
"math.isnan",
"numpy.exp",
"numpy.stack",
"utils.print_trainable_vars",
"tensorflow.get_d... | [((844, 873), 'sacred.Experiment', 'sacred.Experiment', (['"""hyperopt"""'], {}), "('hyperopt')\n", (861, 873), False, 'import sacred\n'), ((330, 354), 'tensorflow.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (352, 354), True, 'import tensorflow as tf\n'), ((894, 956), 'sacred.observers.FileStorageObserver', 'sacred.observers.FileStorageObserver', (['"""../sacred_runs_finance"""'], {}), "('../sacred_runs_finance')\n", (930, 956), False, 'import sacred\n'), ((4104, 4120), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4118, 4120), False, 'from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler\n'), ((4839, 4878), 'h5py.File', 'h5py.File', (['"""../data/eICU_data.csv"""', '"""r"""'], {}), "('../data/eICU_data.csv', 'r')\n", (4848, 4878), False, 'import h5py\n'), ((5832, 5847), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (5843, 5847), False, 'import random\n'), ((6706, 6726), 'numpy.stack', 'np.stack', (['train_data'], {}), '(train_data)\n', (6714, 6726), True, 'import numpy as np\n'), ((6746, 6768), 'numpy.stack', 'np.stack', (['train_labels'], {}), '(train_labels)\n', (6754, 6768), True, 'import numpy as np\n'), ((7362, 7381), 'numpy.stack', 'np.stack', (['eval_data'], {}), '(eval_data)\n', (7370, 7381), True, 'import numpy as np\n'), ((7400, 7421), 'numpy.stack', 'np.stack', (['eval_labels'], {}), '(eval_labels)\n', (7408, 7421), True, 'import numpy as np\n'), ((7815, 7911), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(2)', 'size': '(N_companies_train + 100, T_finance_data, input_size)'}), '(loc=0, scale=2, size=(N_companies_train + 100,\n T_finance_data, input_size))\n', (7831, 7911), True, 'import numpy as np\n'), ((7921, 7988), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N_companies_train + 100, T_finance_data, 2)'}), '(size=(N_companies_train + 100, T_finance_data, 2))\n', (7937, 7988), True, 'import numpy as 
np\n'), ((11984, 12013), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(5)'}), '(max_to_keep=5)\n', (11998, 12013), True, 'import tensorflow as tf\n'), ((12030, 12052), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (12050, 12052), True, 'import tensorflow as tf\n'), ((12103, 12154), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (12120, 12154), True, 'import tensorflow as tf\n'), ((12159, 12191), 'utils.print_trainable_vars', 'print_trainable_vars', (['train_vars'], {}), '(train_vars)\n', (12179, 12191), False, 'from utils import compute_finance_labels, print_trainable_vars, get_gradients, find_nearest, compute_metrics\n'), ((28294, 28343), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'keep_checkpoint_every_n_hours': '(2.0)'}), '(keep_checkpoint_every_n_hours=2.0)\n', (28308, 28343), True, 'import tensorflow as tf\n'), ((32809, 32862), 'numpy.reshape', 'np.reshape', (['embeddings', '(som_dim[0] * som_dim[1], -1)'], {}), '(embeddings, (som_dim[0] * som_dim[1], -1))\n', (32819, 32862), True, 'import numpy as np\n'), ((32869, 32915), 'numpy.reshape', 'np.reshape', (['z_e', '(z_e.shape[0], 1, latent_dim)'], {}), '(z_e, (z_e.shape[0], 1, latent_dim))\n', (32879, 32915), True, 'import numpy as np\n'), ((32924, 32967), 'numpy.tile', 'np.tile', (['z', '[1, som_dim[0] * som_dim[1], 1]'], {}), '(z, [1, som_dim[0] * som_dim[1], 1])\n', (32931, 32967), True, 'import numpy as np\n'), ((32978, 32996), 'numpy.square', 'np.square', (['(z - emb)'], {}), '(z - emb)\n', (32987, 32996), True, 'import numpy as np\n'), ((33012, 33035), 'numpy.sum', 'np.sum', (['z_dist'], {'axis': '(-1)'}), '(z_dist, axis=-1)\n', (33018, 33035), True, 'import numpy as np\n'), ((33482, 33528), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['learning_rate', '[]'], {}), '(learning_rate, [])\n', (33509, 33528), True, 'import tensorflow as tf\n'), 
((33545, 33583), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['prior', '[]'], {}), '(prior, [])\n', (33572, 33583), True, 'import tensorflow as tf\n'), ((33597, 33938), 'TempDPSOM_model.TDPSOM', 'TDPSOM', ([], {'input_size': 'input_size', 'latent_dim': 'latent_dim', 'som_dim': 'som_dim', 'learning_rate': 'lr_val', 'decay_factor': 'decay_factor', 'dropout': 'dropout', 'input_channels': 'input_channels', 'alpha': 'alpha', 'beta': 'beta', 'eta': 'eta', 'kappa': 'kappa', 'theta': 'theta', 'gamma': 'gamma', 'prior': 'prior', 'lstm_dim': 'lstm_dim', 'vae_nn_dim_1': 'vae_nn_dim_1', 'vae_nn_dim_2': 'vae_nn_dim_2'}), '(input_size=input_size, latent_dim=latent_dim, som_dim=som_dim,\n learning_rate=lr_val, decay_factor=decay_factor, dropout=dropout,\n input_channels=input_channels, alpha=alpha, beta=beta, eta=eta, kappa=\n kappa, theta=theta, gamma=gamma, prior=prior, lstm_dim=lstm_dim,\n vae_nn_dim_1=vae_nn_dim_1, vae_nn_dim_2=vae_nn_dim_2)\n', (33603, 33938), False, 'from TempDPSOM_model import TDPSOM\n'), ((34613, 34637), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (34635, 34637), True, 'import tensorflow as tf\n'), ((6206, 6240), 'utils.compute_finance_labels', 'compute_finance_labels', (['data[comp]'], {}), '(data[comp])\n', (6228, 6240), False, 'from utils import compute_finance_labels, print_trainable_vars, get_gradients, find_nearest, compute_metrics\n'), ((6869, 6903), 'utils.compute_finance_labels', 'compute_finance_labels', (['data[comp]'], {}), '(data[comp])\n', (6891, 6903), False, 'from utils import compute_finance_labels, print_trainable_vars, get_gradients, find_nearest, compute_metrics\n'), ((12202, 12214), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (12212, 12214), True, 'import tensorflow as tf\n'), ((12713, 12735), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12733, 12735), True, 'import tensorflow as tf\n'), ((13214, 13274), 'tqdm.tqdm', 'tqdm', ([], 
{'total': '((num_epochs + epochs_pretrain * 3) * num_batches)'}), '(total=(num_epochs + epochs_pretrain * 3) * num_batches)\n', (13218, 13274), False, 'from tqdm import tqdm\n'), ((13359, 13419), 'numpy.zeros', 'np.zeros', (['(batch_size * max_n_step, som_dim[0] * som_dim[1])'], {}), '((batch_size * max_n_step, som_dim[0] * som_dim[1]))\n', (13367, 13419), True, 'import numpy as np\n'), ((28398, 28410), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (28408, 28410), True, 'import tensorflow as tf\n'), ((28568, 28590), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (28588, 28590), True, 'import tensorflow as tf\n'), ((29626, 29650), 'numpy.array', 'np.array', (['labels_val_all'], {}), '(labels_val_all)\n', (29634, 29650), True, 'import numpy as np\n'), ((29672, 29692), 'numpy.array', 'np.array', (['test_k_all'], {}), '(test_k_all)\n', (29680, 29692), True, 'import numpy as np\n'), ((29718, 29776), 'numpy.reshape', 'np.reshape', (['labels_val_all', '(-1, labels_val_all.shape[-1])'], {}), '(labels_val_all, (-1, labels_val_all.shape[-1]))\n', (29728, 29776), True, 'import numpy as np\n'), ((30023, 30125), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['labels_val_all[:, 2]', 'test_k_all'], {'average_method': '"""geometric"""'}), "(labels_val_all[:, 2], test_k_all,\n average_method='geometric')\n", (30059, 30125), False, 'from sklearn import metrics\n'), ((30138, 30240), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['labels_val_all[:, 1]', 'test_k_all'], {'average_method': '"""geometric"""'}), "(labels_val_all[:, 1], test_k_all,\n average_method='geometric')\n", (30174, 30240), False, 'from sklearn import metrics\n'), ((30253, 30355), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['labels_val_all[:, 0]', 'test_k_all'], {'average_method': '"""geometric"""'}), "(labels_val_all[:, 0], test_k_all,\n 
average_method='geometric')\n", (30289, 30355), False, 'from sklearn import metrics\n'), ((30368, 30436), 'sklearn.metrics.adjusted_mutual_info_score', 'metrics.adjusted_mutual_info_score', (['test_k_all', 'labels_val_all[:, 0]'], {}), '(test_k_all, labels_val_all[:, 0])\n', (30402, 30436), False, 'from sklearn import metrics\n'), ((30592, 30625), 'numpy.zeros', 'np.zeros', (['(som_dim[0] * som_dim[1])'], {}), '(som_dim[0] * som_dim[1])\n', (30600, 30625), True, 'import numpy as np\n'), ((31095, 31111), 'numpy.arange', 'np.arange', (['(0)', 'sd'], {}), '(0, sd)\n', (31104, 31111), True, 'import numpy as np\n'), ((31181, 31199), 'numpy.zeros', 'np.zeros', (['(sd, sd)'], {}), '((sd, sd))\n', (31189, 31199), True, 'import numpy as np\n'), ((31749, 31758), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (31755, 31758), True, 'import numpy as np\n'), ((35032, 35044), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (35042, 35044), True, 'import tensorflow as tf\n'), ((35122, 35169), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(modelpath + '.meta')"], {}), "(modelpath + '.meta')\n", (35148, 35169), True, 'import tensorflow as tf\n'), ((35225, 35247), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (35245, 35247), True, 'import tensorflow as tf\n'), ((35565, 35587), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (35585, 35587), True, 'import tensorflow as tf\n'), ((37581, 37598), 'numpy.array', 'np.array', (['z_e_all'], {}), '(z_e_all)\n', (37589, 37598), True, 'import numpy as np\n'), ((37615, 37630), 'numpy.array', 'np.array', (['k_all'], {}), '(k_all)\n', (37623, 37630), True, 'import numpy as np\n'), ((37644, 37656), 'numpy.array', 'np.array', (['qq'], {}), '(qq)\n', (37652, 37656), True, 'import numpy as np\n'), ((37673, 37688), 'numpy.array', 'np.array', (['x_rec'], {}), '(x_rec)\n', (37681, 37688), True, 'import numpy as np\n'), ((37779, 37804), 'numpy.array', 'np.array', 
(['z_dist_flat_all'], {}), '(z_dist_flat_all)\n', (37787, 37804), True, 'import numpy as np\n'), ((37825, 37844), 'numpy.array', 'np.array', (['x_hat_all'], {}), '(x_hat_all)\n', (37833, 37844), True, 'import numpy as np\n'), ((39328, 39351), 'numpy.array', 'np.array', (['z_e_all_train'], {}), '(z_e_all_train)\n', (39336, 39351), True, 'import numpy as np\n'), ((39374, 39395), 'numpy.array', 'np.array', (['k_all_train'], {}), '(k_all_train)\n', (39382, 39395), True, 'import numpy as np\n'), ((39415, 39433), 'numpy.array', 'np.array', (['qq_train'], {}), '(qq_train)\n', (39423, 39433), True, 'import numpy as np\n'), ((39456, 39477), 'numpy.array', 'np.array', (['x_rec_train'], {}), '(x_rec_train)\n', (39464, 39477), True, 'import numpy as np\n'), ((39586, 39617), 'numpy.array', 'np.array', (['z_dist_flat_all_train'], {}), '(z_dist_flat_all_train)\n', (39594, 39617), True, 'import numpy as np\n'), ((39706, 39731), 'numpy.array', 'np.array', (['x_hat_all_train'], {}), '(x_hat_all_train)\n', (39714, 39731), True, 'import numpy as np\n'), ((39958, 40047), 'utils.compute_metrics', 'compute_metrics', (['data_train', 'data_val', 'save_dict'], {'T': 'T_finance_data', 'som_grid': 'som_dim'}), '(data_train, data_val, save_dict, T=T_finance_data, som_grid\n =som_dim)\n', (39973, 40047), False, 'from utils import compute_finance_labels, print_trainable_vars, get_gradients, find_nearest, compute_metrics\n'), ((3299, 3311), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3309, 3311), False, 'from datetime import date\n'), ((12241, 12274), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12272, 12274), True, 'import tensorflow as tf\n'), ((12344, 12361), 'sacred.stflow.LogFileWriter', 'LogFileWriter', (['ex'], {}), '(ex)\n', (12357, 12361), False, 'from sacred.stflow import LogFileWriter\n'), ((12390, 12442), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(logdir + '/train')", 'sess.graph'], {}), "(logdir + 
'/train', sess.graph)\n", (12411, 12442), True, 'import tensorflow as tf\n'), ((12469, 12520), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(logdir + '/test')", 'sess.graph'], {}), "(logdir + '/test', sess.graph)\n", (12490, 12520), True, 'import tensorflow as tf\n'), ((13022, 13069), 'numpy.zeros', 'np.zeros', (['(max_n_step * batch_size, latent_dim)'], {}), '((max_n_step * batch_size, latent_dim))\n', (13030, 13069), True, 'import numpy as np\n'), ((13103, 13138), 'numpy.zeros', 'np.zeros', (['(2, batch_size, lstm_dim)'], {}), '((2, batch_size, lstm_dim))\n', (13111, 13138), True, 'import numpy as np\n'), ((13149, 13196), 'numpy.zeros', 'np.zeros', (['(max_n_step * batch_size, latent_dim)'], {}), '((max_n_step * batch_size, latent_dim))\n', (13157, 13196), True, 'import numpy as np\n'), ((20206, 20228), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (20226, 20228), False, 'import timeit\n'), ((20910, 20921), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (20918, 20921), True, 'import numpy as np\n'), ((21362, 21373), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (21370, 21373), True, 'import numpy as np\n'), ((23746, 23768), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (23766, 23768), False, 'import timeit\n'), ((23982, 24004), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24002, 24004), False, 'import timeit\n'), ((25369, 25391), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (25389, 25391), False, 'import timeit\n'), ((26352, 26363), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (26360, 26363), False, 'import sys\n'), ((28437, 28470), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (28468, 28470), True, 'import tensorflow as tf\n'), ((28877, 28924), 'numpy.zeros', 'np.zeros', (['(max_n_step * batch_size, latent_dim)'], {}), '((max_n_step * batch_size, latent_dim))\n', (28885, 28924), True, 'import numpy as 
np\n'), ((28958, 28993), 'numpy.zeros', 'np.zeros', (['(2, batch_size, lstm_dim)'], {}), '((2, batch_size, lstm_dim))\n', (28966, 28993), True, 'import numpy as np\n'), ((29004, 29051), 'numpy.zeros', 'np.zeros', (['(max_n_step * batch_size, latent_dim)'], {}), '((max_n_step * batch_size, latent_dim))\n', (29012, 29051), True, 'import numpy as np\n'), ((30453, 30481), 'numpy.sum', 'np.sum', (['labels_val_all[:, 0]'], {}), '(labels_val_all[:, 0])\n', (30459, 30481), True, 'import numpy as np\n'), ((35071, 35104), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (35102, 35104), True, 'import tensorflow as tf\n'), ((36354, 36389), 'numpy.zeros', 'np.zeros', (['(2, batch_size, lstm_dim)'], {}), '((2, batch_size, lstm_dim))\n', (36362, 36389), True, 'import numpy as np\n'), ((40929, 40957), 'pickle.dump', 'pickle.dump', (['save_dict', 'file'], {}), '(save_dict, file)\n', (40940, 40957), False, 'import pickle\n'), ((3359, 3371), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3369, 3371), False, 'import uuid\n'), ((13956, 13978), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (13976, 13978), False, 'import timeit\n'), ((15546, 15568), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15566, 15568), False, 'import timeit\n'), ((15801, 15823), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15821, 15823), False, 'import timeit\n'), ((19905, 19927), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (19925, 19927), False, 'import timeit\n'), ((20422, 20444), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (20442, 20444), False, 'import timeit\n'), ((23624, 23646), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (23644, 23646), False, 'import timeit\n'), ((24108, 24130), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24128, 24130), False, 'import timeit\n'), ((25242, 25264), 'timeit.default_timer', 
'timeit.default_timer', ([], {}), '()\n', (25262, 25264), False, 'import timeit\n'), ((30538, 30564), 'numpy.reshape', 'np.reshape', (['test_k_all', '(-1)'], {}), '(test_k_all, -1)\n', (30548, 30564), True, 'import numpy as np\n'), ((31281, 31302), 'numpy.abs', 'np.abs', (['(k1[i] - k1[j])'], {}), '(k1[i] - k1[j])\n', (31287, 31302), True, 'import numpy as np\n'), ((31326, 31347), 'numpy.abs', 'np.abs', (['(k2[i] - k2[j])'], {}), '(k2[i] - k2[j])\n', (31332, 31347), True, 'import numpy as np\n'), ((31468, 31486), 'numpy.exp', 'np.exp', (['(-(d1 + d2))'], {}), '(-(d1 + d2))\n', (31474, 31486), True, 'import numpy as np\n'), ((14202, 14224), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (14222, 14224), False, 'import timeit\n'), ((15409, 15431), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15429, 15431), False, 'import timeit\n'), ((15935, 15957), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15955, 15957), False, 'import timeit\n'), ((17081, 17103), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (17101, 17103), False, 'import timeit\n'), ((17277, 17299), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (17297, 17299), False, 'import timeit\n'), ((18424, 18446), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18444, 18446), False, 'import timeit\n'), ((18620, 18642), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18640, 18642), False, 'import timeit\n'), ((19767, 19789), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (19787, 19789), False, 'import timeit\n'), ((22677, 22699), 'math.isnan', 'math.isnan', (['train_loss'], {}), '(train_loss)\n', (22687, 22699), False, 'import math\n'), ((22909, 22929), 'numpy.mean', 'np.mean', (['test_losses'], {}), '(test_losses)\n', (22916, 22929), True, 'import numpy as np\n'), ((25826, 25854), 'numpy.mean', 'np.mean', (['ttime_som_per_epoch'], {}), '(ttime_som_per_epoch)\n', 
(25833, 25854), True, 'import numpy as np\n'), ((25991, 26018), 'numpy.mean', 'np.mean', (['ttime_ae_per_epoch'], {}), '(ttime_ae_per_epoch)\n', (25998, 26018), True, 'import numpy as np\n'), ((26146, 26170), 'numpy.mean', 'np.mean', (['ttime_per_epoch'], {}), '(ttime_per_epoch)\n', (26153, 26170), True, 'import numpy as np\n'), ((26308, 26337), 'numpy.mean', 'np.mean', (['ttime_pred_per_epoch'], {}), '(ttime_pred_per_epoch)\n', (26315, 26337), True, 'import numpy as np\n'), ((37448, 37487), 'numpy.reshape', 'np.reshape', (['next_z_e_', '(-1, latent_dim)'], {}), '(next_z_e_, (-1, latent_dim))\n', (37458, 37487), True, 'import numpy as np\n'), ((39183, 39222), 'numpy.reshape', 'np.reshape', (['next_z_e_', '(-1, latent_dim)'], {}), '(next_z_e_, (-1, latent_dim))\n', (39193, 39222), True, 'import numpy as np\n'), ((22382, 22427), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (22402, 22427), True, 'import tensorflow as tf\n'), ((22791, 22836), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (22811, 22836), True, 'import tensorflow as tf\n'), ((24708, 24753), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (24728, 24753), True, 'import tensorflow as tf\n'), ((24994, 25039), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (25014, 25039), True, 'import tensorflow as tf\n'), ((14852, 14897), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (14872, 14897), True, 'import tensorflow as tf\n'), ((15161, 15206), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (15181, 15206), True, 'import tensorflow as tf\n'), ((16541, 16586), 
'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (16561, 16586), True, 'import tensorflow as tf\n'), ((16834, 16879), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (16854, 16879), True, 'import tensorflow as tf\n'), ((17884, 17929), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (17904, 17929), True, 'import tensorflow as tf\n'), ((18177, 18222), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (18197, 18222), True, 'import tensorflow as tf\n'), ((19227, 19272), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (19247, 19272), True, 'import tensorflow as tf\n'), ((19520, 19565), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'model.global_step'], {}), '(sess, model.global_step)\n', (19540, 19565), True, 'import tensorflow as tf\n'), ((30751, 30777), 'numpy.reshape', 'np.reshape', (['test_k_all', '(-1)'], {}), '(test_k_all, -1)\n', (30761, 30777), True, 'import numpy as np\n'), ((30897, 30923), 'numpy.reshape', 'np.reshape', (['test_k_all', '(-1)'], {}), '(test_k_all, -1)\n', (30907, 30923), True, 'import numpy as np\n'), ((30978, 31004), 'numpy.reshape', 'np.reshape', (['test_k_all', '(-1)'], {}), '(test_k_all, -1)\n', (30988, 31004), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from datasets.dataset import Dataset
class AdultDataset(Dataset):
def __init__(self):
super().__init__(name="Adult Census", description="The Adult Census dataset")
self.cat_mappings = {
"education": {
"School": 0,
"HS-grad": 1,
"Some-college": 2,
"Prof-school": 3,
"Assoc": 4,
"Bachelors": 5,
"Masters": 6,
"Doctorate": 7,
},
"marital_status": {
"Divorced": 0,
"Married": 1,
"Separated": 2,
"Single": 3,
"Widowed": 4,
},
"workclass": {
"Other/Unknown": 0,
"Government": 1,
"Private": 2,
"Self-Employed": 3,
},
"occupation": {
"Other/Unknown": 0,
"Blue-Collar": 1,
"Professional": 2,
"Sales": 3,
"Service": 4,
"White-Collar": 5,
},
"race": {
"White": 0,
"Other": 1,
},
"gender": {
"Male": 0,
"Female": 1,
},
"native_country": {
"?": 0,
"Cambodia": 1,
"Canada": 2,
"China": 3,
"Columbia": 4,
"Cuba": 5,
"Dominican-Republic": 6,
"Ecuador": 7,
"El-Salvador": 8,
"England": 9,
"France": 10,
"Germany": 11,
"Greece": 12,
"Guatemala": 13,
"Haiti": 14,
"Holand-Netherlands": 15,
"Honduras": 16,
"Hong": 17,
"Hungary": 18,
"India": 19,
"Iran": 20,
"Ireland": 21,
"Italy": 22,
"Jamaica": 23,
"Japan": 24,
"Laos": 25,
"Mexico": 26,
"Nicaragua": 27,
"Outlying-US(Guam-USVI-etc)": 28,
"Peru": 29,
"Philippines": 30,
"Poland": 31,
"Portugal": 32,
"Puerto-Rico": 33,
"Scotland": 34,
"South": 35,
"Taiwan": 36,
"Thailand": 37,
"Trinadad&Tobago": 38,
"United-States": 39,
"Vietnam": 40,
"Yugoslavia": 41,
},
}
self.inv_cat_mappings = {
key: {v: k for k, v in mapping.items()}
for key, mapping in self.cat_mappings.items()
}
self.__init_encoder()
def load(self) -> pd.DataFrame:
"""Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data analysis based on https://rpubs.com/H_Zhu/235617
:param: save_intermediate: save the transformed dataset. Do not save by default.
"""
raw_data = np.genfromtxt(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
delimiter=", ",
dtype=str,
)
# column names from "https://archive.ics.uci.edu/ml/datasets/Adult"
column_names = [
"age",
"workclass",
"fnlwgt",
"education",
"educational-num",
"marital-status",
"occupation",
"relationship",
"race",
"gender",
"capital-gain",
"capital-loss",
"hours-per-week",
"native-country",
"income",
]
adult_data = pd.DataFrame(raw_data, columns=column_names)
# For more details on how the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617
adult_data = adult_data.astype(
{"age": np.int64, "educational-num": np.int64, "hours-per-week": np.int64}
)
adult_data = adult_data.replace(
{
"workclass": {
"Without-pay": "Other/Unknown",
"Never-worked": "Other/Unknown",
}
}
)
adult_data = adult_data.replace(
{
"workclass": {
"Federal-gov": "Government",
"State-gov": "Government",
"Local-gov": "Government",
}
}
)
adult_data = adult_data.replace(
{
"workclass": {
"Self-emp-not-inc": "Self-Employed",
"Self-emp-inc": "Self-Employed",
}
}
)
# adult_data = adult_data.replace(
# {
# "workclass": {
# "Never-worked": "Self-Employed",
# "Without-pay": "Self-Employed",
# }
# }
# )
adult_data = adult_data.replace({"workclass": {"?": "Other/Unknown"}})
adult_data = adult_data.replace(
{
"occupation": {
"Adm-clerical": "White-Collar",
"Craft-repair": "Blue-Collar",
"Exec-managerial": "White-Collar",
"Farming-fishing": "Blue-Collar",
"Handlers-cleaners": "Blue-Collar",
"Machine-op-inspct": "Blue-Collar",
"Other-service": "Service",
"Priv-house-serv": "Service",
"Prof-specialty": "Professional",
"Protective-serv": "Service",
"Tech-support": "Service",
"Transport-moving": "Blue-Collar",
"Unknown": "Other/Unknown",
"Armed-Forces": "Other/Unknown",
"?": "Other/Unknown",
}
}
)
adult_data = adult_data.replace(
{
"marital-status": {
"Married-civ-spouse": "Married",
"Married-AF-spouse": "Married",
"Married-spouse-absent": "Married",
"Never-married": "Single",
}
}
)
adult_data = adult_data.replace(
{
"race": {
"Black": "Other",
"Asian-Pac-Islander": "Other",
"Amer-Indian-Eskimo": "Other",
}
}
)
# adult_data = adult_data[['age','workclass','education','marital-status','occupation','race','gender',
# 'hours-per-week','income']]
adult_data = adult_data[
[
"age",
"capital-gain",
"hours-per-week",
"workclass",
"education",
"marital-status",
"occupation",
"race",
"gender",
"capital-loss",
"native-country",
"income",
]
]
# adult_data = adult_data[
# [
# "age",
# "hours-per-week",
# "workclass",
# "education",
# "marital-status",
# "occupation",
# "race",
# "gender",
# "native-country",
# "income",
# ]
# ]
adult_data = adult_data.replace({"income": {"<=50K": 0, ">50K": 1}})
adult_data = adult_data.replace(
{
"education": {
"Assoc-voc": "Assoc",
"Assoc-acdm": "Assoc",
"11th": "School",
"10th": "School",
"7th-8th": "School",
"9th": "School",
"12th": "School",
"5th-6th": "School",
"1st-4th": "School",
"Preschool": "School",
}
}
)
adult_data = adult_data.rename(
columns={
"marital-status": "marital_status",
"hours-per-week": "hours_per_week",
"capital-gain": "capital_gain",
"native-country": "native_country",
"capital-loss": "capital_loss",
}
)
return adult_data.drop("income", axis=1), adult_data["income"]
def extract_info(self):
columns = self.dataset.columns
target = "income"
real_feat = np.array(
[
0, # age
1, # capital-gain
2, # hours-per-week
9, # capital-loss
]
)
cat_feat = np.array(
[
3, # workclass
4, # education
5, # marital
6, # occupation
7, # race
8, # gender
10, # native-country
]
)
_both = np.concatenate([real_feat, cat_feat])
_cond = (np.sort(_both) == np.arange(0, max(_both) + 1)).all()
assert _cond
# real_feat = np.array(
# [
# 0, # age
# 1, # hours-per-week
# ]
# )
# cat_feat = np.array(
# [
# 2, # workclass
# 3, # education
# 4, # marital
# 5, # occupation
# 6, # race
# 7, # gender
# 8, # native country
# ]
# )
return columns, target, real_feat, cat_feat
def __init_encoder(self):
self.encoder = OneHotEncoder(sparse=False)
X = self.get_optimizer_data().copy()
self.encoder.fit(X[:, self.cat_features])
return self.encoder
def encode_features(self, X: np.array) -> np.array:
onehot = self.encoder.transform(X[:, self.cat_features])
n_real = len(self.real_features)
n_onehot = onehot.shape[1]
_X = np.zeros((X.shape[0], n_real + n_onehot))
_X[:, :n_real] = X[:, self.real_features]
_X[:, n_real:] = onehot # .astype(int)
return _X.astype(int)
def decode_features(self, X: np.array) -> np.array:
_X = np.zeros((X.shape[0], self.dataset.shape[1]))
n_real = len(self.real_features)
orig_cat = self.encoder.inverse_transform(X[:, n_real:])
_X[:, self.real_features] = X[:, :n_real].copy()
_X[:, self.cat_features] = orig_cat
return _X.astype(int)
def preprocess(self, X: pd.DataFrame) -> pd.DataFrame:
df = self.dataset.copy()
return df.replace(self.cat_mappings)
def get_optimizer_data(self) -> np.array:
X = self.get_numpy_representation()
X[:, self.real_features] = X[:, self.real_features].astype(float)
X[:, self.cat_features] = X[:, self.cat_features].astype(int)
return X.astype(int)
def get_classifier_data(self):
X = self.get_optimizer_data().copy()
return self.encode_features(X), self.labels
def get_processed_orig_data(self, X: np.array) -> pd.DataFrame:
df = pd.DataFrame(X, columns=self.columns)
df = df.replace(self.inv_cat_mappings)
return df | [
"sklearn.preprocessing.OneHotEncoder",
"numpy.sort",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.genfromtxt"
] | [((3298, 3426), 'numpy.genfromtxt', 'np.genfromtxt', (['"""https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"""'], {'delimiter': '""", """', 'dtype': 'str'}), "(\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'\n , delimiter=', ', dtype=str)\n", (3311, 3426), True, 'import numpy as np\n'), ((3985, 4029), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data'], {'columns': 'column_names'}), '(raw_data, columns=column_names)\n', (3997, 4029), True, 'import pandas as pd\n'), ((8977, 8999), 'numpy.array', 'np.array', (['[0, 1, 2, 9]'], {}), '([0, 1, 2, 9])\n', (8985, 8999), True, 'import numpy as np\n'), ((9177, 9209), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 7, 8, 10]'], {}), '([3, 4, 5, 6, 7, 8, 10])\n', (9185, 9209), True, 'import numpy as np\n'), ((9462, 9499), 'numpy.concatenate', 'np.concatenate', (['[real_feat, cat_feat]'], {}), '([real_feat, cat_feat])\n', (9476, 9499), True, 'import numpy as np\n'), ((10151, 10178), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (10164, 10178), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((10513, 10554), 'numpy.zeros', 'np.zeros', (['(X.shape[0], n_real + n_onehot)'], {}), '((X.shape[0], n_real + n_onehot))\n', (10521, 10554), True, 'import numpy as np\n'), ((10753, 10798), 'numpy.zeros', 'np.zeros', (['(X.shape[0], self.dataset.shape[1])'], {}), '((X.shape[0], self.dataset.shape[1]))\n', (10761, 10798), True, 'import numpy as np\n'), ((11653, 11690), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'self.columns'}), '(X, columns=self.columns)\n', (11665, 11690), True, 'import pandas as pd\n'), ((9517, 9531), 'numpy.sort', 'np.sort', (['_both'], {}), '(_both)\n', (9524, 9531), True, 'import numpy as np\n')] |
import rasterio as rio
import rasterio.mask as riom
import rasterio.plot as riop
from rasterio.transform import Affine
import matplotlib.pyplot as plt
import fiona as fio
import numpy as np
import geopandas as gpd
from shapely.geometry import Polygon
import os
from IPython import embed
class DatasetManipulator:
def __init__(self, dataset_path):
self.dataset_path = dataset_path
self.dataset_path_padded = None
self.dataset_name = "_".join(dataset_path.split("/")[-1].split("_")[:3])
self.dataset = rio.open(dataset_path)
self.transform = self.dataset.transform
self.crs = self.dataset.crs
self.Xres = self.transform[0]
self.Yres = -self.transform[4]
self.gridspacing_x = None
self.gridspacing_y = None
self.grid = None
self.grid_bounds = None
self.mask = None
self.mask_path = None
def create_grid(self, outer_shapefile, gridspacing_x=256, gridspacing_y=256):
"""Creates a grid and sets it to the grid dataset attribute.
Given the shapefile confining the dataset geographic area , creates theembed()
geometry of a grid covering it. If the grid does not fit exactly in the
shapefile, the grid will have an extra cell in order to cover all of the
area. The x- and y cell spacings of the grid are determined by the
parameters <gridspacing_x> and <gridspacing_y> which have to be given
in pixels.
Parameters
----------
outer_shapefile: geopandas.geodataframe.GeoDataFrame
shapefile confining the area
gridspacing_x: int
cell width in pixels
gridspacing_y: int
cell height in pixels
"""
# get the xmin, xmax, ymin, ymax of the shapefile bounding-box
xmin, ymin, xmax, ymax = outer_shapefile.geometry.total_bounds
# set the x and y-spacing attributes
self.gridspacing_x = gridspacing_x
self.gridspacing_y = gridspacing_y
# convert the cell-dimensions from px to units
gridspacing_x = gridspacing_x * self.Xres
gridspacing_y = gridspacing_y * self.Yres
# get x and y number of cells
nx = (xmax - xmin) // gridspacing_x # //: integer division
if (xmax - xmin) % gridspacing_x != 0:
nx = nx + 1
ny = (ymax - ymin) // gridspacing_y
if (ymax - ymin) % gridspacing_y != 0:
ny = ny + 1
# get the new xmax, ymin
xmax = xmin + nx * gridspacing_x
ymin = ymax - ny * gridspacing_y
# set the grid bounds
self.grid_bounds = (xmin, ymin, xmax, ymax)
# get the x and y coordinates of the grid
x_coord = list(np.arange(xmin, xmax, gridspacing_x))
y_coord = list(np.arange(ymin, ymax, gridspacing_y))
y_coord.reverse()
# generate the polygon object determined by the 4 corners of each cell
polygons = []
for y in y_coord[:-1]:
for x in x_coord[:-1]:
polygons.append(Polygon([(x, y),
(x+gridspacing_x, y),
(x+gridspacing_x, y-gridspacing_y),
(x, y-gridspacing_y)]))
self.grid = gpd.GeoDataFrame({'geometry': polygons,
'grid_idx': range(0, len(polygons))})
def pad_geotiff_from_grid(self):
"""Uses the previously created grid to pad the source raster dataset.
The previously created grid is used to slice the raster dataset into N
smaller rasters all with the same shape. This functions pads the source
raster with 'zeros' in order to obtain N images all with the same shape.
"""
if self.grid is None:
raise NotImplementedError('Grid not created yet')
# get the total height and width in pixels of the dataset after padding
tot_px_x = int(round((self.grid_bounds[2] - self.grid_bounds[0])
/ self.Xres))
tot_px_y = int(round((self.grid_bounds[3] - self.grid_bounds[1])
/ self.Yres))
# load the data for every band and shape them as (height, width, band)
array = self.dataset.read()
# for rgb do not consider alpha channel
if 'rgb' in self.dataset_name:
array = array[:-1,:,:]
pad_hor = tot_px_x - array.shape[2]
pad_ver = tot_px_y - array.shape[1]
array = np.pad(array, ((0, 0), (0, pad_ver), (0, pad_hor)),
mode='constant', constant_values=0)
self.dataset_path_padded = (os.path.join('/'
.join(self.dataset_path.split('/')[:-1]),
self.dataset_path.split('/')[-1].split('.')[0])
+ '_padded.tif')
self.dataset = rio.open(
self.dataset_path_padded,
'w',
driver='Gtiff',
height=array.shape[1],
width=array.shape[2],
count=array.shape[0],
dtype=array.dtype,
crs=self.crs,
transform=self.transform
)
self.dataset.write(array, tuple(np.arange(1, array.shape[0] + 1)))
self.dataset.close()
self.dataset = rio.open(self.dataset_path_padded)
def get_pair_from_idx(self, grid_idx):
"""Returns the pair (image, mask) at the specified grid location.
Crops the GeoTIFF using the generated grid and returns the image and its
corresponding mask defined by the specified grid index.
Parameters
----------
grid_idx: int
integer corresponding to the index of the desired raster region
Returns
-------
img: numpy.ndarray
numpy-array representing the requested image with shape (H, W, C)
maks: numpy.ndarray
numpy-array representing the plant mask at the specified location
with shape (H, W)
"""
if self.grid is None:
raise NotImplementedError("The grid hasn't been created yet")
if self.dataset_path_padded is None:
raise NotImplementedError("The raster hasn't been padded. Perform"
" padding in order to insure coherent images dimensions.")
if self.mask is None:
raise NotImplementedError("The raster hasn't been masked. Impossible"
" to retunr the pair (img, mask)")
# get the coordinates of top-left and bottom-right corners into a tuple
boundary = self.grid["geometry"][grid_idx]
boundary = boundary.exterior.xy
top_left = (boundary[0][0], boundary[1][0])
bottom_right = (boundary[0][1], boundary[1][2])
# get the indexes of the pixels at top-left / bottom-right coordinates
row_start, col_start = rio.transform.rowcol(self.transform, top_left[0],
top_left[1])
row_end, col_end = rio.transform.rowcol(self.transform, bottom_right[0],
bottom_right[1])
# convert the indexes to integers
row_start = int(row_start); row_end = int(row_end)
col_start = int(col_start); col_end = int(col_end)
# get the values of the pixel within the boundary
img = self.dataset.read()[:, row_start:row_end, col_start:col_end]
img = np.moveaxis(img, 0, 2)
mask = self.mask.read()[:,row_start:row_end, col_start:col_end]
mask = np.moveaxis(mask, 0, 2)
mask = np.squeeze(mask, axis=2)
# convert the image to channels last and return it
return img, mask
def create_mask_from_shapes(self, shapefile):
# TODO: write documentation for the method
# if self.dataset_path_padded is None:
# raise NotImplementedError('Dataset has to be padded with the grid '
# 'before performing the masking operation')
with fio.open(shapefile, "r") as shp:
shapes = []
for feature in shp:
if feature["geometry"] != None:
shapes.append(feature["geometry"])
mask, _ = riom.mask(self.dataset, shapes)
mask = mask[0,:,:]
mask[mask!=0] = 1
self.mask_path = (os.path.join('/'
.join(self.dataset_path.split('/')[:-1]),
self.dataset_path.split('/')[-1].split('.')[0])
+ '_mask.tif')
self.mask = rio.open(
self.mask_path,
'w',
driver='Gtiff',
height=mask.shape[0],
width=mask.shape[1],
count=1,
dtype=mask.dtype,
crs=self.crs,
transform=self.transform
)
self.mask.write(mask, 1)
self.mask.close()
self.mask = rio.open(self.mask_path)
#-------------------------------------------------------------------------------
def visualize_dataset(self, with_grid=True):
if with_grid:
if self.grid is None:
raise ValueError("Grid value is '{}'. Grid not created yet."
.format(self.grid))
cells = self.grid["geometry"]
fig, axs = plt.subplots()
if self.dataset is not None:
riop.show(self.dataset, ax=axs)
if self.mask is not None:
riop.show(self.mask, ax=axs, alpha=0.5)
for i, cell in enumerate(cells):
x, y = cell.exterior.xy
plt.plot(x, y)
xm = (x[1] - x[0]) / 2
ym = (y[2] - y[1]) / 2
text = str(self.grid['grid_idx'][i])
plt.text(x[0] + xm, y[0] + ym, text, color='r')
plt.show()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"rasterio.open",
"matplotlib.pyplot.plot",
"numpy.squeeze",
"rasterio.plot.show",
"shapely.geometry.Polygon",
"fiona.open",
"numpy.moveaxis",
"rasterio.mask.mask",
"numpy.pad",
"matplotlib.pyplot.subplots",
"numpy.arange",
"rasterio.trans... | [((538, 560), 'rasterio.open', 'rio.open', (['dataset_path'], {}), '(dataset_path)\n', (546, 560), True, 'import rasterio as rio\n'), ((4520, 4611), 'numpy.pad', 'np.pad', (['array', '((0, 0), (0, pad_ver), (0, pad_hor))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(array, ((0, 0), (0, pad_ver), (0, pad_hor)), mode='constant',\n constant_values=0)\n", (4526, 4611), True, 'import numpy as np\n'), ((4841, 5032), 'rasterio.open', 'rio.open', (['self.dataset_path_padded', '"""w"""'], {'driver': '"""Gtiff"""', 'height': 'array.shape[1]', 'width': 'array.shape[2]', 'count': 'array.shape[0]', 'dtype': 'array.dtype', 'crs': 'self.crs', 'transform': 'self.transform'}), "(self.dataset_path_padded, 'w', driver='Gtiff', height=array.shape[\n 1], width=array.shape[2], count=array.shape[0], dtype=array.dtype, crs=\n self.crs, transform=self.transform)\n", (4849, 5032), True, 'import rasterio as rio\n'), ((5269, 5303), 'rasterio.open', 'rio.open', (['self.dataset_path_padded'], {}), '(self.dataset_path_padded)\n', (5277, 5303), True, 'import rasterio as rio\n'), ((6852, 6914), 'rasterio.transform.rowcol', 'rio.transform.rowcol', (['self.transform', 'top_left[0]', 'top_left[1]'], {}), '(self.transform, top_left[0], top_left[1])\n', (6872, 6914), True, 'import rasterio as rio\n'), ((6954, 7024), 'rasterio.transform.rowcol', 'rio.transform.rowcol', (['self.transform', 'bottom_right[0]', 'bottom_right[1]'], {}), '(self.transform, bottom_right[0], bottom_right[1])\n', (6974, 7024), True, 'import rasterio as rio\n'), ((7346, 7368), 'numpy.moveaxis', 'np.moveaxis', (['img', '(0)', '(2)'], {}), '(img, 0, 2)\n', (7357, 7368), True, 'import numpy as np\n'), ((7457, 7480), 'numpy.moveaxis', 'np.moveaxis', (['mask', '(0)', '(2)'], {}), '(mask, 0, 2)\n', (7468, 7480), True, 'import numpy as np\n'), ((7496, 7520), 'numpy.squeeze', 'np.squeeze', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (7506, 7520), True, 'import numpy as np\n'), ((8124, 8155), 
'rasterio.mask.mask', 'riom.mask', (['self.dataset', 'shapes'], {}), '(self.dataset, shapes)\n', (8133, 8155), True, 'import rasterio.mask as riom\n'), ((8415, 8580), 'rasterio.open', 'rio.open', (['self.mask_path', '"""w"""'], {'driver': '"""Gtiff"""', 'height': 'mask.shape[0]', 'width': 'mask.shape[1]', 'count': '(1)', 'dtype': 'mask.dtype', 'crs': 'self.crs', 'transform': 'self.transform'}), "(self.mask_path, 'w', driver='Gtiff', height=mask.shape[0], width=\n mask.shape[1], count=1, dtype=mask.dtype, crs=self.crs, transform=self.\n transform)\n", (8423, 8580), True, 'import rasterio as rio\n'), ((8769, 8793), 'rasterio.open', 'rio.open', (['self.mask_path'], {}), '(self.mask_path)\n', (8777, 8793), True, 'import rasterio as rio\n'), ((9157, 9171), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9169, 9171), True, 'import matplotlib.pyplot as plt\n'), ((9634, 9644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9642, 9644), True, 'import matplotlib.pyplot as plt\n'), ((2749, 2785), 'numpy.arange', 'np.arange', (['xmin', 'xmax', 'gridspacing_x'], {}), '(xmin, xmax, gridspacing_x)\n', (2758, 2785), True, 'import numpy as np\n'), ((2810, 2846), 'numpy.arange', 'np.arange', (['ymin', 'ymax', 'gridspacing_y'], {}), '(ymin, ymax, gridspacing_y)\n', (2819, 2846), True, 'import numpy as np\n'), ((7913, 7937), 'fiona.open', 'fio.open', (['shapefile', '"""r"""'], {}), "(shapefile, 'r')\n", (7921, 7937), True, 'import fiona as fio\n'), ((9222, 9253), 'rasterio.plot.show', 'riop.show', (['self.dataset'], {'ax': 'axs'}), '(self.dataset, ax=axs)\n', (9231, 9253), True, 'import rasterio.plot as riop\n'), ((9301, 9340), 'rasterio.plot.show', 'riop.show', (['self.mask'], {'ax': 'axs', 'alpha': '(0.5)'}), '(self.mask, ax=axs, alpha=0.5)\n', (9310, 9340), True, 'import rasterio.plot as riop\n'), ((9431, 9445), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (9439, 9445), True, 'import matplotlib.pyplot as plt\n'), ((9577, 9624), 
'matplotlib.pyplot.text', 'plt.text', (['(x[0] + xm)', '(y[0] + ym)', 'text'], {'color': '"""r"""'}), "(x[0] + xm, y[0] + ym, text, color='r')\n", (9585, 9624), True, 'import matplotlib.pyplot as plt\n'), ((5182, 5214), 'numpy.arange', 'np.arange', (['(1)', '(array.shape[0] + 1)'], {}), '(1, array.shape[0] + 1)\n', (5191, 5214), True, 'import numpy as np\n'), ((3074, 3183), 'shapely.geometry.Polygon', 'Polygon', (['[(x, y), (x + gridspacing_x, y), (x + gridspacing_x, y - gridspacing_y), (x,\n y - gridspacing_y)]'], {}), '([(x, y), (x + gridspacing_x, y), (x + gridspacing_x, y -\n gridspacing_y), (x, y - gridspacing_y)])\n', (3081, 3183), False, 'from shapely.geometry import Polygon\n')] |
import pickle
from sklearn.decomposition import PCA
import numpy as np
class PCA_reduction:
def __init__(self, pca_path):
self.pca_reload = pickle.load(open(pca_path,'rb'))
def reduce_size(self, vector):
return self.pca_reload.transform([vector])[0]
@staticmethod
def create_new_pca_model(vectors, path_to_save, percentage_variance):
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
data_rescaled = scaler.fit_transform(vectors)
pca = PCA(n_components=percentage_variance)
result = pca.fit(data_rescaled)
pickle.dump(pca, open(path_to_save,"wb"))
@staticmethod
def plot_variance_nbComponents(vectors, percentage_variance, figsize=(15, 5)):
import matplotlib.pyplot as plt
pca = PCA().fit(vectors)
fig = plt.figure(figsize=figsize)
plt.plot(np.cumsum(pca.explained_variance_ratio_), marker='o')
plt.axhline(y=percentage_variance, color="red")
plt.xlabel('No. of principal components')
plt.ylabel('cumulative % variance retained')
plt.grid(True)
plt.title('Cumulative explained variance across the number of components ') | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"numpy.cumsum",
"matplotlib.pyplot.title",
"sklearn.preprocessing.MinMaxScaler"
] | [((406, 420), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (418, 420), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((478, 515), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'percentage_variance'}), '(n_components=percentage_variance)\n', (481, 515), False, 'from sklearn.decomposition import PCA\n'), ((762, 789), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (772, 789), True, 'import matplotlib.pyplot as plt\n'), ((857, 904), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'percentage_variance', 'color': '"""red"""'}), "(y=percentage_variance, color='red')\n", (868, 904), True, 'import matplotlib.pyplot as plt\n'), ((907, 948), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No. of principal components"""'], {}), "('No. of principal components')\n", (917, 948), True, 'import matplotlib.pyplot as plt\n'), ((951, 995), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cumulative % variance retained"""'], {}), "('cumulative % variance retained')\n", (961, 995), True, 'import matplotlib.pyplot as plt\n'), ((998, 1012), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1006, 1012), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1090), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative explained variance across the number of components """'], {}), "('Cumulative explained variance across the number of components ')\n", (1024, 1090), True, 'import matplotlib.pyplot as plt\n'), ((801, 841), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (810, 841), True, 'import numpy as np\n'), ((735, 740), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (738, 740), False, 'from sklearn.decomposition import PCA\n')] |
import matplotlib.pyplot as pl
import anndata as ad
import pandas as pd
import numpy as np
import scanpy as sc
import scvelo as scv
from scipy.sparse import issparse
import matplotlib.gridspec as gridspec
from scipy.stats import gaussian_kde, spearmanr, pearsonr
from goatools.obo_parser import GODag
from goatools.anno.genetogo_reader import Gene2GoReader
from goatools.goea.go_enrichment_ns import GOEnrichmentStudyNS
import seaborn as sns
import re
import os
import gzip
import mygene
from csv import Sniffer
signatures_path_= os.path.join(os.path.dirname(os.path.realpath(__file__)), 'metadata/')
def get_genefamily_percentage(adata, key='MT-', start=True, name='mito'):
keys = key if isinstance(key, list) else [key, '____ignore____']
if start:
family_genes = np.logical_or(*[adata.var_names.str.startswith(k) for k in keys])
else:
family_genes = np.logical_or(*[adata.var_names.str.endswith(k) for k in keys])
if issparse(adata.X):
adata.obs['percent_'+name] = np.sum(
adata[:, family_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
else:
adata.obs['percent_'+name] = np.sum(
adata[:, family_genes].X, axis=1) / np.sum(adata.X, axis=1)
def get_mito_percentage(adata, species='human'):
key = 'MT-' if species == 'human' else 'mt-'
get_genefamily_percentage(adata, key=key, start=True, name='mito')
def get_ribo_percentage(adata, species='human'):
key = specify_genes(['RPS', 'RPL'], species=species)
get_genefamily_percentage(adata, key=key, start=True, name='ribo')
def get_hemo_percentage(adata, species='human'):
key = specify_genes(['HBA', 'HBB'], species=species)
get_genefamily_percentage(adata, key=key, start=True, name='hemo')
def score_cell_cycle(adata, signatures_path=signatures_path_, species='human'):
    """Score S-phase and G2M-phase signatures on each AnnData.

    Uses the Regev-lab cell-cycle gene list (Kowalczyk et al., Genome
    Research 2015, doi:10.1101/gr.192237.115) and scanpy's
    score_genes_cell_cycle, which adds obs columns S_score, G2M_score and
    phase.

    Parameters
    ----------
    adata: AnnData or list of AnnData, modified in place.
    signatures_path: directory containing regev_lab_cell_cycle_genes.txt.
    species: kept for interface symmetry with the other scorers; the gene
        list file itself is used as-is.

    Returns
    -------
    The single AnnData, or the list if a list was passed.
    """
    adatas = adata if isinstance(adata, list) else [adata]
    for i in range(len(adatas)):
        adata = adatas[i]
        # read the signature; `with` closes the handle (the previous version
        # leaked one open file per call)
        with open(signatures_path+'/regev_lab_cell_cycle_genes.txt') as fh:
            cell_cycle_genes = [x.strip() for x in fh]
        cell_cycle_genes = [x for x in cell_cycle_genes if x in adata.var_names]
        # Split into 2 lists: first 43 entries are S-phase, rest are G2M.
        # NOTE(review): the split index assumes the full list; filtering to
        # var_names first can shift the boundary — kept as in the original.
        s_genes = cell_cycle_genes[:43]
        g2m_genes = cell_cycle_genes[43:]
        # score
        sc.tl.score_genes_cell_cycle(adata, s_genes=s_genes, g2m_genes=g2m_genes)
        adatas[i] = adata
    return adatas[0] if len(adatas)==1 else adatas
def score_smillie_str_epi_imm(adata, signatures_path=signatures_path_, species='human'):
    """Score the Uhlitz/Smillie epithelial, stromal and immune marker
    signatures, adding obs columns epi_score, str_score and imm_score."""
    tab=pd.read_excel(signatures_path+'/colonoid_cancer_uhlitz_markers_revised.xlsx', skiprows=1, index_col=0)
    # one marker column per compartment -> one score column per compartment
    for column, score_name in [('Epithelial', 'epi_score'),
                               ('Stromal', 'str_score'),
                               ('Immune', 'imm_score')]:
        markers = np.array(tab.index[tab[column]==1].values, dtype='str')
        score_genes(adata, markers, score_name=score_name, species=species)
def score_tumor_immune_cells(adata, signatures_path=signatures_path_, species='human'):
    """Score the ImSigGenes immune/tumor signatures: one obs score column
    per cell-type signature found in the table."""
    # ImSigGenes immune tumor signatures
    tab=pd.read_excel(signatures_path+'/ImSigGenes_immunetumor.xlsx', skiprows=2, index_col=1)
    for cell_type in pd.unique(tab.Signature):
        marker_genes = tab.index[tab.Signature==cell_type].values
        score_genes(adata, marker_genes, score_name=cell_type, species=species)
def calc_qc_scvelo(adata):
    """Compute QC metrics on the scVelo 'unspliced'/'spliced' layers.

    Adds to each AnnData, in place:
      obs: ucounts/scounts (total unspliced/spliced counts per cell),
           ufeatures/sfeatures (number of genes with nonzero counts per cell)
      var: ucounts/scounts (total counts per gene),
           ucells/scells (number of cells with nonzero counts per gene)

    Accepts a single AnnData or a list of them.
    NOTE(review): relies on a helper `rsum` not defined in this chunk —
    presumably a sparse/dense-agnostic sum defined elsewhere in the module;
    confirm.
    """
    adatas = adata if isinstance(adata, list) else [adata]
    for adata in adatas:
        # obs qc
        adata.obs['ucounts'] = rsum(adata.layers['unspliced'], axis=1)
        adata.obs['scounts'] = rsum(adata.layers['spliced'], axis=1)
        adata.obs['ufeatures'] = rsum(adata.layers['unspliced']>0, axis=1)
        adata.obs['sfeatures'] = rsum(adata.layers['spliced']>0, axis=1)
        # var qc
        adata.var['ucounts'] = rsum(adata.layers['unspliced'], axis=0)
        adata.var['scounts'] = rsum(adata.layers['spliced'], axis=0)
        adata.var['ucells'] = rsum(adata.layers['unspliced']>0, axis=0)
        adata.var['scells'] = rsum(adata.layers['spliced']>0, axis=0)
def calc_qc(adata, extended_genesets=False, species='detect'):
    """Compute standard QC metrics and gene-family count fractions.

    Adds per-cell (obs) ncounts/ngenes and per-gene (var) ncounts/ncells,
    then the percent_mito / percent_ribo / percent_hemo obs columns. With
    extended_genesets=True (human only), additional gene-family fractions
    (IFIT, CAM, HLA, S100, FOX, HSP, ABC) are computed.

    Parameters
    ----------
    adata: AnnData or list of AnnData, modified in place.
    extended_genesets: bool, add the extra human gene-family fractions.
    species: 'human', 'mouse' or 'detect' (auto-detection via
        detect_organism, which is defined elsewhere in this module).
    """
    adatas = adata if isinstance(adata, list) else [adata]
    for adata in adatas:
        # qc counts (rsum: sum helper defined elsewhere in this module)
        adata.obs['ncounts'] = rsum(adata.X, axis=1)
        adata.obs['ngenes'] = rsum(adata.X>0, axis=1)
        adata.var['ncounts'] = rsum(adata.X, axis=0)
        adata.var['ncells'] = rsum(adata.X>0, axis=0)
        species = detect_organism(adata) if species == 'detect' else species
        # gene modules
        # mitochondrial genes
        get_mito_percentage(adata, species)
        # ribosomal genes
        get_ribo_percentage(adata, species)
        # hemoglobin genes
        get_hemo_percentage(adata, species)
        if extended_genesets:
            # fixed: was `species is not 'human'` — identity comparison on
            # strings works only by accident of interning; compare equality
            if species != 'human':
                raise ValueError(species,' species is not known. Pls do not use extended_genesets=True.')
            # interferon genes, immune response
            get_genefamily_percentage(adata, key='IFIT', start=True, name='ifit')
            # Cell adhesion molecules genes
            get_genefamily_percentage(adata, key='CAM', start=False, name='cam')
            # HLA genes encode MHC I and MHC II
            get_genefamily_percentage(adata, key='HLA-', start=True, name='hla') # genome specific sometimes!!!
            # S100 genes, saw them often in organoids
            get_genefamily_percentage(adata, key='S100', start=True, name='s100')
            # FOX genes, TFs
            get_genefamily_percentage(adata, key='FOX', start=True, name='fox')
            # Heat shock protein genes
            get_genefamily_percentage(adata, key='HSP', start=True, name='heatshock')
            # ABC transporter genes, can lead to multi-drug resistance in cancer
            get_genefamily_percentage(adata, key='ABC', start=True, name='abc')
def specify_genes(genes, species='human'):
    """Normalize gene-symbol casing for the given species.

    Human symbols are fully upper-case (TP53); mouse symbols are
    capitalized (Tp53). Accepts a single symbol, a list, or a numpy array
    and always returns a list.

    Raises
    ------
    ValueError if species is neither 'human' nor 'mouse'.
    """
    genes = genes if isinstance(genes, list) else list(genes) if isinstance(genes, np.ndarray) else [genes]
    # fixed: was `species is 'human'` / `is 'mouse'` — identity comparison on
    # strings is unreliable (only works via CPython interning); use equality
    if species == 'human':
        return [x.upper() for x in genes]
    elif species == 'mouse':
        return [x.capitalize() for x in genes]
    else:
        raise ValueError('Species '+species+' not known.')
def score_genes(adata, gene_list, score_name, species='human', **kwargs):
    """Score a gene signature after normalizing symbol casing per species.

    Parameters
    ----------
    adata: AnnData, modified in place (obs[score_name] is added).
    gene_list: signature gene symbols (any casing; fixed via specify_genes).
    score_name: name of the obs column for the score.
    species: 'human' or 'mouse' (controls symbol casing).
    **kwargs: forwarded to sc.tl.score_genes (previously they were accepted
        but silently dropped).
    """
    gene_list_ = specify_genes(gene_list, species=species)
    sc.tl.score_genes(adata, gene_list_, score_name=score_name, **kwargs)
def score_hallmarks(adata, subset='organoid', signatures_path=signatures_path_, species='human'):
    """Score MSigDB hallmark signatures as obs columns.

    Parameters
    ----------
    adata: AnnData, modified in place (one obs column per hallmark).
    subset: a list of hallmark names, or 'organoid' (3 scores),
        'CRC' (colorectal-cancer related set) or 'all' (all 50 hallmarks).
    signatures_path: directory containing h.all.v6.2.symbols.gmt.
    species: passed through to score_genes for symbol casing.
    """
    sc.settings.verbosity = 0
    tab = pd.read_csv(signatures_path + 'h.all.v6.2.symbols.gmt', sep='\t', index_col=0, header=None).drop(1, axis=1).T
    hallsigs={hallmark : tab[hallmark][~pd.isna(tab[hallmark])].values for hallmark in tab.columns}
    if isinstance(subset, list):
        selection = subset
    elif subset == 'organoid':
        selection = ['HALLMARK_DNA_REPAIR', 'HALLMARK_WNT_BETA_CATENIN_SIGNALING', 'HALLMARK_APOPTOSIS']
    elif subset == 'CRC':
        # fixed: a missing comma after 'HALLMARK_PROTEIN_SECRETION' fused it
        # with 'HALLMARK_G2M_CHECKPOINT' via implicit string concatenation,
        # producing one invalid hallmark name and silently dropping two real ones
        selection = ['HALLMARK_DNA_REPAIR', 'HALLMARK_WNT_BETA_CATENIN_SIGNALING', 'HALLMARK_APOPTOSIS',
        'HALLMARK_NOTCH_SIGNALING', 'HALLMARK_TNFA_SIGNALING_VIA_NFKB', 'HALLMARK_HYPOXIA', 'HALLMARK_TGF_BETA_SIGNALING',
        'HALLMARK_MITOTIC_SPINDLE', 'HALLMARK_MTORC1_SIGNALING', 'HALLMARK_PI3K_AKT_MTOR_SIGNALING', 'HALLMARK_PROTEIN_SECRETION',
        'HALLMARK_G2M_CHECKPOINT', 'HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION', 'HALLMARK_OXIDATIVE_PHOSPHORYLATION',
        'HALLMARK_P53_PATHWAY', 'HALLMARK_ANGIOGENESIS', 'HALLMARK_KRAS_SIGNALING_UP', 'HALLMARK_KRAS_SIGNALING_DN',
        'HALLMARK_GLYCOLYSIS']
    elif subset == 'all':
        selection = hallsigs.keys()
    else:
        raise ValueError('Please select a valid subset of hallmark to use. You can also choose "all".')
    for hm in selection:
        score_genes(adata, hallsigs[hm], score_name=hm, species=species)
def lin_corr_adata(adata, x, keys, method='spearman'):
    """Linearly correlates features (genes/obs_keys) of adata with a given array.

    Computes the chosen correlation (coefficient and p value) for each
    selected feature against the values in x.
    ----------
    adata: An adata object.
    x: numeric numpy array
        For example x = adata.obsm['X_diffmap'][:,1] or another gene's
        expression.
    keys: Either a list of genes or a list of adata.obs.columns.
    method: Either 'spearman' or 'pearson' ('pearsonr' accepted for
        backward compatibility).
    Returns
    -------
    df: A pandas DataFrame
        The dataframe has the features as index and columns <method>_r and
        <method>_p, sorted by correlation coefficient descending.
    """
    # input keys may be list or str, make list
    keys = [keys] if isinstance(keys, str) else keys
    # select correlation method
    if method == 'spearman':
        correlate = spearmanr
    elif method in ('pearson', 'pearsonr'):
        # fixed: only 'pearsonr' was accepted, while docstring and error
        # message advertised 'pearson'
        correlate = pearsonr
    else:
        raise ValueError(f'Method {method} not valid (pearson or spearman only).')
    # feature set
    if all(np.isin(keys, adata.obs.columns)):
        feature_type = 'obs_keys'
        Y = adata.obs[keys].values
    elif any(np.isin(keys, adata.var_names)):
        feature_type = 'genes'
        # fixed: subset columns to the requested genes so column i matches
        # keys[i]; the previous version indexed the full matrix positionally,
        # silently correlating the wrong genes whenever keys != adata.var_names
        X_sub = adata[:, keys].X
        Y = X_sub.A if issparse(X_sub) else X_sub
    else:
        raise ValueError('Keys must be list of genes or adata.obs keys.')
    # linearly correlate each feature with x
    lincors = []
    for i, key in enumerate(keys):
        y = Y[:, i]
        r, p = correlate(x, y)
        lincors.append([key, r, p])
    # format result as pandas.DataFrame
    df = pd.DataFrame(lincors, columns=[feature_type, f'{method}_r', f'{method}_p']).set_index(feature_type)
    df = df.sort_values(f'{method}_r', ascending=False)  # sort by correlation
    return df
def kde_trajectory(adata, key, groupby, velocity=False, rug_keys=[], component=1,
                   figsize=[15,5], n_convolve=10, ax=None, show=True, n_eval=200,
                   range_percs=[0,100], linewidth=4, rug_alpha=0.1,
                   n=19, ylim=30, scale=8):
    """Plot per-group cell-density (KDE) curves along one embedding component.

    One Gaussian-KDE curve per category of `groupby` is drawn over the
    chosen component of adata.obsm['X_<key>'] (or obsm[key] / obs[key] as
    fallbacks). With velocity=True, the matching 'velocity_<key>' component
    is projected as arrows on each density curve. Categorical obs columns
    listed in `rug_keys` are drawn as rug ticks below the axis.
    NOTE(review): `n_convolve` is accepted but unused in this implementation.
    """
    # resolve the 1d coordinate: prefer obsm['X_<key>'], then obsm[key], then obs[key]
    X = adata.obsm['X_'+key] if 'X_'+key in adata.obsm.keys() else adata.obsm[key] if key in adata.obsm.keys() else adata.obs[key]
    X = X[:, component] if len(X.shape)>1 else X
    if velocity:
        # same lookup chain for the velocity field
        V = adata.obsm['velocity_'+key] if 'velocity_'+key in adata.obsm.keys() else adata.obsm[key] if key in adata.obsm.keys() else adata.obs[key]
        V = V[:, component] if len(V.shape)>1 else V
    ax = pl.figure(figsize=figsize).gca() if ax is None else ax
    # evaluation range is clipped by percentiles to cut off embedding outliers
    xmin = np.percentile(X, range_percs[0])
    xmax = np.percentile(X, range_percs[1])
    ev = np.linspace(xmin, xmax, n_eval)
    # plot density per group
    for i, cond in enumerate(np.sort(pd.unique(adata.obs[groupby]))):
        mask = adata.obs[groupby] == cond
        kernel = gaussian_kde(X[mask])
        ax.plot(ev, kernel(ev), label=cond, linewidth=linewidth, color=adata.uns[groupby+'_colors'][i])
        if velocity:
            # arrow projections
            # bin the axis into n-1 intervals and average position/velocity per bin
            edges = np.linspace(ev[0], ev[-1], n)
            bins = [(edges[k]<X) & (X<edges[k+1]) for k in range(n-1)]
            # NOTE(review): this assignment is immediately shadowed by the
            # comprehensions below — appears to be dead code
            in_bin = bins[2]
            xs = np.array([np.mean(X[mask & in_bin]) for in_bin in bins])
            ys = np.array([np.mean(kernel(X[mask & in_bin])) for in_bin in bins])
            # suppress arrows where the density is below the `ylim` threshold
            vs = np.array([np.mean(V[mask & in_bin]) if y>ylim else 0 for y, in_bin in zip(ys, bins)])
            vs = vs / np.max(np.abs(vs))
            # pl.plot(X[mask & in_bin], kernel(X[mask & in_bin]), label=cond, linewidth=linewidth, color='red')
            # pl.quiver(X[mask & in_bin], kernel(X[mask & in_bin]), V[mask & in_bin], 0)
            ix = np.abs(vs) > 0
            ax.quiver(xs[ix], ys[ix], vs[ix], 0 , zorder=100, scale_units='width', scale=scale, color=adata.uns[groupby+'_colors'][i])
    # plot categorical annotations as rug
    rug_y = ax.get_ylim()[1]/10
    rug_keys = rug_keys if isinstance(rug_keys, list) else [rug_keys]
    for i, rug in enumerate(rug_keys):
        for j, cond in enumerate(np.sort(pd.unique(adata.obs[rug]))):
            mask = (adata.obs[rug] == cond) & (X>xmin) & (X<xmax)
            plot = ax.plot(X[mask], np.zeros(np.sum(mask)) - rug_y * (i+1), '|', color=adata.uns[rug+'_colors'][j], ms=10, alpha=rug_alpha)
    ax.set_xticks([])
    ax.set_xlabel(key + ' component '+str(component))
    ax.set_ylabel(f'Cell density (KDE) by {groupby}')
    # keep only non-negative ticks (rug marks live below zero)
    ax.set_yticks(ax.get_yticks()[ax.get_yticks()>=0])
    ax.axhline(y=0, c='k')
    ax.legend()
    if show:
        pl.show()
    else:
        return
def diffusion_analysis_(adata, groupby, species='human', component=1, corr_cutoff=0.1, figsize=[10,8], range_percs=[3,97], velocity_mode=None, show=True):
    """Performs a diffusion analysis on adata for a specific diffusion component.

    Builds a composite figure: the per-group cell-density (kde_trajectory)
    along the chosen diffusion component on top, an optional RNA-velocity
    panel, and two annotation rows showing which hallmark signatures and
    which genes correlate (spearman) most strongly with the component.

    velocity_mode may be None, 'on density', 'average' or 'single'.
    ----------
    adata: An adata object.
    groupby: categorical obs column used to split density curves.
    corr_cutoff: minimum |spearman r| for a hallmark/gene to be listed.
    Returns
    -------
    None
    """
    ckey = 'DC'+str(component)
    # 'on density' draws arrows inside the kde plot itself; other modes get their own panel
    add_velocity_subplot = velocity_mode!=None and velocity_mode!='on density'
    # set layout
    fig = pl.figure(constrained_layout=True, figsize=figsize)
    widths = [1, 1, 1]
    n_rows = 3 + add_velocity_subplot
    heights = [1] * n_rows
    spec = fig.add_gridspec(ncols=3, nrows=n_rows, width_ratios=widths,
                            height_ratios=heights)
    ax0 = fig.add_subplot(spec[0, :])
    kde_trajectory(adata, key='diffmap', groupby=groupby, range_percs=range_percs, ax=ax0,
                   show=False, component=component,
                   velocity=velocity_mode=='on density'
                  )
    ax0.set_xlabel('diffusion pseudotime')
    ax0.set_ylabel('cell density')
    def add_annotation(row, keys, fig, df, name):
        # Renders one annotation row: negatively correlated features on the
        # left, positively correlated on the right, correlation bars in the middle.
        n_top=8
        ax_0 = fig.add_subplot(spec[row, 0])
        ax_1 = fig.add_subplot(spec[row, 1], sharey=ax_0)
        ax_2 = fig.add_subplot(spec[row, 2], sharey=ax_0)
        ax_0.set_axis_off()
        ax_2.set_axis_off()
        # Arrows
        ax_0.annotate('', xy=(.4, 1), xytext=(.6, 1),
                      arrowprops=dict(facecolor='black', shrink=0.05), rotation=90)
        ax_2.annotate('', xy=(.6, 1), xytext=(.4, 1),
                      arrowprops=dict(facecolor='black', shrink=0.05), rotation=90)
        # Texts
        neg_df = df['spearman_r'][df['spearman_r']<-corr_cutoff].iloc[::-1][:n_top]
        pos_df = df['spearman_r'][df['spearman_r']>corr_cutoff][:n_top]
        for i, hallmark in enumerate(neg_df.index):
            ax_0.text(0.5, .8 - i/len(hallmarks), hallmark.replace('HALLMARK_','').replace('_',' '), ha='center', va='center')
        for i, hallmark in enumerate(pos_df.index):
            ax_2.text(0.5, .8 - i/len(hallmarks), hallmark.replace('HALLMARK_','').replace('_',' '), ha='center', va='center')
        # Barplot
        ax_1.barh([.8- i/10 for i in range(len(neg_df))], neg_df.values, align='center', height=0.08, color='tab:blue')
        ax_1.barh([.8- i/10 for i in range(len(pos_df))], pos_df.values, align='center', height=0.08, color='tab:red')
        ax_1.spines['right'].set_visible(False)
        ax_1.spines['left'].set_visible(False)
        ax_1.spines['top'].set_visible(False)
        ax_1.set_yticks([])
        # symmetric x-limits so bar direction encodes correlation sign
        m = np.max(np.abs(df['spearman_r']))
        ax_1.set_xlim([-m,m])
        ax_1.set_xlabel(f'correlation between diffusion axis \n and {name} expression \n (spearman R)')
        ax_1.set_ylim([0,1])
    ### Pathways
    # aggregate hallmarks
    dfs = []
    hallmarks = ['HALLMARK_ANGIOGENESIS', 'HALLMARK_APOPTOSIS', 'HALLMARK_COAGULATION', 'HALLMARK_COMPLEMENT',
    'HALLMARK_IL2_STAT5_SIGNALING', 'HALLMARK_INFLAMMATORY_RESPONSE',
    'HALLMARK_INTERFERON_ALPHA_RESPONSE', 'HALLMARK_INTERFERON_GAMMA_RESPONSE', 'HALLMARK_PI3K_AKT_MTOR_SIGNALING',
    'HALLMARK_TGF_BETA_SIGNALING', 'HALLMARK_XENOBIOTIC_METABOLISM']
    # score only if any hallmark column is missing (scoring is expensive)
    if not all(np.isin(hallmarks, adata.obs.keys())): score_hallmarks(adata, species=species, subset=hallmarks)
    df_hallmarks = lin_corr_adata(adata, adata.obsm['X_diffmap'][:, component], hallmarks)
    df_hallmarks = df_hallmarks[~pd.isna(df_hallmarks.spearman_r)]
    add_annotation(-2, hallmarks, fig, df_hallmarks, 'signature score')
    ### Genes
    df_genes = lin_corr_adata(adata, adata.obsm['X_diffmap'][:, component], adata.var_names)
    df_genes = df_genes[~pd.isna(df_genes.spearman_r)]
    add_annotation(-1, hallmarks, fig, df_genes, 'gene')
    ### velocities
    if add_velocity_subplot:
        ax1 = fig.add_subplot(spec[1, :], sharex=ax0)
        groups = list(adata.obs[groupby].cat.categories)
        colors = adata.uns[f'{groupby}_colors']
        x = adata.obsm['X_diffmap'][:, component]
        v = adata.obsm['velocity_diffmap'][:, component]
        # restrict to the same percentile window used for the kde plot
        mask0 = (x>np.percentile(x, range_percs[0])) & (x<np.percentile(x, range_percs[1]))
        if velocity_mode=='single':
            # one arrow per cell, vertically jittered into a band per group
            for i, group in enumerate(groups):
                mask = (adata.obs[groupby] == group) & mask0
                ax1.quiver(x[mask], np.random.uniform(1-i, -i, x.shape)[mask], v[mask], np.zeros_like(v)[mask], color=colors[i], scale=0.4, edgecolor='k', linewidth = .5)
            ax1.set_ylabel(f'RNA velocity\nby {groupby}')
        else:
            from scipy.interpolate import interp1d
            n_evals = 10
            # evaluate an interpolated mean velocity at evenly spaced positions
            xint=np.linspace(np.percentile(x, range_percs[0]), np.percentile(x, range_percs[1]), n_evals)
            for i, group in enumerate(groups):
                mask = (adata.obs[groupby] == group) & mask0
                f = interp1d(x[mask], v[mask])
                x_int = xint[(xint >= np.min(x[mask])) & (xint <= np.max(x[mask]))]
                v_int = f(x_int)
                # Normalize
                v_absmax = np.max(np.abs(v_int))
                x_segment = (x_int[1] - x_int[0]) / (n_evals/5)
                v_int = v_int * x_segment / v_absmax
                ax1.quiver(x_int, i * np.ones_like(x_int), v_int, np.zeros_like(v_int),
                           headwidth=4, color=colors[i], edgecolor='k', linewidth = .5, angles='xy', scale_units='xy', scale=1)
            ax1.set_ylim(-1, len(groups))
            ax1.set_ylabel(f'Average RNA velocity\nby {groupby}')
        ax1.set_yticks([])
    # pl.suptitle('Neutrophil cell density on diffusion pseudotime')
    if show: pl.show()
def identify_barcode_overlap(df1, df2, key1, key2, reg1='[ACGT]+-', reg2='[ACGT]+-', kick=-1, plot=True):
    """Count barcode co-occurrences between two annotation tables.

    Barcodes are extracted from each dataframe's index via the given regex
    (trimmed by `kick` characters), then for every pair of categories
    (key1 in df1, key2 in df2) the number of shared barcodes is counted.

    Returns a DataFrame of counts (rows: df1 categories, columns: df2
    categories); optionally shows it as a heatmap.
    """
    # clear index: strip the barcode out of each index entry
    barcodes1 = np.array([re.findall(reg1, idx)[0][:kick] for idx in df1.index])
    barcodes2 = np.array([re.findall(reg2, idx)[0][:kick] for idx in df2.index])
    # count co-occurences of barcodes by key categories
    cats1 = pd.unique(df1[key1])
    cats2 = pd.unique(df2[key2])
    counts = np.zeros((len(cats1), len(cats2)))
    for i, cat_i in enumerate(cats1):
        sel1 = barcodes1[df1[key1] == cat_i]
        for j, cat_j in enumerate(cats2):
            counts[i, j] = np.isin(sel1, barcodes2[df2[key2] == cat_j]).sum()
    result = pd.DataFrame(counts, index=cats1, columns=cats2)
    if plot:
        sns.heatmap(result, annot=False)
        pl.show()
    return result
def get_subfolders(d, full_path=True):
    """List the subdirectories of directory *d*.

    Returns full paths by default; bare directory names if full_path=False.
    """
    subfolders = []
    for entry in os.listdir(d):
        if os.path.isdir(os.path.join(d, entry)):
            subfolders.append(os.path.join(d, entry) if full_path else entry)
    return subfolders
def get_files(d, full_path=True):
    """List the regular files in directory *d*.

    Returns full paths by default; bare file names if full_path=False.
    """
    files = []
    for entry in os.listdir(d):
        if os.path.isfile(os.path.join(d, entry)):
            files.append(os.path.join(d, entry) if full_path else entry)
    return files
def force_merge(df1, df2):
    """Glue two dataframes together column-wise, row by row.

    pd.concat([df1, df2], axis=0, ignore_index=True) does not behave as one
    would expect here (see
    https://stackoverflow.com/questions/32801806/pandas-concat-ignore-index-doesnt-work),
    so the values are concatenated positionally instead. When a column name
    appears in both inputs, only the first occurrence is kept.
    """
    values = np.concatenate([df1, df2], axis=1)
    columns = list(df1.columns) + list(df2.columns)
    merged = pd.DataFrame(values, index=df1.index, columns=columns)
    # remove duplicate columns, keeping the first occurrence
    return merged.loc[:, ~merged.columns.duplicated()]
def peek(f):
    """Inspect a (possibly gzip-compressed) delimited text file.

    Counts the number of lines and sniffs the column separator from the
    first line using csv.Sniffer.

    Parameters
    ----------
    f: file path; names ending in '.gz' are opened with gzip.

    Returns
    -------
    (file_length, separator): total line count and the sniffed delimiter.
    """
    opener = open if f.split('.')[-1] != 'gz' else (lambda x: gzip.open(x, 'rb'))
    # use context managers so the handles are closed — the previous version
    # leaked one handle per pass over the file
    with opener(f) as fh:
        file_length = sum(1 for _ in fh)
    # peek at the first line to sniff the separator
    with opener(f) as fh:
        first_line = next(fh)
    try:
        first_line = first_line.decode()  # gzip mode 'rb' yields bytes
    except (UnicodeDecodeError, AttributeError):
        pass  # plain-text mode already yields str
    sniffer = Sniffer()
    dialect = sniffer.sniff(first_line)
    separator = dialect.delimiter
    return file_length, separator
def gene_symbols_to_entrezid(gene_list, species='human', verbose=False):
    """Map gene symbols to NCBI Entrez ids via the mygene.info web service.

    Returns a DataFrame indexed by gene_symbol with one 'entrez_id' column;
    symbols the service could not map get entrez_id None.
    """
    mg = mygene.MyGeneInfo()
    hits = mg.querymany(gene_list, scopes='symbol', fields='entrezgene', species=species, verbose=verbose)
    rows = []
    for hit in hits:
        rows.append([hit['query'], hit['_id'] if '_id' in hit else None])
    df = pd.DataFrame(rows, columns=['gene_symbol', 'entrez_id']).set_index('gene_symbol')
    return df
# NOTE: you need to run beforehand:
# from goatools.base import download_go_basic_obo, download_ncbi_associations
# obo_fname = download_go_basic_obo(goa_path+'go-basic.obo')
# fin_gene2go = download_ncbi_associations(goa_path+'gene2go')
# also see:
# https://github.com/tanghaibao/goatools/blob/main/notebooks/goea_nbt3102.ipynb
goa_path = '/fast/work/users/peidlis_c/utils/goa/' # replace with your path
def GOEA(gene_list, species='human', namespaces=['BP'], sig_alpha=0.05, verbose=False):
    """Performs GO enrichment analysis with goatools.
    Based on https://github.com/tanghaibao/goatools/blob/main/notebooks/goea_nbt3102.ipynb.
    Note that you must ensure goa_path is filled (see jnb link above) by running:
    from goatools.base import download_go_basic_obo, download_ncbi_associations
    obo_fname = download_go_basic_obo(goa_path+'go-basic.obo')
    fin_gene2go = download_ncbi_associations(goa_path+'gene2go')
    ----------
    gene_list: `list` of `str`
        A list of gene symbols.
    species: `str`, either `'human'` or `'mouse'` (default: `'human'`)
        The species the genes came from.
    namespaces: `list` of `str` (default: `['BP']`)
        A `list` of strings from `['BP', 'CC', 'MF']`.
        BP: Biological Process (larger processes, e.g. immun)
        CC: Cellular Component (location in the cell)
        MF: Molecular Function (small process)
        See http://geneontology.org/docs/ontology-documentation/.
    sig_alpha: `float` in `[0,1]` (default: `0.05`)
        Significance cut-off for multiple testing corrected p values.
    Returns
    -------
    df: A pandas DataFrame
        The dataframe has GO term ids as index and is sorted by multiple testing
        corrected p values. Gene ids of the respective GO term are found in the
        column 'study_items'.
    """
    # goatools prints progress unless prt=None is passed
    largs = {} if verbose else {'prt': None}
    # species handling
    taxids_dics = {'human' : 9606, 'mouse': 10090}
    if species not in taxids_dics.keys():
        raise ValueError('Species ', species, ' not known...')
    else:
        taxid = taxids_dics[species]
    # convert gene symbols to entrez ids
    df = gene_symbols_to_entrezid(gene_list, species=species, verbose=verbose)
    genes = [x for x in df.entrez_id if x!=None]
    # keep only purely numeric ids (assumes the mapping returns id strings;
    # non-numeric ids are silently dropped)
    geneids_study = [int(x) for x in genes if x.isdigit()]
    # read GO-relevant databases
    obodag = GODag(goa_path+"go-basic.obo", **largs)
    objanno = Gene2GoReader(goa_path+'gene2go', taxids=[taxid], **largs)
    ns2assoc = objanno.get_ns2assc()
    # define background
    # (only human/mouse reach this point — other species raise above)
    if species == 'mouse':
        from metadata.goatools_bg_genes.genes_ncbi_10090_proteincoding import GENEID2NT
    elif species == 'human':
        from metadata.goatools_bg_genes.genes_ncbi_9606_proteincoding import GENEID2NT
    # initialize GOEA object
    goeaobj = GOEnrichmentStudyNS(
        GENEID2NT.keys(), # List of protein-coding genes
        ns2assoc, # geneid/GO associations
        obodag, # Ontologies
        propagate_counts = False,
        alpha = sig_alpha, # default significance cut-off
        methods = ['fdr_bh']) # defult multipletest correction method
    # run analysis
    goea_results_all = goeaobj.run_study(geneids_study, **largs)
    # keep only terms significant after Benjamini-Hochberg correction
    goea_results = [r for r in goea_results_all if r.p_fdr_bh < sig_alpha]
    if len(goea_results) > 0:
        df = pd.DataFrame([o.__dict__ for o in goea_results])
        df = df.set_index('GO').drop(['kws', 'method_flds'], axis=1)
        # restrict to the requested namespaces (BP/CC/MF)
        df = df[np.isin(df.NS, namespaces)]
    else:
        df = None
        print('Results are empty. Check if gene set or species wrong. Or set only_significant=False.')
    return df
| [
"pandas.read_csv",
"gzip.open",
"goatools.obo_parser.GODag",
"numpy.isin",
"scipy.interpolate.interp1d",
"numpy.array",
"scanpy.tl.score_genes_cell_cycle",
"csv.Sniffer",
"pandas.read_excel",
"pandas.unique",
"goatools.anno.genetogo_reader.Gene2GoReader",
"numpy.mean",
"scipy.stats.gaussian_... | [((953, 970), 'scipy.sparse.issparse', 'issparse', (['adata.X'], {}), '(adata.X)\n', (961, 970), False, 'from scipy.sparse import issparse\n'), ((2807, 2915), 'pandas.read_excel', 'pd.read_excel', (["(signatures_path + '/colonoid_cancer_uhlitz_markers_revised.xlsx')"], {'skiprows': '(1)', 'index_col': '(0)'}), "(signatures_path +\n '/colonoid_cancer_uhlitz_markers_revised.xlsx', skiprows=1, index_col=0)\n", (2820, 2915), True, 'import pandas as pd\n'), ((3423, 3515), 'pandas.read_excel', 'pd.read_excel', (["(signatures_path + '/ImSigGenes_immunetumor.xlsx')"], {'skiprows': '(2)', 'index_col': '(1)'}), "(signatures_path + '/ImSigGenes_immunetumor.xlsx', skiprows=2,\n index_col=1)\n", (3436, 3515), True, 'import pandas as pd\n'), ((3543, 3567), 'pandas.unique', 'pd.unique', (['tab.Signature'], {}), '(tab.Signature)\n', (3552, 3567), True, 'import pandas as pd\n'), ((6765, 6824), 'scanpy.tl.score_genes', 'sc.tl.score_genes', (['adata', 'gene_list_'], {'score_name': 'score_name'}), '(adata, gene_list_, score_name=score_name)\n', (6782, 6824), True, 'import scanpy as sc\n'), ((11012, 11044), 'numpy.percentile', 'np.percentile', (['X', 'range_percs[0]'], {}), '(X, range_percs[0])\n', (11025, 11044), True, 'import numpy as np\n'), ((11056, 11088), 'numpy.percentile', 'np.percentile', (['X', 'range_percs[1]'], {}), '(X, range_percs[1])\n', (11069, 11088), True, 'import numpy as np\n'), ((11098, 11129), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'n_eval'], {}), '(xmin, xmax, n_eval)\n', (11109, 11129), True, 'import numpy as np\n'), ((13561, 13612), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'constrained_layout': '(True)', 'figsize': 'figsize'}), '(constrained_layout=True, figsize=figsize)\n', (13570, 13612), True, 'import matplotlib.pyplot as pl\n'), ((19111, 19131), 'pandas.unique', 'pd.unique', (['df1[key1]'], {}), '(df1[key1])\n', (19120, 19131), True, 'import pandas as pd\n'), ((19141, 19161), 'pandas.unique', 'pd.unique', 
(['df2[key2]'], {}), '(df2[key2])\n', (19150, 19161), True, 'import pandas as pd\n'), ((19350, 19387), 'pandas.DataFrame', 'pd.DataFrame', (['Z'], {'index': 'c1', 'columns': 'c2'}), '(Z, index=c1, columns=c2)\n', (19362, 19387), True, 'import pandas as pd\n'), ((20657, 20666), 'csv.Sniffer', 'Sniffer', ([], {}), '()\n', (20664, 20666), False, 'from csv import Sniffer\n'), ((20856, 20875), 'mygene.MyGeneInfo', 'mygene.MyGeneInfo', ([], {}), '()\n', (20873, 20875), False, 'import mygene\n'), ((23556, 23597), 'goatools.obo_parser.GODag', 'GODag', (["(goa_path + 'go-basic.obo')"], {}), "(goa_path + 'go-basic.obo', **largs)\n", (23561, 23597), False, 'from goatools.obo_parser import GODag\n'), ((23610, 23670), 'goatools.anno.genetogo_reader.Gene2GoReader', 'Gene2GoReader', (["(goa_path + 'gene2go')"], {'taxids': '[taxid]'}), "(goa_path + 'gene2go', taxids=[taxid], **largs)\n", (23623, 23670), False, 'from goatools.anno.genetogo_reader import Gene2GoReader\n'), ((560, 586), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (576, 586), False, 'import os\n'), ((2558, 2631), 'scanpy.tl.score_genes_cell_cycle', 'sc.tl.score_genes_cell_cycle', (['adata'], {'s_genes': 's_genes', 'g2m_genes': 'g2m_genes'}), '(adata, s_genes=s_genes, g2m_genes=g2m_genes)\n', (2586, 2631), True, 'import scanpy as sc\n'), ((2933, 2996), 'numpy.array', 'np.array', (["tab.index[tab['Epithelial'] == 1].values"], {'dtype': '"""str"""'}), "(tab.index[tab['Epithelial'] == 1].values, dtype='str')\n", (2941, 2996), True, 'import numpy as np\n'), ((3060, 3120), 'numpy.array', 'np.array', (["tab.index[tab['Stromal'] == 1].values"], {'dtype': '"""str"""'}), "(tab.index[tab['Stromal'] == 1].values, dtype='str')\n", (3068, 3120), True, 'import numpy as np\n'), ((3184, 3243), 'numpy.array', 'np.array', (["tab.index[tab['Immune'] == 1].values"], {'dtype': '"""str"""'}), "(tab.index[tab['Immune'] == 1].values, dtype='str')\n", (3192, 3243), True, 'import numpy as np\n'), ((9532, 9564), 
'numpy.isin', 'np.isin', (['keys', 'adata.obs.columns'], {}), '(keys, adata.obs.columns)\n', (9539, 9564), True, 'import numpy as np\n'), ((11289, 11310), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['X[mask]'], {}), '(X[mask])\n', (11301, 11310), False, 'from scipy.stats import gaussian_kde, spearmanr, pearsonr\n'), ((12997, 13006), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (13004, 13006), True, 'import matplotlib.pyplot as pl\n'), ((18762, 18771), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (18769, 18771), True, 'import matplotlib.pyplot as pl\n'), ((19539, 19562), 'os.path.join', 'os.path.join', (['prefix', 'o'], {}), '(prefix, o)\n', (19551, 19562), False, 'import os\n'), ((19704, 19727), 'os.path.join', 'os.path.join', (['prefix', 'f'], {}), '(prefix, f)\n', (19716, 19727), False, 'import os\n'), ((20029, 20063), 'numpy.concatenate', 'np.concatenate', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (20043, 20063), True, 'import numpy as np\n'), ((24035, 24051), 'metadata.goatools_bg_genes.genes_ncbi_9606_proteincoding.GENEID2NT.keys', 'GENEID2NT.keys', ([], {}), '()\n', (24049, 24051), False, 'from metadata.goatools_bg_genes.genes_ncbi_9606_proteincoding import GENEID2NT\n'), ((24521, 24569), 'pandas.DataFrame', 'pd.DataFrame', (['[o.__dict__ for o in goea_results]'], {}), '([o.__dict__ for o in goea_results])\n', (24533, 24569), True, 'import pandas as pd\n'), ((1142, 1182), 'numpy.sum', 'np.sum', (['adata[:, family_genes].X'], {'axis': '(1)'}), '(adata[:, family_genes].X, axis=1)\n', (1148, 1182), True, 'import numpy as np\n'), ((1198, 1221), 'numpy.sum', 'np.sum', (['adata.X'], {'axis': '(1)'}), '(adata.X, axis=1)\n', (1204, 1221), True, 'import numpy as np\n'), ((9649, 9679), 'numpy.isin', 'np.isin', (['keys', 'adata.var_names'], {}), '(keys, adata.var_names)\n', (9656, 9679), True, 'import numpy as np\n'), ((10067, 10142), 'pandas.DataFrame', 'pd.DataFrame', (['lincors'], {'columns': "[feature_type, f'{method}_r', 
f'{method}_p']"}), "(lincors, columns=[feature_type, f'{method}_r', f'{method}_p'])\n", (10079, 10142), True, 'import pandas as pd\n'), ((11197, 11226), 'pandas.unique', 'pd.unique', (['adata.obs[groupby]'], {}), '(adata.obs[groupby])\n', (11206, 11226), True, 'import pandas as pd\n'), ((11489, 11518), 'numpy.linspace', 'np.linspace', (['ev[0]', 'ev[-1]', 'n'], {}), '(ev[0], ev[-1], n)\n', (11500, 11518), True, 'import numpy as np\n'), ((15713, 15737), 'numpy.abs', 'np.abs', (["df['spearman_r']"], {}), "(df['spearman_r'])\n", (15719, 15737), True, 'import numpy as np\n'), ((16561, 16593), 'pandas.isna', 'pd.isna', (['df_hallmarks.spearman_r'], {}), '(df_hallmarks.spearman_r)\n', (16568, 16593), True, 'import pandas as pd\n'), ((16800, 16828), 'pandas.isna', 'pd.isna', (['df_genes.spearman_r'], {}), '(df_genes.spearman_r)\n', (16807, 16828), True, 'import pandas as pd\n'), ((19401, 19428), 'seaborn.heatmap', 'sns.heatmap', (['X'], {'annot': '(False)'}), '(X, annot=False)\n', (19412, 19428), True, 'import seaborn as sns\n'), ((19430, 19439), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (19437, 19439), True, 'import matplotlib.pyplot as pl\n'), ((19572, 19585), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (19582, 19585), False, 'import os\n'), ((19737, 19750), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (19747, 19750), False, 'import os\n'), ((20346, 20364), 'gzip.open', 'gzip.open', (['x', '"""rb"""'], {}), "(x, 'rb')\n", (20355, 20364), False, 'import gzip\n'), ((24655, 24681), 'numpy.isin', 'np.isin', (['df.NS', 'namespaces'], {}), '(df.NS, namespaces)\n', (24662, 24681), True, 'import numpy as np\n'), ((1009, 1049), 'numpy.sum', 'np.sum', (['adata[:, family_genes].X'], {'axis': '(1)'}), '(adata[:, family_genes].X, axis=1)\n', (1015, 1049), True, 'import numpy as np\n'), ((1068, 1091), 'numpy.sum', 'np.sum', (['adata.X'], {'axis': '(1)'}), '(adata.X, axis=1)\n', (1074, 1091), True, 'import numpy as np\n'), ((7053, 7149), 'pandas.read_csv', 
'pd.read_csv', (["(signatures_path + 'h.all.v6.2.symbols.gmt')"], {'sep': '"""\t"""', 'index_col': '(0)', 'header': 'None'}), "(signatures_path + 'h.all.v6.2.symbols.gmt', sep='\\t', index_col\n =0, header=None)\n", (7064, 7149), True, 'import pandas as pd\n'), ((9738, 9755), 'scipy.sparse.issparse', 'issparse', (['adata.X'], {}), '(adata.X)\n', (9746, 9755), False, 'from scipy.sparse import issparse\n'), ((10945, 10971), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (10954, 10971), True, 'import matplotlib.pyplot as pl\n'), ((12137, 12147), 'numpy.abs', 'np.abs', (['vs'], {}), '(vs)\n', (12143, 12147), True, 'import numpy as np\n'), ((12512, 12537), 'pandas.unique', 'pd.unique', (['adata.obs[rug]'], {}), '(adata.obs[rug])\n', (12521, 12537), True, 'import pandas as pd\n'), ((17221, 17253), 'numpy.percentile', 'np.percentile', (['x', 'range_percs[0]'], {}), '(x, range_percs[0])\n', (17234, 17253), True, 'import numpy as np\n'), ((17260, 17292), 'numpy.percentile', 'np.percentile', (['x', 'range_percs[1]'], {}), '(x, range_percs[1])\n', (17273, 17292), True, 'import numpy as np\n'), ((17786, 17818), 'numpy.percentile', 'np.percentile', (['x', 'range_percs[0]'], {}), '(x, range_percs[0])\n', (17799, 17818), True, 'import numpy as np\n'), ((17820, 17852), 'numpy.percentile', 'np.percentile', (['x', 'range_percs[1]'], {}), '(x, range_percs[1])\n', (17833, 17852), True, 'import numpy as np\n'), ((17991, 18017), 'scipy.interpolate.interp1d', 'interp1d', (['x[mask]', 'v[mask]'], {}), '(x[mask], v[mask])\n', (17999, 18017), False, 'from scipy.interpolate import interp1d\n'), ((19295, 19344), 'numpy.isin', 'np.isin', (['x1[df1[key1] == ci]', 'x2[df2[key2] == cj]'], {}), '(x1[df1[key1] == ci], x2[df2[key2] == cj])\n', (19302, 19344), True, 'import numpy as np\n'), ((19603, 19621), 'os.path.join', 'os.path.join', (['d', 'o'], {}), '(d, o)\n', (19615, 19621), False, 'import os\n'), ((19769, 19787), 'os.path.join', 'os.path.join', 
(['d', 'f'], {}), '(d, f)\n', (19781, 19787), False, 'import os\n'), ((7203, 7225), 'pandas.isna', 'pd.isna', (['tab[hallmark]'], {}), '(tab[hallmark])\n', (7210, 7225), True, 'import pandas as pd\n'), ((11646, 11671), 'numpy.mean', 'np.mean', (['X[mask & in_bin]'], {}), '(X[mask & in_bin])\n', (11653, 11671), True, 'import numpy as np\n'), ((11907, 11917), 'numpy.abs', 'np.abs', (['vs'], {}), '(vs)\n', (11913, 11917), True, 'import numpy as np\n'), ((18197, 18210), 'numpy.abs', 'np.abs', (['v_int'], {}), '(v_int)\n', (18203, 18210), True, 'import numpy as np\n'), ((18395, 18415), 'numpy.zeros_like', 'np.zeros_like', (['v_int'], {}), '(v_int)\n', (18408, 18415), True, 'import numpy as np\n'), ((18916, 18937), 're.findall', 're.findall', (['reg1', 'txt'], {}), '(reg1, txt)\n', (18926, 18937), False, 'import re\n'), ((18990, 19011), 're.findall', 're.findall', (['reg2', 'txt'], {}), '(reg2, txt)\n', (19000, 19011), False, 'import re\n'), ((11802, 11827), 'numpy.mean', 'np.mean', (['V[mask & in_bin]'], {}), '(V[mask & in_bin])\n', (11809, 11827), True, 'import numpy as np\n'), ((12652, 12664), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (12658, 12664), True, 'import numpy as np\n'), ((17474, 17511), 'numpy.random.uniform', 'np.random.uniform', (['(1 - i)', '(-i)', 'x.shape'], {}), '(1 - i, -i, x.shape)\n', (17491, 17511), True, 'import numpy as np\n'), ((17526, 17542), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (17539, 17542), True, 'import numpy as np\n'), ((18367, 18386), 'numpy.ones_like', 'np.ones_like', (['x_int'], {}), '(x_int)\n', (18379, 18386), True, 'import numpy as np\n'), ((18056, 18071), 'numpy.min', 'np.min', (['x[mask]'], {}), '(x[mask])\n', (18062, 18071), True, 'import numpy as np\n'), ((18084, 18099), 'numpy.max', 'np.max', (['x[mask]'], {}), '(x[mask])\n', (18090, 18099), True, 'import numpy as np\n')] |
"""
Author: <NAME> and <NAME>
E-mail author: <EMAIL>
Last Modified: May 7 2020
"""
import math
import pandas as pd
import motmetrics as mm
import numpy as np
from collections import defaultdict
from Metrics import Metrics
"""
Create individual metrics class for new challenge. The new metrics class inherits functionality from the parent class Metrics
"""
class Zef_3dMetrics(Metrics):
def __init__(self, seqName = None):
super().__init__()
if seqName:
self.seqName = seqName
else: self.seqName = 0
# The maximum allowed distance in centimetres between detections and
# ground truth positions
self.thresh_3d = 0.5
print('Registrering metrics for sequence {0}'.format(self.seqName))
"""
Register metrics for the evaluation script
E.g.
self.register(name = "MOTA", formatter='{:.2f}'.format )
self.register(name = "recall", display_name="Rcll", formatter='{:.2f}'.format)
"""
self.register(name = "mota", formatter = '{:.2f}'.format, display_name = 'MOTA')
self.register(name = "motp", formatter = '{:.2f}'.format, display_name = 'MOTP')
self.register(name = "motal", formatter = '{:.2f}'.format, display_name = 'MOTAL', write_mail = False)
self.register(name = "recall", formatter = '{:.2f}'.format, display_name = 'Rcll')
self.register(name = "precision", formatter = '{:.2f}'.format, display_name = 'Prcn')
self.register(name = "f1", formatter = '{:.2f}'.format, display_name = 'F1')
self.register(name = "FAR", formatter='{:.2f}'.format)
self.register(name = "fp", formatter = '{:d}'.format, display_name = 'FP')
self.register(name = "tp", formatter = '{:d}'.format, display_name = 'TP')
self.register(name = "fn", formatter = '{:d}'.format, display_name = 'FN')
self.register(name = "n_gt_trajectories", display_name = "GT",formatter='{:.0f}'.format)
self.register(name = "n_gt", display_name = "GT_OBJ", formatter='{:.0f}'.format, write_mail = False, write_db = False) # number of ground truth detections
self.register(name = "num_objects", formatter = '{:d}'.format, display_name = "GT", write_db = True, write_mail = False) # tp+fn
self.register(name = "num_switches", formatter = '{:d}'.format, display_name = "IDSW")
self.register(name = "idsw_ratio", formatter = '{:.1f}'.format, display_name = "IDSWR")
self.register(name = "total_num_frames", display_name = "TOTAL_NUM", formatter='{:.0f}'.format, write_mail = False, write_db = False)
self.register(name = "num_predictions", formatter = '{:.1f}'.format, write_db = False, write_mail = False)
self.register(name = "dist", formatter = '{:.2f}'.format, write_db = False, write_mail = False)
self.register(name = "frag", formatter = '{:d}'.format, display_name = "FM")
self.register(name = "fragments_rel", display_name="FMR", formatter='{:.2f}'.format)
self.register(name = "mtbf_s", formatter = '{:.2f}'.format, display_name = "MTBFs")
self.register(name = "mtbf_m", formatter = '{:.2f}'.format, display_name = "MTBFm")
self.register(name = "mtbf_ssum", formatter = '{:.2f}'.format, write_db=False, write_mail=False)
self.register(name = "mtbf_slen", formatter = '{:.2f}'.format, write_db=False, write_mail=False)
self.register(name = "mtbf_nslen", formatter = '{:.2f}'.format, write_db=False, write_mail=False)
self.register(name = "idfp", formatter = '{:.1f}'.format, display_name = "IDFP", write_mail = False)
self.register(name = "idfn", formatter = '{:.1f}'.format, display_name = "IDFN", write_mail = False)
self.register(name = "idtp", formatter = '{:.1f}'.format, display_name = "IDTP")
self.register(name = "idp", formatter = '{:.1f}'.format, display_name = "IDP")
self.register(name = "idr", formatter = '{:.1f}'.format, display_name = "IDR")
self.register(name = "idf1", formatter = '{:.1f}'.format, display_name = "IDF1")
self.register(name = "mt", formatter = '{:d}'.format, display_name = "MT")
self.register(name = "ml", formatter = '{:d}'.format, display_name = "ML")
self.register(name = "pt", formatter = '{:d}'.format, display_name = "PT")
self.register(name = "MTR", formatter='{:.2f}'.format)
self.register(name = "PTR", formatter='{:.2f}'.format)
self.register(name = "MLR", formatter='{:.2f}'.format)
def compute_clearmot(self):
""" Compute clear mot metric for the benchmark
E.g.
# precision/recall etc.
if (self.fp + self.tp) == 0 or (self.tp + self.fn) == 0:
self.recall = 0.
self.precision = 0.
else:
self.recall = (self.tp / float(self.tp + self.fn) ) * 100.
self.precision = (self.tp / float(self.fp + self.tp) ) * 100.
"""
# precision/recall
if (self.fp + self.tp) == 0 or self.num_objects == 0:
self.recall = 0.
self.precision = 0.
else:
self.recall = (self.tp / float(self.num_objects) ) * 100.
self.precision = (self.tp / float(self.fp + self.tp) ) * 100.
# F1-score
if (self.precision + self.recall) == 0:
self.f1 = 0.
else:
self.f1 = (2 * self.precision * self.recall) / (self.precision + self.recall)
# False alarm rate
if self.total_num_frames == 0:
self.FAR = "n/a"
else:
self.FAR = ( self.fp / float(self.total_num_frames) )
# ID switch ratio
if self.recall == 0:
self.idsw_ratio = 0.
self.fragments_rel = 0
else:
self.idsw_ratio = self.num_switches / (self.recall / 100.)
self.fragments_rel = self.frag/self.recall
# MOTA/MOTAL
if self.num_objects == 0:
self.mota = 0.
else:
self.mota = (1. - (self.fn + self.num_switches +
self.fp) / self.num_objects) * 100.
self.motal = (1 - (self.fn + self.fp +
np.log10(self.num_switches + 1)) / self.num_objects) * 100.
# MOTP
if self.tp == 0:
self.motp = -1.
else:
self.motp = (self.thresh_3d - (self.dist/self.tp)) * 100
# ID precission/recall
if (self.idfp + self.idtp) == 0 or (self.idtp + self.idfn) == 0:
self.idr = 0.
self.idp = 0.
else:
self.idr = (self.idtp / float(self.idtp + self.idfn) ) * 100.
self.idp = (self.idtp / float(self.idfp + self.idtp) ) * 100.
# IDF1
if (self.num_objects + self.num_predictions) == 0:
self.idf1 = 0.
else:
self.idf1 = float(2 * self.idtp) / (self.num_objects + self.num_predictions) * 100.
# MTBF standard and monotonic
if self.mtbf_slen == 0:
self.mtbf_s = 0
self.mtbf_m = 0
else:
self.mtbf_s = self.mtbf_ssum/self.mtbf_slen
self.mtbf_m = self.mtbf_ssum/(self.mtbf_slen+self.mtbf_nslen)
if self.n_gt_trajectories == 0:
self.MTR = 0.
self.PTR = 0.
self.MLR = 0.
else:
self.MTR = self.mt * 100. / float(self.n_gt_trajectories)
self.PTR = self.pt * 100. / float(self.n_gt_trajectories)
self.MLR = self.ml * 100. / float(self.n_gt_trajectories)
def compute_metrics_per_sequence(self, sequence, det_df, gt_df, gtDataDir, benchmark_name, **kwargs):
"""
"""
maxDist = self.thresh_3d
posFunc = self.get3Dpos
distFunc = self.pairwiseDistance
gt_frame_col = "frame"
det_frame_col = "frame"
gt_df = gt_df.dropna(subset=["3d_x", "3d_y", "3d_z"])
det_df = det_df[(det_df["3d_x"] > 0) & (det_df["3d_y"] > 0) & (det_df["3d_z"] > 0)]
# Get unique occurring frames
gt_frames = gt_df[gt_frame_col].unique()
det_frames = det_df[det_frame_col].unique()
gt_frames = [int(x) for x in gt_frames]
det_frames = [int(x) for x in det_frames]
print( "det num frames")
frames = list(set(gt_frames+det_frames))
print("[Seq {}]\nAmount of GT frames: {}\nAmount of det frames: {}\nSet of all frames: {}".format(sequence, len(gt_frames), len(det_frames), len(frames)))
acc = mm.MOTAccumulator(auto_id=False)
dist_sum = 0
try:
for frame in frames:
# Get the df entries for this specific frame
gts = gt_df[gt_df[gt_frame_col] == frame]
dets = det_df[det_df[det_frame_col] == frame]
gt_data = True
det_data = True
# Get ground truth positions, if any
if len(gts) > 0:
gt_pos, gt_ids = posFunc(gts)
gt_ids = [x for x in gt_ids]
else:
gt_ids = []
gt_data = False
# Get detections, if any
if len(dets) > 0:
det_pos, det_ids = posFunc(dets)
det_ids = [x for x in det_ids]
else:
det_ids = []
det_data = False
# Get the L2 distance between ground truth positions, and the detections
if gt_data and det_data:
dist = distFunc(gt_pos, det_pos, maxDist=maxDist).tolist()
else:
dist = []
# Update accumulator
acc.update(gt_ids, # Ground truth objects in this frame
det_ids, # Detector hypotheses in this frame
dist, # Distance between ground truths and observations
frame)
dist_sum += self.nestedSum(dist)
except:
print("Add some more information for the exception here.") # FIX
raise Exception("<exc> Evaluation failed <!exc>")
metrics = self.calcMetrics(acc)
# get number of gt tracks
self.n_gt_trajectories = int(len(gt_df["id"].unique()))
# True/False positives
self.tp = int(metrics['num_detections'])
self.fp = int(metrics['num_false_positives'])
# Total number of unique object appearances over all frames (tp + fn)
self.num_objects = int(metrics['num_objects'])
# Total number of misses
self.fn = int(metrics['num_misses'])
# Total number of track switches
self.num_switches = int(metrics['num_switches'])
# Total L2 distance between gt and dets
self.dist = dist_sum
# Total amount of fragments
self.frag = int(metrics["num_fragmentations"])
# MTBF sequences and null sequences
self.mtbf_ssum = int(metrics["mtbf_ssum"])
self.mtbf_slen = int(metrics["mtbf_slen"])
self.mtbf_nslen = int(metrics["mtbf_nslen"])
# ID true positives/false negatives/false positives
self.idtp = int(metrics["idtp"])
self.idfn = int(metrics["idfn"])
self.idfp = int(metrics["idfp"])
# Total number of unique prediction appearances
self.num_predictions = int(metrics["num_predictions"])
# Mostly tracked
self.mt = int(metrics["mostly_tracked"])
# Mostly lost
self.ml = int(metrics["mostly_lost"])
# Partially tracked
self.pt = int(metrics["partially_tracked"])
# total number of frames
self.total_num_frames = int(metrics["num_frames"])
def nestedSum(self, x):
"""
Returns the summed elements in nested lists
"""
total = 0
for i in x:
if isinstance(i, list):
total += self.nestedSum(i)
elif not math.isnan(i):
total += i
else:
pass
return total
def get3Dpos(self, df):
"""
Returns the 3D position in a dataset
Input:
df: Pandas dataframe
Output:
pos: Numpy array of size [n_ids, 3] containing the 3d position
ids: List of IDs
"""
ids = df["id"].unique()
ids = [int(x) for x in ids]
pos = np.zeros((len(ids), 3))
for idx, identity in enumerate(ids):
df_id = df[df["id"] == identity]
pos[idx,0] = df_id["3d_x"]
pos[idx,1] = df_id["3d_y"]
pos[idx,2] = df_id["3d_z"]
return pos, ids
def pairwiseDistance(self, X,Y, maxDist):
"""
X and Y are n x d and m x d matrices, where n and m are the amount of observations, and d is the dimensionality of the observations
"""
X_ele, X_dim = X.shape
Y_ele, Y_dim = Y.shape
assert X_dim == Y_dim, "The two provided matrices not have observations of the same dimensionality"
mat = np.zeros((X_ele, Y_ele))
for row, posX in enumerate(X):
for col, posY in enumerate(Y):
mat[row, col] = np.linalg.norm(posX-posY)
mat[mat > maxDist] = np.nan
return mat
def calcMetrics(self, acc):
"""
Calculates all relevant metrics for the dataset
Input:
acc: MOT Accumulator object
Output:
summary: Pandas dataframe containing all the metrics
"""
mh = mm.metrics.create()
summary = mh.compute_many([acc],
metrics=mm.metrics.motchallenge_metrics
+["num_objects"]
+["num_predictions"]
+["num_frames"]
+["num_detections"]
+["num_fragmentations"]
+["idfp"]
+["idfn"]
+["idtp"])
summary["motal"] = self.MOTAL(summary)
mtbf_ssum, mtbf_slen, mtbf_nslen = self.MTBF(acc.mot_events)
summary["mtbf_ssum"] = mtbf_ssum # Sum of sequences
summary["mtbf_slen"] = mtbf_slen # Number of sequences
summary["mtbf_nslen"] = mtbf_nslen # Number of null sequences
return summary
def MOTAL(self, metrics):
"""
Calculates the MOTA variation where the amount of id switches is
attenuated by using the log10 function
"""
return 1 - (metrics["num_misses"] + metrics["num_false_positives"] + np.log10(metrics["num_switches"]+1)) / metrics["num_objects"]
def MTBF(self, events):
"""
Calculates the Mean Time Between Failures (MTBF) metric from the motmetric events dataframe
Input:
events: Pandas Dataframe structured as per the motmetrics package
Output:
MTBF_s: The Standard MTBF metric proposed in the original paper
MTBF_m: The monotonic MTBF metric proposed in the original paper
"""
unique_gt_ids = events.OId.unique()
seqs = []
null_seqs = []
for gt_id in unique_gt_ids:
gt_events = events[events.OId == gt_id]
counter = 0
null_counter = 0
for _, row in gt_events.iterrows():
if row["Type"] == "MATCH":
counter += 1
elif row["Type"] == "SWITCH":
seqs.append(counter)
counter = 1
else:
seqs.append(counter)
counter = 0
null_counter = 1
if counter > 0:
if null_counter > 0:
null_seqs.append(null_counter)
null_counter = 0
if counter > 0:
seqs.append(counter)
if null_counter > 0:
null_seqs.append(null_counter)
seqs = np.asarray(seqs)
seqs = seqs[seqs>0]
mtbf_ssum = sum(seqs)
mtbf_slen = len(seqs)
mtbf_nslen = len(null_seqs)
if mtbf_ssum == 0:
return 0, 0, 0
else:
return mtbf_ssum, mtbf_slen, mtbf_nslen
| [
"motmetrics.MOTAccumulator",
"numpy.log10",
"numpy.asarray",
"numpy.zeros",
"motmetrics.metrics.create",
"numpy.linalg.norm",
"math.isnan"
] | [((8546, 8578), 'motmetrics.MOTAccumulator', 'mm.MOTAccumulator', ([], {'auto_id': '(False)'}), '(auto_id=False)\n', (8563, 8578), True, 'import motmetrics as mm\n'), ((13173, 13197), 'numpy.zeros', 'np.zeros', (['(X_ele, Y_ele)'], {}), '((X_ele, Y_ele))\n', (13181, 13197), True, 'import numpy as np\n'), ((13660, 13679), 'motmetrics.metrics.create', 'mm.metrics.create', ([], {}), '()\n', (13677, 13679), True, 'import motmetrics as mm\n'), ((16198, 16214), 'numpy.asarray', 'np.asarray', (['seqs'], {}), '(seqs)\n', (16208, 16214), True, 'import numpy as np\n'), ((13313, 13340), 'numpy.linalg.norm', 'np.linalg.norm', (['(posX - posY)'], {}), '(posX - posY)\n', (13327, 13340), True, 'import numpy as np\n'), ((12066, 12079), 'math.isnan', 'math.isnan', (['i'], {}), '(i)\n', (12076, 12079), False, 'import math\n'), ((14782, 14819), 'numpy.log10', 'np.log10', (["(metrics['num_switches'] + 1)"], {}), "(metrics['num_switches'] + 1)\n", (14790, 14819), True, 'import numpy as np\n'), ((6228, 6259), 'numpy.log10', 'np.log10', (['(self.num_switches + 1)'], {}), '(self.num_switches + 1)\n', (6236, 6259), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import unittest
import sys
import os
import multiprocessing
import tempfile
import time
import random
import numpy as np
if __name__ == '__main__':
# add ../.. directory to python path such that we can import the main
# module
HERE = os.path.dirname(os.path.realpath(__file__))
PROJ_PATH = os.path.abspath(os.path.join(HERE, '../..'))
sys.path.insert(0, PROJ_PATH)
from h5pyswmr import File
class TestHDF5(unittest.TestCase):
def setUp(self):
self.shape = (8000, 1500)
def test_parallel(self):
"""
Test parallel read/write access
"""
tmpdir = tempfile.gettempdir()
NO_WORKERS = 40
filename = os.path.join(tmpdir, 'paralleltest827348723.h5')
f = File(filename, 'w')
# create some datasets (to test reading)
for i in range(NO_WORKERS):
f.create_dataset(name='/testgrp/dataset{}'.format(i),
data=np.random.random(self.shape)
.astype(np.float32))
def worker_read(i, hdf5file):
""" reading worker """
time.sleep(random.random())
print("worker {0} is reading...".format(i))
data = hdf5file['/testgrp/dataset{}'.format(i)][:]
print("worker {0} is done reading.".format(i))
self.assertEqual(data.shape, self.shape)
def worker_write(i, hdf5file):
""" writing worker """
# do some reading
# print(hdf5file.keys())
# do some writing
time.sleep(random.random())
data = np.empty((4, self.shape[0], self.shape[1]), dtype=np.int32)
data[:] = i*100
# modify existing dataset
dst = hdf5file['/testgrp/dataset{}'.format(i)]
print("worker {0} is writing...".format(i))
dst[0:50, ] = i
print("worker {0} done writing.".format(i))
jobs = []
writers = []
print("")
for i in range(NO_WORKERS):
if i % 4 == 0:
p = multiprocessing.Process(target=worker_write, args=(i, f))
writers.append(i)
else:
p = multiprocessing.Process(target=worker_read, args=(i, f))
jobs.append(p)
p.start()
# p.join()
# wait until all processes have terminated
while True:
time.sleep(0.3)
all_terminated = not max((job.is_alive() for job in jobs))
if all_terminated:
break
# then test if data was written correctly
print("Testing if data was written correctly...")
for i in writers:
dst = f['/testgrp/dataset{}'.format(i)]
self.assertTrue(np.all(dst[0:50, ] == i))
def tearDown(self):
pass
def run():
suite = unittest.TestLoader().loadTestsFromTestCase(TestHDF5)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
run()
| [
"numpy.all",
"sys.path.insert",
"numpy.random.random",
"unittest.TextTestRunner",
"multiprocessing.Process",
"os.path.join",
"time.sleep",
"os.path.realpath",
"tempfile.gettempdir",
"numpy.empty",
"random.random",
"h5pyswmr.File",
"unittest.TestLoader"
] | [((382, 411), 'sys.path.insert', 'sys.path.insert', (['(0)', 'PROJ_PATH'], {}), '(0, PROJ_PATH)\n', (397, 411), False, 'import sys\n'), ((289, 315), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (305, 315), False, 'import os\n'), ((349, 376), 'os.path.join', 'os.path.join', (['HERE', '"""../.."""'], {}), "(HERE, '../..')\n", (361, 376), False, 'import os\n'), ((644, 665), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (663, 665), False, 'import tempfile\n'), ((710, 758), 'os.path.join', 'os.path.join', (['tmpdir', '"""paralleltest827348723.h5"""'], {}), "(tmpdir, 'paralleltest827348723.h5')\n", (722, 758), False, 'import os\n'), ((771, 790), 'h5pyswmr.File', 'File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (775, 790), False, 'from h5pyswmr import File\n'), ((1631, 1690), 'numpy.empty', 'np.empty', (['(4, self.shape[0], self.shape[1])'], {'dtype': 'np.int32'}), '((4, self.shape[0], self.shape[1]), dtype=np.int32)\n', (1639, 1690), True, 'import numpy as np\n'), ((2440, 2455), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2450, 2455), False, 'import time\n'), ((2884, 2905), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2903, 2905), False, 'import unittest\n'), ((2942, 2978), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2965, 2978), False, 'import unittest\n'), ((1152, 1167), 'random.random', 'random.random', ([], {}), '()\n', (1165, 1167), False, 'import random\n'), ((1595, 1610), 'random.random', 'random.random', ([], {}), '()\n', (1608, 1610), False, 'import random\n'), ((2097, 2154), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker_write', 'args': '(i, f)'}), '(target=worker_write, args=(i, f))\n', (2120, 2154), False, 'import multiprocessing\n'), ((2227, 2283), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker_read', 'args': '(i, f)'}), '(target=worker_read, 
args=(i, f))\n', (2250, 2283), False, 'import multiprocessing\n'), ((2795, 2818), 'numpy.all', 'np.all', (['(dst[0:50,] == i)'], {}), '(dst[0:50,] == i)\n', (2801, 2818), True, 'import numpy as np\n'), ((976, 1004), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (992, 1004), True, 'import numpy as np\n')] |
from scipy.misc import comb
import math
def ensemble_error(n_classifier, error):
k_start = int(math.ceil(n_classifier / 2.))
probs = [comb(n_classifier, k) * error ** k * (1 - error) ** (n_classifier - k)
for k in range(k_start, n_classifier + 1)]
return sum(probs)
import numpy as np
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=error)
for error in error_range]
import matplotlib.pyplot as plt
plt.plot(error_range,
ens_errors,
label='Ensemble error',
linewidth=2)
plt.plot(error_range,
error_range,
linestyle='--',
label='Base error',
linewidth=2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc='upper left')
plt.grid(alpha=0.5)
plt.show()
| [
"matplotlib.pyplot.grid",
"math.ceil",
"scipy.misc.comb",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((329, 355), 'numpy.arange', 'np.arange', (['(0.0)', '(1.01)', '(0.01)'], {}), '(0.0, 1.01, 0.01)\n', (338, 355), True, 'import numpy as np\n'), ((489, 559), 'matplotlib.pyplot.plot', 'plt.plot', (['error_range', 'ens_errors'], {'label': '"""Ensemble error"""', 'linewidth': '(2)'}), "(error_range, ens_errors, label='Ensemble error', linewidth=2)\n", (497, 559), True, 'import matplotlib.pyplot as plt\n'), ((588, 675), 'matplotlib.pyplot.plot', 'plt.plot', (['error_range', 'error_range'], {'linestyle': '"""--"""', 'label': '"""Base error"""', 'linewidth': '(2)'}), "(error_range, error_range, linestyle='--', label='Base error',\n linewidth=2)\n", (596, 675), True, 'import matplotlib.pyplot as plt\n'), ((709, 733), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Base error"""'], {}), "('Base error')\n", (719, 733), True, 'import matplotlib.pyplot as plt\n'), ((734, 767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Base/Ensemble error"""'], {}), "('Base/Ensemble error')\n", (744, 767), True, 'import matplotlib.pyplot as plt\n'), ((768, 796), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (778, 796), True, 'import matplotlib.pyplot as plt\n'), ((797, 816), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (805, 816), True, 'import matplotlib.pyplot as plt\n'), ((817, 827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (825, 827), True, 'import matplotlib.pyplot as plt\n'), ((101, 130), 'math.ceil', 'math.ceil', (['(n_classifier / 2.0)'], {}), '(n_classifier / 2.0)\n', (110, 130), False, 'import math\n'), ((144, 165), 'scipy.misc.comb', 'comb', (['n_classifier', 'k'], {}), '(n_classifier, k)\n', (148, 165), False, 'from scipy.misc import comb\n')] |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from nnunet.network_architecture.custom_modules.conv_block_for_Double_Dense import DenseDownLayer_2, DenseDownBlock_2, DenseUpBlock, DenseUpLayer , DenseDownLayer_first, DenseDownBlock_first
from nnunet.network_architecture.generic_UNet import Upsample
from nnunet.network_architecture.generic_modular_UNet import PlainConvUNetDecoder, get_default_network_config
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from torch import nn
from torch.optim import SGD
from torch.backends import cudnn
from monai.networks.blocks.dynunet_block import UnetOutBlock
from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock
from monai.networks.nets import ViT
from typing import Tuple, Union
class UNETR_Encoder_Decoder(nn.Module):
    """
    UNETR-style network: a Vision Transformer (ViT) encoder whose intermediate
    hidden states are decoded by a cascade of convolutional up-sampling blocks
    (MONAI's UnetrBasicBlock / UnetrPrUpBlock / UnetrUpBlock), with three
    segmentation heads used for deep supervision.
    """

    def __init__(self, in_channels, output_channels, props,
                 img_size: Tuple[int, int, int],
                 feature_size = 16, hidden_size = 768, mlp_dim = 3072, num_heads = 12,
                 pos_embed = 'perceptron', norm_name:Union[Tuple, str] = "instance",
                 conv_block:bool = False,
                 res_block:bool = True,
                 dropout_rate:float = 0.0,
                 default_return_skips=True,
                 deep_supervision=False):
        """
        :param in_channels: number of input image channels (e.g. 1)
        :param output_channels: number of output classes (e.g. 2 for binary 0/1 segmentation)
        :param props: network property dict (stored for interface compatibility with the nnU-Net builders)
        :param img_size: input patch size, e.g. (96, 96, 32); each dimension is divided by the
            fixed ViT patch size (16, 16, 16) to obtain the token grid
        :param feature_size: dimension of network feature size
        :param hidden_size: dimension of the ViT hidden layer
        :param mlp_dim: dimension of the ViT feed-forward layer
        :param num_heads: number of attention heads
        :param pos_embed: position embedding layer type ('conv' or 'perceptron')
        :param norm_name: feature normalization type and arguments
        :param conv_block: bool argument to determine if a convolutional block is used
            in the up-projection encoder blocks
        :param res_block: bool argument to determine if residual blocks are used
        :param dropout_rate: fraction of the input units to drop
        :param default_return_skips: stored for interface compatibility; not used by forward()
        :param deep_supervision: stored for interface compatibility; forward() always
            returns the three deep-supervision outputs
        """
        super(UNETR_Encoder_Decoder, self).__init__()
        self.default_return_skips = default_return_skips
        self.props = props
        self.deep_supervision = deep_supervision
        # All encoder/decoder sub-modules are also collected here and wrapped in a
        # single nn.ModuleList once construction is complete (fix: the original
        # wrapped the list twice — once after the encoders, once after the
        # decoders — which is redundant; a single wrap at the end is equivalent).
        self.stages = []

        if not (0 <= dropout_rate <= 1):
            raise AssertionError("dropout_rate should be between 0 and 1.")

        if hidden_size % num_heads != 0:
            raise AssertionError("hidden size should be divisible by num_heads.")

        if pos_embed not in ["conv", "perceptron"]:
            raise KeyError(f"Position embedding layer of type {pos_embed} is not supported.")

        self.num_layers = 12
        self.patch_size = (16, 16, 16)
        # Spatial size of the token grid after patch embedding.
        self.feat_size = (
            img_size[0] // self.patch_size[0],
            img_size[1] // self.patch_size[1],
            img_size[2] // self.patch_size[2],
        )
        self.hidden_size = hidden_size
        self.classification = False
        self.vit = ViT(
            in_channels=in_channels,
            img_size=img_size,
            patch_size=self.patch_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_layers=self.num_layers,
            num_heads=num_heads,
            pos_embed=pos_embed,
            classification=self.classification,
            dropout_rate=dropout_rate,
        )
        # encoder1 works on the raw input; encoder2-4 up-project ViT hidden states
        # taken from progressively deeper transformer layers (see forward()).
        self.encoder1 = UnetrBasicBlock(
            spatial_dims=3,
            in_channels=in_channels,
            out_channels=feature_size,
            kernel_size=3,
            stride=1,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.encoder2 = UnetrPrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 2,
            num_layer=2,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder3 = UnetrPrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 4,
            num_layer=1,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder4 = UnetrPrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            num_layer=0,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.stages.append(self.vit)
        self.stages.append(self.encoder1)
        self.stages.append(self.encoder2)
        self.stages.append(self.encoder3)
        self.stages.append(self.encoder4)
        # Decoder path: each block upsamples and fuses with the matching encoder skip.
        self.decoder5 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder4 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=feature_size * 8,
            out_channels=feature_size * 4,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder3 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=feature_size * 4,
            out_channels=feature_size * 2,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder2 = UnetrUpBlock(
            spatial_dims=3,
            in_channels=feature_size * 2,
            out_channels=feature_size,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        # Segmentation heads for deep supervision at three decoder resolutions.
        self.out_1 = UnetOutBlock(spatial_dims=3, in_channels=feature_size, out_channels=output_channels)
        self.out_2 = UnetOutBlock(spatial_dims=3, in_channels=feature_size*2, out_channels=output_channels)
        self.out_3 = UnetOutBlock(spatial_dims=3, in_channels=feature_size*4, out_channels=output_channels)
        self.stages.append(self.decoder5)
        self.stages.append(self.decoder4)
        self.stages.append(self.decoder3)
        self.stages.append(self.decoder2)
        self.stages = nn.ModuleList(self.stages)

        self.deep_supervision_outputs = nn.ModuleList([self.out_1, self.out_2, self.out_3])

    def proj_feat(self, x, hidden_size, feat_size):
        """Reshape a flat ViT token sequence back into a 5D feature map.

        :param x: token tensor of shape (batch, n_tokens, hidden_size)
        :param hidden_size: channel dimension of the tokens
        :param feat_size: 3-tuple spatial size of the token grid
        :return: tensor of shape (batch, hidden_size, *feat_size)
        """
        x = x.view(x.size(0), feat_size[0], feat_size[1], feat_size[2], hidden_size)
        x = x.permute(0, 4, 1, 2, 3).contiguous()
        return x

    def load_from(self, weights):
        """Copy pre-trained ViT weights into this network's transformer encoder.

        :param weights: checkpoint dict with a 'state_dict' entry whose keys follow
            the 'module.transformer.*' naming scheme.
        """
        with torch.no_grad():
            # debug: list the available checkpoint keys
            for i in weights['state_dict']:
                print(i)
            # copy weights from patch embedding
            self.vit.patch_embedding.position_embeddings.copy_(
                weights['state_dict']['module.transformer.patch_embedding.position_embeddings_3d'])
            self.vit.patch_embedding.cls_token.copy_(
                weights['state_dict']['module.transformer.patch_embedding.cls_token'])
            self.vit.patch_embedding.patch_embeddings[1].weight.copy_(
                weights['state_dict']['module.transformer.patch_embedding.patch_embeddings.1.weight'])
            self.vit.patch_embedding.patch_embeddings[1].bias.copy_(
                weights['state_dict']['module.transformer.patch_embedding.patch_embeddings.1.bias'])

            # copy weights from encoding blocks (default: num of blocks: 12)
            for bname, block in self.vit.blocks.named_children():
                print(block)
                block.loadFrom(weights, n_block=bname)
            # last norm layer of transformer
            self.vit.norm.weight.copy_(weights['state_dict']['module.transformer.norm.weight'])
            self.vit.norm.bias.copy_(weights['state_dict']['module.transformer.norm.bias'])

    def forward(self, x_in):
        """Full UNETR forward pass.

        :param x_in: input image batch (5D tensor: batch, channel, spatial dims)
        :return: list [out_1, out_2, out_3] of segmentation logits at full,
            half and quarter decoder resolution (deep supervision), highest
            resolution first.
        """
        out_list = []
        # ViT returns the final tokens plus the hidden states of every layer;
        # layers 3, 6 and 9 feed the skip connections.
        x, hidden_states_out = self.vit(x_in)
        enc1 = self.encoder1(x_in)
        x2 = hidden_states_out[3]
        enc2 = self.encoder2(self.proj_feat(x2, self.hidden_size, self.feat_size))
        x3 = hidden_states_out[6]
        enc3 = self.encoder3(self.proj_feat(x3, self.hidden_size, self.feat_size))
        x4 = hidden_states_out[9]
        enc4 = self.encoder4(self.proj_feat(x4, self.hidden_size, self.feat_size))
        dec4 = self.proj_feat(x, self.hidden_size, self.feat_size)
        dec3 = self.decoder5(dec4, enc4)
        dec2 = self.decoder4(dec3, enc3)
        out_3 = self.out_3(dec2)
        dec1 = self.decoder3(dec2, enc2)
        out_2 = self.out_2(dec1)
        out = self.decoder2(dec1, enc1)
        out_1 = self.out_1(out)
        out_list.append(out_1)
        out_list.append(out_2)
        out_list.append(out_3)

        return out_list

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder,
                                        feat_map_mul_on_downscale, batch_size):
        """Rough proxy for the VRAM consumption of a conv-encoder of this size.

        NOTE(review): this formula mirrors the plain conv U-Net estimate and
        does not model the ViT encoder itself — presumably kept so the
        experiment planner can reuse its existing budgeting logic; confirm
        against the planner before relying on it.
        """
        npool = len(pool_op_kernel_sizes) - 1

        current_shape = np.array(patch_size)

        tmp = (num_conv_per_stage_encoder[0] * 2 + 1) * np.prod(current_shape) * base_num_features \
              + num_modalities * np.prod(current_shape)

        num_feat = base_num_features

        for p in range(1, npool + 1):
            current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
            num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
            num_convs = num_conv_per_stage_encoder[p] * 2 + 1  # + 1 for conv in skip in first block
            print(p, num_feat, num_convs, current_shape)
            tmp += num_convs * np.prod(current_shape) * num_feat
        return tmp * batch_size
class UNETR(SegmentationNetwork):
    """
    nnU-Net wrapper around UNETR_Encoder_Decoder.

    Residual Encoder, Plain conv decoder
    """
    use_this_for_2D_configuration = 1244233721.0  # 1167982592.0
    use_this_for_3D_configuration = 1230348801.0
    default_blocks_per_stage_encoder = (1, 1, 1, 1, 1, 1, 1, 1)
    default_blocks_per_stage_decoder = (1, 1, 1, 1, 1, 1, 1, 1)
    default_min_batch_size = 4  # this is what works with the numbers above

    def __init__(self, in_channels, num_classes, props,
                 img_size: Tuple[int, int, int],
                 feature_size = 16, hidden_size = 768, mlp_dim = 3072, num_heads = 12,
                 pos_embed = 'perceptron', norm_name:Union[Tuple, str] = "instance",
                 conv_block:bool = False,
                 res_block:bool = True,
                 dropout_rate:float = 0.0,
                 default_return_skips=True):
        """Record the metadata SegmentationNetwork expects (conv_op, num_classes)
        and delegate the whole architecture construction to UNETR_Encoder_Decoder."""
        super().__init__()
        self.conv_op = props['conv_op']
        self.num_classes = num_classes

        # Keyword arguments make the mapping onto the encoder/decoder
        # signature explicit (num_classes plays the role of output_channels).
        self.encoder_decoder = UNETR_Encoder_Decoder(
            in_channels, num_classes, props, img_size,
            feature_size=feature_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_heads=num_heads,
            pos_embed=pos_embed,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
            dropout_rate=dropout_rate,
            default_return_skips=default_return_skips,
        )

    def forward(self, x):
        """Run the wrapped encoder/decoder; returns its list of deep-supervision outputs."""
        return self.encoder_decoder(x)

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, num_classes, pool_op_kernel_sizes,
                                        num_conv_per_stage_encoder,
                                        num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
        """Delegate the VRAM estimate to UNETR_Encoder_Decoder.

        num_classes and num_conv_per_stage_decoder are accepted only for
        interface compatibility with the nnU-Net planners; the underlying
        estimate does not use them.
        """
        estimate = UNETR_Encoder_Decoder.compute_approx_vram_consumption(
            patch_size, base_num_features, max_num_features,
            num_modalities, pool_op_kernel_sizes,
            num_conv_per_stage_encoder,
            feat_map_mul_on_downscale, batch_size)
        return estimate
def find_3d_configuration():
    """Empirically probe a 3D configuration on the GPU and print the
    approximate VRAM consumption reference number for the planner.

    NOTE(review): this helper looks copied from the FabiansUNet finder.
    The ``UNETR(...)`` call below does not match this file's
    ``UNETR.__init__(in_channels, num_classes, props, img_size, ...)``
    signature, and this UNETR exposes no ``.encoder``/``.decoder``
    attributes (only ``.encoder_decoder``) -- confirm before running.
    """
    # lets compute a reference for 3D
    # we select hyperparameters here so that we get approximately the same patch size as we would get with the
    # regular unet. This is just my choice. You can do whatever you want
    # These default hyperparemeters will then be used by the experiment planner
    # since this is more parameter intensive than the UNet, we will test a configuration that has a lot of parameters
    # herefore we copy the UNet configuration for Task005_Prostate
    cudnn.deterministic = False
    cudnn.benchmark = True
    patch_size = (20, 320, 256)
    max_num_features = 320
    num_modalities = 2
    num_classes = 3
    batch_size = 2
    # now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx
    blocks_per_stage_encoder = UNETR.default_blocks_per_stage_encoder
    blocks_per_stage_decoder = UNETR.default_blocks_per_stage_decoder
    initial_num_features = 32
    # we neeed to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride
    pool_op_kernel_sizes = [[1, 1, 1],
                            [1, 2, 2],
                            [1, 2, 2],
                            [2, 2, 2],
                            [2, 2, 2],
                            [1, 2, 2],
                            [1, 2, 2]]
    conv_op_kernel_sizes = [[1, 3, 3],
                            [1, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3]]
    # NOTE(review): positional arguments here follow the FabiansUNet constructor,
    # not this file's UNETR -- verify against the intended class.
    unet = UNETR(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2,
                 pool_op_kernel_sizes, conv_op_kernel_sizes,
                 get_default_network_config(3, dropout_p=None), num_classes,
                 blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False,
                 max_features=max_num_features).cuda()
    optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
    loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
    dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda()
    dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long()
    # Run a few training iterations to see whether this configuration fits in memory.
    for _ in range(20):
        optimizer.zero_grad()
        skips = unet.encoder(dummy_input)
        print([i.shape for i in skips])
        output = unet.decoder(skips)
        l = loss(output, dummy_gt)
        l.backward()
        optimizer.step()
        if _ == 0:
            torch.cuda.empty_cache()
    # that should do. Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption
    # whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_3D
    print(UNETR.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities,
                                                num_classes, pool_op_kernel_sizes,
                                                blocks_per_stage_encoder[:len(conv_op_kernel_sizes)],
                                                blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size))
    # the output is 1230348800.0 for me
    # I increment that number by 1 to allow this configuration be be chosen
if __name__ == "__main__":
    # No standalone behavior; call find_3d_configuration() manually when needed.
    pass
| [
"numpy.prod",
"nnunet.training.loss_functions.dice_loss.DC_and_CE_loss",
"torch.rand",
"torch.nn.ModuleList",
"monai.networks.nets.ViT",
"monai.networks.blocks.UnetrPrUpBlock",
"monai.networks.blocks.UnetrUpBlock",
"numpy.array",
"torch.no_grad",
"nnunet.network_architecture.generic_modular_UNet.g... | [((16605, 16678), 'nnunet.training.loss_functions.dice_loss.DC_and_CE_loss', 'DC_and_CE_loss', (["{'batch_dice': True, 'smooth': 1e-05, 'do_bg': False}", '{}'], {}), "({'batch_dice': True, 'smooth': 1e-05, 'do_bg': False}, {})\n", (16619, 16678), False, 'from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss\n'), ((4169, 4432), 'monai.networks.nets.ViT', 'ViT', ([], {'in_channels': 'in_channels', 'img_size': 'img_size', 'patch_size': 'self.patch_size', 'hidden_size': 'hidden_size', 'mlp_dim': 'mlp_dim', 'num_layers': 'self.num_layers', 'num_heads': 'num_heads', 'pos_embed': 'pos_embed', 'classification': 'self.classification', 'dropout_rate': 'dropout_rate'}), '(in_channels=in_channels, img_size=img_size, patch_size=self.patch_size,\n hidden_size=hidden_size, mlp_dim=mlp_dim, num_layers=self.num_layers,\n num_heads=num_heads, pos_embed=pos_embed, classification=self.\n classification, dropout_rate=dropout_rate)\n', (4172, 4432), False, 'from monai.networks.nets import ViT\n'), ((4575, 4735), 'monai.networks.blocks.UnetrBasicBlock', 'UnetrBasicBlock', ([], {'spatial_dims': '(3)', 'in_channels': 'in_channels', 'out_channels': 'feature_size', 'kernel_size': '(3)', 'stride': '(1)', 'norm_name': 'norm_name', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=in_channels, out_channels=\n feature_size, kernel_size=3, stride=1, norm_name=norm_name, res_block=\n res_block)\n', (4590, 4735), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((4845, 5071), 'monai.networks.blocks.UnetrPrUpBlock', 'UnetrPrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': 'hidden_size', 'out_channels': '(feature_size * 2)', 'num_layer': '(2)', 'kernel_size': '(3)', 'stride': '(1)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'conv_block': 'conv_block', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=hidden_size, out_channels=\n feature_size * 2, 
num_layer=2, kernel_size=3, stride=1,\n upsample_kernel_size=2, norm_name=norm_name, conv_block=conv_block,\n res_block=res_block)\n', (4859, 5071), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((5214, 5440), 'monai.networks.blocks.UnetrPrUpBlock', 'UnetrPrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': 'hidden_size', 'out_channels': '(feature_size * 4)', 'num_layer': '(1)', 'kernel_size': '(3)', 'stride': '(1)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'conv_block': 'conv_block', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=hidden_size, out_channels=\n feature_size * 4, num_layer=1, kernel_size=3, stride=1,\n upsample_kernel_size=2, norm_name=norm_name, conv_block=conv_block,\n res_block=res_block)\n', (5228, 5440), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((5583, 5809), 'monai.networks.blocks.UnetrPrUpBlock', 'UnetrPrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': 'hidden_size', 'out_channels': '(feature_size * 8)', 'num_layer': '(0)', 'kernel_size': '(3)', 'stride': '(1)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'conv_block': 'conv_block', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=hidden_size, out_channels=\n feature_size * 8, num_layer=0, kernel_size=3, stride=1,\n upsample_kernel_size=2, norm_name=norm_name, conv_block=conv_block,\n res_block=res_block)\n', (5597, 5809), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((6156, 6182), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.stages'], {}), '(self.stages)\n', (6169, 6182), False, 'from torch import nn\n'), ((6209, 6384), 'monai.networks.blocks.UnetrUpBlock', 'UnetrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': 'hidden_size', 'out_channels': '(feature_size * 8)', 'kernel_size': '(3)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'res_block': 'res_block'}), '(spatial_dims=3, 
in_channels=hidden_size, out_channels=\n feature_size * 8, kernel_size=3, upsample_kernel_size=2, norm_name=\n norm_name, res_block=res_block)\n', (6221, 6384), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((6494, 6674), 'monai.networks.blocks.UnetrUpBlock', 'UnetrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': '(feature_size * 8)', 'out_channels': '(feature_size * 4)', 'kernel_size': '(3)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=feature_size * 8, out_channels=\n feature_size * 4, kernel_size=3, upsample_kernel_size=2, norm_name=\n norm_name, res_block=res_block)\n', (6506, 6674), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((6784, 6964), 'monai.networks.blocks.UnetrUpBlock', 'UnetrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': '(feature_size * 4)', 'out_channels': '(feature_size * 2)', 'kernel_size': '(3)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=feature_size * 4, out_channels=\n feature_size * 2, kernel_size=3, upsample_kernel_size=2, norm_name=\n norm_name, res_block=res_block)\n', (6796, 6964), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((7074, 7250), 'monai.networks.blocks.UnetrUpBlock', 'UnetrUpBlock', ([], {'spatial_dims': '(3)', 'in_channels': '(feature_size * 2)', 'out_channels': 'feature_size', 'kernel_size': '(3)', 'upsample_kernel_size': '(2)', 'norm_name': 'norm_name', 'res_block': 'res_block'}), '(spatial_dims=3, in_channels=feature_size * 2, out_channels=\n feature_size, kernel_size=3, upsample_kernel_size=2, norm_name=\n norm_name, res_block=res_block)\n', (7086, 7250), False, 'from monai.networks.blocks import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock\n'), ((7357, 7446), 'monai.networks.blocks.dynunet_block.UnetOutBlock', 'UnetOutBlock', 
([], {'spatial_dims': '(3)', 'in_channels': 'feature_size', 'out_channels': 'output_channels'}), '(spatial_dims=3, in_channels=feature_size, out_channels=\n output_channels)\n', (7369, 7446), False, 'from monai.networks.blocks.dynunet_block import UnetOutBlock\n'), ((7463, 7556), 'monai.networks.blocks.dynunet_block.UnetOutBlock', 'UnetOutBlock', ([], {'spatial_dims': '(3)', 'in_channels': '(feature_size * 2)', 'out_channels': 'output_channels'}), '(spatial_dims=3, in_channels=feature_size * 2, out_channels=\n output_channels)\n', (7475, 7556), False, 'from monai.networks.blocks.dynunet_block import UnetOutBlock\n'), ((7571, 7664), 'monai.networks.blocks.dynunet_block.UnetOutBlock', 'UnetOutBlock', ([], {'spatial_dims': '(3)', 'in_channels': '(feature_size * 4)', 'out_channels': 'output_channels'}), '(spatial_dims=3, in_channels=feature_size * 4, out_channels=\n output_channels)\n', (7583, 7664), False, 'from monai.networks.blocks.dynunet_block import UnetOutBlock\n'), ((7849, 7875), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.stages'], {}), '(self.stages)\n', (7862, 7875), False, 'from torch import nn\n'), ((8131, 8175), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.deep_supervision_outputs'], {}), '(self.deep_supervision_outputs)\n', (8144, 8175), False, 'from torch import nn\n'), ((11151, 11171), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (11159, 11171), True, 'import numpy as np\n'), ((8431, 8446), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8444, 8446), False, 'import torch\n'), ((16697, 16750), 'torch.rand', 'torch.rand', (['(batch_size, num_modalities, *patch_size)'], {}), '((batch_size, num_modalities, *patch_size))\n', (16707, 16750), False, 'import torch\n'), ((17153, 17177), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (17175, 17177), False, 'import torch\n'), ((11307, 11329), 'numpy.prod', 'np.prod', (['current_shape'], {}), '(current_shape)\n', (11314, 11329), True, 'import numpy as 
np\n'), ((11451, 11484), 'numpy.array', 'np.array', (['pool_op_kernel_sizes[p]'], {}), '(pool_op_kernel_sizes[p])\n', (11459, 11484), True, 'import numpy as np\n'), ((16317, 16362), 'nnunet.network_architecture.generic_modular_UNet.get_default_network_config', 'get_default_network_config', (['(3)'], {'dropout_p': 'None'}), '(3, dropout_p=None)\n', (16343, 16362), False, 'from nnunet.network_architecture.generic_modular_UNet import PlainConvUNetDecoder, get_default_network_config\n'), ((11229, 11251), 'numpy.prod', 'np.prod', (['current_shape'], {}), '(current_shape)\n', (11236, 11251), True, 'import numpy as np\n'), ((11757, 11779), 'numpy.prod', 'np.prod', (['current_shape'], {}), '(current_shape)\n', (11764, 11779), True, 'import numpy as np\n'), ((16774, 16814), 'torch.rand', 'torch.rand', (['(batch_size, 1, *patch_size)'], {}), '((batch_size, 1, *patch_size))\n', (16784, 16814), False, 'import torch\n')] |
"""
DANet for image segmentation, implemented in Chainer.
Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
"""
__all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes']
import os
import chainer.functions as F
from chainer import link
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from chainer.variable import Parameter
from chainer.initializers import _get_initializer
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class ScaleBlock(link.Link):
    """
    Learnable scalar multiplier.

    Wraps a single trainable parameter ``alpha`` and returns the input
    scaled by its current value.

    Parameters:
    ----------
    initial_alpha : obj, default 0
        Initializer for the scale parameter.
    """
    def __init__(self, initial_alpha=0):
        super(ScaleBlock, self).__init__()
        with self.init_scope():
            init = _get_initializer(initial_alpha)
            self.alpha = Parameter(initializer=init, shape=(1,), name="alpha")

    def __call__(self, x):
        # Multiply the whole input array by the scalar weight.
        return self.alpha.data * x
class PosAttBlock(Chain):
    """
    Position attention block from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.
    Captures long-range contextual information by attending over all spatial
    positions.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 8
        Squeeze reduction value.
    """
    def __init__(self, channels, reduction=8):
        super(PosAttBlock, self).__init__()
        inner_channels = channels // reduction
        with self.init_scope():
            self.query_conv = conv1x1(in_channels=channels, out_channels=inner_channels, use_bias=True)
            self.key_conv = conv1x1(in_channels=channels, out_channels=inner_channels, use_bias=True)
            self.value_conv = conv1x1(in_channels=channels, out_channels=channels, use_bias=True)
            self.scale = ScaleBlock()

    def __call__(self, x):
        batch, _, height, width = x.shape
        flat_shape = (batch, -1, height * width)
        q = self.query_conv(x).reshape(flat_shape)
        k = self.key_conv(x).reshape(flat_shape)
        v = self.value_conv(x).reshape(flat_shape)
        # Pairwise affinity between all spatial positions, normalized per row.
        energy = F.batch_matmul(q, k, transa=True)
        attn = F.softmax(energy, axis=-1)
        out = F.batch_matmul(v, attn, transb=True)
        out = out.reshape((batch, -1, height, width))
        # Residual connection with a learnable scale.
        return self.scale(out) + x
class ChaAttBlock(Chain):
    """
    Channel attention block from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.
    Explicitly models interdependencies between feature channels.
    """
    def __init__(self):
        super(ChaAttBlock, self).__init__()
        with self.init_scope():
            self.scale = ScaleBlock()

    def __call__(self, x):
        batch, _, height, width = x.shape
        flat_shape = (batch, -1, height * width)
        q = x.reshape(flat_shape)
        k = x.reshape(flat_shape)
        v = x.reshape(flat_shape)
        sim = F.batch_matmul(q, k, transb=True)
        sim_max = F.broadcast_to(F.max(sim, axis=-1, keepdims=True), shape=sim.shape)
        # As in the reference implementation, softmax is applied to (max - similarity).
        attn = F.softmax(sim_max - sim, axis=-1)
        out = F.batch_matmul(attn, v)
        out = out.reshape((batch, -1, height, width))
        # Residual connection with a learnable scale.
        return self.scale(out) + x
class DANetHeadBranch(Chain):
    """
    Single DANet head branch: conv -> attention -> conv, plus a projection
    to class logits.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pose_att : bool, default True
        Whether to use position attention instead of channel one.
    """
    def __init__(self, in_channels, out_channels, pose_att=True):
        super(DANetHeadBranch, self).__init__()
        mid_channels = in_channels // 4
        dropout_rate = 0.1
        with self.init_scope():
            self.conv1 = conv3x3_block(in_channels=in_channels, out_channels=mid_channels)
            # Position attention for one branch, channel attention for the other.
            self.att = PosAttBlock(mid_channels) if pose_att else ChaAttBlock()
            self.conv2 = conv3x3_block(in_channels=mid_channels, out_channels=mid_channels)
            self.conv3 = conv1x1(in_channels=mid_channels, out_channels=out_channels, use_bias=True)
            self.dropout = partial(F.dropout, ratio=dropout_rate)

    def __call__(self, x):
        w = self.conv1(x)
        w = self.att(w)
        # y (pre-projection features) is also returned so the head can fuse branches.
        y = self.conv2(w)
        logits = self.dropout(self.conv3(y))
        return logits, y
class DANetHead(Chain):
    """
    DANet head: fuses a position-attention branch and a channel-attention
    branch and projects the sum to class logits.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self, in_channels, out_channels):
        super(DANetHead, self).__init__()
        mid_channels = in_channels // 4
        dropout_rate = 0.1
        with self.init_scope():
            self.branch_pa = DANetHeadBranch(in_channels=in_channels,
                                             out_channels=out_channels,
                                             pose_att=True)
            self.branch_ca = DANetHeadBranch(in_channels=in_channels,
                                             out_channels=out_channels,
                                             pose_att=False)
            self.conv = conv1x1(in_channels=mid_channels, out_channels=out_channels, use_bias=True)
            self.dropout = partial(F.dropout, ratio=dropout_rate)

    def __call__(self, x):
        pa_x, pa_y = self.branch_pa(x)
        ca_x, ca_y = self.branch_ca(x)
        # Fuse the two branches' feature maps, then project to logits.
        fused = pa_y + ca_y
        out = self.dropout(self.conv(fused))
        # pa_x / ca_x are the per-branch logits, used as auxiliary outputs.
        return out, pa_x, ca_x
class DANet(Chain):
    """
    DANet model from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels from the feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=19):
        super(DANet, self).__init__()
        assert (in_channels > 0)
        # Spatial dims must be divisible by 8 (backbone downsampling factor).
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.init_scope():
            self.backbone = backbone
            self.head = DANetHead(in_channels=backbone_out_channels,
                                  out_channels=classes)

    def __call__(self, x):
        # For variable-size inputs, resize logits back to the actual input resolution.
        out_size = self.in_size if self.fixed_size else x.shape[2:]
        x, _ = self.backbone(x)
        x, y, z = self.head(x)
        x = F.resize_images(x, output_shape=out_size)
        if not self.aux:
            return x
        # Auxiliary outputs: logits of the two individual attention branches.
        y = F.resize_images(y, output_shape=out_size)
        z = F.resize_images(z, output_shape=out_size)
        return x, y, z
def get_danet(backbone,
              classes,
              aux=False,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".chainer", "models"),
              **kwargs):
    """
    Create a DANet model with specific parameters, optionally loading
    pretrained weights.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = DANet(backbone=backbone, classes=classes, aux=aux, **kwargs)
    if pretrained:
        # None and "" are both rejected: a usable model name is required.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(model_name=model_name,
                                      local_model_store_dir_path=root)
        load_npz(file=weights_path, obj=net)
    return net
def danet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DANet on a ResNet(D)-50b backbone for Cityscapes, from 'Dual Attention
    Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    # The classification pooling layer is not needed for dense prediction.
    del features.final_pool
    return get_danet(backbone=features, classes=classes, aux=aux,
                     model_name="danet_resnetd50b_cityscapes", **kwargs)
def danet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DANet on a ResNet(D)-101b backbone for Cityscapes, from 'Dual Attention
    Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    # The classification pooling layer is not needed for dense prediction.
    del features.final_pool
    return get_danet(backbone=features, classes=classes, aux=aux,
                     model_name="danet_resnetd101b_cityscapes", **kwargs)
def _test():
    """Smoke-test the model builders: parameter counts and output shapes."""
    import numpy as np
    import chainer

    chainer.global_config.train = False

    in_size = (480, 480)
    aux = False
    pretrained = False

    for model in (danet_resnetd50b_cityscapes, danet_resnetd101b_cityscapes):
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts for the two backbones.
        assert (model != danet_resnetd50b_cityscapes or weight_count == 47586427)
        assert (model != danet_resnetd101b_cityscapes or weight_count == 66578555)

        batch = 2
        classes = 19
        x = np.zeros((batch, 3, in_size[0], in_size[1]), np.float32)
        ys = net(x)
        # With aux outputs enabled the network returns a tuple; take the main map.
        y = ys[0] if aux else ys
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
if __name__ == "__main__":
    # Run the builder smoke tests when executed as a script.
    _test()
| [
"chainer.functions.max",
"chainer.functions.batch_matmul",
"os.path.join",
"chainer.functions.softmax",
"numpy.zeros",
"functools.partial",
"chainer.functions.resize_images",
"chainer.initializers._get_initializer",
"chainer.variable.Parameter"
] | [((8388, 8427), 'os.path.join', 'os.path.join', (['"""~"""', '""".chainer"""', '"""models"""'], {}), "('~', '.chainer', 'models')\n", (8400, 8427), False, 'import os\n'), ((2543, 2592), 'chainer.functions.batch_matmul', 'F.batch_matmul', (['proj_query', 'proj_key'], {'transa': '(True)'}), '(proj_query, proj_key, transa=True)\n', (2557, 2592), True, 'import chainer.functions as F\n'), ((2605, 2631), 'chainer.functions.softmax', 'F.softmax', (['energy'], {'axis': '(-1)'}), '(energy, axis=-1)\n', (2614, 2631), True, 'import chainer.functions as F\n'), ((2645, 2687), 'chainer.functions.batch_matmul', 'F.batch_matmul', (['proj_value', 'w'], {'transb': '(True)'}), '(proj_value, w, transb=True)\n', (2659, 2687), True, 'import chainer.functions as F\n'), ((3417, 3466), 'chainer.functions.batch_matmul', 'F.batch_matmul', (['proj_query', 'proj_key'], {'transb': '(True)'}), '(proj_query, proj_key, transb=True)\n', (3431, 3466), True, 'import chainer.functions as F\n'), ((3583, 3613), 'chainer.functions.softmax', 'F.softmax', (['energy_new'], {'axis': '(-1)'}), '(energy_new, axis=-1)\n', (3592, 3613), True, 'import chainer.functions as F\n'), ((3627, 3656), 'chainer.functions.batch_matmul', 'F.batch_matmul', (['w', 'proj_value'], {}), '(w, proj_value)\n', (3641, 3656), True, 'import chainer.functions as F\n'), ((7994, 8034), 'chainer.functions.resize_images', 'F.resize_images', (['x'], {'output_shape': 'in_size'}), '(x, output_shape=in_size)\n', (8009, 8034), True, 'import chainer.functions as F\n'), ((12250, 12306), 'numpy.zeros', 'np.zeros', (['(batch, 3, in_size[0], in_size[1])', 'np.float32'], {}), '((batch, 3, in_size[0], in_size[1]), np.float32)\n', (12258, 12306), True, 'import numpy as np\n'), ((934, 965), 'chainer.initializers._get_initializer', '_get_initializer', (['initial_alpha'], {}), '(initial_alpha)\n', (950, 965), False, 'from chainer.initializers import _get_initializer\n'), ((991, 1057), 'chainer.variable.Parameter', 'Parameter', ([], {'initializer': 
'alpha_initializer', 'shape': '(1,)', 'name': '"""alpha"""'}), "(initializer=alpha_initializer, shape=(1,), name='alpha')\n", (1000, 1057), False, 'from chainer.variable import Parameter\n'), ((4899, 4937), 'functools.partial', 'partial', (['F.dropout'], {'ratio': 'dropout_rate'}), '(F.dropout, ratio=dropout_rate)\n', (4906, 4937), False, 'from functools import partial\n'), ((6085, 6123), 'functools.partial', 'partial', (['F.dropout'], {'ratio': 'dropout_rate'}), '(F.dropout, ratio=dropout_rate)\n', (6092, 6123), False, 'from functools import partial\n'), ((8072, 8112), 'chainer.functions.resize_images', 'F.resize_images', (['y'], {'output_shape': 'in_size'}), '(y, output_shape=in_size)\n', (8087, 8112), True, 'import chainer.functions as F\n'), ((8129, 8169), 'chainer.functions.resize_images', 'F.resize_images', (['z'], {'output_shape': 'in_size'}), '(z, output_shape=in_size)\n', (8144, 8169), True, 'import chainer.functions as F\n'), ((3503, 3540), 'chainer.functions.max', 'F.max', (['energy'], {'axis': '(-1)', 'keepdims': '(True)'}), '(energy, axis=-1, keepdims=True)\n', (3508, 3540), True, 'import chainer.functions as F\n')] |
import numpy as np
import cv2

# K & D: fisheye calibration (intrinsics and distortion coefficients)
# for the wide-angle camera used, at 1920x1080.
DIM = (1920, 1080)
K = np.array([[1276.399158532128, 0.0, 930.054799272954],
              [0.0, 1274.1510638009997, 510.7404213142207],
              [0.0, 0.0, 1.0]])
D = np.array([[0.10664484858106192], [-2.4113027405249046],
              [12.185556649445054], [-20.93957191606188]])

cap = cv2.VideoCapture(0)
cap.set(3, 1920)  # frame width
cap.set(4, 1080)  # frame height

# The rectification maps depend only on the fixed calibration (K, D, DIM),
# so compute them once instead of on every frame (was recomputed per frame).
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera unavailable or read failure -- stop instead of crashing below.
        break
    undistorted_img = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_CONSTANT)
    # NOTE(review): the undistorted image is computed but the raw frame is
    # displayed and saved below -- confirm whether 'undistorted_img' was
    # intended for display/capture.
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.imwrite('test.jpg', frame)
        break

cap.release()
cv2.destroyAllWindows()
| [
"cv2.imwrite",
"numpy.eye",
"cv2.remap",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.waitKey"
] | [((86, 208), 'numpy.array', 'np.array', (['[[1276.399158532128, 0.0, 930.054799272954], [0.0, 1274.1510638009997, \n 510.7404213142207], [0.0, 0.0, 1.0]]'], {}), '([[1276.399158532128, 0.0, 930.054799272954], [0.0, \n 1274.1510638009997, 510.7404213142207], [0.0, 0.0, 1.0]])\n', (94, 208), True, 'import numpy as np\n'), ((206, 311), 'numpy.array', 'np.array', (['[[0.10664484858106192], [-2.4113027405249046], [12.185556649445054], [-\n 20.93957191606188]]'], {}), '([[0.10664484858106192], [-2.4113027405249046], [12.185556649445054\n ], [-20.93957191606188]])\n', (214, 311), True, 'import numpy as np\n'), ((314, 333), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (330, 333), False, 'import cv2\n'), ((807, 830), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (828, 830), False, 'import cv2\n'), ((575, 672), 'cv2.remap', 'cv2.remap', (['frame', 'map1', 'map2'], {'interpolation': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_CONSTANT'}), '(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2\n .BORDER_CONSTANT)\n', (584, 672), False, 'import cv2\n'), ((672, 698), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (682, 698), False, 'import cv2\n'), ((520, 529), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (526, 529), True, 'import numpy as np\n'), ((748, 778), 'cv2.imwrite', 'cv2.imwrite', (['"""test.jpg"""', 'frame'], {}), "('test.jpg', frame)\n", (759, 778), False, 'import cv2\n'), ((705, 719), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (716, 719), False, 'import cv2\n')] |
from time import time
import math, os
from utils import *
import scanpy as sc
from sklearn import metrics
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import collections
import h5py
from preprocess import read_dataset, normalize
from scipy import stats, spatial
from DSSC import Spatialmodel
import sys
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='train',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_file', default='./osmFISH_SScortex_mouse.H5')
parser.add_argument('--n_pairwise', default=100000, type=int)
parser.add_argument('--weight_ml', default = 1., type = float)
parser.add_argument('--dir_name', default = 'results_cycif')
parser.add_argument('--n_clusters', default=1, type=int)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--gamma', default=1., type=float,
help='coefficient of clustering loss')
parser.add_argument('--fi', default=0., type=float, help='coefficient of KL loss')
parser.add_argument('--sigma', default=2.5, type=float)
parser.add_argument('--ae_weight_file', default='AE_weights_1.pth.tar')
parser.add_argument('--embedding_file', default=-1, type=int)
parser.add_argument('--prediction_file', default=-1, type=int)
parser.add_argument('--neb', default=20, type=int)
parser.add_argument('--filter', default=-1, type=int)
parser.add_argument('--n_features', default=1000, type=int)
parser.add_argument('--saveFeatures', default=-1, type=int)
parser.add_argument('--pretrain_epochs', default=400, type=int)
parser.add_argument('--filter_file', default="NA")
args = parser.parse_args()
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y0 = np.array(data_mat['Y'])
DIST_markers = np.array(data_mat["pos"])
DIST_markers = np.transpose(DIST_markers)
data_mat.close()
f = np.where(y0.astype(np.str) != "NA")[0]
y = y0[f]
x = x[f,:]
DIST_markers = DIST_markers[f,:]
#u = np.unique(y0)
#y=[]
#for i in range(len(y0)):
# a = np.where(u == y0[i])[0][0]
# y.append(a)
#y = np.array(y)
#print(np.unique(y))
#print(x.shape)
#print(DIST_markers.shape)
neb = int(args.neb)
A = kneighbors_graph(DIST_markers, 30, mode="connectivity", metric="euclidean", include_self=False, n_jobs=-1)
A = A.toarray()
if args.filter != -1 and args.filter_file == "NA":
#Gene filter
#importantGenes = geneSelection(x, n=1000, plot=False)
#x = x[:, importantGenes]
adata0 = sc.AnnData(x)
adata0 = read_dataset(adata0,transpose=False,test_split=False,copy=True)
adata0 = normalize(adata0,size_factors=True,normalize_input=True,logtrans_input=True)
score = []
for i in range(adata0.X.shape[1]):
gene = adata0.X[:,i]
I = Iscore_gene(gene, A)
score.append(I)
if i%100==0:
print(i)
score_ = np.argpartition(score, -args.n_features)[-args.n_features:]
x = adata0.raw.X[:,score_]
else:
filter = np.loadtxt(args.filter_file)
filter = filter.astype(np.int)
x = x[:,filter]
print(x.shape)
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
#spatial dist
dist = spatial.distance_matrix(DIST_markers,DIST_markers)
p_ = []
for i in range(dist.shape[0]):
idx = np.argpartition(dist[i], neb)[:neb]
p_.append(idx)
#Raw kmeans
kmeans = KMeans(np.unique(y).shape[0], n_init=20)
y_p = kmeans.fit_predict(adata.X)
y_pred_ = best_map(y, y_p)
acc = np.round(metrics.accuracy_score(y, y_pred_), 5)
nmi = np.round(metrics.normalized_mutual_info_score(y, y_p), 5)
ari = np.round(metrics.adjusted_rand_score(y, y_p), 5)
Iscore = Iscore_label(y_p+1., A)
ka = knn_ACC(p_, y_p)
print('Raw Kmeans Clustering: ACC= %.4f, NMI= %.4f, ARI= %.4f, kNN_ACC= %.4f, I_score= %.4f' % (acc, nmi, ari, ka, Iscore))
#Constraints
n_pairwise = args.n_pairwise
ml_ind1_1, ml_ind2_1 = generate_random_pair_from_neighbor2(p_, n_pairwise, neb, y_pred_)
ml_ind1 = ml_ind1_1
ml_ind2 = ml_ind2_1
#Build model
model = Spatialmodel(input_dim=input_size, z_dim=32, neg=p_,
encodeLayer=[256,64], decodeLayer=[64,256], sigma=args.sigma, gamma=args.gamma,
ml_weight=args.weight_ml).cuda()
model.pretrain_autoencoder(X=adata.X, X_raw = adata.raw.X, X_sf=adata.obs.size_factors, batch_size=args.batch_size, epochs=args.pretrain_epochs, ae_weights="RU_ALL_weights.pth.tar")
if not os.path.exists(args.dir_name):
os.makedirs(args.dir_name)
#get k
latent = model.encodeBatch(torch.tensor(adata.X).cuda(), batch_size=args.batch_size).cpu().numpy()
if args.n_clusters == -1:
n_clusters = GetCluster(latent, res=1., n=30)
else:
print("n_cluster is defined as " + str(args.n_clusters))
if args.n_clusters > 1:
n_clusters = args.n_clusters
else:
n_clusters = np.unique(y).shape[0]
y_pred, _, _, _, _ = model.fit(X=adata.X, X_raw = adata.raw.X, X_sf=adata.obs.size_factors, n_clusters = n_clusters,
batch_size=args.batch_size, num_epochs=1000, y=y,
ml_ind1=ml_ind1, ml_ind2=ml_ind2, lr = 1.,
update_interval=1, tol=0.001, save_dir=args.dir_name)
y_pred_ = best_map(y, y_pred)
acc = np.round(metrics.accuracy_score(y, y_pred_), 5)
nmi = np.round(metrics.normalized_mutual_info_score(y, y_pred), 5)
ari = np.round(metrics.adjusted_rand_score(y, y_pred), 5)
Iscore = Iscore_label(y_pred+1., A)
ka = knn_ACC(p_, y_pred)
print('Final Clustering: ACC= %.4f, NMI= %.4f, ARI= %.4f, kNN_ACC= %.4f, I_score= %.4f' % (acc, nmi, ari, ka, Iscore))
file = args.data_file.split("/")[2]
file = file.split(".")[0]
final_latent = model.encodeBatch(torch.tensor(adata.X).cuda(), batch_size=args.batch_size).cpu().numpy()
if args.embedding_file != -1:
np.savetxt(args.dir_name+"/" + file + "." + "FINAL_latent.csv", final_latent, delimiter=",")
if args.prediction_file != -1:
np.savetxt(args.dir_name+"/" + file + "." + "y_pred.txt", y_pred, delimiter="\t")
if args.saveFeatures != -1:
np.savetxt(args.dir_name+"/" + file + "." + "featureSelection.txt", score_, delimiter="\t")
| [
"preprocess.read_dataset",
"sklearn.metrics.adjusted_rand_score",
"numpy.array",
"sklearn.metrics.normalized_mutual_info_score",
"preprocess.normalize",
"os.path.exists",
"argparse.ArgumentParser",
"scanpy.AnnData",
"scipy.spatial.distance_matrix",
"h5py.File",
"numpy.savetxt",
"numpy.transpos... | [((592, 697), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='train', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (615, 697), False, 'import argparse\n'), ((1990, 2015), 'h5py.File', 'h5py.File', (['args.data_file'], {}), '(args.data_file)\n', (1999, 2015), False, 'import h5py\n'), ((2024, 2047), 'numpy.array', 'np.array', (["data_mat['X']"], {}), "(data_mat['X'])\n", (2032, 2047), True, 'import numpy as np\n'), ((2057, 2080), 'numpy.array', 'np.array', (["data_mat['Y']"], {}), "(data_mat['Y'])\n", (2065, 2080), True, 'import numpy as np\n'), ((2100, 2125), 'numpy.array', 'np.array', (["data_mat['pos']"], {}), "(data_mat['pos'])\n", (2108, 2125), True, 'import numpy as np\n'), ((2145, 2171), 'numpy.transpose', 'np.transpose', (['DIST_markers'], {}), '(DIST_markers)\n', (2157, 2171), True, 'import numpy as np\n'), ((3586, 3599), 'scanpy.AnnData', 'sc.AnnData', (['x'], {}), '(x)\n', (3596, 3599), True, 'import scanpy as sc\n'), ((3640, 3705), 'preprocess.read_dataset', 'read_dataset', (['adata'], {'transpose': '(False)', 'test_split': '(False)', 'copy': '(True)'}), '(adata, transpose=False, test_split=False, copy=True)\n', (3652, 3705), False, 'from preprocess import read_dataset, normalize\n'), ((3759, 3837), 'preprocess.normalize', 'normalize', (['adata'], {'size_factors': '(True)', 'normalize_input': '(True)', 'logtrans_input': '(True)'}), '(adata, size_factors=True, normalize_input=True, logtrans_input=True)\n', (3768, 3837), False, 'from preprocess import read_dataset, normalize\n'), ((3952, 3967), 'numpy.median', 'np.median', (['x_sd'], {}), '(x_sd)\n', (3961, 3967), True, 'import numpy as np\n'), ((4053, 4104), 'scipy.spatial.distance_matrix', 'spatial.distance_matrix', (['DIST_markers', 'DIST_markers'], {}), '(DIST_markers, DIST_markers)\n', (4076, 4104), False, 'from scipy import stats, spatial\n'), 
((2884, 2897), 'scanpy.AnnData', 'sc.AnnData', (['x'], {}), '(x)\n', (2894, 2897), True, 'import scanpy as sc\n'), ((2916, 2982), 'preprocess.read_dataset', 'read_dataset', (['adata0'], {'transpose': '(False)', 'test_split': '(False)', 'copy': '(True)'}), '(adata0, transpose=False, test_split=False, copy=True)\n', (2928, 2982), False, 'from preprocess import read_dataset, normalize\n'), ((2999, 3078), 'preprocess.normalize', 'normalize', (['adata0'], {'size_factors': '(True)', 'normalize_input': '(True)', 'logtrans_input': '(True)'}), '(adata0, size_factors=True, normalize_input=True, logtrans_input=True)\n', (3008, 3078), False, 'from preprocess import read_dataset, normalize\n'), ((3446, 3474), 'numpy.loadtxt', 'np.loadtxt', (['args.filter_file'], {}), '(args.filter_file)\n', (3456, 3474), True, 'import numpy as np\n'), ((4390, 4424), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y', 'y_pred_'], {}), '(y, y_pred_)\n', (4412, 4424), False, 'from sklearn import metrics\n'), ((4448, 4492), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['y', 'y_p'], {}), '(y, y_p)\n', (4484, 4492), False, 'from sklearn import metrics\n'), ((4516, 4551), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['y', 'y_p'], {}), '(y, y_p)\n', (4543, 4551), False, 'from sklearn import metrics\n'), ((5363, 5392), 'os.path.exists', 'os.path.exists', (['args.dir_name'], {}), '(args.dir_name)\n', (5377, 5392), False, 'import math, os\n'), ((5406, 5432), 'os.makedirs', 'os.makedirs', (['args.dir_name'], {}), '(args.dir_name)\n', (5417, 5432), False, 'import math, os\n'), ((6211, 6245), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y', 'y_pred_'], {}), '(y, y_pred_)\n', (6233, 6245), False, 'from sklearn import metrics\n'), ((6269, 6316), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (6305, 6316), False, 'from sklearn 
import metrics\n'), ((6340, 6378), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (6367, 6378), False, 'from sklearn import metrics\n'), ((6807, 6905), 'numpy.savetxt', 'np.savetxt', (["(args.dir_name + '/' + file + '.' + 'FINAL_latent.csv')", 'final_latent'], {'delimiter': '""","""'}), "(args.dir_name + '/' + file + '.' + 'FINAL_latent.csv',\n final_latent, delimiter=',')\n", (6817, 6905), True, 'import numpy as np\n'), ((6944, 7031), 'numpy.savetxt', 'np.savetxt', (["(args.dir_name + '/' + file + '.' + 'y_pred.txt')", 'y_pred'], {'delimiter': '"""\t"""'}), "(args.dir_name + '/' + file + '.' + 'y_pred.txt', y_pred,\n delimiter='\\t')\n", (6954, 7031), True, 'import numpy as np\n'), ((7067, 7164), 'numpy.savetxt', 'np.savetxt', (["(args.dir_name + '/' + file + '.' + 'featureSelection.txt')", 'score_'], {'delimiter': '"""\t"""'}), "(args.dir_name + '/' + file + '.' + 'featureSelection.txt',\n score_, delimiter='\\t')\n", (7077, 7164), True, 'import numpy as np\n'), ((3322, 3362), 'numpy.argpartition', 'np.argpartition', (['score', '(-args.n_features)'], {}), '(score, -args.n_features)\n', (3337, 3362), True, 'import numpy as np\n'), ((4165, 4194), 'numpy.argpartition', 'np.argpartition', (['dist[i]', 'neb'], {}), '(dist[i], neb)\n', (4180, 4194), True, 'import numpy as np\n'), ((4974, 5143), 'DSSC.Spatialmodel', 'Spatialmodel', ([], {'input_dim': 'input_size', 'z_dim': '(32)', 'neg': 'p_', 'encodeLayer': '[256, 64]', 'decodeLayer': '[64, 256]', 'sigma': 'args.sigma', 'gamma': 'args.gamma', 'ml_weight': 'args.weight_ml'}), '(input_dim=input_size, z_dim=32, neg=p_, encodeLayer=[256, 64],\n decodeLayer=[64, 256], sigma=args.sigma, gamma=args.gamma, ml_weight=\n args.weight_ml)\n', (4986, 5143), False, 'from DSSC import Spatialmodel\n'), ((4265, 4277), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4274, 4277), True, 'import numpy as np\n'), ((5818, 5830), 'numpy.unique', 'np.unique', (['y'], {}), 
'(y)\n', (5827, 5830), True, 'import numpy as np\n'), ((5476, 5497), 'torch.tensor', 'torch.tensor', (['adata.X'], {}), '(adata.X)\n', (5488, 5497), False, 'import torch\n'), ((6692, 6713), 'torch.tensor', 'torch.tensor', (['adata.X'], {}), '(adata.X)\n', (6704, 6713), False, 'import torch\n')] |
import numpy as np
import matplotlib.patches as mpatches
from matplotlib.colors import LinearSegmentedColormap
from scipy import stats
"""
Load 8 simulations (main experiment)
print for each simulation:
SimID: Performance(mean,std) rejectedBlocks maxMinDist
"""
# ---------------------------------------------------------------------------
# Load the per-simulation logs and summarise T1 performance.
# ---------------------------------------------------------------------------
folder1 = '2020_09_23_mainExperiment_final'
folder2 = '2020_09_23_mainExperiment_final'
simIDs1 = [1, 2, 3, 4]
simIDs2 = [5, 6, 7, 8]

Performance = np.zeros((8, 2))   # row i: (mean, std) of per-block performance of sim i
rejectedBlocks = np.zeros(8)    # per sim: number of blocks with performance < 0.8
maxMinDist = np.zeros(8)        # per sim: largest minimal distance seen in any trial

for folderIdx, folder in enumerate([folder1, folder2]):
    simIDs = [simIDs1, simIDs2][folderIdx]
    for simIdx, sim in enumerate(simIDs):
        idx = simIdx + [0, 4][folderIdx]   # global row: sims 1-4 -> 0-3, sims 5-8 -> 4-7

        # --- performance: fraction of stimulus-1 trials answered with orientation 55 ---
        selection = np.load('../data/' + folder + '/selection' + str(sim) + '.npy')
        decisionOrientation = selection[:, 3]
        stimulus = selection[:, 5]
        blockList = selection[:, 6]

        # NOTE(review): fixed length 10 assumes every simulation has exactly
        # 10 blocks; fewer blocks would leave biasing zeros in the stats.
        block_Performance = np.zeros(10)
        for block in range(int(np.max(blockList))):
            inBlock = blockList == block + 1
            t1Selections = decisionOrientation[inBlock][stimulus[inBlock] == 1]
            block_Performance[block] = np.sum((t1Selections == 55).astype(int)) / float(t1Selections.shape[0])

        Performance[idx, 0] = np.mean(block_Performance)
        Performance[idx, 1] = np.std(block_Performance)
        rejectedBlocks[idx] = np.sum((block_Performance < 0.8).astype(int))

        # --- maxMinDist: maximum over trials of the minimal distance ---
        dist = np.load('../data/' + folder + '/distList' + str(sim) + '.npy', allow_pickle=True)
        for distList in dist:
            if min(distList) > maxMinDist[idx]:
                maxMinDist[idx] = min(distList)

# Write one summary line per simulation.
with open('T1_performance.txt', 'w') as f:
    print('Sim Nr Performance rejected maxMinDist', file=f)
    for folderIdx, folder in enumerate([folder1, folder2]):
        simIDs = [simIDs1, simIDs2][folderIdx]
        for simIdx, sim in enumerate(simIDs):
            idx = simIdx + [0, 4][folderIdx]
            print('Sim ' + str(sim) + ' (' + str(round(Performance[idx, 0], 4)) + ', ' + str(round(Performance[idx, 1], 4)) + ') ' + str(rejectedBlocks[idx]) + ' ' + str(maxMinDist[idx]), file=f)
| [
"numpy.max",
"numpy.mean",
"numpy.zeros",
"numpy.std"
] | [((414, 430), 'numpy.zeros', 'np.zeros', (['(8, 2)'], {}), '((8, 2))\n', (422, 430), True, 'import numpy as np\n'), ((445, 456), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (453, 456), True, 'import numpy as np\n'), ((468, 479), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (476, 479), True, 'import numpy as np\n'), ((950, 962), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (958, 962), True, 'import numpy as np\n'), ((1302, 1328), 'numpy.mean', 'np.mean', (['block_Performance'], {}), '(block_Performance)\n', (1309, 1328), True, 'import numpy as np\n'), ((1350, 1375), 'numpy.std', 'np.std', (['block_Performance'], {}), '(block_Performance)\n', (1356, 1375), True, 'import numpy as np\n'), ((988, 1005), 'numpy.max', 'np.max', (['blockList'], {}), '(blockList)\n', (994, 1005), True, 'import numpy as np\n')] |
from util import *
import numpy as np
import cv2
def questao9A(img):
    """
    Apply the 3x3 sharpening filter

        [ 0 -1  0]
        [-1  5 -1]
        [ 0 -1  0]

    to ``img`` using the project's applyFilter3x3 helper.
    """
    sharpen_kernel = np.array([[0, -1, 0],
                               [-1, 5, -1],
                               [0, -1, 0]])
    return applyFilter3x3(img, sharpen_kernel)
def questao9B(img):
    """
    Apply the 3x3 filter

        [0 0  0]
        [0 1  0]
        [0 0 -1]

    (identity minus the bottom-right diagonal neighbour) to ``img`` using
    the project's applyFilter3x3 helper.
    """
    diag_kernel = np.array([[0, 0, 0],
                            [0, 1, 0],
                            [0, 0, -1]])
    return applyFilter3x3(img, diag_kernel)
def questao9ATest(img):
    """
    Reference check for questao9A: apply the same 3x3 sharpening kernel
    with OpenCV's cv2.filter2D and return the filtered image.
    """
    kernel = np.array(([0, -1, 0], [-1, 5, -1], [0, -1, 0]))
    # Bug fix: the original did `resultImage = img`, which only aliased the
    # input, so filter2D (dst=resultImage) overwrote the caller's image in
    # place. Allocate a separate same-shape output buffer instead.
    resultImage = np.empty_like(img)
    cv2.filter2D(src=img, ddepth=-1, kernel=kernel, dst=resultImage)
    return resultImage
def questao9BTest(img):
    """
    Reference check for questao9B: apply the same 3x3 diagonal kernel with
    OpenCV's cv2.filter2D and return the filtered image.
    """
    kernel = np.array(([0, 0, 0], [0, 1, 0], [0, 0, -1]))
    # Bug fix: the original did `resultImage = img`, which only aliased the
    # input, so filter2D (dst=resultImage) overwrote the caller's image in
    # place. Allocate a separate same-shape output buffer instead.
    resultImage = np.empty_like(img)
    cv2.filter2D(src=img, ddepth=-1, kernel=kernel, dst=resultImage)
    return resultImage
"numpy.array",
"cv2.filter2D"
] | [((164, 211), 'numpy.array', 'np.array', (['([0, -1, 0], [-1, 5, -1], [0, -1, 0])'], {}), '(([0, -1, 0], [-1, 5, -1], [0, -1, 0]))\n', (172, 211), True, 'import numpy as np\n'), ((359, 403), 'numpy.array', 'np.array', (['([0, 0, 0], [0, 1, 0], [0, 0, -1])'], {}), '(([0, 0, 0], [0, 1, 0], [0, 0, -1]))\n', (367, 403), True, 'import numpy as np\n'), ((547, 594), 'numpy.array', 'np.array', (['([0, -1, 0], [-1, 5, -1], [0, -1, 0])'], {}), '(([0, -1, 0], [-1, 5, -1], [0, -1, 0]))\n', (555, 594), True, 'import numpy as np\n'), ((654, 718), 'cv2.filter2D', 'cv2.filter2D', ([], {'src': 'img', 'ddepth': '(-1)', 'kernel': 'kernel', 'dst': 'resultImage'}), '(src=img, ddepth=-1, kernel=kernel, dst=resultImage)\n', (666, 718), False, 'import cv2\n'), ((775, 819), 'numpy.array', 'np.array', (['([0, 0, 0], [0, 1, 0], [0, 0, -1])'], {}), '(([0, 0, 0], [0, 1, 0], [0, 0, -1]))\n', (783, 819), True, 'import numpy as np\n'), ((883, 947), 'cv2.filter2D', 'cv2.filter2D', ([], {'src': 'img', 'ddepth': '(-1)', 'kernel': 'kernel', 'dst': 'resultImage'}), '(src=img, ddepth=-1, kernel=kernel, dst=resultImage)\n', (895, 947), False, 'import cv2\n')] |
# Copyright: (c) 2021, <NAME>
import numpy as np
import sys
sys.path.append('../../pyCuSDR')
from lib import *
# Default test payload: deterministic 10000-bit packet (fixed seed, so runs are repeatable).
packetData = lambda: createBitSequence(10000,seed=123) # gives us a default 10000 bit length packet
# Pad a signal with n zeros on each side (before and after).
zeropad = lambda sig,n: np.concatenate((np.zeros(n),sig,np.zeros(n))) # pre and post pad signal with zeros
def createBitSequence(n_bits, seed=None):
    """
    Return a random sequence of ``n_bits`` bits (0/1) as a numpy int array.

    If ``seed`` is given, the sequence is deterministic. The global numpy
    RNG state is saved before seeding and restored afterwards, so a seeded
    call does not perturb the caller's random stream.

    Bug fix: the original used ``if seed:``, which silently ignored
    ``seed=0`` (falsy) and produced a non-deterministic sequence for it.
    """
    if seed is not None:
        cur_state = np.random.get_state()  # preserve the global RNG state
        np.random.seed(seed)
    bitData = np.random.randint(0, 2, n_bits)
    if seed is not None:
        np.random.set_state(cur_state)  # restore the global RNG state
    return bitData
def encodeNRZS(bitData):
    """
    NRZ-S encode a binary sequence.

    A 1 keeps the previous output level, anything else toggles it; the
    first output equals the first input bit. Returns a uint8 array of the
    same length as ``bitData``.
    """
    encoded = np.zeros(len(bitData), dtype=np.uint8)
    encoded[0] = bitData[0]
    for pos in range(1, len(bitData)):
        if bitData[pos] == 1:
            encoded[pos] = encoded[pos - 1]       # hold the level
        else:
            encoded[pos] = ~encoded[pos - 1] & 1  # toggle the level
    return encoded
def modulateBPSK(raw_bits, samples_pr_sym):
    """
    BPSK-modulate ``raw_bits`` at ``samples_pr_sym`` samples per symbol.

    The bits are NRZ-S precoded to avoid phase ambiguity; since NRZ-S
    leaves the first bit ambiguous, a short [1, 0, 1] preamble is
    prepended. Pulse shaping uses a root-raised-cosine filter spanning
    6 symbols with roll-off 0.5. Returns a complex64 baseband signal.
    """
    preamble_and_bits = np.concatenate(([1, 0, 1], raw_bits))
    symbols = encodeNRZS(preamble_and_bits).astype(float) * 2 - 1
    pulse = rrcosfilter(0.5, 6, samples_pr_sym)
    pulse = pulse / np.sum(pulse)  # normalise to unit DC gain
    baseband = np.convolve(pulse, np.repeat(symbols, samples_pr_sym))
    return baseband.astype(np.complex64)
def modulateFSK(raw_bits, samples_pr_sym):
    """
    FSK-modulate ``raw_bits`` at ``samples_pr_sym`` samples per symbol.

    Each bit advances the phase by +/- pi per symbol; the initial phase is
    offset so the signal starts at 1+0j. Returns a complex64 baseband
    signal of length ``len(raw_bits) * samples_pr_sym``.

    NOTE: unlike modulateBPSK, no NRZ-S precoding is applied here (the old
    docstring claimed otherwise). Also removed an unreachable
    ``return sig`` after the real return, which referenced an undefined
    name.
    """
    # Per-sample phase increment for a full pi rotation over one symbol.
    wavePhase = np.ones(samples_pr_sym) / samples_pr_sym * np.pi
    LUT = np.array([-wavePhase, wavePhase])  # bit 0 -> -pi/symbol, bit 1 -> +pi/symbol
    # cumsum flattens the (n_bits, samples_pr_sym) table into the phase ramp.
    outPhase = np.cumsum(LUT[raw_bits]) - (raw_bits[0] * 2 - 1) * np.pi / 2
    outPhaseWrap = np.mod(outPhase, 2 * np.pi)
    return np.exp(1j * outPhaseWrap).astype(np.complex64)
def modulateGFSK2(raw_bits, samples_pr_sym):
    """
    GFSK2-modulate ``raw_bits`` at ``samples_pr_sym`` samples per symbol.

    No NRZ-S precoding is applied (GFSK has no phase ambiguity). The
    instantaneous frequency is shaped with the project's gaussianFilter
    helper and integrated into phase. Returns a complex64 signal.
    """
    shaping = gaussianFilter(1, 1, samples_pr_sym, 4 * samples_pr_sym)
    freq = np.convolve(shaping, np.repeat(raw_bits * 2 - 1, samples_pr_sym))
    phase = np.cumsum(freq) / samples_pr_sym * np.pi
    return np.exp(1j * phase).astype(np.complex64)
def modulateGMSK(raw_bits, samples_pr_sym):
    """
    GMSK-modulate ``raw_bits`` at ``samples_pr_sym`` samples per symbol.

    No NRZ-S precoding is applied (GMSK has no phase ambiguity). The
    instantaneous frequency is shaped with the project's gaussianFilter
    helper and integrated into phase with a pi/2-per-symbol deviation.
    Returns a complex64 signal.
    """
    shaping = gaussianFilter(1, 0.5, samples_pr_sym, 4 * samples_pr_sym)
    freq = np.convolve(shaping, np.repeat(raw_bits * 2 - 1, samples_pr_sym))
    phase = np.cumsum(freq) / samples_pr_sym * np.pi / 2
    return np.exp(1j * phase).astype(np.complex64)
def awgn(sig, snr, measured=True):
    """
    Pass ``sig`` through an additive white Gaussian noise channel.

    Inputs:
    sig      -- modulated signal (real or complex numpy array)
    snr      -- target signal-to-noise ratio in dB
    measured -- if True (default), the noise power is scaled relative to
                the measured signal power; otherwise unit signal power is
                assumed.
    Returns:
    the noisy signal (same shape as ``sig``). Complex input gets
    circularly-symmetric complex noise; real input gets real noise.
    """
    if measured:
        # Normalise the requested SNR by the measured signal power (in dB).
        sig_power_db = 10 * np.log10(np.linalg.norm(np.abs(sig), 2) ** 2 / len(sig))
        noise_power = 10 ** (-(snr - sig_power_db) / 10)
    else:
        # Assume unit signal power.
        noise_power = 10 ** (-snr / 10)
    n_samples = len(sig)
    if np.iscomplexobj(sig):
        noise = np.sqrt(noise_power / 2) * (np.random.randn(n_samples) + 1j * np.random.randn(n_samples))
    else:
        noise = np.sqrt(noise_power) * np.random.randn(n_samples)
    return sig + noise
def get_GMSK_packet(spSym = 16, fs = 9600*16, offset_freq = None):
    """
    Build the standard zero-padded GMSK test packet.

    Returns (signal, raw_bits); the baseband packet is padded with 10000
    zeros on each side and shifted by offset_freq (default fs/4).
    """
    if offset_freq is None:
        offset_freq = fs/4
    payload = packetData()
    padded = zeropad(modulateGMSK(payload, spSym), 10000)
    carrier = np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(padded)))
    return padded * carrier, payload
def get_BPSK_packet(spSym = 16, fs = 9600*16, offset_freq = None):
    """
    Build the standard zero-padded BPSK test packet.

    Returns (signal, raw_bits); the baseband packet is padded with 10000
    zeros on each side and shifted by offset_freq (default fs/4).
    """
    if offset_freq is None:
        offset_freq = fs/4
    payload = packetData()
    padded = zeropad(modulateBPSK(payload, spSym), 10000)
    carrier = np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(padded)))
    return padded * carrier, payload
def get_padded_packet(modulation, spSym = 16, fs = 9600*16, offset_freq = None, raw_bits = None):
    """
    Build a zero-padded, frequency-shifted test packet.

    Parameters:
    modulation  -- one of 'BPSK', 'GMSK', 'FSK', 'GFSK'
    spSym       -- samples per symbol (default 16)
    fs          -- sample rate (default 9600*16)
    offset_freq -- carrier offset in Hz; defaults to fs/4
    raw_bits    -- payload bits; when None or empty, the default packet is
                   generated (the old default was a mutable [] literal)

    Returns (signal, raw_bits).
    Raises TypeError for an unsupported modulation name.
    """
    if offset_freq is None:
        offset_freq = fs/4
    if raw_bits is None or len(raw_bits) == 0:
        raw_bits = packetData()
    if modulation == 'BPSK':
        sig = modulateBPSK(raw_bits, spSym)
    elif modulation == 'GMSK':
        sig = modulateGMSK(raw_bits, spSym)
    elif modulation == 'FSK':
        sig = modulateFSK(raw_bits, spSym)
    elif modulation == 'GFSK':
        sig = modulateGFSK2(raw_bits, spSym)
    else:
        # Bug fix: the old message omitted GFSK even though it is supported.
        raise TypeError('Only supports BPSK, GMSK, FSK and GFSK')
    sig_full = zeropad(sig, 10000)
    sig_full *= np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(sig_full)))
    return sig_full, raw_bits
if __name__ == "__main__":
    # Demo / test-vector generation: build long BPSK and GMSK recordings
    # containing many copies of the same packet and save them to disk.
    spSym = 16                  # samples per symbol
    fs = 9600*16                # sample rate for a 9600 baud signal
    raw_bits = packetData()     # deterministic 10000-bit payload
    sig_BPSK = modulateBPSK(raw_bits,spSym)
    sig_GMSK = modulateGMSK(raw_bits,spSym)
    SNR = 10
    bw_gmsk = 9.6e3/0.7
    bw_bpsk = 9.6e3*2           # NOTE(review): unused below — kept for reference
    SNR_r = SNR + 10*np.log10(bw_gmsk/fs) # for generating AWGN, the bandwidth and oversampling rate need to be taken into account
    # zero pad each signal to make it behave as a packet
    sig_BPSK_full = zeropad(sig_BPSK,10000)
    sig_GMSK_full = zeropad(sig_GMSK,10000)
    offset_freq = fs/4          # shift the packets away from baseband
    var_noise = 0.01            # NOTE(review): unused — superseded by SNR_r above?
    sig_BPSK_full *= np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(sig_BPSK_full)))
    sig_GMSK_full *= np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(sig_GMSK_full)))
    sig_BPSK_full_n = awgn(sig_BPSK_full,SNR_r)
    sig_GMSK_full_n = awgn(sig_GMSK_full,SNR_r)
    # Un-padded, frequency-shifted noisy packet (no zero gap around it).
    sig_GMSK_short_shift = awgn(sig_GMSK * np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(sig_GMSK))) ,SNR_r)
    # noiseless variant kept for reference:
    # sig_GMSK_short_shift = sig_GMSK * np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(sig_GMSK)))
    sig_GMSK_long = np.concatenate((np.tile(sig_GMSK_full_n,10), sig_GMSK_short_shift, sig_GMSK_short_shift, sig_GMSK_short_shift, np.tile(sig_GMSK_full_n,10))) # contains 23 packets (3 in row and 20 with a delay)
    sig_GMSK_very_long = np.tile(sig_GMSK_long,10) # contains 230 packets (23 * 10 tiles)
    sig_BPSK_short_shift = sig_BPSK * np.exp(1j*2*np.pi*offset_freq/fs*np.arange(len(sig_BPSK)))
    sig_BPSK_long = np.concatenate((np.tile(sig_BPSK_full_n,10), sig_BPSK_short_shift, sig_BPSK_short_shift, sig_BPSK_short_shift, np.tile(sig_BPSK_full_n,10))) # contains 23 packets (3 in row and 20 with a delay)
    sig_BPSK_very_long = np.tile(sig_BPSK_long,10) # contains 230 packets (23 * 10 tiles)
    # add enough zeros at the end to ensure that the downstream modem empties its buffer
    # NOTE(review): np.save appends '.npy' when the name lacks that suffix,
    # so these land on disk as 'sig_BPSK.bin.npy' / 'sig_GMSK.bin.npy'.
    np.save('sig_BPSK.bin',np.concatenate((sig_BPSK_very_long,np.zeros(int(2**17)))).astype(np.complex64))
    np.save('sig_GMSK.bin',np.concatenate((sig_GMSK_very_long,np.zeros(int(2**17)))).astype(np.complex64))
| [
"numpy.random.get_state",
"numpy.log10",
"numpy.random.set_state",
"numpy.sqrt",
"numpy.array",
"sys.path.append",
"numpy.mod",
"numpy.repeat",
"numpy.exp",
"numpy.random.seed",
"numpy.concatenate",
"numpy.tile",
"numpy.abs",
"numpy.ones",
"numpy.iscomplexobj",
"numpy.sum",
"numpy.ra... | [((62, 94), 'sys.path.append', 'sys.path.append', (['"""../../pyCuSDR"""'], {}), "('../../pyCuSDR')\n", (77, 94), False, 'import sys\n'), ((730, 761), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n_bits'], {}), '(0, 2, n_bits)\n', (747, 761), True, 'import numpy as np\n'), ((2078, 2111), 'numpy.array', 'np.array', (['[-wavePhase, wavePhase]'], {}), '([-wavePhase, wavePhase])\n', (2086, 2111), True, 'import numpy as np\n'), ((2199, 2226), 'numpy.mod', 'np.mod', (['outPhase', '(2 * np.pi)'], {}), '(outPhase, 2 * np.pi)\n', (2205, 2226), True, 'import numpy as np\n'), ((3826, 3846), 'numpy.iscomplexobj', 'np.iscomplexobj', (['sig'], {}), '(sig)\n', (3841, 3846), True, 'import numpy as np\n'), ((6921, 6947), 'numpy.tile', 'np.tile', (['sig_GMSK_long', '(10)'], {}), '(sig_GMSK_long, 10)\n', (6928, 6947), True, 'import numpy as np\n'), ((7311, 7337), 'numpy.tile', 'np.tile', (['sig_BPSK_long', '(10)'], {}), '(sig_BPSK_long, 10)\n', (7318, 7337), True, 'import numpy as np\n'), ((635, 656), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (654, 656), True, 'import numpy as np\n'), ((694, 714), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (708, 714), True, 'import numpy as np\n'), ((782, 812), 'numpy.random.set_state', 'np.random.set_state', (['cur_state'], {}), '(cur_state)\n', (801, 812), True, 'import numpy as np\n'), ((1742, 1754), 'numpy.sum', 'np.sum', (['filt'], {}), '(filt)\n', (1748, 1754), True, 'import numpy as np\n'), ((2127, 2151), 'numpy.cumsum', 'np.cumsum', (['LUT[raw_bits]'], {}), '(LUT[raw_bits])\n', (2136, 2151), True, 'import numpy as np\n'), ((2677, 2720), 'numpy.repeat', 'np.repeat', (['(raw_bits * 2 - 1)', 'samples_pr_sym'], {}), '(raw_bits * 2 - 1, samples_pr_sym)\n', (2686, 2720), True, 'import numpy as np\n'), ((3076, 3119), 'numpy.repeat', 'np.repeat', (['(raw_bits * 2 - 1)', 'samples_pr_sym'], {}), '(raw_bits * 2 - 1, samples_pr_sym)\n', (3085, 3119), True, 'import numpy as 
np\n'), ((256, 267), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (264, 267), True, 'import numpy as np\n'), ((272, 283), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (280, 283), True, 'import numpy as np\n'), ((2018, 2041), 'numpy.ones', 'np.ones', (['samples_pr_sym'], {}), '(samples_pr_sym)\n', (2025, 2041), True, 'import numpy as np\n'), ((2354, 2381), 'numpy.exp', 'np.exp', (['(1.0j * outPhaseWrap)'], {}), '(1.0j * outPhaseWrap)\n', (2360, 2381), True, 'import numpy as np\n'), ((5855, 5877), 'numpy.log10', 'np.log10', (['(bw_gmsk / fs)'], {}), '(bw_gmsk / fs)\n', (5863, 5877), True, 'import numpy as np\n'), ((6717, 6745), 'numpy.tile', 'np.tile', (['sig_GMSK_full_n', '(10)'], {}), '(sig_GMSK_full_n, 10)\n', (6724, 6745), True, 'import numpy as np\n'), ((6812, 6840), 'numpy.tile', 'np.tile', (['sig_GMSK_full_n', '(10)'], {}), '(sig_GMSK_full_n, 10)\n', (6819, 6840), True, 'import numpy as np\n'), ((7107, 7135), 'numpy.tile', 'np.tile', (['sig_BPSK_full_n', '(10)'], {}), '(sig_BPSK_full_n, 10)\n', (7114, 7135), True, 'import numpy as np\n'), ((7202, 7230), 'numpy.tile', 'np.tile', (['sig_BPSK_full_n', '(10)'], {}), '(sig_BPSK_full_n, 10)\n', (7209, 7230), True, 'import numpy as np\n'), ((1787, 1823), 'numpy.repeat', 'np.repeat', (['bits_nrzs', 'samples_pr_sym'], {}), '(bits_nrzs, samples_pr_sym)\n', (1796, 1823), True, 'import numpy as np\n'), ((3869, 3888), 'numpy.sqrt', 'np.sqrt', (['(noiseP / 2)'], {}), '(noiseP / 2)\n', (3876, 3888), True, 'import numpy as np\n'), ((3979, 3994), 'numpy.sqrt', 'np.sqrt', (['noiseP'], {}), '(noiseP)\n', (3986, 3994), True, 'import numpy as np\n'), ((1484, 1521), 'numpy.concatenate', 'np.concatenate', (['([1, 0, 1], raw_bits)'], {}), '(([1, 0, 1], raw_bits))\n', (1498, 1521), True, 'import numpy as np\n'), ((2738, 2754), 'numpy.cumsum', 'np.cumsum', (['phase'], {}), '(phase)\n', (2747, 2754), True, 'import numpy as np\n'), ((3655, 3666), 'numpy.abs', 'np.abs', (['sig'], {}), '(sig)\n', (3661, 3666), True, 'import numpy 
as np\n'), ((3137, 3153), 'numpy.cumsum', 'np.cumsum', (['phase'], {}), '(phase)\n', (3146, 3153), True, 'import numpy as np\n')] |
import numpy as np
import numpy.testing as npt
from stumpy import core
import pytest
def naive_rolling_window_dot_product(Q, T):
    """
    Brute-force sliding dot product of query ``Q`` over series ``T``.

    Returns a float array of length ``len(T) - len(Q) + 1`` whose entry i
    is ``dot(T[i:i+len(Q)], Q)``.
    """
    m = len(Q)
    out = np.empty(len(T) - m + 1)
    for start in range(out.shape[0]):
        out[start] = np.dot(T[start:start + m], Q)
    return out
# Parametrised (Q, T) fixtures: (query, time series) pairs.
# The last pair is random and intentionally unseeded, so every test run
# exercises slightly different values.
test_data = [
    (np.array([-1,1,2], dtype=np.float64),np.array(range(5), dtype=np.float64)),
    (np.array([9,8100,-60], dtype=np.float64), np.array([584,-11,23,79,1001], dtype=np.float64)),
    (np.random.uniform(-1000, 1000, [8]), np.random.uniform(-1000, 1000, [64])),
]
@pytest.mark.parametrize("Q, T", test_data)
def test_sliding_dot_product(Q, T):
    """core.sliding_dot_product must match the brute-force reference."""
    expected = naive_rolling_window_dot_product(Q, T)
    actual = core.sliding_dot_product(Q, T)
    npt.assert_almost_equal(expected, actual)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std(Q, T):
    """core.compute_mean_std must reproduce hand-computed means/stddevs."""
    m = Q.shape[0]
    # Reference values computed directly with numpy.
    expected_mean_Q = np.sum(Q) / m
    expected_std_Q = np.sqrt(np.sum(np.square(Q - expected_mean_Q) / m))
    windows = core.rolling_window(T, m)
    expected_mean_T = np.mean(windows, axis=1)
    expected_std_T = np.std(windows, axis=1)
    actual_mean_Q, actual_std_Q = core.compute_mean_std(Q, m)
    actual_mean_T, actual_std_T = core.compute_mean_std(T, m)
    npt.assert_almost_equal(expected_mean_Q, actual_mean_Q)
    npt.assert_almost_equal(expected_std_Q, actual_std_Q)
    npt.assert_almost_equal(expected_mean_T, actual_mean_T)
    npt.assert_almost_equal(expected_std_T, actual_std_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_calculate_distance_profile(Q, T):
    """core.calculate_distance_profile must match the z-normalized brute force."""
    m = Q.shape[0]
    z_windows = core.z_norm(core.rolling_window(T, m), 1)
    expected = np.linalg.norm(z_windows - core.z_norm(Q), axis=1)
    QT = core.sliding_dot_product(Q, T)
    mean_Q, std_Q = core.compute_mean_std(Q, m)
    mean_T, std_T = core.compute_mean_std(T, m)
    actual = core.calculate_distance_profile(m, QT, mean_Q, std_Q, mean_T, std_T)
    npt.assert_almost_equal(expected, actual)
@pytest.mark.parametrize("Q, T", test_data)
def test_mueen_calculate_distance_profile(Q, T):
    """Mueen's distance-profile algorithm must match the z-normalized brute force."""
    m = Q.shape[0]
    z_windows = core.z_norm(core.rolling_window(T, m), 1)
    expected = np.linalg.norm(z_windows - core.z_norm(Q), axis=1)
    actual = core.mueen_calculate_distance_profile(Q, T)
    npt.assert_almost_equal(expected, actual)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass(Q, T):
    """core.mass must match the z-normalized brute-force distance profile."""
    m = Q.shape[0]
    z_windows = core.z_norm(core.rolling_window(T, m), 1)
    expected = np.linalg.norm(z_windows - core.z_norm(Q), axis=1)
    actual = core.mass(Q, T)
    npt.assert_almost_equal(expected, actual)
| [
"stumpy.core.compute_mean_std",
"stumpy.core.calculate_distance_profile",
"stumpy.core.z_norm",
"numpy.square",
"pytest.mark.parametrize",
"numpy.testing.assert_almost_equal",
"numpy.dot",
"stumpy.core.sliding_dot_product",
"numpy.array",
"numpy.sum",
"numpy.random.uniform",
"stumpy.core.rolli... | [((570, 612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Q, T"""', 'test_data'], {}), "('Q, T', test_data)\n", (593, 612), False, 'import pytest\n'), ((785, 827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Q, T"""', 'test_data'], {}), "('Q, T', test_data)\n", (808, 827), False, 'import pytest\n'), ((1386, 1428), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Q, T"""', 'test_data'], {}), "('Q, T', test_data)\n", (1409, 1428), False, 'import pytest\n'), ((1825, 1867), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Q, T"""', 'test_data'], {}), "('Q, T', test_data)\n", (1848, 1867), False, 'import pytest\n'), ((2135, 2177), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Q, T"""', 'test_data'], {}), "('Q, T', test_data)\n", (2158, 2177), False, 'import pytest\n'), ((711, 741), 'stumpy.core.sliding_dot_product', 'core.sliding_dot_product', (['Q', 'T'], {}), '(Q, T)\n', (735, 741), False, 'from stumpy import core\n'), ((746, 782), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left', 'right'], {}), '(left, right)\n', (769, 782), True, 'import numpy.testing as npt\n'), ((1107, 1134), 'stumpy.core.compute_mean_std', 'core.compute_mean_std', (['Q', 'm'], {}), '(Q, m)\n', (1128, 1134), False, 'from stumpy import core\n'), ((1161, 1188), 'stumpy.core.compute_mean_std', 'core.compute_mean_std', (['T', 'm'], {}), '(T, m)\n', (1182, 1188), False, 'from stumpy import core\n'), ((1192, 1236), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_μ_Q', 'right_μ_Q'], {}), '(left_μ_Q, right_μ_Q)\n', (1215, 1236), True, 'import numpy.testing as npt\n'), ((1241, 1285), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_σ_Q', 'right_σ_Q'], {}), '(left_σ_Q, right_σ_Q)\n', (1264, 1285), True, 'import numpy.testing as npt\n'), ((1290, 1334), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_M_T', 'right_M_T'], {}), 
'(left_M_T, right_M_T)\n', (1313, 1334), True, 'import numpy.testing as npt\n'), ((1339, 1383), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_Σ_T', 'right_Σ_T'], {}), '(left_Σ_T, right_Σ_T)\n', (1362, 1383), True, 'import numpy.testing as npt\n'), ((1594, 1624), 'stumpy.core.sliding_dot_product', 'core.sliding_dot_product', (['Q', 'T'], {}), '(Q, T)\n', (1618, 1624), False, 'from stumpy import core\n'), ((1642, 1669), 'stumpy.core.compute_mean_std', 'core.compute_mean_std', (['Q', 'm'], {}), '(Q, m)\n', (1663, 1669), False, 'from stumpy import core\n'), ((1684, 1711), 'stumpy.core.compute_mean_std', 'core.compute_mean_std', (['T', 'm'], {}), '(T, m)\n', (1705, 1711), False, 'from stumpy import core\n'), ((1723, 1781), 'stumpy.core.calculate_distance_profile', 'core.calculate_distance_profile', (['m', 'QT', 'μ_Q', 'σ_Q', 'M_T', 'Σ_T'], {}), '(m, QT, μ_Q, σ_Q, M_T, Σ_T)\n', (1754, 1781), False, 'from stumpy import core\n'), ((1786, 1822), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left', 'right'], {}), '(left, right)\n', (1809, 1822), True, 'import numpy.testing as npt\n'), ((2049, 2092), 'stumpy.core.mueen_calculate_distance_profile', 'core.mueen_calculate_distance_profile', (['Q', 'T'], {}), '(Q, T)\n', (2086, 2092), False, 'from stumpy import core\n'), ((2096, 2132), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left', 'right'], {}), '(left, right)\n', (2119, 2132), True, 'import numpy.testing as npt\n'), ((2324, 2339), 'stumpy.core.mass', 'core.mass', (['Q', 'T'], {}), '(Q, T)\n', (2333, 2339), False, 'from stumpy import core\n'), ((2344, 2380), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left', 'right'], {}), '(left, right)\n', (2367, 2380), True, 'import numpy.testing as npt\n'), ((246, 272), 'numpy.dot', 'np.dot', (['T[i:i + window]', 'Q'], {}), '(T[i:i + window], Q)\n', (252, 272), True, 'import numpy as np\n'), ((311, 349), 'numpy.array', 'np.array', (['[-1, 1, 
2]'], {'dtype': 'np.float64'}), '([-1, 1, 2], dtype=np.float64)\n', (319, 349), True, 'import numpy as np\n'), ((392, 434), 'numpy.array', 'np.array', (['[9, 8100, -60]'], {'dtype': 'np.float64'}), '([9, 8100, -60], dtype=np.float64)\n', (400, 434), True, 'import numpy as np\n'), ((434, 486), 'numpy.array', 'np.array', (['[584, -11, 23, 79, 1001]'], {'dtype': 'np.float64'}), '([584, -11, 23, 79, 1001], dtype=np.float64)\n', (442, 486), True, 'import numpy as np\n'), ((490, 525), 'numpy.random.uniform', 'np.random.uniform', (['(-1000)', '(1000)', '[8]'], {}), '(-1000, 1000, [8])\n', (507, 525), True, 'import numpy as np\n'), ((527, 563), 'numpy.random.uniform', 'np.random.uniform', (['(-1000)', '(1000)', '[64]'], {}), '(-1000, 1000, [64])\n', (544, 563), True, 'import numpy as np\n'), ((896, 905), 'numpy.sum', 'np.sum', (['Q'], {}), '(Q)\n', (902, 905), True, 'import numpy as np\n'), ((986, 1011), 'stumpy.core.rolling_window', 'core.rolling_window', (['T', 'm'], {}), '(T, m)\n', (1005, 1011), False, 'from stumpy import core\n'), ((1044, 1069), 'stumpy.core.rolling_window', 'core.rolling_window', (['T', 'm'], {}), '(T, m)\n', (1063, 1069), False, 'from stumpy import core\n'), ((1561, 1575), 'stumpy.core.z_norm', 'core.z_norm', (['Q'], {}), '(Q)\n', (1572, 1575), False, 'from stumpy import core\n'), ((2013, 2027), 'stumpy.core.z_norm', 'core.z_norm', (['Q'], {}), '(Q)\n', (2024, 2027), False, 'from stumpy import core\n'), ((2288, 2302), 'stumpy.core.z_norm', 'core.z_norm', (['Q'], {}), '(Q)\n', (2299, 2302), False, 'from stumpy import core\n'), ((938, 961), 'numpy.square', 'np.square', (['(Q - left_μ_Q)'], {}), '(Q - left_μ_Q)\n', (947, 961), True, 'import numpy as np\n'), ((1529, 1554), 'stumpy.core.rolling_window', 'core.rolling_window', (['T', 'm'], {}), '(T, m)\n', (1548, 1554), False, 'from stumpy import core\n'), ((1981, 2006), 'stumpy.core.rolling_window', 'core.rolling_window', (['T', 'm'], {}), '(T, m)\n', (2000, 2006), False, 'from stumpy import core\n'), 
((2256, 2281), 'stumpy.core.rolling_window', 'core.rolling_window', (['T', 'm'], {}), '(T, m)\n', (2275, 2281), False, 'from stumpy import core\n')] |
from .tree_utils import load_tree
from .tree import *
from anytree import NodeMixin, RenderTree, render
import pickle
""" Operator test functions
* root : currently empty root node of a tree
* _a : does nothing currently
* _b : does nothing currently
"""
def root():
    # Stub operator used as the root node of the test trees.
    return "Executing Operator root"
def _a():
return "Executing Operator a"
def _b():
return "Executing Operator b"
def test_func():
    # Free function used to check that plain functions can be pickled on a node.
    return "Hello World"
class Test_func_class():
    """Minimal class used to check whether bound methods survive serialization."""

    def __init__(self):
        pass

    def test_func(self):
        # Same payload as the free test_func, but reached through an instance.
        return "Hello World"
""" Test functions
* literally just here to test certain types of functionality
"""
# region Generate_tree_test()
def generate_tree_test():
    """Build a tiny tree (root with two children) and print it two ways."""
    root_node = Node(root)
    child_a = Node(_a)
    child_b = Node(_b)
    test_tree = TTree("test", root_node)
    test_tree.add_node(root_node, child_a)
    test_tree.add_node(root_node, child_b)
    # Show the flat node list first, then the rendered tree with node ids.
    test_tree.print_nodes_as_list()
    test_tree.print_tree(id=True)
# region Saving_tree_test()
def saving_tree_test():
    """Demonstrate exporting a small TTree to JSON via anytree's JsonExporter."""
    # Build the demo tree: a root node with two children.
    # Layout:
    #   root (0)
    #   ├── _a (1)
    #   └── _b (2)
    demo_root = Node(root)
    child_a = Node(_a)
    child_b = Node(_b)
    demo_tree = TTree("root", demo_root)
    demo_tree.add_node(demo_root, child_a)
    demo_tree.add_node(demo_root, child_b)
    print('\n')
    print("Confirm that tree matches example code:")
    demo_tree.print_tree(True)
    print('\n')
    from anytree.exporter import JsonExporter
    # Objects json cannot serialize are replaced by a placeholder string.
    js_exporter = JsonExporter(
        indent=2, sort_keys=True, default=lambda o: '<not serializable>')
    with open("./ts_modeling/saved_trees/tree_to_save.json", 'w') as js_file:
        js_exporter.write(demo_tree.root, js_file)
    print("Here is the json formatting:")
    print(js_exporter.export(demo_tree.root))
    print('\n')
# endregion
# region Pickle_test()
def pickle_test():
    """Round-trip a TTree through pickle, printing it before and after."""
    demo_root = Node(root)
    node_a = Node(_a)
    node_b = Node(_b)
    demo_tree = TTree("root", demo_root)
    demo_tree.add_node(demo_root, node_a)
    demo_tree.add_node(demo_root, node_b)

    # Locally scoped functions are NOT picklable (kept for reference).
    def test_func2():
        return "hello world 2"

    # Instance whose bound method could be attached to a node instead.
    test_func3 = Test_func_class()
    test_func_node = Node(test_func)
    # test_func2_node = Node(test_func2)
    # test_func3_node = Node(test_func3.test_func)
    demo_tree.add_node(demo_root, test_func_node)
    # demo_tree.add_node(demo_root, test_func2_node)
    # demo_tree.add_node(demo_root, test_func3_node)
    # Print the tree before saving so the loaded copy can be compared by eye.
    demo_tree.print_tree(id=True)
    pickle_file_location = "./ts_modeling/saved_trees/test_pickle.pickle"
    demo_tree.save(pickle_file_location)
    from tree_utils import load_tree
    loaded_tree = load_tree(pickle_file_location)
    loaded_tree.print_tree(id=True)
# endregion
# region Pipeline_test() {Concept}
def test1():
    # First pipeline stage: seeds the string passed down the chain.
    return "hello world (test 1), "
def test2(val: str):
return (val + " test2 added", 1)
def test3(val):
return val[0] + ", " + "test3 added, "
def test4(val: str):
return (val + " this is TEST " + str(4), 4)
def pipeline_test():
    """Chain test1..test4 by hand: each stage's result feeds the next stage."""
    stages = [Node(test1), Node(test2), Node(test3), Node(test4)]
    # The first stage takes no input; every later stage takes the previous result.
    result = stages[0].function()
    for stage in stages[1:]:
        result = stage.function(result)
    print(result[0])
# endregion
# region Test_pipeline_class()
def _preProcess():
arr = [1, 2, 3, 4, 5, 6]
print("op1...")
print("Array to process: " + str(arr))
return arr
def _denoise(arr: []):
print("")
print("op2...")
result = [a * 2 for a in arr]
print("Array after \"denoising: \"" + str(result))
return (result, "denoised")
def _scale(denoise_tuple):
print("")
print("op3...")
result = [b**2 for b in denoise_tuple[0]]
print("Array after \"scaling\": " + str(result))
return (result, denoise_tuple[1] + ", scaled")
def _plot(scale_tuple):
print("")
print("op4...")
print("Here is the data that was passed through the pipeline:")
print(str(scale_tuple[0]))
print(scale_tuple[1])
class Class_Method_Test():
    """Holds a bound-method twin of _scale for pipeline method-support tests."""

    def __init__(self):
        pass

    def method_test(self, denoise_tuple):
        # Same contract as the free function _scale, reached through an instance.
        print("")
        print("op3...")
        squared = [value ** 2 for value in denoise_tuple[0]]
        print("Array after \"scaling\": " + str(squared))
        return (squared, denoise_tuple[1] + ", scaled")
def Test_pipeline_class():
    """Smoke-test the Pipeline class end to end.

    Runs nine numbered scenarios against Pipeline/TTree: building pipelines
    from an explicit operator list and from leaf nodes, execution, pickling
    round-trips, extracting all pipelines from a tree, and using bound
    methods (including a real model's forecast method) as pipeline operators.
    """
    print("")
    # Root of the tree
    rootNode = Node(root)
    # The tree itself
    tree = TTree("root", rootNode)
    # Tree nodes
    opA = Node(_preProcess)
    opB = Node(_denoise)
    opC = Node(_scale)
    opD = Node(_plot)
    tree.add_node(rootNode, opA)
    tree.add_node(opA, opB)
    tree.add_node(opB, opC)
    tree.add_node(opC, opD)
    print("TREE:")
    tree.print_tree()
    #region TEST 1 =======================================================
    # Build a pipeline from an explicit list of operator callables.
    pre_pipeline = [opA.function, opB.function, opC.function, opD.function]
    pipeline_test1 = Pipeline(None, pre_pipeline)
    print("\nTest 1 { build test 1 }")
    pipeline_test1.print()
    #endregion ===========================================================
    #region TEST 2 =======================================================
    # Build a pipeline by walking up from a leaf node.
    pipeline_test2 = Pipeline(opD, None)
    print("\nTest 2 { build test 2 }")
    pipeline_test2.print()
    #endregion ===========================================================
    #region TEST 3 =======================================================
    pipeline_test3 = Pipeline(opB, None)
    print("\nTest 3 { build test 3 }")
    pipeline_test3.print()
    #endregion ===========================================================
    #region TEST 4 =======================================================
    pipeline_test4 = Pipeline(None, pre_pipeline)
    print("\nTest 4 { pipeline execution }")
    pipeline_test4.execute()
    #endregion ===========================================================
    #region TEST 5 =======================================================
    # Save a pipeline to disk, load it back, and execute the loaded copy.
    print("\nTest 5 { pickling }")
    print("")
    print("Pipeline pre save...")
    pipeline_test5 = Pipeline(None, pre_pipeline)
    pipeline_test5.print()
    print("")
    pipeline_test5.save("./ts_modeling/saved_pipelines/pipe_test.pickle")
    from tree_utils import load_pipeline
    loaded_pipeline = load_pipeline(
        "./ts_modeling/saved_pipelines/pipe_test.pickle")
    print("Pipeline after load...")
    loaded_pipeline.print()
    print("")
    print("Testing loaded pipeline execution...")
    loaded_pipeline.execute()
    #endregion ===========================================================
    #region TEST 6 =======================================================
    # Add a second branch to the tree so multiple pipelines exist.
    print("\nTest 6 { TTree.get_pipelines() }")
    print("")
    opA_2 = Node(_preProcess)
    opB_2 = Node(_denoise)
    opC_2 = Node(_scale)
    opD_2 = Node(_plot)
    opE = Node(_scale)
    opF = Node(_plot)
    tree.add_node(rootNode, opA_2)
    tree.add_node(opA_2, opB_2)
    tree.add_node(opB_2, opC_2)
    tree.add_node(opC_2, opD_2)
    tree.add_node(opB_2, opE)
    tree.add_node(opE, opF)
    print("Tree to test:")
    tree.print_tree()
    pipeline_list = tree.get_pipelines()
    for i in range(len(pipeline_list)):
        print("printing pipeline (" + str(i) + ")")
        pipeline_list[i].print()
    #endregion ===========================================================
    #region TEST 7 =======================================================
    print("\nTest 7 { TTree.generate_pipeline() }")
    print("")
    pipeline_test7 = tree.generate_pipeline(opB)
    pipeline_test7.print()
    # Uncomment the two lines bellow to test if proper exception is raised
    # opZ = Node(_preProcess)
    # pipeline_test7_pt2 = tree.generate_pipeline(opZ)
    # Uncomment line bellow to test if proper "byid" exception raised
    # pipeline_test7_pt3 = tree.generate_pipeline_byid(999)
    #endregion ===========================================================
    #region TEST 8 =======================================================
    # Use a bound method (Class_Method_Test.method_test) as a pipeline operator.
    print("\nTest 8 { Class Method - Pipeline test }")
    print("")
    methodTest = Class_Method_Test()
    # Tree nodes
    op1 = Node(_preProcess)
    op2 = Node(_denoise)
    op3 = Node(methodTest.method_test)
    op4 = Node(_plot)
    tree2 = TTree("Test Tree 2", op1)
    tree2.add_node(op1, op2)
    tree2.add_node(op2, op3)
    tree2.add_node(op3, op4)
    print(tree2)
    pipeline_test8 = Pipeline(op4)
    pipeline_test8.print()
    print("\nExecuting Pipeline_test8")
    pipeline_test8.execute()
    #endregion============================================================
    #region TEST 9 =======================================================
    # Use a trained model's forecast method as the final pipeline operator.
    def t9_root():
        print("first op called...")
        from numpy import array
        forecast_input = array([40, 50, 60])
        forecast_input = forecast_input.reshape((1, len(forecast_input)))
        return forecast_input
    def _t9op2(arr):
        print("second op called..." + str(arr))
        return arr
    def _t9op3(arr):
        print("third op called..." + str(arr))
        return arr
    print("\nTest 9 { Using real class - method test }")
    print("")
    from forecasting import mlp_model
    time_series = [10, 20, 30, 40, 50, 60, 70, 80, 90]
    steps = 3
    test9 = mlp_model(time_series, steps)
    test9.split_data()
    test9.mlp.fit(test9.X, test9.y)
    # print("Forecast for", forecast_input, ":", test9.forecast(forecast_input), "\n")
    t9_op1 = Node(t9_root)
    t9_op2 = Node(_t9op2)
    t9_op3 = Node(_t9op3)
    t9_method_test = Node(test9.forecast)
    t9_tree = TTree("Test 9 tree", t9_op1)
    t9_tree.add_node(t9_op1, t9_op2)
    t9_tree.add_node(t9_op2, t9_op3)
    t9_tree.add_node(t9_op3, t9_method_test)
    print(t9_tree)
    t9_pipeline = Pipeline(t9_method_test)
    t9_pipeline.print()
    print("\nExecuting pipeline t9:")
    t9_pipeline.execute()
    #endregion ===========================================================
    print("")
# endregion
# Manual entry point: uncomment any of the demos below to run them as well.
# generate_tree_test()
# saving_tree_test()
# pickle_test()
# pipeline_test()
Test_pipeline_class()
| [
"numpy.array",
"anytree.exporter.JsonExporter",
"tree_utils.load_pipeline",
"forecasting.mlp_model",
"tree_utils.load_tree"
] | [((1960, 2038), 'anytree.exporter.JsonExporter', 'JsonExporter', ([], {'indent': '(2)', 'sort_keys': '(True)', 'default': "(lambda o: '<not serializable>')"}), "(indent=2, sort_keys=True, default=lambda o: '<not serializable>')\n", (1972, 2038), False, 'from anytree.exporter import JsonExporter\n'), ((3744, 3775), 'tree_utils.load_tree', 'load_tree', (['pickle_file_location'], {}), '(pickle_file_location)\n', (3753, 3775), False, 'from tree_utils import load_tree\n'), ((7510, 7573), 'tree_utils.load_pipeline', 'load_pipeline', (['"""./ts_modeling/saved_pipelines/pipe_test.pickle"""'], {}), "('./ts_modeling/saved_pipelines/pipe_test.pickle')\n", (7523, 7573), False, 'from tree_utils import load_pipeline\n'), ((10529, 10558), 'forecasting.mlp_model', 'mlp_model', (['time_series', 'steps'], {}), '(time_series, steps)\n', (10538, 10558), False, 'from forecasting import mlp_model\n'), ((10035, 10054), 'numpy.array', 'array', (['[40, 50, 60]'], {}), '([40, 50, 60])\n', (10040, 10054), False, 'from numpy import array\n')] |
import numpy as np
import torch
import matplotlib.pylab
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
DEFAULT_COLORMAP = matplotlib.pylab.cm.jet
def flow_to_image(flow: np.ndarray, autoscale: bool = False) -> np.ndarray:
    """
    Applies a colour map to an optical-flow field.

    Args:
        flow: flow with shape (2, H, W) — channel 0 holds the u component,
            channel 1 the v component.
        autoscale: if True, normalise the flow magnitude before colouring.

    Returns:
        HxWx3 uint8 numpy image.

    Code adapted from: https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
    """
    # Work on copies: the original in-place division (and compute_color's
    # NaN zeroing) wrote through the views and mutated the caller's array.
    u = flow[0, :, :].copy()
    v = flow[1, :, :].copy()
    if autoscale:
        # Normalise by the maximum magnitude (eps guards a zero-flow field).
        rad = np.sqrt(u ** 2 + v ** 2)
        maxrad = np.max(rad)
        u /= maxrad + np.finfo(float).eps
        v /= maxrad + np.finfo(float).eps
    # visualise flow with cmap
    return np.uint8(compute_color(u, v) * 255)
def _normalise(image: np.ndarray) -> np.ndarray:
lower = np.min(image)
delta = np.max(image) - lower
if delta == 0:
delta = 1
image = (image.astype(np.float32) - lower) / delta
return image
def apply_colour_map(
    image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = False
) -> np.ndarray:
    """
    Applies a colour map to the given 1 or 2 channel numpy image. if 2 channel, must be 2xHxW.
    Returns a HxWx3 numpy image
    """
    if image.ndim == 2 or (image.ndim == 3 and image.shape[0] == 1):
        if image.ndim == 3:
            # Drop the leading singleton channel axis -> (H, W).
            image = image[0]
        # grayscale scalar image
        if autoscale:
            image = _normalise(image)
        # cmap returns RGBA in [0, 1]; keep only the RGB channels.
        return cmap(image)[:, :, :3]
    if image.shape[0] == 2:
        # 2 dimensional UV
        return flow_to_image(image, autoscale=autoscale)
    if image.shape[0] == 3:
        # normalise rgb channels
        if autoscale:
            image = _normalise(image)
        # Convert channel-first (3, H, W) to channel-last (H, W, 3).
        return np.transpose(image, axes=[1, 2, 0])
    raise Exception('Image must be 1, 2 or 3 channel to convert to colour_map (CxHxW)')
def heatmap_image(
    image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = True
) -> np.ndarray:
    """Colorize a 1 or 2 channel float image with a colourmap; returns uint8 HxWx3."""
    is_float = issubclass(image.dtype.type, np.floating)
    if not is_float:
        raise ValueError(f"Expected a ndarray of float type, but got dtype {image.dtype}")
    shape_ok = image.ndim == 2 or (image.ndim == 3 and image.shape[0] in [1, 2])
    if not shape_ok:
        raise ValueError(f"Expected a ndarray of shape [H, W] or [1, H, W] or [2, H, W], but got shape {image.shape}")
    # Colour in [0, 1] floats, then quantise to uint8.
    coloured = apply_colour_map(image, cmap=cmap, autoscale=autoscale)
    return np.uint8(coloured * 255)
def compute_color(u: np.ndarray, v: np.ndarray) -> np.ndarray:
    """Map flow components (u, v) of shape (H, W) to an RGB image in [0, 1].

    NOTE: zeroing the NaN entries below writes through into the caller's
    u and v arrays.
    """
    assert u.shape == v.shape
    [h, w] = u.shape
    img = np.zeros([h, w, 3])
    # Remember where the flow was undefined so those pixels can be blanked.
    nan_mask = np.isnan(u) | np.isnan(v)
    u[nan_mask] = 0
    v[nan_mask] = 0
    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)
    rad = np.sqrt(u ** 2 + v ** 2)
    # Flow angle in (-1, 1], mapped to a fractional colour-wheel position.
    a = np.arctan2(-v, -u) / np.pi
    f_k = (a + 1) / 2 * (ncols - 1) + 1
    # k_0/k_1 are the two neighbouring wheel entries; f is the blend weight.
    k_0 = np.floor(f_k).astype(int)
    k_1 = k_0 + 1
    k_1[k_1 == ncols + 1] = 1
    f = f_k - k_0
    for i in range(0, np.size(colorwheel, 1)):
        tmp = colorwheel[:, i]
        # Linear interpolation between the two neighbouring wheel colours.
        col0 = tmp[k_0 - 1] / 255
        col1 = tmp[k_1 - 1] / 255
        col = (1 - f) * col0 + f * col1
        idx = rad <= 1
        # Inside the unit circle: desaturate towards white as radius shrinks.
        col[idx] = 1 - rad[idx] * (1 - col[idx])
        notidx = np.logical_not(idx)
        # Outside the unit circle: darken.
        col[notidx] *= 0.75
        # NaN pixels end up black.
        img[:, :, i] = col * (1 - nan_mask)
    return img
def make_color_wheel() -> np.ndarray:
    """
    Create colour wheel.
    Code adapted from https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py

    Returns an (ncols, 3) array of RGB values in [0, 255] describing six
    colour transitions around the hue circle.
    """
    # Each segment holds one channel at 255 while another channel ramps
    # linearly up or down: (length, held_channel, ramp_channel, ramp_up).
    segments = [
        (15, 0, 1, True),   # red -> yellow
        (6, 1, 0, False),   # yellow -> green
        (4, 1, 2, True),    # green -> cyan
        (11, 2, 1, False),  # cyan -> blue
        (13, 2, 0, True),   # blue -> magenta
        (6, 0, 2, False),   # magenta -> red
    ]
    total_rows = sum(length for length, _, _, _ in segments)
    wheel = np.zeros([total_rows, 3])
    row = 0
    for length, held, ramp, ascending in segments:
        values = np.floor(255 * np.arange(0, length) / length)
        if not ascending:
            values = 255 - values
        wheel[row: row + length, held] = 255
        wheel[row: row + length, ramp] = values
        row += length
    return wheel
def make_contour(img, colour=[0, 0, 0], double_line=False):
    """Return a copy of *img* with a one-pixel border painted in *colour*.

    With double_line=True a second, inner one-pixel frame is painted too.
    (The mutable default is safe here: *colour* is only read, never mutated.)
    """
    h, w = img.shape[:2]
    out = img.copy()
    rows = np.arange(h)
    cols = np.arange(w)
    # Outer frame: first/last column, then first/last row.
    out[rows, 0] = colour
    out[rows, w - 1] = colour
    out[0, cols] = colour
    out[h - 1, cols] = colour
    if double_line:
        # Inner frame, one pixel inside the outer one.
        out[rows, 1] = colour
        out[rows, w - 2] = colour
        out[1, cols] = colour
        out[h - 2, cols] = colour
    return out
def plot_instance_map(instance_image, instance_map, instance_colours=None, bg_image=None):
    """Paint each instance id in *instance_image* with its colour.

    Draws onto *bg_image* when given, otherwise onto a white canvas.
    """
    if isinstance(instance_image, torch.Tensor):
        instance_image = instance_image.cpu().numpy()
    assert isinstance(instance_image, np.ndarray)
    if instance_colours is None:
        instance_colours = generate_instance_colours(instance_map)
    if instance_image.ndim > 2:
        # Collapse any leading singleton axes down to (H, W).
        instance_image = instance_image.reshape((instance_image.shape[-2], instance_image.shape[-1]))
    if bg_image is None:
        height, width = instance_image.shape[0], instance_image.shape[1]
        canvas = np.full((height, width, 3), 255, dtype=np.uint8)
    else:
        canvas = bg_image
    for instance_id, rgb in instance_colours.items():
        canvas[instance_image == instance_id] = rgb
    return canvas
def visualise_output(labels, output, cfg):
    """Render ground-truth labels above model predictions as a video array.

    For each timestep, stacks instance / flow / semantic / centerness / offset
    panels vertically, with the ground-truth column next to the prediction
    column. Only the first batch element (b = 0) is visualised.
    Returns a numpy array of shape (1, T, C, H, W).
    """
    # semantic_colours = np.array([[255, 255, 255], [0, 0, 0]], dtype=np.uint8)
    semantic_colours = np.array([[255, 255, 255], [0, 0, 0],
                                 [255, 179, 0],
                                 [128, 62, 117],
                                 [255, 104, 0],
                                 [166, 189, 215],
                                 [193, 0, 32],
                                 [206, 162, 98],
                                 [129, 112, 102],
                                 [0, 125, 52],
                                 [246, 118, 142]], dtype=np.uint8)
    consistent_instance_seg = predict_instance_segmentation_and_trajectories(
        output, compute_matched_centers=False
    )
    sequence_length = consistent_instance_seg.shape[1]
    b = 0
    video = []
    for t in range(sequence_length):
        out_t = []
        # Ground truth
        # Skip id 0 (background) when building the instance colour map.
        unique_ids = torch.unique(labels['instance'][b, t]).cpu().numpy()[1:]
        instance_map = dict(zip(unique_ids, unique_ids))
        instance_plot = plot_instance_map(labels['instance'][b, t].cpu(), instance_map)
        instance_plot = make_contour(instance_plot)
        semantic_seg = labels['segmentation'].squeeze(2).cpu().numpy()
        semantic_plot = semantic_colours[semantic_seg[b, t]]
        semantic_plot = make_contour(semantic_plot)
        if cfg.INSTANCE_FLOW.ENABLED:
            # Zero the flow outside class-1 pixels before colouring it.
            future_flow_plot = labels['flow'][b, t].cpu().numpy()
            future_flow_plot[:, semantic_seg[b, t] != 1] = 0
            future_flow_plot = flow_to_image(future_flow_plot)
            future_flow_plot = make_contour(future_flow_plot)
        else:
            future_flow_plot = np.zeros_like(semantic_plot)
        center_plot = heatmap_image(labels['centerness'][b, t, 0].cpu().numpy())
        center_plot = make_contour(center_plot)
        offset_plot = labels['offset'][b, t].cpu().numpy()
        offset_plot[:, semantic_seg[b, t] != 1] = 0
        offset_plot = flow_to_image(offset_plot)
        offset_plot = make_contour(offset_plot)
        out_t.append(np.concatenate([instance_plot, future_flow_plot,
                                     semantic_plot, center_plot, offset_plot], axis=0))
        # Predictions
        unique_ids = torch.unique(consistent_instance_seg[b, t]).cpu().numpy()[1:]
        instance_map = dict(zip(unique_ids, unique_ids))
        instance_plot = plot_instance_map(consistent_instance_seg[b, t].cpu(), instance_map)
        instance_plot = make_contour(instance_plot)
        semantic_seg = output['segmentation'].argmax(dim=2).detach().cpu().numpy()
        semantic_plot = semantic_colours[semantic_seg[b, t]]
        semantic_plot = make_contour(semantic_plot)
        if cfg.INSTANCE_FLOW.ENABLED:
            future_flow_plot = output['instance_flow'][b, t].detach().cpu().numpy()
            future_flow_plot[:, semantic_seg[b, t] != 1] = 0
            future_flow_plot = flow_to_image(future_flow_plot)
            future_flow_plot = make_contour(future_flow_plot)
        else:
            future_flow_plot = np.zeros_like(semantic_plot)
        center_plot = heatmap_image(output['instance_center'][b, t, 0].detach().cpu().numpy())
        center_plot = make_contour(center_plot)
        offset_plot = output['instance_offset'][b, t].detach().cpu().numpy()
        offset_plot[:, semantic_seg[b, t] != 1] = 0
        offset_plot = flow_to_image(offset_plot)
        offset_plot = make_contour(offset_plot)
        out_t.append(np.concatenate([instance_plot, future_flow_plot,
                                     semantic_plot, center_plot, offset_plot], axis=0))
        out_t = np.concatenate(out_t, axis=1)
        # Shape (C, H, W)
        out_t = out_t.transpose((2, 0, 1))
        video.append(out_t)
    # Shape (B, T, C, H, W)
    video = np.stack(video)[None]
    return video
def convert_figure_numpy(figure):
    """Render a matplotlib figure's canvas into an (H, W, 3) uint8 array."""
    rgb_buffer = figure.canvas.tostring_rgb()
    width, height = figure.canvas.get_width_height()
    flat = np.frombuffer(rgb_buffer, dtype=np.uint8)
    return flat.reshape((height, width, 3))
def generate_instance_colours(instance_map):
    """Map each instance id in *instance_map* to an RGB colour.

    instance_map maps instance_id -> global_instance_id; the global id picks
    a palette entry, wrapping modulo the palette size when there are more
    instances than colours.
    """
    # Most distinct 22 colors (kelly colors from https://stackoverflow.com/questions/470690/how-to-automatically-generate
    # -n-distinct-colors)
    # plus some colours from AD40k
    INSTANCE_COLOURS = np.asarray([
        [0, 0, 0],
        [255, 179, 0],
        [128, 62, 117],
        [255, 104, 0],
        [166, 189, 215],
        [193, 0, 32],
        [206, 162, 98],
        [129, 112, 102],
        [0, 125, 52],
        [246, 118, 142],
        [0, 83, 138],
        [255, 122, 92],
        [83, 55, 122],
        [255, 142, 0],
        [179, 40, 81],
        [244, 200, 0],
        [127, 24, 13],
        [147, 170, 0],
        [89, 51, 21],
        [241, 58, 19],
        [35, 44, 22],
        [112, 224, 255],
        [70, 184, 160],
        [153, 0, 255],
        [71, 255, 0],
        [255, 0, 163],
        [255, 204, 0],
        [0, 255, 235],
        [255, 0, 235],
        [255, 0, 122],
        [255, 245, 0],
        [10, 190, 212],
        [214, 255, 0],
        [0, 204, 255],
        [20, 0, 255],
        [255, 255, 0],
        [0, 153, 255],
        [0, 255, 204],
        [41, 255, 0],
        [173, 0, 255],
        [0, 245, 255],
        [71, 0, 255],
        [0, 255, 184],
        [0, 92, 255],
        [184, 255, 0],
        [255, 214, 0],
        [25, 194, 194],
        [92, 0, 255],
        [220, 220, 220],
        [255, 9, 92],
        [112, 9, 255],
        [8, 255, 214],
        [255, 184, 6],
        [10, 255, 71],
        [255, 41, 10],
        [7, 255, 255],
        [224, 255, 8],
        [102, 8, 255],
        [255, 61, 6],
        [255, 194, 7],
        [0, 255, 20],
        [255, 8, 41],
        [255, 5, 153],
        [6, 51, 255],
        [235, 12, 255],
        [160, 150, 20],
        [0, 163, 255],
        [140, 140, 140],
        [250, 10, 15],
        [20, 255, 0],
    ])
    return {instance_id: INSTANCE_COLOURS[global_instance_id % len(INSTANCE_COLOURS)] for
            instance_id, global_instance_id in instance_map.items()
    }
| [
"numpy.uint8",
"fiery.utils.instance.predict_instance_segmentation_and_trajectories",
"numpy.sqrt",
"numpy.logical_not",
"numpy.array",
"numpy.arctan2",
"numpy.arange",
"torch.unique",
"numpy.repeat",
"numpy.asarray",
"numpy.max",
"numpy.stack",
"numpy.concatenate",
"numpy.min",
"numpy.o... | [((578, 602), 'numpy.sqrt', 'np.sqrt', (['(u ** 2 + v ** 2)'], {}), '(u ** 2 + v ** 2)\n', (585, 602), True, 'import numpy as np\n'), ((616, 627), 'numpy.max', 'np.max', (['rad'], {}), '(rad)\n', (622, 627), True, 'import numpy as np\n'), ((899, 912), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (905, 912), True, 'import numpy as np\n'), ((2630, 2656), 'numpy.uint8', 'np.uint8', (['(heatmap_np * 255)'], {}), '(heatmap_np * 255)\n', (2638, 2656), True, 'import numpy as np\n'), ((2805, 2824), 'numpy.zeros', 'np.zeros', (['[h, w, 3]'], {}), '([h, w, 3])\n', (2813, 2824), True, 'import numpy as np\n'), ((2955, 2977), 'numpy.size', 'np.size', (['colorwheel', '(0)'], {}), '(colorwheel, 0)\n', (2962, 2977), True, 'import numpy as np\n'), ((2989, 3013), 'numpy.sqrt', 'np.sqrt', (['(u ** 2 + v ** 2)'], {}), '(u ** 2 + v ** 2)\n', (2996, 3013), True, 'import numpy as np\n'), ((3994, 4014), 'numpy.zeros', 'np.zeros', (['[ncols, 3]'], {}), '([ncols, 3])\n', (4002, 4014), True, 'import numpy as np\n'), ((6816, 7020), 'numpy.array', 'np.array', (['[[255, 255, 255], [0, 0, 0], [255, 179, 0], [128, 62, 117], [255, 104, 0],\n [166, 189, 215], [193, 0, 32], [206, 162, 98], [129, 112, 102], [0, 125,\n 52], [246, 118, 142]]'], {'dtype': 'np.uint8'}), '([[255, 255, 255], [0, 0, 0], [255, 179, 0], [128, 62, 117], [255, \n 104, 0], [166, 189, 215], [193, 0, 32], [206, 162, 98], [129, 112, 102],\n [0, 125, 52], [246, 118, 142]], dtype=np.uint8)\n', (6824, 7020), True, 'import numpy as np\n'), ((7340, 7429), 'fiery.utils.instance.predict_instance_segmentation_and_trajectories', 'predict_instance_segmentation_and_trajectories', (['output'], {'compute_matched_centers': '(False)'}), '(output,\n compute_matched_centers=False)\n', (7386, 7429), False, 'from fiery.utils.instance import predict_instance_segmentation_and_trajectories\n'), ((11074, 12199), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0], [255, 179, 0], [128, 62, 117], [255, 104, 0], [166, 189, 215],\n [193, 0, 
32], [206, 162, 98], [129, 112, 102], [0, 125, 52], [246, 118,\n 142], [0, 83, 138], [255, 122, 92], [83, 55, 122], [255, 142, 0], [179,\n 40, 81], [244, 200, 0], [127, 24, 13], [147, 170, 0], [89, 51, 21], [\n 241, 58, 19], [35, 44, 22], [112, 224, 255], [70, 184, 160], [153, 0, \n 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [0, 255, 235], [255, \n 0, 235], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [\n 0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 255, 204],\n [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [0, 255, 184],\n [0, 92, 255], [184, 255, 0], [255, 214, 0], [25, 194, 194], [92, 0, 255\n ], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [255, \n 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [\n 102, 8, 255], [255, 61, 6], [255, 194, 7], [0, 255, 20], [255, 8, 41],\n [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, \n 255], [140, 140, 140], [250, 10, 15], [20, 255, 0]]'], {}), '([[0, 0, 0], [255, 179, 0], [128, 62, 117], [255, 104, 0], [166, \n 189, 215], [193, 0, 32], [206, 162, 98], [129, 112, 102], [0, 125, 52],\n [246, 118, 142], [0, 83, 138], [255, 122, 92], [83, 55, 122], [255, 142,\n 0], [179, 40, 81], [244, 200, 0], [127, 24, 13], [147, 170, 0], [89, 51,\n 21], [241, 58, 19], [35, 44, 22], [112, 224, 255], [70, 184, 160], [153,\n 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [0, 255, 235], [\n 255, 0, 235], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, \n 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 255,\n 204], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [0, 255,\n 184], [0, 92, 255], [184, 255, 0], [255, 214, 0], [25, 194, 194], [92, \n 0, 255], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [\n 255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8\n ], [102, 8, 255], [255, 61, 6], [255, 194, 7], [0, 255, 20], [255, 8, \n 41], [255, 5, 153], [6, 51, 255], 
[235, 12, 255], [160, 150, 20], [0, \n 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0]])\n', (11084, 12199), True, 'import numpy as np\n'), ((925, 938), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (931, 938), True, 'import numpy as np\n'), ((1859, 1894), 'numpy.transpose', 'np.transpose', (['image'], {'axes': '[1, 2, 0]'}), '(image, axes=[1, 2, 0])\n', (1871, 1894), True, 'import numpy as np\n'), ((2840, 2851), 'numpy.isnan', 'np.isnan', (['u'], {}), '(u)\n', (2848, 2851), True, 'import numpy as np\n'), ((2854, 2865), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (2862, 2865), True, 'import numpy as np\n'), ((3022, 3040), 'numpy.arctan2', 'np.arctan2', (['(-v)', '(-u)'], {}), '(-v, -u)\n', (3032, 3040), True, 'import numpy as np\n'), ((3214, 3236), 'numpy.size', 'np.size', (['colorwheel', '(1)'], {}), '(colorwheel, 1)\n', (3221, 3236), True, 'import numpy as np\n'), ((3468, 3487), 'numpy.logical_not', 'np.logical_not', (['idx'], {}), '(idx)\n', (3482, 3487), True, 'import numpy as np\n'), ((10357, 10386), 'numpy.concatenate', 'np.concatenate', (['out_t'], {'axis': '(1)'}), '(out_t, axis=1)\n', (10371, 10386), True, 'import numpy as np\n'), ((10526, 10541), 'numpy.stack', 'np.stack', (['video'], {}), '(video)\n', (10534, 10541), True, 'import numpy as np\n'), ((3099, 3112), 'numpy.floor', 'np.floor', (['f_k'], {}), '(f_k)\n', (3107, 3112), True, 'import numpy as np\n'), ((5392, 5404), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (5401, 5404), True, 'import numpy as np\n'), ((5406, 5421), 'numpy.repeat', 'np.repeat', (['(0)', 'h'], {}), '(0, h)\n', (5415, 5421), True, 'import numpy as np\n'), ((5440, 5452), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (5449, 5452), True, 'import numpy as np\n'), ((5454, 5473), 'numpy.repeat', 'np.repeat', (['(w - 1)', 'h'], {}), '(w - 1, h)\n', (5463, 5473), True, 'import numpy as np\n'), ((5516, 5531), 'numpy.repeat', 'np.repeat', (['(0)', 'w'], {}), '(0, w)\n', (5525, 5531), True, 'import numpy as 
np\n'), ((5533, 5545), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (5542, 5545), True, 'import numpy as np\n'), ((5564, 5583), 'numpy.repeat', 'np.repeat', (['(h - 1)', 'w'], {}), '(h - 1, w)\n', (5573, 5583), True, 'import numpy as np\n'), ((5585, 5597), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (5594, 5597), True, 'import numpy as np\n'), ((6427, 6505), 'numpy.ones', 'np.ones', (['(instance_image.shape[0], instance_image.shape[1], 3)'], {'dtype': 'np.uint8'}), '((instance_image.shape[0], instance_image.shape[1], 3), dtype=np.uint8)\n', (6434, 6505), True, 'import numpy as np\n'), ((8396, 8424), 'numpy.zeros_like', 'np.zeros_like', (['semantic_plot'], {}), '(semantic_plot)\n', (8409, 8424), True, 'import numpy as np\n'), ((8786, 8888), 'numpy.concatenate', 'np.concatenate', (['[instance_plot, future_flow_plot, semantic_plot, center_plot, offset_plot]'], {'axis': '(0)'}), '([instance_plot, future_flow_plot, semantic_plot, center_plot,\n offset_plot], axis=0)\n', (8800, 8888), True, 'import numpy as np\n'), ((9782, 9810), 'numpy.zeros_like', 'np.zeros_like', (['semantic_plot'], {}), '(semantic_plot)\n', (9795, 9810), True, 'import numpy as np\n'), ((10204, 10306), 'numpy.concatenate', 'np.concatenate', (['[instance_plot, future_flow_plot, semantic_plot, center_plot, offset_plot]'], {'axis': '(0)'}), '([instance_plot, future_flow_plot, semantic_plot, center_plot,\n offset_plot], axis=0)\n', (10218, 10306), True, 'import numpy as np\n'), ((695, 710), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (703, 710), True, 'import numpy as np\n'), ((737, 752), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (745, 752), True, 'import numpy as np\n'), ((5641, 5653), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (5650, 5653), True, 'import numpy as np\n'), ((5655, 5670), 'numpy.repeat', 'np.repeat', (['(1)', 'h'], {}), '(1, h)\n', (5664, 5670), True, 'import numpy as np\n'), ((5693, 5705), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', 
(5702, 5705), True, 'import numpy as np\n'), ((5707, 5726), 'numpy.repeat', 'np.repeat', (['(w - 2)', 'h'], {}), '(w - 2, h)\n', (5716, 5726), True, 'import numpy as np\n'), ((5777, 5792), 'numpy.repeat', 'np.repeat', (['(1)', 'w'], {}), '(1, w)\n', (5786, 5792), True, 'import numpy as np\n'), ((5794, 5806), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (5803, 5806), True, 'import numpy as np\n'), ((5829, 5848), 'numpy.repeat', 'np.repeat', (['(h - 2)', 'w'], {}), '(h - 2, w)\n', (5838, 5848), True, 'import numpy as np\n'), ((5850, 5862), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (5859, 5862), True, 'import numpy as np\n'), ((4146, 4170), 'numpy.arange', 'np.arange', (['(0)', 'red_yellow'], {}), '(0, red_yellow)\n', (4155, 4170), True, 'import numpy as np\n'), ((4574, 4598), 'numpy.arange', 'np.arange', (['(0)', 'green_cyan'], {}), '(0, green_cyan)\n', (4583, 4598), True, 'import numpy as np\n'), ((4976, 5002), 'numpy.arange', 'np.arange', (['(0)', 'blue_magenta'], {}), '(0, blue_magenta)\n', (4985, 5002), True, 'import numpy as np\n'), ((4316, 4342), 'numpy.arange', 'np.arange', (['(0)', 'yellow_green'], {}), '(0, yellow_green)\n', (4325, 4342), True, 'import numpy as np\n'), ((4729, 4752), 'numpy.arange', 'np.arange', (['(0)', 'cyan_blue'], {}), '(0, cyan_blue)\n', (4738, 4752), True, 'import numpy as np\n'), ((5142, 5167), 'numpy.arange', 'np.arange', (['(0)', 'magenta_red'], {}), '(0, magenta_red)\n', (5151, 5167), True, 'import numpy as np\n'), ((7621, 7659), 'torch.unique', 'torch.unique', (["labels['instance'][b, t]"], {}), "(labels['instance'][b, t])\n", (7633, 7659), False, 'import torch\n'), ((8967, 9010), 'torch.unique', 'torch.unique', (['consistent_instance_seg[b, t]'], {}), '(consistent_instance_seg[b, t])\n', (8979, 9010), False, 'import torch\n')] |
#-------------------------------------------------------------------------------
# Copyright (c) 2012 <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#-------------------------------------------------------------------------------
__author__ = """\n""".join(['<NAME> <<EMAIL>>'])
__all__ = ['test_stabrnd']
from complex_systems.mobility.stabrnd import stabrnd
import numpy as N
import unittest
class test_stabrnd(unittest.TestCase):
    """Regression test for the stable random number generator."""
    def setUp(self):
        pass
    def test_stabrnd(self):
        # Expected draws for alpha=0.8, beta=0, c=1, delta=0 under this seed.
        result = N.array([[2.92229997], [-0.78181396], [-0.44122327]])
        N.random.seed(123456)
        test = stabrnd(0.8, 0, 1, 0, 3, 1)
        # Compare the arrays element-wise. The previous
        # ``assertEqual(result.all(), test.all())`` only compared two
        # "all elements truthy" booleans, so it passed even when the
        # generated numbers were wrong.
        N.testing.assert_allclose(test, result, atol=1e-8)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.array",
"numpy.random.seed",
"complex_systems.mobility.stabrnd.stabrnd"
] | [((1763, 1778), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1776, 1778), False, 'import unittest\n'), ((1551, 1604), 'numpy.array', 'N.array', (['[[2.92229997], [-0.78181396], [-0.44122327]]'], {}), '([[2.92229997], [-0.78181396], [-0.44122327]])\n', (1558, 1604), True, 'import numpy as N\n'), ((1611, 1632), 'numpy.random.seed', 'N.random.seed', (['(123456)'], {}), '(123456)\n', (1624, 1632), True, 'import numpy as N\n'), ((1648, 1675), 'complex_systems.mobility.stabrnd.stabrnd', 'stabrnd', (['(0.8)', '(0)', '(1)', '(0)', '(3)', '(1)'], {}), '(0.8, 0, 1, 0, 3, 1)\n', (1655, 1675), False, 'from complex_systems.mobility.stabrnd import stabrnd\n')] |
import pandas as pd
import numpy as np
def downgrade_dtypes(df):
    """Shrink 64-bit numeric columns of *df* to their 32-bit equivalents.

    The DataFrame is modified in place (float64 -> float32, int64 -> int32)
    to roughly halve the memory footprint of numeric data.

    Parameters
    ----------
    df : DataFrame

    Returns
    -------
    df : DataFrame
        The same DataFrame object, with narrowed numeric dtypes.
    """
    narrowing = {"float64": np.float32, "int64": np.int32}
    for wide, narrow in narrowing.items():
        cols = [name for name in df if df[name].dtype == wide]
        df[cols] = df[cols].astype(narrow)
    return df
def create_submission_csv(file_name, test, y_test):
    """Write a submission csv with predictions clipped to [0, 20].

    The file contains two columns, "ID" (taken from the index of *test*)
    and "item_cnt_month" (the clipped predictions).

    Parameters
    ----------
    file_name : str
        Name or path of the csv file to create.
    test : DataFrame
        Test-set DataFrame whose index provides the submission IDs.
    y_test : ndarray
        Predictions of shape (n,) in the order given in the test set.

    Returns
    -------
    None
    """
    # The competition metric clips the target to [0, 20]; mirror that here.
    clipped = np.clip(y_test, 0, 20)
    frame = pd.DataFrame({"ID": test.index, "item_cnt_month": clipped})
    frame.to_csv(file_name, index=False)
def rename_shop_ids(df):
    """Collapse duplicate shop ids in place: 11 -> 10, 0 -> 57, 1 -> 58.

    These shop-id pairs refer to the same physical shops, so one id of each
    pair is rewritten onto the other. (The previous docstring stated the
    first mapping backwards as "10 -> 11"; the code maps 11 to 10.)

    Parameters
    ----------
    df : DataFrame
        Must contain a "shop_id" column; modified in place.

    Returns
    -------
    None
    """
    df.loc[df["shop_id"] == 11, "shop_id"] = 10
    df.loc[df["shop_id"] == 0, "shop_id"] = 57
    df.loc[df["shop_id"] == 1, "shop_id"] = 58
def get_X_y(df, target_name):
    """Split *df* into a feature matrix and its target column.

    Parameters
    ----------
    df : DataFrame
    target_name : str
        Name of the target column present in *df*.

    Returns
    -------
    X : DataFrame
        Copy of *df* without the target column (the original DataFrame
        is left untouched).
    y : Series
        The target column.
    """
    target = df[target_name]
    remaining = df.drop(columns=target_name)
    return remaining, target
| [
"numpy.clip",
"pandas.DataFrame"
] | [((1213, 1235), 'numpy.clip', 'np.clip', (['y_test', '(0)', '(20)'], {}), '(y_test, 0, 20)\n', (1220, 1235), True, 'import numpy as np\n'), ((1252, 1310), 'pandas.DataFrame', 'pd.DataFrame', (["{'ID': test.index, 'item_cnt_month': y_test}"], {}), "({'ID': test.index, 'item_cnt_month': y_test})\n", (1264, 1310), True, 'import pandas as pd\n')] |
from __future__ import division
import gzip
import numpy as np
import pandas as pd
from astropy.io import fits
import matplotlib.pyplot as plt
from VALDextraction import VALDmail
from plot_fits import get_wavelength
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.major.width'] = 2
def merge_data():
    """Merge the Fe1 line list with the re-measured line list on wavelength.

    Reads 'Fe1.moog' and 'linelist_newEW.moog' from the current directory,
    rounds the Fe1 wavelengths to 2 decimals so they match the keys of the
    other list, and outer-joins the two tables on 'Wavelength'.

    Returns
    -------
    df : pandas.DataFrame
        Outer merge of the two line lists; clashing columns get the usual
        '_x'/'_y' suffixes.
    """
    names = 'Wavelength Ele excit loggf EW'.split(' ')
    d1 = pd.read_csv('Fe1.moog', names=names, delimiter=r'\s+', skiprows=1)
    # Series.round works on Python 2 and 3; the old ``map(lambda ...)``
    # stored a one-shot iterator on Python 3, corrupting the merge key.
    d1['Wavelength'] = d1['Wavelength'].round(2)
    d2 = pd.read_csv('linelist_newEW.moog', names=names, delimiter=r'\s+', skiprows=1)
    df = pd.merge(d1, d2, left_on='Wavelength', right_on='Wavelength', how='outer')
    return df
def read_raw_VALD(fname):
    """Parse a gzipped raw VALD line-list extract into a DataFrame.

    The first two lines are headers; each data line is comma-separated
    with a quoted element name followed by seven floats and a trailing
    reference field (which is dropped). Parsing stops at the first line
    of the reference section (a line without commas).

    Parameters
    ----------
    fname : str
        Path to the gzipped VALD file.

    Returns
    -------
    pandas.DataFrame
        Columns: 'element', 'wavelength', 'EP', 'loggf'.
    """
    rows = []
    # 'rt' yields str lines; binary mode ('r') would make str.split(',')
    # fail on Python 3 because gzip then returns bytes.
    with gzip.open(fname, 'rt') as lines:
        for i, line in enumerate(lines):
            if i < 2:
                continue  # skip the two header lines
            try:
                fields = line.split(',')[:-1]  # drop the reference field
                fields[0] = fields[0].replace("'", "")  # unquote element
                fields[1:] = [float(value) for value in fields[1:]]
                rows.append(fields)
            except IndexError:  # reached the reference section
                break
    names = ('element', 'wavelength', 'EP', 'loggf',
             'rad', 'stark', 'waals', 'f')
    df = pd.DataFrame(rows, columns=names)
    return df.loc[:, ('element', 'wavelength', 'EP', 'loggf')]
def read_sun(w1, w2):
    """Load the solar spectrum and return wavelength/flux inside [w1, w2].

    The flux is first divided by its median and then rescaled so that its
    maximum is 1.

    Parameters
    ----------
    w1, w2 : float
        Lower and upper wavelength bounds (inclusive).

    Returns
    -------
    wavelength, flux : ndarray
        Wavelength grid and normalised flux restricted to the window.
    """
    spectrum_path = '/home/daniel/.plotfits/solarspectrum_01.fits'
    wavelength = get_wavelength(fits.getheader(spectrum_path))
    flux = fits.getdata(spectrum_path)
    window = (w1 <= wavelength) & (wavelength <= w2)
    wavelength = wavelength[window]
    flux = flux[window]
    flux /= np.median(flux)
    flux /= flux.max()
    return wavelength, flux
if __name__ == '__main__':
    df = merge_data()
    # Lines whose EW is missing in the re-measured list.
    d = df[np.isnan(df['EW_y'])]
    wavelength = np.random.choice(d['Wavelength'])
    # print 'Using wrong wavelength at {:.2f}AA'.format(wavelength)
    # VALDmail(wavelength=wavelength, step=1.5)
    dd = read_raw_VALD('lines1.gz')
    # Crude line-strength proxy from loggf and excitation potential,
    # rescaled to [0, 1] and squared to emphasise the strongest lines.
    # NOTE(review): 5040/5777 looks like theta = 5040/Teff for the Sun
    # (Teff = 5777 K) - confirm.
    dd['EW'] = dd['loggf'] - dd['EP']*5040/5777
    dd['EW'] = (dd['EW'] - min(dd['EW'])) / (max(dd['EW'])-min(dd['EW']))
    dd['EW'] = dd['EW']**2
    w, f = read_sun(dd['wavelength'].min(), dd['wavelength'].max())
    # Split iron lines (Fe 1 / Fe 2) from everything else for colouring.
    idx = (dd['element'] == 'Fe 1') | (dd['element'] == 'Fe 2')
    d1 = dd[idx]
    d2 = dd[~idx]
    plt.plot(w, f)
    x1, x2 = plt.xlim()
    y1, y2 = plt.ylim()
    # Mark each line with a vertical bar whose depth and opacity scale
    # with the estimated strength (C2 = iron, C1 = other elements).
    for line, strength in d1[['wavelength', 'EW']].values:
        plt.vlines(line, 1-(1-y1)*strength, 1, color='C2', alpha=strength)
    for line, strength in d2[['wavelength', 'EW']].values:
        plt.vlines(line, 1-(1-y1)*strength, 1, color='C1', alpha=strength)
    plt.xlim(x1, x2)
    plt.ylim(y1-0.005, y2)
    ax = plt.gca()
    ax.xaxis.get_major_formatter().set_useOffset(False)
    # Five evenly spaced wavelength ticks centred on the plotted window.
    w_mid = (x2+x1)/2
    xticks = (w_mid-0.90, w_mid-0.45, w_mid, w_mid+0.45, w_mid+0.90)
    # NOTE(review): on Python 3 ``map`` returns a one-shot iterator, so
    # passing it twice to plt.xticks consumes it for the labels argument.
    # This idiom only works as intended on Python 2 - confirm interpreter.
    xticks = map(lambda x: round(x, 2), xticks)
    plt.xticks(xticks, xticks)
    plt.xlabel(r'Wavelength [$\AA$]')
    plt.ylabel('Flux')
    # plt.savefig('../visualSelection.pdf')
    plt.show()
| [
"numpy.median",
"astropy.io.fits.getheader",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"numpy.random.choice",
"matplotlib.pyplot.gca",
"pandas.merge",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"gzip.open",
"matplotlib.pyplot.vlines",
"astropy.io.f... | [((572, 638), 'pandas.read_csv', 'pd.read_csv', (['"""Fe1.moog"""'], {'names': 'names', 'delimiter': '"""\\\\s+"""', 'skiprows': '(1)'}), "('Fe1.moog', names=names, delimiter='\\\\s+', skiprows=1)\n", (583, 638), True, 'import pandas as pd\n'), ((716, 793), 'pandas.read_csv', 'pd.read_csv', (['"""linelist_newEW.moog"""'], {'names': 'names', 'delimiter': '"""\\\\s+"""', 'skiprows': '(1)'}), "('linelist_newEW.moog', names=names, delimiter='\\\\s+', skiprows=1)\n", (727, 793), True, 'import pandas as pd\n'), ((803, 877), 'pandas.merge', 'pd.merge', (['d1', 'd2'], {'left_on': '"""Wavelength"""', 'right_on': '"""Wavelength"""', 'how': '"""outer"""'}), "(d1, d2, left_on='Wavelength', right_on='Wavelength', how='outer')\n", (811, 877), True, 'import pandas as pd\n'), ((1527, 1558), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'columns': 'names'}), '(df, columns=names)\n', (1539, 1558), True, 'import pandas as pd\n'), ((1757, 1775), 'astropy.io.fits.getdata', 'fits.getdata', (['path'], {}), '(path)\n', (1769, 1775), False, 'from astropy.io import fits\n'), ((1844, 1856), 'numpy.median', 'np.median', (['f'], {}), '(f)\n', (1853, 1856), True, 'import numpy as np\n'), ((1991, 2024), 'numpy.random.choice', 'np.random.choice', (["d['Wavelength']"], {}), "(d['Wavelength'])\n", (2007, 2024), True, 'import numpy as np\n'), ((2500, 2514), 'matplotlib.pyplot.plot', 'plt.plot', (['w', 'f'], {}), '(w, f)\n', (2508, 2514), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2538), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (2536, 2538), True, 'import matplotlib.pyplot as plt\n'), ((2552, 2562), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (2560, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2836, 2852), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x1', 'x2'], {}), '(x1, x2)\n', (2844, 2852), True, 'import matplotlib.pyplot as plt\n'), ((2857, 2881), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(y1 - 0.005)', 'y2'], {}), '(y1 - 0.005, y2)\n', 
(2865, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2889, 2898), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2896, 2898), True, 'import matplotlib.pyplot as plt\n'), ((3099, 3125), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xticks', 'xticks'], {}), '(xticks, xticks)\n', (3109, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3164), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [$\\\\AA$]"""'], {}), "('Wavelength [$\\\\AA$]')\n", (3141, 3164), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3187), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (3179, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3245, 3247), True, 'import matplotlib.pyplot as plt\n'), ((941, 962), 'gzip.open', 'gzip.open', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (950, 962), False, 'import gzip\n'), ((1727, 1747), 'astropy.io.fits.getheader', 'fits.getheader', (['path'], {}), '(path)\n', (1741, 1747), False, 'from astropy.io import fits\n'), ((1952, 1972), 'numpy.isnan', 'np.isnan', (["df['EW_y']"], {}), "(df['EW_y'])\n", (1960, 1972), True, 'import numpy as np\n'), ((2631, 2703), 'matplotlib.pyplot.vlines', 'plt.vlines', (['line', '(1 - (1 - y1) * strength)', '(1)'], {'color': '"""C2"""', 'alpha': 'strength'}), "(line, 1 - (1 - y1) * strength, 1, color='C2', alpha=strength)\n", (2641, 2703), True, 'import matplotlib.pyplot as plt\n'), ((2765, 2837), 'matplotlib.pyplot.vlines', 'plt.vlines', (['line', '(1 - (1 - y1) * strength)', '(1)'], {'color': '"""C1"""', 'alpha': 'strength'}), "(line, 1 - (1 - y1) * strength, 1, color='C1', alpha=strength)\n", (2775, 2837), True, 'import matplotlib.pyplot as plt\n')] |
from datetime import datetime
from sklearn.model_selection import train_test_split
from IMLearn import BaseEstimator
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
import numpy as np
import pandas as pd
def load_data(filename: str, have_true_val: bool = True):
    """
    Load the Agoda booking-cancellation dataset and build the design matrix.

    Parameters
    ----------
    filename: str
        Path to the Agoda cancellation csv file
    have_true_val: bool, default True
        When True, binary labels are derived from 'cancellation_datetime'
        (1 = the booking was cancelled); when False (unlabeled test data)
        an empty list is returned instead of labels.

    Returns
    -------
    Tuple of (features DataFrame, labels) where labels is a 0/1 Series or
    an empty list when have_true_val is False.
    """
    # TODO - replace below code with any desired preprocessing
    full_data = pd.read_csv(filename).drop_duplicates()
    # TODO's
    # create num from order date
    # Numeric columns taken as-is.
    features = full_data[[
        "no_of_children",
        "no_of_extra_bed",
        "no_of_room",
        "guest_is_not_the_customer",
        "original_selling_amount",
        "no_of_adults",
    ]]
    # One-hot encode the categorical columns and append the numeric block
    # last (column order matters for downstream alignment).
    features = pd.concat(
        [
            pd.get_dummies(full_data["charge_option"],
                           columns=["charge_option"]),
            pd.get_dummies(full_data["customer_nationality"],
                           columns=["customer_nationality"]),
            pd.get_dummies(full_data["accommadation_type_name"],
                           columns=["accommadation_type_name"]),
            pd.get_dummies(full_data["hotel_country_code"],
                           columns=["hotel_country_code"]),
            pd.get_dummies(full_data["original_payment_method"],
                           columns=["original_payment_method"]),
            pd.get_dummies(full_data["cancellation_policy_code"],
                           columns=["cancellation_policy_code"]),
            pd.get_dummies(full_data["hotel_star_rating"],
                           columns=["hotel_star_rating"]),
            # FIXME work, but chek later how to convert bool to 1/0
            pd.get_dummies(full_data["is_first_booking"],
                           columns=["is_first_booking"], drop_first=True),
            pd.get_dummies(full_data["is_user_logged_in"],
                           columns=["is_user_logged_in"], drop_first=True),
            features], axis=1)
    # Length of the stay in days (checkout - checkin).
    features["no_order_days"] = full_data.apply(
        extract_days_diff_between_str_date,
        axis=1, args=("checkout_date", "checkin_date"))
    # Lead time in days between booking and checkin.
    features["no_before"] = full_data.apply(
        extract_days_diff_between_str_date,
        axis=1, args=("checkin_date", "booking_datetime"))
    labels = []
    if have_true_val:
        # 1 when a cancellation timestamp exists, 0 otherwise.
        labels = full_data["cancellation_datetime"].apply(
            lambda x: 0 if pd.isnull(x) else 1)
    return features, labels
# first - second
def extract_days_diff_between_str_date(data_row, first_data_name,
                                       second_date_name):
    """Return the whole-day difference ``first - second`` between two
    date-string fields of *data_row*.

    Both fields must start with an ISO date (``YYYY-MM-DD``); any trailing
    time component is ignored.
    """
    def _as_date(field_name):
        # Parse only the first 10 characters (the date part).
        return datetime.strptime(data_row[field_name][:10], '%Y-%m-%d')
    return (_as_date(first_data_name) - _as_date(second_date_name)).days
def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray,
                        filename: str):
    """Export the estimator's predictions over *X* to a csv file.

    The resulting file has a single column named 'predicted_values' with
    one row per sample, in the order of *X*.

    Parameters
    ----------
    estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
        Fitted estimator to use for prediction
    X: ndarray of shape (n_samples, n_features)
        Test design matrix to predict its responses
    filename:
        path to store file at
    """
    predictions = estimator.predict(X)
    frame = pd.DataFrame(predictions, columns=["predicted_values"])
    frame.to_csv(filename, index=False)
if __name__ == '__main__':
    np.random.seed(0)
    # Load data
    df, cancellation_labels = load_data(
        "../datasets/agoda_cancellation_train.csv")
    # Store model predictions over test set
    # load_data(..., False) returns (features, []); only the features
    # are needed for the unlabeled weekly test set.
    test_set_week = load_data("test_set_week_1.csv", False)[0]
    # NOTE(review): this loop runs exactly once - presumably a leftover
    # from experimenting with repeated fits; confirm before removing.
    for i in range(1):
        # Get missing columns in the training test
        df, test_set_week = df.align(test_set_week, join='outer', axis=1,
                                     fill_value=0)
        train_X, test_X, train_y, test_y = \
            train_test_split(df, cancellation_labels, test_size=0.001)
        # Fit model over data
        estimator = AgodaCancellationEstimator().fit(train_X.to_numpy(),
                                                     train_y.to_numpy())
        # FIXME print
        print(estimator.loss(test_X.to_numpy(), test_y.to_numpy()))
        print((train_X.size, train_y.size))
        print((test_X.size, test_y.size))
        evaluate_and_export(estimator, test_set_week.to_numpy(),
                            "209381284_211997275_318164886.csv")
| [
"pandas.isnull",
"pandas.read_csv",
"datetime.datetime.strptime",
"sklearn.model_selection.train_test_split",
"numpy.random.seed",
"challenge.agoda_cancellation_estimator.AgodaCancellationEstimator",
"pandas.get_dummies"
] | [((3016, 3077), 'datetime.datetime.strptime', 'datetime.strptime', (['data_row[first_data_name][:10]', '"""%Y-%m-%d"""'], {}), "(data_row[first_data_name][:10], '%Y-%m-%d')\n", (3033, 3077), False, 'from datetime import datetime\n'), ((3087, 3149), 'datetime.datetime.strptime', 'datetime.strptime', (['data_row[second_date_name][:10]', '"""%Y-%m-%d"""'], {}), "(data_row[second_date_name][:10], '%Y-%m-%d')\n", (3104, 3149), False, 'from datetime import datetime\n'), ((4018, 4035), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4032, 4035), True, 'import numpy as np\n'), ((4512, 4570), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df', 'cancellation_labels'], {'test_size': '(0.001)'}), '(df, cancellation_labels, test_size=0.001)\n', (4528, 4570), False, 'from sklearn.model_selection import train_test_split\n'), ((820, 841), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (831, 841), True, 'import pandas as pd\n'), ((1162, 1231), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['charge_option']"], {'columns': "['charge_option']"}), "(full_data['charge_option'], columns=['charge_option'])\n", (1176, 1231), True, 'import pandas as pd\n'), ((1273, 1361), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['customer_nationality']"], {'columns': "['customer_nationality']"}), "(full_data['customer_nationality'], columns=[\n 'customer_nationality'])\n", (1287, 1361), True, 'import pandas as pd\n'), ((1398, 1492), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['accommadation_type_name']"], {'columns': "['accommadation_type_name']"}), "(full_data['accommadation_type_name'], columns=[\n 'accommadation_type_name'])\n", (1412, 1492), True, 'import pandas as pd\n'), ((1529, 1608), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['hotel_country_code']"], {'columns': "['hotel_country_code']"}), "(full_data['hotel_country_code'], columns=['hotel_country_code'])\n", (1543, 1608), True, 'import pandas as 
pd\n'), ((1650, 1744), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['original_payment_method']"], {'columns': "['original_payment_method']"}), "(full_data['original_payment_method'], columns=[\n 'original_payment_method'])\n", (1664, 1744), True, 'import pandas as pd\n'), ((1781, 1877), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['cancellation_policy_code']"], {'columns': "['cancellation_policy_code']"}), "(full_data['cancellation_policy_code'], columns=[\n 'cancellation_policy_code'])\n", (1795, 1877), True, 'import pandas as pd\n'), ((1914, 1991), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['hotel_star_rating']"], {'columns': "['hotel_star_rating']"}), "(full_data['hotel_star_rating'], columns=['hotel_star_rating'])\n", (1928, 1991), True, 'import pandas as pd\n'), ((2101, 2197), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['is_first_booking']"], {'columns': "['is_first_booking']", 'drop_first': '(True)'}), "(full_data['is_first_booking'], columns=['is_first_booking'],\n drop_first=True)\n", (2115, 2197), True, 'import pandas as pd\n'), ((2235, 2334), 'pandas.get_dummies', 'pd.get_dummies', (["full_data['is_user_logged_in']"], {'columns': "['is_user_logged_in']", 'drop_first': '(True)'}), "(full_data['is_user_logged_in'], columns=['is_user_logged_in'\n ], drop_first=True)\n", (2249, 2334), True, 'import pandas as pd\n'), ((4622, 4650), 'challenge.agoda_cancellation_estimator.AgodaCancellationEstimator', 'AgodaCancellationEstimator', ([], {}), '()\n', (4648, 4650), False, 'from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator\n'), ((2814, 2826), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (2823, 2826), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_ProjectionLinCompReturns [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ProjectionLinCompReturns&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-eq-linvs-comp-proj-ret).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_
from numpy import min as npmin, max as npmax
from scipy.stats import norm, lognorm
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, \
xlabel, title
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot, struct_to_dict
from Price2AdjustedPrice import Price2AdjustedPrice
# -
# ## Upload stock prices from db_Stocks
# +
# Load the stocks database, falling back to the temporary copy when the
# global one is not present.
try:
    db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True)
except FileNotFoundError:
    db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True)
# Convert the MATLAB struct into an attribute-accessible object.
StocksSPX = struct_to_dict(db['StocksSPX'])
# -
# ## Compute compounded returns from dividend adjusted prices
# Price2AdjustedPrice returns the dividend-adjusted prices together with
# the compounded returns; only the returns ``c`` are used below.
[_, c] = Price2AdjustedPrice(StocksSPX.Date.reshape(1,-1), StocksSPX.Prices[[1]], StocksSPX.Dividends[1]) # Exxon Mobil Corporation
# ## Estimate the parameters((mu,sigma))of the invariants under the normality assumption.
# Sample mean and unbiased (ddof=1) standard deviation of the compounded
# returns - the parameters of the fitted normal model.
mu = mean(c)
sigma = std(c,ddof=1)
# ## Compute the distribution of compounded and linear returns at horizons tau
# +
# Set projection parameters
# Horizons: multiples of 63 trading days (~quarters) up to ~2.4 years.
tau = arange(63,600,63)
# Confidence levels for the plotted quantile trajectories.
p_lev = array([.01, .99])
l_ = 100
# Scale so each pdf silhouette fits between adjacent horizon ticks.
scale = 0.7*npmin(diff(tau))
x_c = {}
y_c = {}
x_l = {}
y_l = {}
q_c = zeros((len(p_lev), len(tau)))
q_l = zeros((len(p_lev), len(tau)))
for k in range(len(tau)):
    # compounded returns: normal with mean mu*tau and stdev sigma*sqrt(tau)
    q_c[:,k] = norm.ppf(p_lev, mu*tau[k], sigma*sqrt(tau[k]))
    x_c[k] = linspace(npmin(q_c[:,k])-0.4, npmax(q_c[:,k])+0.4,l_)
    y_c[k] = norm.pdf(x_c[k], mu*tau[k], sigma*sqrt(tau[k]))
    # normalise the pdf height to the plotting scale
    y_c[k] = scale*y_c[k] / max(y_c[k])
    # linear returns: R = exp(C) - 1, hence lognormal shifted by -1
    q_l[:,k] = exp(q_c[:,k])-1
    x_l[k] = linspace(npmin(q_l[:,k])-0.4, npmax(q_l[:,k])+0.4,l_)
    y_l[k] = lognorm.pdf(x_l[k] + 1, sigma*sqrt(tau[k]), scale=exp(mu*tau[k]))
    y_l[k] = scale*y_l[k] / max(y_l[k])
# -
# ## Create a figure showing the pdf of both linear and compounded returns at certain points in the future
# ## and print the quantiles at the confidence levels 0.01 and 0.99.
# +
col = [.8, .8, .8]
f, ax = subplots(2,1)
# Top panel: compounded returns.
plt.sca(ax[0])
# Quantile trajectories fanning out from zero at the current date.
plot(r_[0, tau], r_['-1',zeros((2,1)), q_c].T, color='r')
for k in range(len(tau)):
    # Horizontal pdf silhouette drawn at each horizon.
    xx =r_[tau[k], tau[k]+y_c[k].T, tau[k]]
    yy =r_[x_c[k][0], x_c[k].T, x_c[k][-1]]
    plt.fill_between(xx, yy, color=col)
xlim([0, npmax(xx)*1.01])
ylim([npmin(yy)*1.2, npmax(yy)*1.2])
xlabel('horizon (years)')
ylabel('return range')
# Tick labels in years (252 trading days per year).
plt.xticks(r_[0,tau],r_[0,tau]/252)
plt.grid(True)
title('Compounded return propagation')
# Bottom panel: linear returns.
plt.sca(ax[1])
plot(r_[0, tau], r_['-1',zeros((2,1)), q_l].T, color='r')
for k in range(len(tau)):
    xx =r_[tau[k], tau[k]+y_l[k].T, tau[k]]
    yy =r_[x_l[k][0], x_l[k].T, x_l[k][-1]]
    plt.fill_between(xx, yy, color=col)
xlim([0, npmax(xx)*1.01])
ylim([npmin(yy)*1.1, npmax(yy)*1.1])
xlabel('horizon (years)')
ylabel('return range')
plt.xticks(r_[0,tau],r_[0,tau]/252)
plt.grid(True)
title('Linear return propagation')
plt.tight_layout();
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.array",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.diff",
"numpy.max",
"numpy.exp",
"numpy.min",
"matplotlib.pyplot.xticks",
"... | [((1088, 1112), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1101, 1112), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1530), 'ARPM_utils.struct_to_dict', 'struct_to_dict', (["db['StocksSPX']"], {}), "(db['StocksSPX'])\n", (1513, 1530), False, 'from ARPM_utils import save_plot, struct_to_dict\n'), ((1831, 1838), 'numpy.mean', 'mean', (['c'], {}), '(c)\n', (1835, 1838), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((1847, 1861), 'numpy.std', 'std', (['c'], {'ddof': '(1)'}), '(c, ddof=1)\n', (1850, 1861), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((1980, 1999), 'numpy.arange', 'arange', (['(63)', '(600)', '(63)'], {}), '(63, 600, 63)\n', (1986, 1999), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((2006, 2025), 'numpy.array', 'array', (['[0.01, 0.99]'], {}), '([0.01, 0.99])\n', (2011, 2025), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((2908, 2922), 'matplotlib.pyplot.subplots', 'subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (2916, 2922), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((2922, 2936), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (2929, 2936), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3237), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""horizon (years)"""'], {}), "('horizon (years)')\n", (3218, 3237), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((3238, 3260), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""return range"""'], {}), "('return range')\n", (3244, 3260), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((3261, 3301), 'matplotlib.pyplot.xticks', 'plt.xticks', (['r_[0, tau]', '(r_[0, tau] / 252)'], {}), '(r_[0, tau], r_[0, tau] / 
252)\n', (3271, 3301), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3311), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3305, 3311), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3350), 'matplotlib.pyplot.title', 'title', (['"""Compounded return propagation"""'], {}), "('Compounded return propagation')\n", (3317, 3350), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((3351, 3365), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[1]'], {}), '(ax[1])\n', (3358, 3365), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3666), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""horizon (years)"""'], {}), "('horizon (years)')\n", (3647, 3666), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((3667, 3689), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""return range"""'], {}), "('return range')\n", (3673, 3689), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((3690, 3730), 'matplotlib.pyplot.xticks', 'plt.xticks', (['r_[0, tau]', '(r_[0, tau] / 252)'], {}), '(r_[0, tau], r_[0, tau] / 252)\n', (3700, 3730), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3740), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3734, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3775), 'matplotlib.pyplot.title', 'title', (['"""Linear return propagation"""'], {}), "('Linear return propagation')\n", (3746, 3775), False, 'from matplotlib.pyplot import plot, xlim, ylim, subplots, ylabel, xlabel, title\n'), ((3776, 3794), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3792, 3794), True, 'import matplotlib.pyplot as plt\n'), ((733, 771), 'os.path.abspath', 'path.abspath', (['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (745, 771), True, 'import os.path as path\n'), ((3113, 3148), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xx', 'yy'], {'color': 
'col'}), '(xx, yy, color=col)\n', (3129, 3148), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3577), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xx', 'yy'], {'color': 'col'}), '(xx, yy, color=col)\n', (3558, 3577), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1366), 'os.path.join', 'os.path.join', (['GLOBAL_DB', '"""db_Stocks"""'], {}), "(GLOBAL_DB, 'db_Stocks')\n", (1342, 1366), False, 'import os\n'), ((2051, 2060), 'numpy.diff', 'diff', (['tau'], {}), '(tau)\n', (2055, 2060), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((2491, 2505), 'numpy.exp', 'exp', (['q_c[:, k]'], {}), '(q_c[:, k])\n', (2494, 2505), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((1428, 1467), 'os.path.join', 'os.path.join', (['TEMPORARY_DB', '"""db_Stocks"""'], {}), "(TEMPORARY_DB, 'db_Stocks')\n", (1440, 1467), False, 'import os\n'), ((2272, 2284), 'numpy.sqrt', 'sqrt', (['tau[k]'], {}), '(tau[k])\n', (2276, 2284), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((2308, 2324), 'numpy.min', 'npmin', (['q_c[:, k]'], {}), '(q_c[:, k])\n', (2313, 2324), True, 'from numpy import min as npmin, max as npmax\n'), ((2329, 2345), 'numpy.max', 'npmax', (['q_c[:, k]'], {}), '(q_c[:, k])\n', (2334, 2345), True, 'from numpy import min as npmin, max as npmax\n'), ((2400, 2412), 'numpy.sqrt', 'sqrt', (['tau[k]'], {}), '(tau[k])\n', (2404, 2412), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((2529, 2545), 'numpy.min', 'npmin', (['q_l[:, k]'], {}), '(q_l[:, k])\n', (2534, 2545), True, 'from numpy import min as npmin, max as npmax\n'), ((2550, 2566), 'numpy.max', 'npmax', (['q_l[:, k]'], {}), '(q_l[:, k])\n', (2555, 2566), True, 'from numpy import min as npmin, max as npmax\n'), ((2617, 2629), 'numpy.sqrt', 'sqrt', (['tau[k]'], {}), '(tau[k])\n', (2621, 2629), False, 'from numpy import arange, 
array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((2637, 2653), 'numpy.exp', 'exp', (['(mu * tau[k])'], {}), '(mu * tau[k])\n', (2640, 2653), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((3158, 3167), 'numpy.max', 'npmax', (['xx'], {}), '(xx)\n', (3163, 3167), True, 'from numpy import min as npmin, max as npmax\n'), ((3181, 3190), 'numpy.min', 'npmin', (['yy'], {}), '(yy)\n', (3186, 3190), True, 'from numpy import min as npmin, max as npmax\n'), ((3196, 3205), 'numpy.max', 'npmax', (['yy'], {}), '(yy)\n', (3201, 3205), True, 'from numpy import min as npmin, max as npmax\n'), ((3587, 3596), 'numpy.max', 'npmax', (['xx'], {}), '(xx)\n', (3592, 3596), True, 'from numpy import min as npmin, max as npmax\n'), ((3610, 3619), 'numpy.min', 'npmin', (['yy'], {}), '(yy)\n', (3615, 3619), True, 'from numpy import min as npmin, max as npmax\n'), ((3625, 3634), 'numpy.max', 'npmax', (['yy'], {}), '(yy)\n', (3630, 3634), True, 'from numpy import min as npmin, max as npmax\n'), ((2962, 2975), 'numpy.zeros', 'zeros', (['(2, 1)'], {}), '((2, 1))\n', (2967, 2975), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n'), ((3391, 3404), 'numpy.zeros', 'zeros', (['(2, 1)'], {}), '((2, 1))\n', (3396, 3404), False, 'from numpy import arange, array, zeros, std, diff, linspace, mean, exp, sqrt, r_\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import threading
from pathlib import Path
import numpy as np
import torch
def fasta_file_path(prefix_path):
    """Return the FASTA file name for a dataset *prefix_path*."""
    return f"{prefix_path}.fasta"
class FastaDataset(torch.utils.data.Dataset):
    """
    For loading protein sequence datasets in the common FASTA data format

    Random access is served from a byte-offset index (one offset per '>'
    record header) that is either built on the fly with a grep/awk shell
    pipeline or loaded from a cached ``.npy`` file next to the data.
    """
    def __init__(self, path: str, cache_indices=False):
        # ``path`` is the dataset prefix; the actual file is "<path>.fasta".
        self.fn = fasta_file_path(path)
        # One open file handle per thread: seek positions must not be
        # shared between DataLoader worker threads.
        self.threadlocal = threading.local()
        self.cache = Path(f"{path}.fasta.idx.npy")
        if cache_indices:
            if self.cache.exists():
                self.offsets, self.sizes = np.load(self.cache)
            else:
                self.offsets, self.sizes = self._build_index(path)
                np.save(self.cache, np.stack([self.offsets, self.sizes]))
        else:
            self.offsets, self.sizes = self._build_index(path)
    def _get_file(self):
        # Lazily open a handle for the calling thread and reuse it.
        if not hasattr(self.threadlocal, "f"):
            self.threadlocal.f = open(self.fn, "r")
        return self.threadlocal.f
    def __getitem__(self, idx):
        # Return (description line, concatenated sequence) for record idx.
        f = self._get_file()
        f.seek(self.offsets[idx])
        desc = f.readline().strip()
        line = f.readline()
        seq = ""
        # Sequences may span multiple lines; stop at EOF or the next header.
        while line != "" and line[0] != ">":
            seq += line.strip()
            line = f.readline()
        return desc, seq
    def __len__(self):
        return self.offsets.size
    def _build_index(self, path: str):
        # Use grep and awk to get 100M/s on local SSD.
        # Should process your enormous 100G fasta in ~10 min single core...
        # NOTE(review): ``path`` is interpolated into a shell command
        # (shell=True) - do not pass untrusted paths.
        path = fasta_file_path(path)
        # Byte offset of every '>' header line in the file.
        bytes_offsets = subprocess.check_output(
            f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
            "| grep --byte-offset '^>' -o | cut -d: -f1",
            shell=True,
        )
        # Length of each record's sequence with newlines stripped.
        fasta_lengths = subprocess.check_output(
            f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
            "| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
            shell=True,
        )
        # NOTE(review): np.fromstring with ``sep`` is deprecated in modern
        # NumPy; it still parses whitespace-separated text here.
        bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
        sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
        return bytes_np, sizes_np
    def __setstate__(self, state):
        self.__dict__ = state
        # Thread-local handles cannot be pickled; recreate after unpickling.
        self.threadlocal = threading.local()
    def __getstate__(self):
        # Drop the (unpicklable) thread-local file handles from the state.
        d = {}
        for i, v in self.__dict__.items():
            if i != "threadlocal":
                d[i] = v
        return d
    def __del__(self):
        # Close only this thread's handle, if one was ever opened.
        if hasattr(self.threadlocal, "f"):
            self.threadlocal.f.close()
            del self.threadlocal.f
    @staticmethod
    def exists(path):
        return os.path.exists(fasta_file_path(path))
class EncodedFastaDataset(FastaDataset):
    """
    FASTA dataset whose items are token indices rather than raw strings.

    Each sequence is run through *dictionary*.encode_line so the caller
    receives a LongTensor of vocabulary indices instead of the text.
    """
    def __init__(self, path, dictionary):
        super().__init__(path, cache_indices=True)
        self.dictionary = dictionary
    def __getitem__(self, idx):
        _, sequence = super().__getitem__(idx)
        encoded = self.dictionary.encode_line(sequence, line_tokenizer=list)
        return encoded.long()
| [
"subprocess.check_output",
"threading.local",
"pathlib.Path",
"numpy.fromstring",
"numpy.stack",
"numpy.load"
] | [((638, 655), 'threading.local', 'threading.local', ([], {}), '()\n', (653, 655), False, 'import threading\n'), ((678, 707), 'pathlib.Path', 'Path', (['f"""{path}.fasta.idx.npy"""'], {}), "(f'{path}.fasta.idx.npy')\n", (682, 707), False, 'from pathlib import Path\n'), ((1861, 2004), 'subprocess.check_output', 'subprocess.check_output', (['f"""cat {path} | tqdm --bytes --total $(wc -c < {path})| grep --byte-offset \'^>\' -o | cut -d: -f1"""'], {'shell': '(True)'}), '(\n f"cat {path} | tqdm --bytes --total $(wc -c < {path})| grep --byte-offset \'^>\' -o | cut -d: -f1"\n , shell=True)\n', (1884, 2004), False, 'import subprocess\n'), ((2074, 2273), 'subprocess.check_output', 'subprocess.check_output', (['f"""cat {path} | tqdm --bytes --total $(wc -c < {path})| awk \'/^>/ {{print "";next;}} {{ printf("%s",$0);}}\' | tail -n+2 | awk \'{{print length($1)}}\'"""'], {'shell': '(True)'}), '(\n f\'cat {path} | tqdm --bytes --total $(wc -c < {path})| awk \\\'/^>/ {{print "";next;}} {{ printf("%s",$0);}}\\\' | tail -n+2 | awk \\\'{{print length($1)}}\\\'\'\n , shell=True)\n', (2097, 2273), False, 'import subprocess\n'), ((2332, 2385), 'numpy.fromstring', 'np.fromstring', (['bytes_offsets'], {'dtype': 'np.int64', 'sep': '""" """'}), "(bytes_offsets, dtype=np.int64, sep=' ')\n", (2345, 2385), True, 'import numpy as np\n'), ((2406, 2459), 'numpy.fromstring', 'np.fromstring', (['fasta_lengths'], {'dtype': 'np.int64', 'sep': '""" """'}), "(fasta_lengths, dtype=np.int64, sep=' ')\n", (2419, 2459), True, 'import numpy as np\n'), ((2592, 2609), 'threading.local', 'threading.local', ([], {}), '()\n', (2607, 2609), False, 'import threading\n'), ((816, 835), 'numpy.load', 'np.load', (['self.cache'], {}), '(self.cache)\n', (823, 835), True, 'import numpy as np\n'), ((960, 996), 'numpy.stack', 'np.stack', (['[self.offsets, self.sizes]'], {}), '([self.offsets, self.sizes])\n', (968, 996), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from litNlp.predict import SA_Model_Predict
import numpy as np

# Tokenizer vocabulary that matches the trained model.
tokenize_path = 'model/tokenizer.pickle'
# Training architecture: 'textcnn' by default; 'bilstm' and 'gru' are also supported.
train_method = 'textcnn'
# Saved model weights used for inference.
sa_model_path_m = f'model/{train_method}.h5'
# Example sentences to score.
predict_text = ['这个我不喜欢', '这个我喜欢不']
# Build the predictor and run inference.
model = SA_Model_Predict(tokenize_path, sa_model_path_m, max_len=100)
sa_score = model.predict(predict_text)
scores = np.asarray(sa_score)
# Probability of the positive class.
print(scores[:, 1])
# Hard sentiment labels (argmax over class probabilities).
print(np.argmax(scores, axis=1))
"litNlp.predict.SA_Model_Predict",
"numpy.asarray"
] | [((352, 413), 'litNlp.predict.SA_Model_Predict', 'SA_Model_Predict', (['tokenize_path', 'sa_model_path_m'], {'max_len': '(100)'}), '(tokenize_path, sa_model_path_m, max_len=100)\n', (368, 413), False, 'from litNlp.predict import SA_Model_Predict\n'), ((475, 495), 'numpy.asarray', 'np.asarray', (['sa_score'], {}), '(sa_score)\n', (485, 495), True, 'import numpy as np\n'), ((530, 550), 'numpy.asarray', 'np.asarray', (['sa_score'], {}), '(sa_score)\n', (540, 550), True, 'import numpy as np\n')] |
import os
import glob
import random
import numpy as np
import torchaudio as T
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
def create_dataloader(params, train, is_distributed=False):
    """Build a DataLoader over an AudioDataset.

    In distributed mode a DistributedSampler shards the data and takes
    over shuffling; otherwise the loader shuffles itself.
    """
    audio_data = AudioDataset(params, train)
    sampler = DistributedSampler(audio_data) if is_distributed else None
    return DataLoader(
        dataset=audio_data,
        batch_size=params.batch_size,
        shuffle=sampler is None,
        sampler=sampler,
        num_workers=0,
        pin_memory=True,
        drop_last=True,
    )
class AudioDataset(Dataset):
    """Dataset of .wav files paired with downsampled copies of themselves.

    Each item is a random fixed-length segment of a waveform together with
    a low-rate version produced by the Resample transform.  NOTE(review):
    every file is assumed to be recorded at params.new_sample_rate — a
    mismatch raises ValueError.
    """
    def __init__(self, params, train):
        self.params = params
        self.train = train
        self.path = params.path
        # Recursively collect every .wav under the dataset root.
        self.wav_list = glob.glob(
            os.path.join(self.path, "**", "*.wav"), recursive=True
        )
        self.mapping = [i for i in range(len(self.wav_list))]
        # Resample from params.new_sample_rate down to params.sample_rate
        # (positional args are orig_freq, new_freq).
        self.downsample = T.transforms.Resample(
            params.new_sample_rate,
            params.sample_rate,
            resampling_method="sinc_interpolation",
        )
    def __len__(self):
        return len(self.wav_list)
    def __getitem__(self, idx):
        return self.my_getitem(idx)
    def shuffle_mapping(self):
        # In-place shuffle of the index mapping (e.g. between epochs).
        random.shuffle(self.mapping)
    def my_getitem(self, idx):
        """Load file *idx*; return a random segment and its low-rate copy."""
        wavpath = self.wav_list[idx]
        # File stem (text before the first '.') is used as the sample id.
        id = os.path.basename(wavpath).split(".")[0]
        audio, sr = T.load_wav(wavpath)
        if self.params.new_sample_rate != sr:
            raise ValueError(f"Invalid sample rate {sr}.")
        # Random crop start.  NOTE(review): raises if the clip is shorter
        # than n_segment + 1 samples — confirm inputs are long enough.
        start = np.random.randint(0, audio.shape[1] - self.params.n_segment - 1)
        if audio.shape[0] == 2:
            # Stereo input: keep only the first channel.
            audio = audio[0, :]
        audio = audio.squeeze(0)[start : start + self.params.n_segment]
        lr_audio = self.downsample(audio)
        # Scale 16-bit PCM integer samples to roughly [-1, 1].
        lr_audio = lr_audio / 32767.5
        audio = audio / 32767.5
        return {"audio": audio, "lr_audio": lr_audio, "id": id}
| [
"random.shuffle",
"os.path.join",
"torchaudio.transforms.Resample",
"numpy.random.randint",
"torch.utils.data.distributed.DistributedSampler",
"os.path.basename",
"torchaudio.load_wav"
] | [((924, 1033), 'torchaudio.transforms.Resample', 'T.transforms.Resample', (['params.new_sample_rate', 'params.sample_rate'], {'resampling_method': '"""sinc_interpolation"""'}), "(params.new_sample_rate, params.sample_rate,\n resampling_method='sinc_interpolation')\n", (945, 1033), True, 'import torchaudio as T\n'), ((1244, 1272), 'random.shuffle', 'random.shuffle', (['self.mapping'], {}), '(self.mapping)\n', (1258, 1272), False, 'import random\n'), ((1415, 1434), 'torchaudio.load_wav', 'T.load_wav', (['wavpath'], {}), '(wavpath)\n', (1425, 1434), True, 'import torchaudio as T\n'), ((1557, 1621), 'numpy.random.randint', 'np.random.randint', (['(0)', '(audio.shape[1] - self.params.n_segment - 1)'], {}), '(0, audio.shape[1] - self.params.n_segment - 1)\n', (1574, 1621), True, 'import numpy as np\n'), ((770, 808), 'os.path.join', 'os.path.join', (['self.path', '"""**"""', '"""*.wav"""'], {}), "(self.path, '**', '*.wav')\n", (782, 808), False, 'import os\n'), ((430, 457), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {}), '(dataset)\n', (448, 457), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((1355, 1380), 'os.path.basename', 'os.path.basename', (['wavpath'], {}), '(wavpath)\n', (1371, 1380), False, 'import os\n')] |
import os

import numpy as np
import torch

from config.adacrowd import cfg
from datasets.adacrowd.WE.loading_data import loading_data
from datasets.adacrowd.WE.setting import cfg_data
from trainer_adacrowd import Trainer_AdaCrowd

# Seed every RNG (NumPy, CPU torch, CUDA torch) for reproducibility.
seed = cfg.SEED
if seed is not None:
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

# Pin the process to a single GPU when exactly one is configured.
gpus = cfg.GPU_ID
if len(gpus) == 1:
    torch.cuda.set_device(gpus[0])

# Let cuDNN benchmark kernels for the configured input sizes.
torch.backends.cudnn.benchmark = True

data_mode = cfg.DATASET
net = cfg.NET
print(f"Net: {net}")
assert net in ['CSRNet_GBN', 'Res101_GBN',
               'Res101_SFCN_GBN'], "Invalid network"

# Directory containing this script, passed through to the trainer.
pwd = os.path.split(os.path.realpath(__file__))[0]
cc_trainer = Trainer_AdaCrowd(loading_data, cfg_data, pwd)
cc_trainer.forward()
| [
"torch.manual_seed",
"os.path.realpath",
"trainer_adacrowd.Trainer_AdaCrowd",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.cuda.set_device"
] | [((698, 743), 'trainer_adacrowd.Trainer_AdaCrowd', 'Trainer_AdaCrowd', (['loading_data', 'cfg_data', 'pwd'], {}), '(loading_data, cfg_data, pwd)\n', (714, 743), False, 'from trainer_adacrowd import Trainer_AdaCrowd\n'), ((273, 293), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (287, 293), True, 'import numpy as np\n'), ((298, 321), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (315, 321), False, 'import torch\n'), ((326, 354), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (348, 354), False, 'import torch\n'), ((397, 427), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpus[0]'], {}), '(gpus[0])\n', (418, 427), False, 'import torch\n'), ((654, 680), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (670, 680), False, 'import os\n')] |
"""
Functions to test if two floats are equal to within relative and absolute
tolerances. This dynamically chooses a cython implementation if available.
"""
from debtcollector import removals
from numpy import allclose as _allclose, isinf
from dit import ditParams
# Public names exported by this module.
__all__ = (
    'close',
    'allclose',
)
@removals.remove(message="Use numpy.isclose instead",
                 version='1.0.2')
def close__cython(x, y, rtol=None, atol=None):  # pylint: disable=missing-docstring
    # Fill in package-wide default tolerances where none were given,
    # then delegate to the compiled implementation.
    rtol = ditParams['rtol'] if rtol is None else rtol
    atol = ditParams['atol'] if atol is None else atol
    return close_(x, y, rtol, atol)
@removals.remove(message="Use numpy.isclose instead",
                 version='1.0.2')
def close__python(x, y, rtol=None, atol=None):  # pylint: disable=missing-docstring
    if rtol is None:
        rtol = ditParams['rtol']
    if atol is None:
        atol = ditParams['atol']
    x_is_inf, y_is_inf = isinf(x), isinf(y)
    # A finite value is never close to an infinite one.
    if x_is_inf != y_is_inf:
        return False
    if not x_is_inf:
        # Both finite: standard relative/absolute tolerance test.
        return abs(x - y) <= atol + rtol * abs(y)
    # Both infinite: close exactly when the signs agree.
    return (x > 0) == (y > 0)
# Shared docstring text; assigned to both implementations below so they
# document the same contract.
close_docstring = \
"""Returns True if the scalars x and y are close.
The relative error rtol must be positive and << 1.0
The absolute error atol usually comes into play when y is very small or
zero; it says how small x must be also.
If rtol or atol are unspecified, they are taken from ditParams['rtol']
and ditParams['atol'].
"""
cython_doc = "\nNote: This version is cythonified.\n"
close__python.__doc__ = close_docstring
close__cython.__doc__ = close_docstring + cython_doc
# Load the cython function if possible
try:
    from ._close import close as close_
    close = close__cython
except ImportError:
    # No compiled extension available; fall back to the pure-Python version.
    close = close__python
@removals.remove(message="Use numpy.allclose instead",
                 version='1.0.2')
def allclose(x, y, rtol=None, atol=None):
    """Element-wise closeness test for the components of x and y.

    rtol is the relative tolerance (positive and << 1.0).  atol is the
    absolute tolerance, which dominates for entries of y that are very
    small or zero.  Unspecified tolerances default to ditParams['rtol']
    and ditParams['atol'].  Deprecated thin wrapper around numpy.allclose.
    """
    rtol = ditParams['rtol'] if rtol is None else rtol
    atol = ditParams['atol'] if atol is None else atol
    return _allclose(x, y, rtol=rtol, atol=atol)
| [
"debtcollector.removals.remove",
"numpy.isinf",
"numpy.allclose"
] | [((315, 384), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': '"""Use numpy.isclose instead"""', 'version': '"""1.0.2"""'}), "(message='Use numpy.isclose instead', version='1.0.2')\n", (330, 384), False, 'from debtcollector import removals\n'), ((633, 702), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': '"""Use numpy.isclose instead"""', 'version': '"""1.0.2"""'}), "(message='Use numpy.isclose instead', version='1.0.2')\n", (648, 702), False, 'from debtcollector import removals\n'), ((2006, 2076), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': '"""Use numpy.allclose instead"""', 'version': '"""1.0.2"""'}), "(message='Use numpy.allclose instead', version='1.0.2')\n", (2021, 2076), False, 'from debtcollector import removals\n'), ((969, 977), 'numpy.isinf', 'isinf', (['x'], {}), '(x)\n', (974, 977), False, 'from numpy import allclose as _allclose, isinf\n'), ((989, 997), 'numpy.isinf', 'isinf', (['y'], {}), '(y)\n', (994, 997), False, 'from numpy import allclose as _allclose, isinf\n'), ((2628, 2665), 'numpy.allclose', '_allclose', (['x', 'y'], {'rtol': 'rtol', 'atol': 'atol'}), '(x, y, rtol=rtol, atol=atol)\n', (2637, 2665), True, 'from numpy import allclose as _allclose, isinf\n')] |
import pandas as pd
from Stock import get_stock_data_2min_56days
from Twitter_CEO import get_CEOs_twitter_posts
from Twitter_Company import get_company_twitter_posts
from Analysis import find_stock_movement
from statistical_tests import contingency_table_company, contingency_table_ceo, run_chisquared_company, run_chisquared_ceo

# Analysis parameters.
TIME_BEFORE_TWEET = 2  # minutes of price history before each tweet
TIME_AFTER_TWEET = 4  # minutes of price history after each tweet
SENSITIVITY = 0.0004  # threshold for a significant price movement


def _classify_movement(row):
    """Label the stock movement around a single tweet."""
    return find_stock_movement(
        row['Symbol'], row['Datetime'],
        time_after_tweet=TIME_AFTER_TWEET,
        time_before_tweet=TIME_BEFORE_TWEET,
        sensitivity=SENSITIVITY,
        df=stock_prices,
    )


# Companies and their stock symbols.
companies = pd.read_csv("assets/twitter_accounts.csv")
symbols = companies["symbol"].tolist()

# Fetch intraday stock data and the Twitter posts.
stock_prices = get_stock_data_2min_56days(symbols)
CEO_tweets = get_CEOs_twitter_posts(companies)
company_tweets = get_company_twitter_posts(companies)

# Attach the price-movement label to every tweet.
CEO_tweets['Movement'] = CEO_tweets.apply(_classify_movement, axis=1)
company_tweets['Movement'] = company_tweets.apply(_classify_movement, axis=1)

CEO_tweets.to_csv("output/twitter_sentiment_ceos.csv")
company_tweets.to_csv("output/twitter_sentiment_companies.csv")
print(CEO_tweets)
print(company_tweets)

# Chi-squared association tests for company and CEO posts.
sentiment_company = pd.read_csv("output/twitter_sentiment_companies.csv")
sentiment_ceo = pd.read_csv("output/twitter_sentiment_ceos.csv")
contingency_table_company(sentiment_company)
contingency_table_ceo(sentiment_ceo)
run_chisquared_company(sentiment_company)
run_chisquared_ceo(sentiment_ceo)
# Cramer's V effect sizes for the chi-squared tests above.
# The original code computed this twice inline; factor it into one helper.
import numpy as np


def cramers_v(chi2, table):
    """Return Cramer's V for a chi-squared statistic and its contingency table.

    V = sqrt((chi2 / n) / (min(r, c) - 1)), where n is the total count in
    the table and r, c are the table's dimensions.

    Parameters
    ----------
    chi2 : float
        The chi-squared statistic computed for *table*.
    table : numpy.ndarray
        2-D contingency table of observed counts.
    """
    n = np.sum(table)
    min_dim = min(table.shape) - 1
    return np.sqrt((chi2 / n) / min_dim)


# Companies: observed counts and the chi-squared statistic reported above.
company_table = np.array([[55, 75, 67], [238, 237, 227], [468, 638, 461]])
print(cramers_v(11.41, company_table))

# CEOs
ceo_table = np.array([[5, 2, 6], [9, 6, 19], [77, 96, 98]])
print(cramers_v(7.89, ceo_table))
"Analysis.find_stock_movement",
"statistical_tests.contingency_table_company",
"statistical_tests.contingency_table_ceo",
"statistical_tests.run_chisquared_company",
"numpy.sqrt",
"pandas.read_csv",
"statistical_tests.run_chisquared_ceo",
"numpy.array",
"numpy.sum",
"Twitter_Company.get_company_tw... | [((519, 561), 'pandas.read_csv', 'pd.read_csv', (['"""assets/twitter_accounts.csv"""'], {}), "('assets/twitter_accounts.csv')\n", (530, 561), True, 'import pandas as pd\n'), ((636, 671), 'Stock.get_stock_data_2min_56days', 'get_stock_data_2min_56days', (['symbols'], {}), '(symbols)\n', (662, 671), False, 'from Stock import get_stock_data_2min_56days\n'), ((712, 745), 'Twitter_CEO.get_CEOs_twitter_posts', 'get_CEOs_twitter_posts', (['companies'], {}), '(companies)\n', (734, 745), False, 'from Twitter_CEO import get_CEOs_twitter_posts\n'), ((793, 829), 'Twitter_Company.get_company_twitter_posts', 'get_company_twitter_posts', (['companies'], {}), '(companies)\n', (818, 829), False, 'from Twitter_Company import get_company_twitter_posts\n'), ((1565, 1618), 'pandas.read_csv', 'pd.read_csv', (['"""output/twitter_sentiment_companies.csv"""'], {}), "('output/twitter_sentiment_companies.csv')\n", (1576, 1618), True, 'import pandas as pd\n'), ((1635, 1683), 'pandas.read_csv', 'pd.read_csv', (['"""output/twitter_sentiment_ceos.csv"""'], {}), "('output/twitter_sentiment_ceos.csv')\n", (1646, 1683), True, 'import pandas as pd\n'), ((1685, 1729), 'statistical_tests.contingency_table_company', 'contingency_table_company', (['sentiment_company'], {}), '(sentiment_company)\n', (1710, 1729), False, 'from statistical_tests import contingency_table_company, contingency_table_ceo, run_chisquared_company, run_chisquared_ceo\n'), ((1730, 1766), 'statistical_tests.contingency_table_ceo', 'contingency_table_ceo', (['sentiment_ceo'], {}), '(sentiment_ceo)\n', (1751, 1766), False, 'from statistical_tests import contingency_table_company, contingency_table_ceo, run_chisquared_company, run_chisquared_ceo\n'), ((1767, 1808), 'statistical_tests.run_chisquared_company', 'run_chisquared_company', (['sentiment_company'], {}), '(sentiment_company)\n', (1789, 1808), False, 'from statistical_tests import contingency_table_company, contingency_table_ceo, 
run_chisquared_company, run_chisquared_ceo\n'), ((1809, 1842), 'statistical_tests.run_chisquared_ceo', 'run_chisquared_ceo', (['sentiment_ceo'], {}), '(sentiment_ceo)\n', (1827, 1842), False, 'from statistical_tests import contingency_table_company, contingency_table_ceo, run_chisquared_company, run_chisquared_ceo\n'), ((1894, 1952), 'numpy.array', 'np.array', (['[[55, 75, 67], [238, 237, 227], [468, 638, 461]]'], {}), '([[55, 75, 67], [238, 237, 227], [468, 638, 461]])\n', (1902, 1952), True, 'import numpy as np\n'), ((1964, 1976), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (1970, 1976), True, 'import numpy as np\n'), ((2008, 2034), 'numpy.sqrt', 'np.sqrt', (['(chi2 / n / minDim)'], {}), '(chi2 / n / minDim)\n', (2015, 2034), True, 'import numpy as np\n'), ((2089, 2136), 'numpy.array', 'np.array', (['[[5, 2, 6], [9, 6, 19], [77, 96, 98]]'], {}), '([[5, 2, 6], [9, 6, 19], [77, 96, 98]])\n', (2097, 2136), True, 'import numpy as np\n'), ((2147, 2159), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (2153, 2159), True, 'import numpy as np\n'), ((2191, 2217), 'numpy.sqrt', 'np.sqrt', (['(chi2 / n / minDim)'], {}), '(chi2 / n / minDim)\n', (2198, 2217), True, 'import numpy as np\n'), ((924, 1095), 'Analysis.find_stock_movement', 'find_stock_movement', (["x['Symbol']", "x['Datetime']"], {'time_after_tweet': 'TIME_AFTER_TWEET', 'time_before_tweet': 'TIME_BEFORE_TWEET', 'sensitivity': 'SENSITIVITY', 'df': 'stock_prices'}), "(x['Symbol'], x['Datetime'], time_after_tweet=\n TIME_AFTER_TWEET, time_before_tweet=TIME_BEFORE_TWEET, sensitivity=\n SENSITIVITY, df=stock_prices)\n", (943, 1095), False, 'from Analysis import find_stock_movement\n'), ((1160, 1331), 'Analysis.find_stock_movement', 'find_stock_movement', (["x['Symbol']", "x['Datetime']"], {'time_after_tweet': 'TIME_AFTER_TWEET', 'time_before_tweet': 'TIME_BEFORE_TWEET', 'sensitivity': 'SENSITIVITY', 'df': 'stock_prices'}), "(x['Symbol'], x['Datetime'], time_after_tweet=\n TIME_AFTER_TWEET, 
time_before_tweet=TIME_BEFORE_TWEET, sensitivity=\n SENSITIVITY, df=stock_prices)\n", (1179, 1331), False, 'from Analysis import find_stock_movement\n')] |
import numpy as np
import pandas as pd
import simpy
from sim_utils.audit import Audit
from sim_utils.data import Data
from sim_utils.patient import Patient
import warnings
# Suppress all warnings so simulation output stays readable.
warnings.filterwarnings("ignore")
class Model(object):
    """
    Discrete-event (SimPy) simulation of patient flow and bed allocation
    across acute stroke units (ASUs).

    Patients arrive via a generator process, are allocated a bed in their
    preferred unit (or, when permitted, an alternative unit), occupy it
    for a sampled length of stay, and are counted in per-unit occupancy
    arrays and a run-level tracker dictionary.
    """
    def __init__(self, scenario):
        """Set up the SimPy environment, data, audit and run trackers.

        scenario: parameter object providing capacities, rates, warmup
        and duration settings (read throughout as self.params).
        """
        self.env = simpy.Environment()
        self.params = scenario
        self.data = Data(self.params)
        self.audit = Audit()
        self.patients = []
        self.patient_id_count = 0
        # Set up 1D NumPy array for patient counts per unit
        number_hospitals = self.data.units.shape[0]
        self.unit_occupancy = np.zeros(number_hospitals)
        self.unit_admissions = np.zeros(number_hospitals)
        # Count displaced patients
        self.unit_occupancy_displaced_preferred = np.zeros(number_hospitals)
        self.unit_occupancy_displaced_destination = np.zeros(number_hospitals)
        self.unit_occupancy_waiting_preferred = np.zeros(number_hospitals)
        # Set up tracker dictionary (total patients updated after warmup)
        self.tracker = {
            'total_patients': 0,
            'total_patients_asu': 0,
            'total_patients_waited': 0,
            'total_patients_displaced': 0,
            'current_patients': 0,
            'current_asu_patients_all': 0,
            'current_asu_patients_allocated': 0,
            'current_asu_patients_unallocated': 0,
            'current_asu_patients_displaced': 0,
            'patient_waiting_time': []
        }
    def assign_asu_los(self, patient):
        """Assign length of stay based on assigned ASU unit"""
        los_mean = self.data.units.iloc[patient.assigned_asu_index]['los_mean']
        # los_cv acts as a coefficient of variation: sd = mean * cv.
        los_sd = los_mean * self.params.los_cv
        # Floor the sampled normal at 0.01 so the stay is always positive.
        los = max(np.random.normal(los_mean, los_sd), 0.01)
        patient.los_asu = los
        return
    def end_run_routine(self):
        """
        Data handling at end of run: convert audit lists to DataFrames
        and compute summary waiting-time statistics.
        """
        self.global_audit = pd.DataFrame(self.audit.global_audit)
        self.occupancy_audit = pd.DataFrame(
            self.audit.audit_unit_occupancy, columns=self.data.units_name)
        self.admissions_by_unit = pd.Series(
            self.unit_admissions, index=self.data.units_name)
        self.occupancy_percent_audit = pd.DataFrame(
            self.audit.audit_unit_occupancy_percent,
            columns=self.data.units_name)
        self.unit_occupancy_displaced_preferred_audit = pd.DataFrame(
            self.audit.audit_unit_occupancy_displaced_preferred,
            columns=self.data.units_name)
        self.unit_occupancy_displaced_destination_audit = pd.DataFrame(
            self.audit.audit_unit_occupancy_displaced_destination,
            columns=self.data.units_name)
        self.unit_occupancy_waiting_preferred_audit = pd.DataFrame(
            self.audit.audit_unit_occupancy_waiting_preferred,
            columns=self.data.units_name)
        # Mean wait over ALL ASU patients (non-waiters contribute zero wait).
        if len(self.tracker['patient_waiting_time']) > 0:
            self.average_wait_time_all = (np.sum(
                self.tracker['patient_waiting_time']) /
                self.tracker['total_patients_asu'])
        else:
            self.average_wait_time_all = 0
        # Mean wait over only those patients who actually waited.
        if len(self.tracker['patient_waiting_time']) > 0:
            self.average_wait_time_waiters = np.mean(
                self.tracker['patient_waiting_time'])
        else:
            self.average_wait_time_waiters = 0
        if len(self.tracker['patient_waiting_time']) > 0:
            self.maximum_wait_time = np.max(
                self.tracker['patient_waiting_time'])
        else: self.maximum_wait_time = 0
    def generate_patient_arrival(self):
        """SimPy process. Generate patients. Assign unit and length of stay.
        Pass patient to hospital bed allocation"""
        # Continuous loop of patient arrivals
        while True:
            # Put patient attributes in a dictionary, and pass to Patient object
            patient_dict = dict()
            self.patient_id_count += 1
            # Totals are only accumulated once the warmup period has passed.
            if self.env.now >= self.params.sim_warmup:
                self.tracker['total_patients'] += 1
            patient_dict['id'] = self.patient_id_count
            # Sample the patient's home LSOA weighted by admission probability.
            patient_dict['lsoa_index'] = np.random.choice(
                range(self.data.lsoa_count), p=self.data.admission_probs)
            patient_dict['lsoa'] = \
                self.data.lsoa_list[patient_dict['lsoa_index']]
            patient_dict['pref_unit_postcode'] = (
                self.data.pref_unit.loc[patient_dict['lsoa']][
                    'Preferred_unit_postcode'])
            patient_dict['pref_unit_name'] = (
                self.data.pref_unit.loc[patient_dict['lsoa']]
                ['Preferred_unit_name'])
            patient_dict['pref_unit_index'] = (
                self.data.units.loc[patient_dict['pref_unit_name']]['Index'])
            patient_dict['patient_region'] = (
                self.data.units.loc[patient_dict['pref_unit_name']]['region'])
            patient = Patient(patient_dict, self.params)
            self.patients.append(patient)
            # Pass patient to patient journey
            process = self.patient_journey(patient)
            self.env.process(process)
            # Wait for next patient arrival
            time_to_next = (self.data.interarrival_interval /
                            self.params.scale_admissions)
            yield self.env.timeout(time_to_next)
            # Return to top of while loop
    def patient_journey(self, patient):
        """
        Patient journey:
        1) Check if acute care is needed
        2) Acute care (find appropriate place)
        3) ESD
        """
        self.tracker['current_patients'] += 1
        patient.time_in = self.env.now
        if patient.use_asu:
            if self.env.now >= self.params.sim_warmup:
                self.tracker['total_patients_asu'] += 1
            self.tracker['current_asu_patients_all'] += 1
            self.tracker['current_asu_patients_unallocated'] += 1
            self.unit_occupancy_waiting_preferred[patient.pref_unit_index] += 1
            # Look to allocate patient to unit
            unallocated = True
            while unallocated:
                # Check if preferred unit has capacity
                capacity = self.data.units_capacity[patient.pref_unit_index]
                occupied_beds = self.unit_occupancy[patient.pref_unit_index]
                spare_beds = capacity - occupied_beds
                if spare_beds > 0:
                    # Spare capacity in preferred unit
                    self.unit_occupancy[patient.pref_unit_index] += 1
                    if self.env.now >= self.params.sim_warmup:
                        self.unit_admissions[patient.pref_unit_index] += 1
                    patient.assigned_asu_index = patient.pref_unit_index
                    patient.assigned_asu_postcode = \
                        self.data.units_postcode[patient.pref_unit_index]
                    patient.assigned_asu_name = \
                        self.data.units_name[patient.pref_unit_index]
                    patient.waiting_for_asu = False
                    patient.displaced = False
                    unallocated = False
                # Check other units if allowed
                elif self.params.allow_non_preferred_asu:
                    # Units are tried in order of travel time from the
                    # patient's LSOA.
                    choice_order = list(
                        self.data.units_index_by_time.loc[patient.lsoa])
                    for unit in choice_order:
                        # Check if limited to regions and unit in region
                        if (self.params.restrict_non_preferred_to_regions and
                            self.data.unit_region[unit] != patient.patient_region):
                            # Skip remainder of loop if unit not in patient region
                            continue
                        # Check unit is allowed for pool use
                        if self.data.allow_pool_use[unit] == 0:
                            continue
                        # Calculate number of spare beds
                        capacity = self.data.units_capacity[unit]
                        occupied_beds = self.unit_occupancy[unit]
                        spare_beds = capacity - occupied_beds
                        if spare_beds > 0:
                            # Bed available in alternative unit
                            self.unit_occupancy[unit] += 1
                            patient.assigned_asu_index = unit
                            patient.assigned_asu_postcode = \
                                self.data.units_postcode[unit]
                            patient.assigned_asu_name = \
                                self.data.units_name[unit]
                            patient.waiting_for_asu = False
                            patient.displaced = True
                            unallocated = False
                            self.unit_occupancy_displaced_preferred[
                                patient.pref_unit_index] += 1
                            self.unit_occupancy_displaced_destination[unit] += 1
                            if self.env.now >= self.params.sim_warmup:
                                self.tracker['total_patients_displaced'] += 1
                                self.unit_admissions[unit] += 1
                            # End loop
                            break
                # Wait for 0.25 day before searching again (if necessary)
                if unallocated:
                    yield self.env.timeout(0.25)
            # Unit allocated; adjust trackers and patient values
            self.tracker['current_asu_patients_allocated'] += 1
            self.tracker['current_asu_patients_unallocated'] -= 1
            if patient.displaced:
                self.tracker['current_asu_patients_displaced'] += 1
            self.unit_occupancy_waiting_preferred[patient.pref_unit_index] -= 1
            patient.time_asu_allocated = self.env.now
            patient.time_waiting_for_asu = \
                patient.time_asu_allocated - patient.time_in
            patient.waited_for_asu = \
                True if patient.time_waiting_for_asu > 0 else False
            if patient.waited_for_asu:
                if self.env.now >= self.params.sim_warmup:
                    self.tracker['total_patients_waited'] += 1
                    self.tracker['patient_waiting_time'].append(
                        patient.time_waiting_for_asu)
            # Stay in ASU
            self.assign_asu_los(patient)
            yield self.env.timeout(patient.los_asu)
            # End of ASU; adjust trackers
            self.tracker['current_asu_patients_all'] -= 1
            self.tracker['current_asu_patients_allocated'] -= 1
            self.unit_occupancy[patient.assigned_asu_index] -= 1
            if patient.displaced:
                self.unit_occupancy_displaced_preferred[
                    patient.pref_unit_index] -= 1
                self.unit_occupancy_displaced_destination[
                    patient.assigned_asu_index] -= 1
                self.tracker['current_asu_patients_displaced'] -= 1
        # TODO Add ESD?
        # End of patient journey
        self.tracker['current_patients'] -= 1
        self.patients.remove(patient)
        del patient
    def run(self):
        """Start the arrival and audit processes and run the simulation."""
        # Initialise processes that will run on model run.
        self.env.process(self.generate_patient_arrival())
        self.env.process(self.audit.perform_global_audit(self))
        # Run
        run_duration = self.params.sim_warmup + self.params.sim_duration
        self.env.run(until=run_duration)
        # End of run
        self.end_run_routine()
| [
"pandas.Series",
"numpy.random.normal",
"numpy.mean",
"sim_utils.patient.Patient",
"simpy.Environment",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"sim_utils.audit.Audit",
"pandas.DataFrame",
"sim_utils.data.Data",
"warnings.filterwarnings"
] | [((175, 208), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (198, 208), False, 'import warnings\n'), ((312, 331), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (329, 331), False, 'import simpy\n'), ((383, 400), 'sim_utils.data.Data', 'Data', (['self.params'], {}), '(self.params)\n', (387, 400), False, 'from sim_utils.data import Data\n'), ((422, 429), 'sim_utils.audit.Audit', 'Audit', ([], {}), '()\n', (427, 429), False, 'from sim_utils.audit import Audit\n'), ((634, 660), 'numpy.zeros', 'np.zeros', (['number_hospitals'], {}), '(number_hospitals)\n', (642, 660), True, 'import numpy as np\n'), ((692, 718), 'numpy.zeros', 'np.zeros', (['number_hospitals'], {}), '(number_hospitals)\n', (700, 718), True, 'import numpy as np\n'), ((805, 831), 'numpy.zeros', 'np.zeros', (['number_hospitals'], {}), '(number_hospitals)\n', (813, 831), True, 'import numpy as np\n'), ((884, 910), 'numpy.zeros', 'np.zeros', (['number_hospitals'], {}), '(number_hospitals)\n', (892, 910), True, 'import numpy as np\n'), ((959, 985), 'numpy.zeros', 'np.zeros', (['number_hospitals'], {}), '(number_hospitals)\n', (967, 985), True, 'import numpy as np\n'), ((1972, 2009), 'pandas.DataFrame', 'pd.DataFrame', (['self.audit.global_audit'], {}), '(self.audit.global_audit)\n', (1984, 2009), True, 'import pandas as pd\n'), ((2042, 2117), 'pandas.DataFrame', 'pd.DataFrame', (['self.audit.audit_unit_occupancy'], {'columns': 'self.data.units_name'}), '(self.audit.audit_unit_occupancy, columns=self.data.units_name)\n', (2054, 2117), True, 'import pandas as pd\n'), ((2166, 2225), 'pandas.Series', 'pd.Series', (['self.unit_admissions'], {'index': 'self.data.units_name'}), '(self.unit_admissions, index=self.data.units_name)\n', (2175, 2225), True, 'import pandas as pd\n'), ((2279, 2367), 'pandas.DataFrame', 'pd.DataFrame', (['self.audit.audit_unit_occupancy_percent'], {'columns': 'self.data.units_name'}), '(self.audit.audit_unit_occupancy_percent, 
columns=self.data.\n units_name)\n', (2291, 2367), True, 'import pandas as pd\n'), ((2446, 2546), 'pandas.DataFrame', 'pd.DataFrame', (['self.audit.audit_unit_occupancy_displaced_preferred'], {'columns': 'self.data.units_name'}), '(self.audit.audit_unit_occupancy_displaced_preferred, columns=\n self.data.units_name)\n', (2458, 2546), True, 'import pandas as pd\n'), ((2626, 2728), 'pandas.DataFrame', 'pd.DataFrame', (['self.audit.audit_unit_occupancy_displaced_destination'], {'columns': 'self.data.units_name'}), '(self.audit.audit_unit_occupancy_displaced_destination, columns\n =self.data.units_name)\n', (2638, 2728), True, 'import pandas as pd\n'), ((2804, 2902), 'pandas.DataFrame', 'pd.DataFrame', (['self.audit.audit_unit_occupancy_waiting_preferred'], {'columns': 'self.data.units_name'}), '(self.audit.audit_unit_occupancy_waiting_preferred, columns=\n self.data.units_name)\n', (2816, 2902), True, 'import pandas as pd\n'), ((1764, 1798), 'numpy.random.normal', 'np.random.normal', (['los_mean', 'los_sd'], {}), '(los_mean, los_sd)\n', (1780, 1798), True, 'import numpy as np\n'), ((3321, 3366), 'numpy.mean', 'np.mean', (["self.tracker['patient_waiting_time']"], {}), "(self.tracker['patient_waiting_time'])\n", (3328, 3366), True, 'import numpy as np\n'), ((3548, 3592), 'numpy.max', 'np.max', (["self.tracker['patient_waiting_time']"], {}), "(self.tracker['patient_waiting_time'])\n", (3554, 3592), True, 'import numpy as np\n'), ((5034, 5068), 'sim_utils.patient.Patient', 'Patient', (['patient_dict', 'self.params'], {}), '(patient_dict, self.params)\n', (5041, 5068), False, 'from sim_utils.patient import Patient\n'), ((3032, 3076), 'numpy.sum', 'np.sum', (["self.tracker['patient_waiting_time']"], {}), "(self.tracker['patient_waiting_time'])\n", (3038, 3076), True, 'import numpy as np\n')] |
import gzip
import io
import logging
import os
import six
import arff
import numpy as np
import scipy.sparse
from six.moves import cPickle as pickle
import xmltodict
from .data_feature import OpenMLDataFeature
from ..exceptions import PyOpenMLError
from .._api_calls import _perform_api_call
# Module-level logger following the standard __name__ convention.
logger = logging.getLogger(__name__)
class OpenMLDataset(object):
    """Dataset object.

    Allows fetching and uploading datasets to OpenML.

    Parameters
    ----------
    name : str
        Name of the dataset.
    description : str
        Description of the dataset.
    FIXME : which of these do we actually need?
    """
    def __init__(self, dataset_id=None, name=None, version=None, description=None,
                 format=None, creator=None, contributor=None,
                 collection_date=None, upload_date=None, language=None,
                 licence=None, url=None, default_target_attribute=None,
                 row_id_attribute=None, ignore_attribute=None,
                 version_label=None, citation=None, tag=None, visibility=None,
                 original_data_url=None, paper_url=None, update_comment=None,
                 md5_checksum=None, data_file=None, features=None, qualities=None):
        # Attributes received by querying the RESTful API
        self.dataset_id = int(dataset_id) if dataset_id is not None else None
        self.name = name
        self.version = int(version)
        self.description = description
        self.format = format
        self.creator = creator
        self.contributor = contributor
        self.collection_date = collection_date
        self.upload_date = upload_date
        self.language = language
        self.licence = licence
        self.url = url
        self.default_target_attribute = default_target_attribute
        self.row_id_attribute = row_id_attribute
        # Normalize ignore_attribute to a list (or None).
        self.ignore_attributes = None
        if isinstance(ignore_attribute, six.string_types):
            self.ignore_attributes = [ignore_attribute]
        elif isinstance(ignore_attribute, list):
            self.ignore_attributes = ignore_attribute
        elif ignore_attribute is None:
            pass
        else:
            raise ValueError('wrong data type for ignore_attribute. Should be list. ')
        self.version_label = version_label
        self.citation = citation
        self.tag = tag
        self.visibility = visibility
        self.original_data_url = original_data_url
        self.paper_url = paper_url
        self.update_comment = update_comment
        # NOTE: the attribute name keeps the historical misspelling
        # ("cheksum") so existing callers that read it keep working.
        self.md5_cheksum = md5_checksum
        self.data_file = data_file
        self.features = None
        self.qualities = None
        if features is not None:
            # Features arrive as parsed XML; index them by feature position.
            self.features = {}
            for idx, xmlfeature in enumerate(features['oml:feature']):
                feature = OpenMLDataFeature(int(xmlfeature['oml:index']),
                                          xmlfeature['oml:name'],
                                          xmlfeature['oml:data_type'],
                                          None,  # todo add nominal values (currently not in database)
                                          int(xmlfeature.get('oml:number_of_missing_values', 0)))
                if idx != feature.index:
                    raise ValueError('Data features not provided in right order')
                self.features[feature.index] = feature
        self.qualities = _check_qualities(qualities)
        if data_file is not None:
            if self._data_features_supported():
                # Cache the parsed ARFF content as a pickle next to the data
                # file so subsequent loads skip the slow ARFF decoding step.
                self.data_pickle_file = data_file.replace('.arff', '.pkl')
                if os.path.exists(self.data_pickle_file):
                    logger.debug("Data pickle file already exists.")
                else:
                    try:
                        data = self._get_arff(self.format)
                    except OSError as e:
                        logger.critical("Please check that the data file %s is there "
                                        "and can be read.", self.data_file)
                        raise e
                    categorical = [False if type(type_) != list else True
                                   for name, type_ in data['attributes']]
                    attribute_names = [name for name, type_ in data['attributes']]
                    if isinstance(data['data'], tuple):
                        # Sparse ARFF: build a CSR matrix from COO triples.
                        X = data['data']
                        X_shape = (max(X[1]) + 1, max(X[2]) + 1)
                        X = scipy.sparse.coo_matrix(
                            (X[0], (X[1], X[2])), shape=X_shape, dtype=np.float32)
                        X = X.tocsr()
                    elif isinstance(data['data'], list):
                        X = np.array(data['data'], dtype=np.float32)
                    else:
                        raise Exception()
                    with open(self.data_pickle_file, "wb") as fh:
                        pickle.dump((X, categorical, attribute_names), fh, -1)
                    logger.debug("Saved dataset %d: %s to file %s" %
                                 (self.dataset_id, self.name, self.data_pickle_file))
    def push_tag(self, tag):
        """Annotates this data set with a tag on the server.

        Parameters
        ----------
        tag : str
            Tag to attach to the dataset.
        """
        data = {'data_id': self.dataset_id, 'tag': tag}
        _perform_api_call("/data/tag", data=data)
    def remove_tag(self, tag):
        """Removes a tag from this dataset on the server.

        Parameters
        ----------
        tag : str
            Tag to attach to the dataset.
        """
        data = {'data_id': self.dataset_id, 'tag': tag}
        _perform_api_call("/data/untag", data=data)
    def __eq__(self, other):
        """Datasets are equal when ids match, or when name and version match.

        Parameters
        ----------
        other : object
            Object to compare against.

        Returns
        -------
        bool
        """
        if type(other) != OpenMLDataset:
            return False
        # BUG FIX: previously this referenced non-existent attributes
        # (self.id, other._id, other._name, other._version), raising
        # AttributeError whenever two datasets were compared.
        elif self.dataset_id == other.dataset_id or \
                (self.name == other.name and self.version == other.version):
            return True
        else:
            return False
    def _get_arff(self, format):
        """Read ARFF file and return decoded arff.

        Reads the file referenced in self.data_file.

        Parameters
        ----------
        format : str
            Either 'arff' (dense) or 'sparse_arff'.

        Returns
        -------
        arff_string :
            Decoded arff.
        """
        # TODO: add a partial read method which only returns the attribute
        # headers of the corresponding .arff file!
        # A random number after which we consider a file for too large on a
        # 32 bit system...currently 120mb (just a little bit more than covtype)
        import struct
        if not self._data_features_supported():
            raise PyOpenMLError('Dataset not compatible, PyOpenML cannot handle string features')
        filename = self.data_file
        # struct.calcsize("P") is the pointer size; 8 bytes means a 64-bit
        # interpreter that can safely hold large files in memory.
        bits = (8 * struct.calcsize("P"))
        if bits != 64 and os.path.getsize(filename) > 120000000:
            # BUG FIX: the exception was previously *returned* instead of
            # raised, so callers silently received an exception object in
            # place of parsed ARFF data.
            raise NotImplementedError("File too big")
        if format.lower() == 'arff':
            return_type = arff.DENSE
        elif format.lower() == 'sparse_arff':
            return_type = arff.COO
        else:
            raise ValueError('Unknown data format %s' % format)
        def decode_arff(fh):
            decoder = arff.ArffDecoder()
            return decoder.decode(fh, encode_nominal=True,
                                  return_type=return_type)
        if filename[-3:] == ".gz":
            with gzip.open(filename) as fh:
                return decode_arff(fh)
        else:
            with io.open(filename, encoding='utf8') as fh:
                return decode_arff(fh)
    def get_data(self, target=None,
                 include_row_id=False,
                 include_ignore_attributes=False,
                 return_categorical_indicator=False,
                 return_attribute_names=False
                 ):
        """Returns dataset content as numpy arrays / sparse matrices.

        Parameters
        ----------
        target : str or list, optional
            Name(s) of the target attribute; if given, the return value is
            split into (X, y).
        include_row_id : bool
            Whether to keep the row id attribute in the returned data.
        include_ignore_attributes : bool
            Whether to keep the declared ignore attributes.
        return_categorical_indicator : bool
            Append a list of booleans marking categorical columns.
        return_attribute_names : bool
            Append the list of attribute names.

        Returns
        -------
        A single array, or a list of the requested pieces.
        """
        rval = []
        if not self._data_features_supported():
            raise PyOpenMLError('Dataset not compatible, PyOpenML cannot handle string features')
        path = self.data_pickle_file
        if not os.path.exists(path):
            raise ValueError("Cannot find a pickle file for dataset %s at "
                             "location %s " % (self.name, path))
        else:
            with open(path, "rb") as fh:
                data, categorical, attribute_names = pickle.load(fh)
        # Collect the columns that have to be dropped from the output.
        to_exclude = []
        if include_row_id is False:
            if not self.row_id_attribute:
                pass
            else:
                if isinstance(self.row_id_attribute, six.string_types):
                    to_exclude.append(self.row_id_attribute)
                else:
                    to_exclude.extend(self.row_id_attribute)
        if include_ignore_attributes is False:
            if not self.ignore_attributes:
                pass
            else:
                if isinstance(self.ignore_attributes, six.string_types):
                    to_exclude.append(self.ignore_attributes)
                else:
                    to_exclude.extend(self.ignore_attributes)
        if len(to_exclude) > 0:
            logger.info("Going to remove the following attributes:"
                        " %s" % to_exclude)
            keep = np.array([True if column not in to_exclude else False
                             for column in attribute_names])
            data = data[:, keep]
            categorical = [cat for cat, k in zip(categorical, keep) if k]
            attribute_names = [att for att, k in
                               zip(attribute_names, keep) if k]
        if target is None:
            rval.append(data)
        else:
            if isinstance(target, six.string_types):
                target = [target]
            targets = np.array([True if column in target else False
                                for column in attribute_names])
            if np.sum(targets) > 1:
                raise NotImplementedError(
                    "Number of requested targets %d is not implemented." %
                    np.sum(targets)
                )
            target_categorical = [
                cat for cat, column in
                six.moves.zip(categorical, attribute_names)
                if column in target
            ]
            # Categorical targets are encoded as integer class indices.
            target_dtype = int if target_categorical[0] else float
            try:
                x = data[:, ~targets]
                y = data[:, targets].astype(target_dtype)
                if len(y.shape) == 2 and y.shape[1] == 1:
                    y = y[:, 0]
                categorical = [cat for cat, t in
                               zip(categorical, targets) if not t]
                attribute_names = [att for att, k in
                                   zip(attribute_names, targets) if not k]
            except KeyError as e:
                import sys
                sys.stdout.flush()
                raise e
            if scipy.sparse.issparse(y):
                y = np.asarray(y.todense()).astype(target_dtype).flatten()
            rval.append(x)
            rval.append(y)
        if return_categorical_indicator:
            rval.append(categorical)
        if return_attribute_names:
            rval.append(attribute_names)
        if len(rval) == 1:
            return rval[0]
        else:
            return rval
    def retrieve_class_labels(self, target_name='class'):
        """Reads the datasets arff to determine the class-labels.

        If the task has no class labels (for example a regression problem)
        it returns None. Necessary because the data returned by get_data
        only contains the indices of the classes, while OpenML needs the real
        classname when uploading the results of a run.

        Parameters
        ----------
        target_name : str
            Name of the target attribute

        Returns
        -------
        list
        """
        # TODO improve performance, currently reads the whole file
        # Should make a method that only reads the attributes
        arffFileName = self.data_file
        if self.format.lower() == 'arff':
            return_type = arff.DENSE
        elif self.format.lower() == 'sparse_arff':
            return_type = arff.COO
        else:
            raise ValueError('Unknown data format %s' % self.format)
        with io.open(arffFileName, encoding='utf8') as fh:
            arffData = arff.ArffDecoder().decode(fh, return_type=return_type)
        dataAttributes = dict(arffData['attributes'])
        if target_name in dataAttributes:
            return dataAttributes[target_name]
        else:
            return None
    def get_features_by_type(self, data_type, exclude=None,
                             exclude_ignore_attributes=True,
                             exclude_row_id_attribute=True):
        '''
        Returns indices of features of a given type, e.g., all nominal features.
        Can use additional parameters to exclude various features by index or ontology.

        Parameters
        ----------
        data_type : str
            The data type to return (e.g., nominal, numeric, date, string)
        exclude : list(int)
            Indices to exclude (and adapt the return values as if these indices
            are not present)
        exclude_ignore_attributes : bool
            Whether to exclude the defined ignore attributes (and adapt the
            return values as if these indices are not present)
        exclude_row_id_attribute : bool
            Whether to exclude the defined row id attributes (and adapt the
            return values as if these indices are not present)

        Returns
        -------
        result : list
            a list of indices that have the specified data type
        '''
        if data_type not in OpenMLDataFeature.LEGAL_DATA_TYPES:
            raise TypeError("Illegal feature type requested")
        if self.ignore_attributes is not None:
            if not isinstance(self.ignore_attributes, list):
                raise TypeError("ignore_attributes should be a list")
        if self.row_id_attribute is not None:
            if not isinstance(self.row_id_attribute, six.string_types):
                raise TypeError("row id attribute should be a str")
        if exclude is not None:
            if not isinstance(exclude, list):
                raise TypeError("Exclude should be a list")
            # assert all(isinstance(elem, str) for elem in exclude), "Exclude should be a list of strings"
        to_exclude = []
        if exclude is not None:
            to_exclude.extend(exclude)
        if exclude_ignore_attributes and self.ignore_attributes is not None:
            to_exclude.extend(self.ignore_attributes)
        if exclude_row_id_attribute and self.row_id_attribute is not None:
            to_exclude.append(self.row_id_attribute)
        result = []
        offset = 0
        # this function assumes that everything in to_exclude will be 'excluded' from the dataset (hence the offset)
        for idx in self.features:
            name = self.features[idx].name
            if name in to_exclude:
                offset += 1
            else:
                if self.features[idx].data_type == data_type:
                    result.append(idx-offset)
        return result
    def publish(self):
        """Publish the dataset on the OpenML server.

        Upload the dataset description and dataset content to openml.

        Returns
        -------
        self
        """
        file_elements = {'description': self._to_xml()}
        file_dictionary = {}
        if self.data_file is not None:
            file_dictionary['dataset'] = self.data_file
        return_value = _perform_api_call("/data/", file_dictionary=file_dictionary,
                                          file_elements=file_elements)
        self.dataset_id = int(xmltodict.parse(return_value)['oml:upload_data_set']['oml:id'])
        return self
    def _to_xml(self):
        """Serialize object to xml for upload

        Returns
        -------
        xml_dataset : str
            XML description of the data.
        """
        xml_dataset = ('<oml:data_set_description '
                       'xmlns:oml="http://openml.org/openml">\n')
        props = ['id', 'name', 'version', 'description', 'format', 'creator',
                 'contributor', 'collection_date', 'upload_date', 'language',
                 'licence', 'url', 'default_target_attribute',
                 'row_id_attribute', 'ignore_attribute', 'version_label',
                 'citation', 'tag', 'visibility', 'original_data_url',
                 'paper_url', 'update_comment', 'md5_checksum']  # , 'data_file']
        for prop in props:
            content = getattr(self, prop, None)
            if content is not None:
                xml_dataset += "<oml:{0}>{1}</oml:{0}>\n".format(prop, content)
        xml_dataset += "</oml:data_set_description>"
        return xml_dataset
    def _data_features_supported(self):
        # Only numeric and nominal features can currently be pickled/loaded;
        # datasets with e.g. string features are rejected.
        if self.features is not None:
            for idx in self.features:
                if self.features[idx].data_type not in ['numeric', 'nominal']:
                    return False
            return True
        return True
def _check_qualities(qualities):
if qualities is not None:
qualities_ = {}
for xmlquality in qualities:
name = xmlquality['oml:name']
if xmlquality.get('oml:value', None) is None:
value = float('NaN')
elif xmlquality['oml:value'] == 'null':
value = float('NaN')
else:
value = float(xmlquality['oml:value'])
qualities_[name] = value
return qualities_
else:
return None
| [
"logging.getLogger",
"struct.calcsize",
"os.path.exists",
"os.path.getsize",
"xmltodict.parse",
"gzip.open",
"six.moves.cPickle.load",
"arff.ArffDecoder",
"six.moves.cPickle.dump",
"io.open",
"numpy.array",
"numpy.sum",
"sys.stdout.flush",
"six.moves.zip"
] | [((305, 332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'import logging\n'), ((6772, 6792), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (6787, 6792), False, 'import struct\n'), ((7200, 7218), 'arff.ArffDecoder', 'arff.ArffDecoder', ([], {}), '()\n', (7216, 7218), False, 'import arff\n'), ((8174, 8194), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8188, 8194), False, 'import os\n'), ((9332, 9423), 'numpy.array', 'np.array', (['[(True if column not in to_exclude else False) for column in attribute_names]'], {}), '([(True if column not in to_exclude else False) for column in\n attribute_names])\n', (9340, 9423), True, 'import numpy as np\n'), ((9848, 9927), 'numpy.array', 'np.array', (['[(True if column in target else False) for column in attribute_names]'], {}), '([(True if column in target else False) for column in attribute_names])\n', (9856, 9927), True, 'import numpy as np\n'), ((12409, 12447), 'io.open', 'io.open', (['arffFileName'], {'encoding': '"""utf8"""'}), "(arffFileName, encoding='utf8')\n", (12416, 12447), False, 'import io\n'), ((3623, 3660), 'os.path.exists', 'os.path.exists', (['self.data_pickle_file'], {}), '(self.data_pickle_file)\n', (3637, 3660), False, 'import os\n'), ((6820, 6845), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (6835, 6845), False, 'import os\n'), ((7390, 7409), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (7399, 7409), False, 'import gzip\n'), ((7487, 7521), 'io.open', 'io.open', (['filename'], {'encoding': '"""utf8"""'}), "(filename, encoding='utf8')\n", (7494, 7521), False, 'import io\n'), ((8445, 8460), 'six.moves.cPickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (8456, 8460), True, 'from six.moves import cPickle as pickle\n'), ((9973, 9988), 'numpy.sum', 'np.sum', (['targets'], {}), '(targets)\n', (9979, 9988), True, 'import numpy as np\n'), ((10256, 10299), 'six.moves.zip', 
'six.moves.zip', (['categorical', 'attribute_names'], {}), '(categorical, attribute_names)\n', (10269, 10299), False, 'import six\n'), ((10944, 10962), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10960, 10962), False, 'import sys\n'), ((12478, 12496), 'arff.ArffDecoder', 'arff.ArffDecoder', ([], {}), '()\n', (12494, 12496), False, 'import arff\n'), ((15964, 15993), 'xmltodict.parse', 'xmltodict.parse', (['return_value'], {}), '(return_value)\n', (15979, 15993), False, 'import xmltodict\n'), ((4927, 4981), 'six.moves.cPickle.dump', 'pickle.dump', (['(X, categorical, attribute_names)', 'fh', '(-1)'], {}), '((X, categorical, attribute_names), fh, -1)\n', (4938, 4981), True, 'from six.moves import cPickle as pickle\n'), ((10132, 10147), 'numpy.sum', 'np.sum', (['targets'], {}), '(targets)\n', (10138, 10147), True, 'import numpy as np\n'), ((4727, 4767), 'numpy.array', 'np.array', (["data['data']"], {'dtype': 'np.float32'}), "(data['data'], dtype=np.float32)\n", (4735, 4767), True, 'import numpy as np\n')] |
import pathlib
import warnings
import numpy as np
import pytest
import xarray as xr
from tests.fixtures import generate_dataset
from xcdat.dataset import (
_has_cf_compliant_time,
_keep_single_var,
_postprocess_dataset,
_preprocess_non_cf_dataset,
_split_time_units_attr,
decode_non_cf_time,
open_dataset,
open_mfdataset,
)
from xcdat.logger import setup_custom_logger
logger = setup_custom_logger("xcdat.dataset", propagate=True)
class TestOpenDataset:
    """Tests for ``xcdat.dataset.open_dataset``."""
    @pytest.fixture(autouse=True)
    def setup(self, tmp_path):
        # Create temporary directory to save files.
        dir = tmp_path / "input_data"
        dir.mkdir()
        self.file_path = f"{dir}/file.nc"
    def test_non_cf_compliant_time_is_not_decoded(self):
        # With decode_times=False the non-CF time axis must be returned
        # untouched.
        ds = generate_dataset(cf_compliant=False, has_bounds=True)
        ds.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, decode_times=False)
        expected = generate_dataset(cf_compliant=False, has_bounds=True)
        assert result.identical(expected)
    def test_non_cf_compliant_time_is_decoded(self):
        # "months since ..." units are not CF-decodable by xarray; xcdat is
        # expected to decode them itself into datetime64 coordinates.
        ds = generate_dataset(cf_compliant=False, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, data_var="ts")
        # Generate an expected dataset with decoded non-CF compliant time units.
        expected = generate_dataset(cf_compliant=True, has_bounds=True)
        expected_time_data = np.array(
            [
                "2000-01-01T00:00:00.000000000",
                "2000-02-01T00:00:00.000000000",
                "2000-03-01T00:00:00.000000000",
                "2000-04-01T00:00:00.000000000",
                "2000-05-01T00:00:00.000000000",
                "2000-06-01T00:00:00.000000000",
                "2000-07-01T00:00:00.000000000",
                "2000-08-01T00:00:00.000000000",
                "2000-09-01T00:00:00.000000000",
                "2000-10-01T00:00:00.000000000",
                "2000-11-01T00:00:00.000000000",
                "2000-12-01T00:00:00.000000000",
                "2001-01-01T00:00:00.000000000",
                "2001-02-01T00:00:00.000000000",
                "2001-03-01T00:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        expected["time"] = xr.DataArray(
            name="time",
            data=expected_time_data,
            dims="time",
            attrs={
                "units": "months since 2000-01-01",
                "calendar": "standard",
                "axis": "T",
                "long_name": "time",
                "standard_name": "time",
                "bounds": "time_bnds",
            },
        )
        # Bounds are mid-month boundaries around each monthly time point.
        expected.time_bnds.data[:] = np.array(
            [
                ["1999-12-16T12:00:00.000000000", "2000-01-16T12:00:00.000000000"],
                ["2000-01-16T12:00:00.000000000", "2000-02-15T12:00:00.000000000"],
                ["2000-02-15T12:00:00.000000000", "2000-03-16T12:00:00.000000000"],
                ["2000-03-16T12:00:00.000000000", "2000-04-16T00:00:00.000000000"],
                ["2000-04-16T00:00:00.000000000", "2000-05-16T12:00:00.000000000"],
                ["2000-05-16T12:00:00.000000000", "2000-06-16T00:00:00.000000000"],
                ["2000-06-16T00:00:00.000000000", "2000-07-16T12:00:00.000000000"],
                ["2000-07-16T12:00:00.000000000", "2000-08-16T12:00:00.000000000"],
                ["2000-08-16T12:00:00.000000000", "2000-09-16T00:00:00.000000000"],
                ["2000-09-16T00:00:00.000000000", "2000-10-16T12:00:00.000000000"],
                ["2000-10-16T12:00:00.000000000", "2000-11-16T00:00:00.000000000"],
                ["2000-11-16T00:00:00.000000000", "2000-12-16T12:00:00.000000000"],
                ["2000-12-16T12:00:00.000000000", "2001-01-16T12:00:00.000000000"],
                ["2001-01-16T12:00:00.000000000", "2001-02-15T00:00:00.000000000"],
                ["2001-02-15T00:00:00.000000000", "2001-03-15T00:00:00.000000000"],
            ],
            dtype="datetime64[ns]",
        )
        expected.time.encoding = {
            # Set source as result source because it changes every test run.
            "source": result.time.encoding["source"],
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": "months since 2000-01-01",
            "calendar": "standard",
        }
        assert result.identical(expected)
        assert result.time.encoding == expected.time.encoding
    def test_preserves_lat_and_lon_bounds_if_they_exist(self):
        # Existing bounds must not be regenerated or dropped on open.
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        # Suppress UserWarning regarding missing time.encoding "units" because
        # it is not relevant to this test.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ds.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, data_var="ts")
        expected = ds.copy()
        assert result.identical(expected)
    def test_keeps_specified_var(self):
        # When data_var is given, all other data variables are dropped.
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        # Create a modified version of the Dataset with a new var
        ds_mod = ds.copy()
        ds_mod["tas"] = ds_mod.ts.copy()
        # Suppress UserWarning regarding missing time.encoding "units" because
        # it is not relevant to this test.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ds_mod.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, data_var="ts")
        expected = ds.copy()
        assert result.identical(expected)
class TestOpenMfDataset:
    """Tests for ``xcdat.dataset.open_mfdataset`` (multi-file open)."""
    @pytest.fixture(autouse=True)
    def setUp(self, tmp_path):
        # Create temporary directory to save files.
        dir = tmp_path / "input_data"
        dir.mkdir()
        self.file_path1 = f"{dir}/file1.nc"
        self.file_path2 = f"{dir}/file2.nc"
    def test_non_cf_compliant_time_is_not_decoded(self):
        # With decode_times=False the merged dataset keeps the raw time axis.
        ds1 = generate_dataset(cf_compliant=False, has_bounds=True)
        ds1.to_netcdf(self.file_path1)
        ds2 = generate_dataset(cf_compliant=False, has_bounds=True)
        ds2 = ds2.rename_vars({"ts": "tas"})
        ds2.to_netcdf(self.file_path2)
        result = open_mfdataset([self.file_path1, self.file_path2], decode_times=False)
        expected = ds1.merge(ds2)
        assert result.identical(expected)
    def test_non_cf_compliant_time_is_decoded(self):
        # Non-CF "months since ..." units must be decoded across the merged
        # files into datetime64 coordinates with generated bounds.
        ds1 = generate_dataset(cf_compliant=False, has_bounds=False)
        ds2 = generate_dataset(cf_compliant=False, has_bounds=False)
        ds2 = ds2.rename_vars({"ts": "tas"})
        ds1.to_netcdf(self.file_path1)
        ds2.to_netcdf(self.file_path2)
        result = open_mfdataset(
            [self.file_path1, self.file_path2],
            data_var="ts",
        )
        # Generate an expected dataset, which is a combination of both datasets
        # with decoded time units and coordinate bounds.
        expected = generate_dataset(cf_compliant=True, has_bounds=True)
        expected_time_data = np.array(
            [
                "2000-01-01T00:00:00.000000000",
                "2000-02-01T00:00:00.000000000",
                "2000-03-01T00:00:00.000000000",
                "2000-04-01T00:00:00.000000000",
                "2000-05-01T00:00:00.000000000",
                "2000-06-01T00:00:00.000000000",
                "2000-07-01T00:00:00.000000000",
                "2000-08-01T00:00:00.000000000",
                "2000-09-01T00:00:00.000000000",
                "2000-10-01T00:00:00.000000000",
                "2000-11-01T00:00:00.000000000",
                "2000-12-01T00:00:00.000000000",
                "2001-01-01T00:00:00.000000000",
                "2001-02-01T00:00:00.000000000",
                "2001-03-01T00:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        expected["time"] = xr.DataArray(
            name="time",
            data=expected_time_data,
            dims="time",
            attrs={
                "units": "months since 2000-01-01",
                "calendar": "standard",
                "axis": "T",
                "long_name": "time",
                "standard_name": "time",
                "bounds": "time_bnds",
            },
        )
        # Bounds are mid-month boundaries around each monthly time point.
        expected.time_bnds.data[:] = np.array(
            [
                ["1999-12-16T12:00:00.000000000", "2000-01-16T12:00:00.000000000"],
                ["2000-01-16T12:00:00.000000000", "2000-02-15T12:00:00.000000000"],
                ["2000-02-15T12:00:00.000000000", "2000-03-16T12:00:00.000000000"],
                ["2000-03-16T12:00:00.000000000", "2000-04-16T00:00:00.000000000"],
                ["2000-04-16T00:00:00.000000000", "2000-05-16T12:00:00.000000000"],
                ["2000-05-16T12:00:00.000000000", "2000-06-16T00:00:00.000000000"],
                ["2000-06-16T00:00:00.000000000", "2000-07-16T12:00:00.000000000"],
                ["2000-07-16T12:00:00.000000000", "2000-08-16T12:00:00.000000000"],
                ["2000-08-16T12:00:00.000000000", "2000-09-16T00:00:00.000000000"],
                ["2000-09-16T00:00:00.000000000", "2000-10-16T12:00:00.000000000"],
                ["2000-10-16T12:00:00.000000000", "2000-11-16T00:00:00.000000000"],
                ["2000-11-16T00:00:00.000000000", "2000-12-16T12:00:00.000000000"],
                ["2000-12-16T12:00:00.000000000", "2001-01-16T12:00:00.000000000"],
                ["2001-01-16T12:00:00.000000000", "2001-02-15T00:00:00.000000000"],
                ["2001-02-15T00:00:00.000000000", "2001-03-15T00:00:00.000000000"],
            ],
            dtype="datetime64[ns]",
        )
        expected.time.encoding = {
            # Set source as result source because it changes every test run.
            "source": result.time.encoding["source"],
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": "months since 2000-01-01",
            "calendar": "standard",
        }
        assert result.identical(expected)
        assert result.time.encoding == expected.time.encoding
    def test_keeps_specified_var(self):
        # When data_var is given, variables from the other file are dropped.
        ds1 = generate_dataset(cf_compliant=True, has_bounds=True)
        ds2 = generate_dataset(cf_compliant=True, has_bounds=True)
        ds2 = ds2.rename_vars({"ts": "tas"})
        # Suppress UserWarning regarding missing time.encoding "units" because
        # it is not relevant to this test.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ds1.to_netcdf(self.file_path1)
            ds2.to_netcdf(self.file_path2)
        result = open_mfdataset([self.file_path1, self.file_path2], data_var="ts")
        # Generate an expected dataset with decoded non-CF compliant time units.
        expected = generate_dataset(cf_compliant=True, has_bounds=True)
        assert result.identical(expected)
class TestHasCFCompliantTime:
    """Tests for the ``_has_cf_compliant_time`` helper across path types."""
    @pytest.fixture(autouse=True)
    def setUp(self, tmp_path):
        # All dummy files are written beneath a per-test temporary directory.
        self.dir = tmp_path / "input_data"
        self.dir.mkdir()
        self.file_path = f"{self.dir}/file.nc"
    def test_non_cf_compliant_time(self):
        # A dataset whose time units cannot be CF-decoded yields False.
        dummy_ds = generate_dataset(cf_compliant=False, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time(self.file_path) is False
    def test_no_time_axis(self):
        # Stripping the time axis entirely should yield None.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds = dummy_ds.isel(time=0).squeeze(drop=True).reset_coords().drop_vars("time")
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time(self.file_path) is None
    def test_glob_cf_compliant_time(self):
        # A wildcard path must be expanded and evaluated correctly.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time(f"{self.dir}/*.nc") is True
    def test_list_cf_compliant_time(self):
        # A plain list of file paths must be evaluated correctly.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        path_list = [self.file_path, self.file_path, self.file_path]
        assert _has_cf_compliant_time(path_list) is True
    def test_cf_compliant_time_with_string_path(self):
        # A single string path with CF-compliant time yields True.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time(self.file_path) is True
    def test_cf_compliant_time_with_pathlib_path(self):
        # A pathlib.Path input behaves the same as a string path.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time(pathlib.Path(self.file_path)) is True
    def test_cf_compliant_time_with_list_of_list_of_strings(self):
        # A list wrapper around the string path is also accepted.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time([self.file_path]) is True
    def test_cf_compliant_time_with_list_of_list_of_pathlib_paths(self):
        # A nested list of pathlib.Path objects is also accepted.
        dummy_ds = generate_dataset(cf_compliant=True, has_bounds=False)
        dummy_ds.to_netcdf(self.file_path)
        assert _has_cf_compliant_time([[pathlib.Path(self.file_path)]]) is True
class TestDecodeNonCFTimeUnits:
    @pytest.fixture(autouse=True)
    def setup(self):
        # Build a minimal dataset with an integer time axis; each test adds
        # its own non-CF "units" attribute (e.g. "months since ...") before
        # calling decode_non_cf_time.
        time = xr.DataArray(
            name="time",
            data=[1, 2, 3],
            dims=["time"],
            attrs={
                "bounds": "time_bnds",
                "axis": "T",
                "long_name": "time",
                "standard_name": "time",
                "calendar": "noleap",
            },
        )
        time_bnds = xr.DataArray(
            name="time_bnds",
            data=[[0, 1], [1, 2], [2, 3]],
            dims=["time", "bnds"],
        )
        # Mimic the encoding a real netCDF read would attach, so the tests
        # can assert encodings survive the decoding step.
        time_bnds.encoding = {
            "zlib": False,
            "shuffle": False,
            "complevel": 0,
            "fletcher32": False,
            "contiguous": False,
            "chunksizes": (1, 2),
            "source": "None",
            "original_shape": (1980, 2),
            "dtype": np.dtype("float64"),
        }
        self.ds = xr.Dataset({"time": time, "time_bnds": time_bnds})
    def test_raises_error_if_function_is_called_on_already_decoded_cf_compliant_dataset(
        self,
    ):
        # An already-decoded dataset lacks the encoding this function parses,
        # so a KeyError is expected.
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        with pytest.raises(KeyError):
            decode_non_cf_time(ds)
    def test_decodes_months_with_a_reference_date_at_the_start_of_the_month(self):
        # Offsets [1, 2, 3] months from 2000-01-01 land on the first of
        # Feb/Mar/Apr; bounds span the preceding month boundary.
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 2000-01-01"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-02-01", "2000-03-01", "2000-04-01"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-01-01", "2000-02-01"],
                            ["2000-02-01", "2000-03-01"],
                            ["2000-03-01", "2000-04-01"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        # Encoding must record the original (pre-decoding) units/calendar.
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding
    def test_decodes_months_with_a_reference_date_at_the_middle_of_the_month(self):
        # A mid-month reference (the 15th) must be preserved in each decoded
        # time point and bound.
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 2000-01-15"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-02-15", "2000-03-15", "2000-04-15"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-01-15", "2000-02-15"],
                            ["2000-02-15", "2000-03-15"],
                            ["2000-03-15", "2000-04-15"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        # Encoding must record the original (pre-decoding) units/calendar.
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding
    def test_decodes_months_with_a_reference_date_at_the_end_of_the_month(self):
        # An end-of-month reference snaps to each month's last day,
        # including the Feb 29 leap day in 2000.
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 1999-12-31"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-01-31", "2000-02-29", "2000-03-31"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["1999-12-31", "2000-01-31"],
                            ["2000-01-31", "2000-02-29"],
                            ["2000-02-29", "2000-03-31"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        # Encoding must record the original (pre-decoding) units/calendar.
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding
    def test_decodes_months_with_a_reference_date_on_a_leap_year(self):
        # A Feb 29 (leap day) reference keeps day-of-month 29 in the
        # following months.
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 2000-02-29"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-03-29", "2000-04-29", "2000-05-29"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-02-29", "2000-03-29"],
                            ["2000-03-29", "2000-04-29"],
                            ["2000-04-29", "2000-05-29"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        # Encoding must record the original (pre-decoding) units/calendar.
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding
def test_decodes_years_with_a_reference_date_at_the_middle_of_the_year(self):
ds = self.ds.copy()
ds.time.attrs["units"] = "years since 2000-06-01"
result = decode_non_cf_time(ds)
expected = xr.Dataset(
{
"time": xr.DataArray(
name="time",
data=np.array(
["2001-06-01", "2002-06-01", "2003-06-01"],
dtype="datetime64",
),
dims=["time"],
attrs=ds.time.attrs,
),
"time_bnds": xr.DataArray(
name="time_bnds",
data=np.array(
[
["2000-06-01", "2001-06-01"],
["2001-06-01", "2002-06-01"],
["2002-06-01", "2003-06-01"],
],
dtype="datetime64",
),
dims=["time", "bnds"],
attrs=ds.time_bnds.attrs,
),
}
)
assert result.identical(expected)
expected.time.encoding = {
"source": "None",
"dtype": np.dtype(np.int64),
"original_shape": expected.time.data.shape,
"units": ds.time.attrs["units"],
"calendar": ds.time.attrs["calendar"],
}
expected.time_bnds.encoding = ds.time_bnds.encoding
assert result.time.encoding == expected.time.encoding
assert result.time_bnds.encoding == expected.time_bnds.encoding
def test_decodes_years_with_a_reference_date_on_a_leap_year(self):
ds = self.ds.copy()
ds.time.attrs["units"] = "years since 2000-02-29"
result = decode_non_cf_time(ds)
expected = xr.Dataset(
{
"time": xr.DataArray(
name="time",
data=[
np.datetime64("2001-02-28"),
np.datetime64("2002-02-28"),
np.datetime64("2003-02-28"),
],
dims=["time"],
),
"time_bnds": xr.DataArray(
name="time_bnds",
data=np.array(
[
["2000-02-29", "2001-02-28"],
["2001-02-28", "2002-02-28"],
["2002-02-28", "2003-02-28"],
],
dtype="datetime64",
),
dims=["time", "bnds"],
attrs=ds.time_bnds.attrs,
),
}
)
expected.time.attrs = ds.time.attrs
assert result.identical(expected)
expected.time.encoding = {
"source": "None",
"dtype": np.dtype(np.int64),
"original_shape": expected.time.data.shape,
"units": ds.time.attrs["units"],
"calendar": ds.time.attrs["calendar"],
}
expected.time_bnds.encoding = ds.time_bnds.encoding
assert result.time.encoding == expected.time.encoding
assert result.time_bnds.encoding == expected.time_bnds.encoding
class TestPostProcessDataset:
    """Tests for ``_postprocess_dataset`` (variable selection, time centering,
    bounds generation, and longitude reorientation)."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Fresh CF-compliant dataset with bounds for each test.
        self.ds = generate_dataset(cf_compliant=True, has_bounds=True)

    def test_keeps_specified_var(self):
        """Keeping ``data_var="ts"`` drops other non-bounds variables."""
        ds = generate_dataset(cf_compliant=True, has_bounds=True)

        # Create a modified version of the Dataset with a new var.
        ds_mod = ds.copy()
        ds_mod["tas"] = ds_mod.ts.copy()

        # BUG FIX: the modified dataset (with the extra "tas" variable) must be
        # passed here — the original test passed ``ds``, so the drop behavior
        # was never exercised.
        result = _postprocess_dataset(ds_mod, data_var="ts")
        expected = ds.copy()
        assert "tas" not in result.data_vars
        assert result.identical(expected)

    def test_centers_time(self):
        """``center_times=True`` shifts time points to the middle of each bounds
        interval while preserving the time encoding."""
        ds = generate_dataset(cf_compliant=True, has_bounds=True)

        uncentered_time = np.array(
            [
                "2000-01-31T12:00:00.000000000",
                "2000-02-29T12:00:00.000000000",
                "2000-03-31T12:00:00.000000000",
                "2000-04-30T00:00:00.000000000",
                "2000-05-31T12:00:00.000000000",
                "2000-06-30T00:00:00.000000000",
                "2000-07-31T12:00:00.000000000",
                "2000-08-31T12:00:00.000000000",
                "2000-09-30T00:00:00.000000000",
                "2000-10-16T12:00:00.000000000",
                "2000-11-30T00:00:00.000000000",
                "2000-12-31T12:00:00.000000000",
                "2001-01-31T12:00:00.000000000",
                "2001-02-28T00:00:00.000000000",
                "2001-12-31T12:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        ds.time.data[:] = uncentered_time
        ds.time.encoding = {
            "source": None,
            "dtype": np.dtype(np.int64),
            "original_shape": ds.time.data.shape,
            "units": "days since 2000-01-01",
            "calendar": "standard",
            "_FillValue": False,
        }

        # Compare result of the method against the expected.
        result = _postprocess_dataset(ds, center_times=True)
        expected = ds.copy()
        expected_time_data = np.array(
            [
                "2000-01-16T12:00:00.000000000",
                "2000-02-15T12:00:00.000000000",
                "2000-03-16T12:00:00.000000000",
                "2000-04-16T00:00:00.000000000",
                "2000-05-16T12:00:00.000000000",
                "2000-06-16T00:00:00.000000000",
                "2000-07-16T12:00:00.000000000",
                "2000-08-16T12:00:00.000000000",
                "2000-09-16T00:00:00.000000000",
                "2000-10-16T12:00:00.000000000",
                "2000-11-16T00:00:00.000000000",
                "2000-12-16T12:00:00.000000000",
                "2001-01-16T12:00:00.000000000",
                "2001-02-15T00:00:00.000000000",
                "2001-12-16T12:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        expected = expected.assign_coords(
            {
                "time": xr.DataArray(
                    name="time",
                    data=expected_time_data,
                    coords={"time": expected_time_data},
                    dims="time",
                    attrs={
                        "long_name": "time",
                        "standard_name": "time",
                        "axis": "T",
                        "bounds": "time_bnds",
                    },
                )
            }
        )
        expected.time.encoding = {
            "source": None,
            "dtype": np.dtype("int64"),
            "original_shape": (15,),
            "units": "days since 2000-01-01",
            "calendar": "standard",
            "_FillValue": False,
        }

        # Update time bounds with centered time coordinates.
        time_bounds = ds.time_bnds.copy()
        time_bounds["time"] = expected.time
        expected["time_bnds"] = time_bounds

        # Compare result of the function against the expected.
        assert result.identical(expected)
        assert result.time.encoding == expected.time.encoding

    def test_raises_error_if_dataset_has_no_time_coords_but_center_times_is_true(self):
        """Centering times without a time dimension is a ValueError."""
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds = ds.drop_dims("time")

        with pytest.raises(ValueError):
            _postprocess_dataset(ds, center_times=True)

    def test_adds_missing_lat_and_lon_bounds(self):
        """``add_bounds=True`` generates missing lat/lon bounds variables."""
        # Create expected dataset without bounds.
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        data_vars = list(ds.data_vars.keys())
        assert "lat_bnds" not in data_vars
        assert "lon_bnds" not in data_vars

        result = _postprocess_dataset(ds, add_bounds=True)
        result_data_vars = list(result.data_vars.keys())
        assert "lat_bnds" in result_data_vars
        assert "lon_bnds" in result_data_vars

    def test_orients_longitude_bounds_from_180_to_360_and_sorts_with_prime_meridian_cell(
        self,
    ):
        """(-180, 180) longitudes reorient to (0, 360), splitting the cell that
        straddles the prime meridian."""
        # Chunk the dataset to test method also works with Dask.
        ds = xr.Dataset(
            coords={
                "lon": xr.DataArray(
                    name="lon",
                    data=np.array([-180, -1, 0, 1, 179]),
                    dims=["lon"],
                    attrs={"units": "degrees_east", "axis": "X", "bounds": "lon_bnds"},
                )
            },
            data_vars={
                "lon_bnds": xr.DataArray(
                    name="lon_bnds",
                    data=np.array(
                        [
                            [-180.5, -1.5],
                            [-1.5, -0.5],
                            [-0.5, 0.5],
                            [0.5, 1.5],
                            [1.5, 179.5],
                        ]
                    ),
                    dims=["lon", "bnds"],
                    attrs={"is_generated": "True"},
                ),
            },
        ).chunk({"lon": 2})

        result = _postprocess_dataset(
            ds, data_var=None, center_times=False, add_bounds=True, lon_orient=(0, 360)
        )
        expected = xr.Dataset(
            coords={
                "lon": xr.DataArray(
                    name="lon",
                    data=np.array([0.0, 1.0, 179.0, 180.0, 359.0, 360.0]),
                    dims=["lon"],
                    attrs={"units": "degrees_east", "axis": "X", "bounds": "lon_bnds"},
                )
            },
            data_vars={
                "lon_bnds": xr.DataArray(
                    name="lon_bnds",
                    data=np.array(
                        [
                            [0, 0.5],
                            [0.5, 1.5],
                            [1.5, 179.5],
                            [179.5, 358.5],
                            [358.5, 359.5],
                            [359.5, 360],
                        ]
                    ),
                    dims=["lon", "bnds"],
                    attrs={"is_generated": "True"},
                ),
            },
        )
        assert result.identical(expected)

    def test_raises_error_if_dataset_has_no_longitude_coords_but_lon_orient_is_specified(
        self,
    ):
        """Reorienting longitudes without a lon dimension is a ValueError."""
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds = ds.drop_dims("lon")

        with pytest.raises(ValueError):
            _postprocess_dataset(ds, lon_orient=(0, 360))
class TestKeepSingleVar:
    """Tests for ``_keep_single_var`` (single-variable subsetting)."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Base dataset plus a variant carrying an extra "tas" variable.
        self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
        self.ds_mod = self.ds.copy()
        self.ds_mod["tas"] = self.ds_mod.ts.copy()

    def tests_raises_error_if_only_bounds_data_variables_exist(self):
        """A dataset containing nothing but bounds variables is rejected."""
        bounds_only = self.ds.copy().drop_vars("ts")
        with pytest.raises(ValueError):
            _keep_single_var(bounds_only, key="ts")

    def test_raises_error_if_specified_data_var_does_not_exist(self):
        """Requesting a nonexistent variable is rejected."""
        with pytest.raises(ValueError):
            _keep_single_var(self.ds_mod.copy(), key="nonexistent")

    def test_raises_error_if_specified_data_var_is_a_bounds_var(self):
        """Requesting a bounds variable as the data variable is rejected."""
        with pytest.raises(ValueError):
            _keep_single_var(self.ds_mod.copy(), key="lat_bnds")

    def test_returns_dataset_with_specified_data_var(self):
        """Only the requested variable (plus bounds) remains in the result."""
        result = _keep_single_var(self.ds_mod, key="ts")
        assert result.identical(self.ds.copy())
        assert not result.identical(self.ds_mod)

    def test_bounds_always_persist(self):
        """Bounds variables survive subsetting regardless of the chosen key."""
        result = _keep_single_var(self.ds_mod, key="ts")
        for bounds_var in ("lat_bnds", "lon_bnds", "time_bnds"):
            assert result.get(bounds_var) is not None
class TestPreProcessNonCFDataset:
    """Tests for ``_preprocess_non_cf_dataset`` (user preprocess callable +
    non-CF time decoding)."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Non-CF-compliant dataset with bounds for each test.
        self.ds = generate_dataset(cf_compliant=False, has_bounds=True)

    def test_user_specified_callable_results_in_subsetting_dataset_on_time_slice(self):
        """The user callable runs first, then non-CF time is decoded."""

        # Renamed from ``callable`` — the original name shadowed the builtin.
        def subset_first_time_step(ds):
            # Keep only the first time step.
            return ds.isel(time=slice(0, 1))

        ds = self.ds.copy()
        result = _preprocess_non_cf_dataset(ds, subset_first_time_step)

        expected = ds.copy().isel(time=slice(0, 1))
        expected["time"] = xr.DataArray(
            name="time",
            data=np.array(
                ["2000-01-01"],
                dtype="datetime64",
            ),
            dims=["time"],
        )
        expected["time_bnds"] = xr.DataArray(
            name="time_bnds",
            data=np.array(
                [["1999-12-01", "2000-01-01"]],
                dtype="datetime64",
            ),
            dims=["time", "bnds"],
        )
        expected.time.attrs = ds.time.attrs
        expected.time_bnds.attrs = ds.time_bnds.attrs
        assert result.identical(expected)
class TestSplitTimeUnitsAttr:
    """Tests for ``_split_time_units_attr`` (splitting a CF ``units`` string)."""

    def test_raises_error_if_units_attr_is_none(self):
        """A ``None`` units attribute raises a KeyError."""
        with pytest.raises(KeyError):
            _split_time_units_attr(None)  # type: ignore

    def test_splits_units_attr_to_unit_and_reference_date(self):
        """The units string splits into (units, reference date) at 'since'."""
        cases = {
            "months since 1800": ("months", "1800"),
            "months since 1800-01-01": ("months", "1800-01-01"),
            "months since 1800-01-01 00:00:00": ("months", "1800-01-01 00:00:00"),
        }
        for units_attr, expected in cases.items():
            assert _split_time_units_attr(units_attr) == expected
| [
"numpy.array",
"xcdat.dataset._postprocess_dataset",
"pytest.fixture",
"xcdat.dataset.open_mfdataset",
"pathlib.Path",
"numpy.datetime64",
"warnings.simplefilter",
"numpy.dtype",
"xcdat.dataset._preprocess_non_cf_dataset",
"xarray.Dataset",
"xcdat.dataset.open_dataset",
"xcdat.dataset.decode_n... | [((413, 465), 'xcdat.logger.setup_custom_logger', 'setup_custom_logger', (['"""xcdat.dataset"""'], {'propagate': '(True)'}), "('xcdat.dataset', propagate=True)\n", (432, 465), False, 'from xcdat.logger import setup_custom_logger\n'), ((496, 524), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (510, 524), False, 'import pytest\n'), ((5703, 5731), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (5717, 5731), False, 'import pytest\n'), ((11023, 11051), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (11037, 11051), False, 'import pytest\n'), ((14609, 14637), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (14623, 14637), False, 'import pytest\n'), ((25700, 25728), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (25714, 25728), False, 'import pytest\n'), ((32972, 33000), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (32986, 33000), False, 'import pytest\n'), ((34307, 34335), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (34321, 34335), False, 'import pytest\n'), ((779, 832), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(True)'}), '(cf_compliant=False, has_bounds=True)\n', (795, 832), False, 'from tests.fixtures import generate_dataset\n'), ((888, 936), 'xcdat.dataset.open_dataset', 'open_dataset', (['self.file_path'], {'decode_times': '(False)'}), '(self.file_path, decode_times=False)\n', (900, 936), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((956, 1009), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': 
'(True)'}), '(cf_compliant=False, has_bounds=True)\n', (972, 1009), False, 'from tests.fixtures import generate_dataset\n'), ((1119, 1173), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(False)'}), '(cf_compliant=False, has_bounds=False)\n', (1135, 1173), False, 'from tests.fixtures import generate_dataset\n'), ((1229, 1272), 'xcdat.dataset.open_dataset', 'open_dataset', (['self.file_path'], {'data_var': '"""ts"""'}), "(self.file_path, data_var='ts')\n", (1241, 1272), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((1374, 1426), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (1390, 1426), False, 'from tests.fixtures import generate_dataset\n'), ((1456, 2013), 'numpy.array', 'np.array', (["['2000-01-01T00:00:00.000000000', '2000-02-01T00:00:00.000000000',\n '2000-03-01T00:00:00.000000000', '2000-04-01T00:00:00.000000000',\n '2000-05-01T00:00:00.000000000', '2000-06-01T00:00:00.000000000',\n '2000-07-01T00:00:00.000000000', '2000-08-01T00:00:00.000000000',\n '2000-09-01T00:00:00.000000000', '2000-10-01T00:00:00.000000000',\n '2000-11-01T00:00:00.000000000', '2000-12-01T00:00:00.000000000',\n '2001-01-01T00:00:00.000000000', '2001-02-01T00:00:00.000000000',\n '2001-03-01T00:00:00.000000000']"], {'dtype': '"""datetime64[ns]"""'}), "(['2000-01-01T00:00:00.000000000', '2000-02-01T00:00:00.000000000',\n '2000-03-01T00:00:00.000000000', '2000-04-01T00:00:00.000000000',\n '2000-05-01T00:00:00.000000000', '2000-06-01T00:00:00.000000000',\n '2000-07-01T00:00:00.000000000', '2000-08-01T00:00:00.000000000',\n '2000-09-01T00:00:00.000000000', '2000-10-01T00:00:00.000000000',\n '2000-11-01T00:00:00.000000000', '2000-12-01T00:00:00.000000000',\n 
'2001-01-01T00:00:00.000000000', '2001-02-01T00:00:00.000000000',\n '2001-03-01T00:00:00.000000000'], dtype='datetime64[ns]')\n", (1464, 2013), True, 'import numpy as np\n'), ((2303, 2525), 'xarray.DataArray', 'xr.DataArray', ([], {'name': '"""time"""', 'data': 'expected_time_data', 'dims': '"""time"""', 'attrs': "{'units': 'months since 2000-01-01', 'calendar': 'standard', 'axis': 'T',\n 'long_name': 'time', 'standard_name': 'time', 'bounds': 'time_bnds'}"}), "(name='time', data=expected_time_data, dims='time', attrs={\n 'units': 'months since 2000-01-01', 'calendar': 'standard', 'axis': 'T',\n 'long_name': 'time', 'standard_name': 'time', 'bounds': 'time_bnds'})\n", (2315, 2525), True, 'import xarray as xr\n'), ((2724, 3851), 'numpy.array', 'np.array', (["[['1999-12-16T12:00:00.000000000', '2000-01-16T12:00:00.000000000'], [\n '2000-01-16T12:00:00.000000000', '2000-02-15T12:00:00.000000000'], [\n '2000-02-15T12:00:00.000000000', '2000-03-16T12:00:00.000000000'], [\n '2000-03-16T12:00:00.000000000', '2000-04-16T00:00:00.000000000'], [\n '2000-04-16T00:00:00.000000000', '2000-05-16T12:00:00.000000000'], [\n '2000-05-16T12:00:00.000000000', '2000-06-16T00:00:00.000000000'], [\n '2000-06-16T00:00:00.000000000', '2000-07-16T12:00:00.000000000'], [\n '2000-07-16T12:00:00.000000000', '2000-08-16T12:00:00.000000000'], [\n '2000-08-16T12:00:00.000000000', '2000-09-16T00:00:00.000000000'], [\n '2000-09-16T00:00:00.000000000', '2000-10-16T12:00:00.000000000'], [\n '2000-10-16T12:00:00.000000000', '2000-11-16T00:00:00.000000000'], [\n '2000-11-16T00:00:00.000000000', '2000-12-16T12:00:00.000000000'], [\n '2000-12-16T12:00:00.000000000', '2001-01-16T12:00:00.000000000'], [\n '2001-01-16T12:00:00.000000000', '2001-02-15T00:00:00.000000000'], [\n '2001-02-15T00:00:00.000000000', '2001-03-15T00:00:00.000000000']]"], {'dtype': '"""datetime64[ns]"""'}), "([['1999-12-16T12:00:00.000000000', '2000-01-16T12:00:00.000000000'\n ], ['2000-01-16T12:00:00.000000000', 
'2000-02-15T12:00:00.000000000'],\n ['2000-02-15T12:00:00.000000000', '2000-03-16T12:00:00.000000000'], [\n '2000-03-16T12:00:00.000000000', '2000-04-16T00:00:00.000000000'], [\n '2000-04-16T00:00:00.000000000', '2000-05-16T12:00:00.000000000'], [\n '2000-05-16T12:00:00.000000000', '2000-06-16T00:00:00.000000000'], [\n '2000-06-16T00:00:00.000000000', '2000-07-16T12:00:00.000000000'], [\n '2000-07-16T12:00:00.000000000', '2000-08-16T12:00:00.000000000'], [\n '2000-08-16T12:00:00.000000000', '2000-09-16T00:00:00.000000000'], [\n '2000-09-16T00:00:00.000000000', '2000-10-16T12:00:00.000000000'], [\n '2000-10-16T12:00:00.000000000', '2000-11-16T00:00:00.000000000'], [\n '2000-11-16T00:00:00.000000000', '2000-12-16T12:00:00.000000000'], [\n '2000-12-16T12:00:00.000000000', '2001-01-16T12:00:00.000000000'], [\n '2001-01-16T12:00:00.000000000', '2001-02-15T00:00:00.000000000'], [\n '2001-02-15T00:00:00.000000000', '2001-03-15T00:00:00.000000000']],\n dtype='datetime64[ns]')\n", (2732, 3851), True, 'import numpy as np\n'), ((4608, 4660), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (4624, 4660), False, 'from tests.fixtures import generate_dataset\n'), ((4927, 4970), 'xcdat.dataset.open_dataset', 'open_dataset', (['self.file_path'], {'data_var': '"""ts"""'}), "(self.file_path, data_var='ts')\n", (4939, 4970), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((5097, 5149), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (5113, 5149), False, 'from tests.fixtures import generate_dataset\n'), ((5555, 5598), 'xcdat.dataset.open_dataset', 'open_dataset', (['self.file_path'], {'data_var': '"""ts"""'}), 
"(self.file_path, data_var='ts')\n", (5567, 5598), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((6033, 6086), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(True)'}), '(cf_compliant=False, has_bounds=True)\n', (6049, 6086), False, 'from tests.fixtures import generate_dataset\n'), ((6140, 6193), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(True)'}), '(cf_compliant=False, has_bounds=True)\n', (6156, 6193), False, 'from tests.fixtures import generate_dataset\n'), ((6296, 6366), 'xcdat.dataset.open_mfdataset', 'open_mfdataset', (['[self.file_path1, self.file_path2]'], {'decode_times': '(False)'}), '([self.file_path1, self.file_path2], decode_times=False)\n', (6310, 6366), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((6512, 6566), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(False)'}), '(cf_compliant=False, has_bounds=False)\n', (6528, 6566), False, 'from tests.fixtures import generate_dataset\n'), ((6581, 6635), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(False)'}), '(cf_compliant=False, has_bounds=False)\n', (6597, 6635), False, 'from tests.fixtures import generate_dataset\n'), ((6778, 6843), 'xcdat.dataset.open_mfdataset', 'open_mfdataset', (['[self.file_path1, self.file_path2]'], {'data_var': '"""ts"""'}), "([self.file_path1, self.file_path2], data_var='ts')\n", (6792, 6843), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, 
_split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((7036, 7088), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (7052, 7088), False, 'from tests.fixtures import generate_dataset\n'), ((7118, 7675), 'numpy.array', 'np.array', (["['2000-01-01T00:00:00.000000000', '2000-02-01T00:00:00.000000000',\n '2000-03-01T00:00:00.000000000', '2000-04-01T00:00:00.000000000',\n '2000-05-01T00:00:00.000000000', '2000-06-01T00:00:00.000000000',\n '2000-07-01T00:00:00.000000000', '2000-08-01T00:00:00.000000000',\n '2000-09-01T00:00:00.000000000', '2000-10-01T00:00:00.000000000',\n '2000-11-01T00:00:00.000000000', '2000-12-01T00:00:00.000000000',\n '2001-01-01T00:00:00.000000000', '2001-02-01T00:00:00.000000000',\n '2001-03-01T00:00:00.000000000']"], {'dtype': '"""datetime64[ns]"""'}), "(['2000-01-01T00:00:00.000000000', '2000-02-01T00:00:00.000000000',\n '2000-03-01T00:00:00.000000000', '2000-04-01T00:00:00.000000000',\n '2000-05-01T00:00:00.000000000', '2000-06-01T00:00:00.000000000',\n '2000-07-01T00:00:00.000000000', '2000-08-01T00:00:00.000000000',\n '2000-09-01T00:00:00.000000000', '2000-10-01T00:00:00.000000000',\n '2000-11-01T00:00:00.000000000', '2000-12-01T00:00:00.000000000',\n '2001-01-01T00:00:00.000000000', '2001-02-01T00:00:00.000000000',\n '2001-03-01T00:00:00.000000000'], dtype='datetime64[ns]')\n", (7126, 7675), True, 'import numpy as np\n'), ((7965, 8187), 'xarray.DataArray', 'xr.DataArray', ([], {'name': '"""time"""', 'data': 'expected_time_data', 'dims': '"""time"""', 'attrs': "{'units': 'months since 2000-01-01', 'calendar': 'standard', 'axis': 'T',\n 'long_name': 'time', 'standard_name': 'time', 'bounds': 'time_bnds'}"}), "(name='time', data=expected_time_data, dims='time', attrs={\n 'units': 'months since 2000-01-01', 'calendar': 'standard', 'axis': 'T',\n 'long_name': 'time', 'standard_name': 'time', 'bounds': 
'time_bnds'})\n", (7977, 8187), True, 'import xarray as xr\n'), ((8386, 9513), 'numpy.array', 'np.array', (["[['1999-12-16T12:00:00.000000000', '2000-01-16T12:00:00.000000000'], [\n '2000-01-16T12:00:00.000000000', '2000-02-15T12:00:00.000000000'], [\n '2000-02-15T12:00:00.000000000', '2000-03-16T12:00:00.000000000'], [\n '2000-03-16T12:00:00.000000000', '2000-04-16T00:00:00.000000000'], [\n '2000-04-16T00:00:00.000000000', '2000-05-16T12:00:00.000000000'], [\n '2000-05-16T12:00:00.000000000', '2000-06-16T00:00:00.000000000'], [\n '2000-06-16T00:00:00.000000000', '2000-07-16T12:00:00.000000000'], [\n '2000-07-16T12:00:00.000000000', '2000-08-16T12:00:00.000000000'], [\n '2000-08-16T12:00:00.000000000', '2000-09-16T00:00:00.000000000'], [\n '2000-09-16T00:00:00.000000000', '2000-10-16T12:00:00.000000000'], [\n '2000-10-16T12:00:00.000000000', '2000-11-16T00:00:00.000000000'], [\n '2000-11-16T00:00:00.000000000', '2000-12-16T12:00:00.000000000'], [\n '2000-12-16T12:00:00.000000000', '2001-01-16T12:00:00.000000000'], [\n '2001-01-16T12:00:00.000000000', '2001-02-15T00:00:00.000000000'], [\n '2001-02-15T00:00:00.000000000', '2001-03-15T00:00:00.000000000']]"], {'dtype': '"""datetime64[ns]"""'}), "([['1999-12-16T12:00:00.000000000', '2000-01-16T12:00:00.000000000'\n ], ['2000-01-16T12:00:00.000000000', '2000-02-15T12:00:00.000000000'],\n ['2000-02-15T12:00:00.000000000', '2000-03-16T12:00:00.000000000'], [\n '2000-03-16T12:00:00.000000000', '2000-04-16T00:00:00.000000000'], [\n '2000-04-16T00:00:00.000000000', '2000-05-16T12:00:00.000000000'], [\n '2000-05-16T12:00:00.000000000', '2000-06-16T00:00:00.000000000'], [\n '2000-06-16T00:00:00.000000000', '2000-07-16T12:00:00.000000000'], [\n '2000-07-16T12:00:00.000000000', '2000-08-16T12:00:00.000000000'], [\n '2000-08-16T12:00:00.000000000', '2000-09-16T00:00:00.000000000'], [\n '2000-09-16T00:00:00.000000000', '2000-10-16T12:00:00.000000000'], [\n '2000-10-16T12:00:00.000000000', '2000-11-16T00:00:00.000000000'], [\n 
'2000-11-16T00:00:00.000000000', '2000-12-16T12:00:00.000000000'], [\n '2000-12-16T12:00:00.000000000', '2001-01-16T12:00:00.000000000'], [\n '2001-01-16T12:00:00.000000000', '2001-02-15T00:00:00.000000000'], [\n '2001-02-15T00:00:00.000000000', '2001-03-15T00:00:00.000000000']],\n dtype='datetime64[ns]')\n", (8394, 9513), True, 'import numpy as np\n'), ((10248, 10300), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (10264, 10300), False, 'from tests.fixtures import generate_dataset\n'), ((10315, 10367), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (10331, 10367), False, 'from tests.fixtures import generate_dataset\n'), ((10724, 10789), 'xcdat.dataset.open_mfdataset', 'open_mfdataset', (['[self.file_path1, self.file_path2]'], {'data_var': '"""ts"""'}), "([self.file_path1, self.file_path2], data_var='ts')\n", (10738, 10789), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((10891, 10943), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (10907, 10943), False, 'from tests.fixtures import generate_dataset\n'), ((11412, 11466), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(False)'}), '(cf_compliant=False, has_bounds=False)\n', (11428, 11466), False, 'from tests.fixtures import generate_dataset\n'), ((11522, 11560), 'xcdat.dataset._has_cf_compliant_time', '_has_cf_compliant_time', (['self.file_path'], {}), '(self.file_path)\n', (11544, 11560), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, 
_preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((11778, 11831), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (11794, 11831), False, 'from tests.fixtures import generate_dataset\n'), ((12043, 12081), 'xcdat.dataset._has_cf_compliant_time', '_has_cf_compliant_time', (['self.file_path'], {}), '(self.file_path)\n', (12065, 12081), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((12292, 12345), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (12308, 12345), False, 'from tests.fixtures import generate_dataset\n'), ((12401, 12443), 'xcdat.dataset._has_cf_compliant_time', '_has_cf_compliant_time', (['f"""{self.dir}/*.nc"""'], {}), "(f'{self.dir}/*.nc')\n", (12423, 12443), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((12663, 12716), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (12679, 12716), False, 'from tests.fixtures import generate_dataset\n'), ((12837, 12866), 'xcdat.dataset._has_cf_compliant_time', '_has_cf_compliant_time', (['flist'], {}), '(flist)\n', (12859, 12866), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((13088, 13141), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 
'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (13104, 13141), False, 'from tests.fixtures import generate_dataset\n'), ((13197, 13235), 'xcdat.dataset._has_cf_compliant_time', '_has_cf_compliant_time', (['self.file_path'], {}), '(self.file_path)\n', (13219, 13235), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((13476, 13529), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (13492, 13529), False, 'from tests.fixtures import generate_dataset\n'), ((13889, 13942), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (13905, 13942), False, 'from tests.fixtures import generate_dataset\n'), ((13998, 14038), 'xcdat.dataset._has_cf_compliant_time', '_has_cf_compliant_time', (['[self.file_path]'], {}), '([self.file_path])\n', (14020, 14038), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((14296, 14349), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (14312, 14349), False, 'from tests.fixtures import generate_dataset\n'), ((14674, 14850), 'xarray.DataArray', 'xr.DataArray', ([], {'name': '"""time"""', 'data': '[1, 2, 3]', 'dims': "['time']", 'attrs': "{'bounds': 'time_bnds', 'axis': 'T', 'long_name': 'time', 'standard_name':\n 'time', 'calendar': 'noleap'}"}), "(name='time', data=[1, 2, 3], dims=['time'], attrs={'bounds':\n 'time_bnds', 'axis': 'T', 'long_name': 'time', 'standard_name': 'time',\n 'calendar': 'noleap'})\n", 
(14686, 14850), True, 'import xarray as xr\n'), ((15017, 15105), 'xarray.DataArray', 'xr.DataArray', ([], {'name': '"""time_bnds"""', 'data': '[[0, 1], [1, 2], [2, 3]]', 'dims': "['time', 'bnds']"}), "(name='time_bnds', data=[[0, 1], [1, 2], [2, 3]], dims=['time',\n 'bnds'])\n", (15029, 15105), True, 'import xarray as xr\n'), ((15506, 15556), 'xarray.Dataset', 'xr.Dataset', (["{'time': time, 'time_bnds': time_bnds}"], {}), "({'time': time, 'time_bnds': time_bnds})\n", (15516, 15556), True, 'import xarray as xr\n'), ((15681, 15733), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (15697, 15733), False, 'from tests.fixtures import generate_dataset\n'), ((15997, 16019), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (16015, 16019), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((17638, 17660), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (17656, 17660), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((19276, 19298), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (19294, 19298), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((20905, 20927), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (20923, 20927), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, 
decode_non_cf_time, open_dataset, open_mfdataset\n'), ((22543, 22565), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (22561, 22565), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((24170, 24192), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (24188, 24192), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((25768, 25820), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (25784, 25820), False, 'from tests.fixtures import generate_dataset\n'), ((25875, 25927), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (25891, 25927), False, 'from tests.fixtures import generate_dataset\n'), ((26081, 26120), 'xcdat.dataset._postprocess_dataset', '_postprocess_dataset', (['ds'], {'data_var': '"""ts"""'}), "(ds, data_var='ts')\n", (26101, 26120), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((26239, 26291), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (26255, 26291), False, 'from tests.fixtures import generate_dataset\n'), ((26319, 26876), 'numpy.array', 'np.array', (["['2000-01-31T12:00:00.000000000', '2000-02-29T12:00:00.000000000',\n '2000-03-31T12:00:00.000000000', '2000-04-30T00:00:00.000000000',\n 
'2000-05-31T12:00:00.000000000', '2000-06-30T00:00:00.000000000',\n '2000-07-31T12:00:00.000000000', '2000-08-31T12:00:00.000000000',\n '2000-09-30T00:00:00.000000000', '2000-10-16T12:00:00.000000000',\n '2000-11-30T00:00:00.000000000', '2000-12-31T12:00:00.000000000',\n '2001-01-31T12:00:00.000000000', '2001-02-28T00:00:00.000000000',\n '2001-12-31T12:00:00.000000000']"], {'dtype': '"""datetime64[ns]"""'}), "(['2000-01-31T12:00:00.000000000', '2000-02-29T12:00:00.000000000',\n '2000-03-31T12:00:00.000000000', '2000-04-30T00:00:00.000000000',\n '2000-05-31T12:00:00.000000000', '2000-06-30T00:00:00.000000000',\n '2000-07-31T12:00:00.000000000', '2000-08-31T12:00:00.000000000',\n '2000-09-30T00:00:00.000000000', '2000-10-16T12:00:00.000000000',\n '2000-11-30T00:00:00.000000000', '2000-12-31T12:00:00.000000000',\n '2001-01-31T12:00:00.000000000', '2001-02-28T00:00:00.000000000',\n '2001-12-31T12:00:00.000000000'], dtype='datetime64[ns]')\n", (26327, 26876), True, 'import numpy as np\n'), ((27533, 27576), 'xcdat.dataset._postprocess_dataset', '_postprocess_dataset', (['ds'], {'center_times': '(True)'}), '(ds, center_times=True)\n', (27553, 27576), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((27635, 28192), 'numpy.array', 'np.array', (["['2000-01-16T12:00:00.000000000', '2000-02-15T12:00:00.000000000',\n '2000-03-16T12:00:00.000000000', '2000-04-16T00:00:00.000000000',\n '2000-05-16T12:00:00.000000000', '2000-06-16T00:00:00.000000000',\n '2000-07-16T12:00:00.000000000', '2000-08-16T12:00:00.000000000',\n '2000-09-16T00:00:00.000000000', '2000-10-16T12:00:00.000000000',\n '2000-11-16T00:00:00.000000000', '2000-12-16T12:00:00.000000000',\n '2001-01-16T12:00:00.000000000', '2001-02-15T00:00:00.000000000',\n '2001-12-16T12:00:00.000000000']"], {'dtype': '"""datetime64[ns]"""'}), "(['2000-01-16T12:00:00.000000000', 
'2000-02-15T12:00:00.000000000',\n '2000-03-16T12:00:00.000000000', '2000-04-16T00:00:00.000000000',\n '2000-05-16T12:00:00.000000000', '2000-06-16T00:00:00.000000000',\n '2000-07-16T12:00:00.000000000', '2000-08-16T12:00:00.000000000',\n '2000-09-16T00:00:00.000000000', '2000-10-16T12:00:00.000000000',\n '2000-11-16T00:00:00.000000000', '2000-12-16T12:00:00.000000000',\n '2001-01-16T12:00:00.000000000', '2001-02-15T00:00:00.000000000',\n '2001-12-16T12:00:00.000000000'], dtype='datetime64[ns]')\n", (27643, 28192), True, 'import numpy as np\n'), ((29716, 29769), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (29732, 29769), False, 'from tests.fixtures import generate_dataset\n'), ((30017, 30070), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (30033, 30070), False, 'from tests.fixtures import generate_dataset\n'), ((30222, 30263), 'xcdat.dataset._postprocess_dataset', '_postprocess_dataset', (['ds'], {'add_bounds': '(True)'}), '(ds, add_bounds=True)\n', (30242, 30263), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((31514, 31615), 'xcdat.dataset._postprocess_dataset', '_postprocess_dataset', (['ds'], {'data_var': 'None', 'center_times': '(False)', 'add_bounds': '(True)', 'lon_orient': '(0, 360)'}), '(ds, data_var=None, center_times=False, add_bounds=True,\n lon_orient=(0, 360))\n', (31534, 31615), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((32753, 32806), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': 
'(True)', 'has_bounds': '(False)'}), '(cf_compliant=True, has_bounds=False)\n', (32769, 32806), False, 'from tests.fixtures import generate_dataset\n'), ((33040, 33092), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(True)', 'has_bounds': '(True)'}), '(cf_compliant=True, has_bounds=True)\n', (33056, 33092), False, 'from tests.fixtures import generate_dataset\n'), ((33865, 33904), 'xcdat.dataset._keep_single_var', '_keep_single_var', (['self.ds_mod'], {'key': '"""ts"""'}), "(self.ds_mod, key='ts')\n", (33881, 33904), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((34087, 34126), 'xcdat.dataset._keep_single_var', '_keep_single_var', (['self.ds_mod'], {'key': '"""ts"""'}), "(self.ds_mod, key='ts')\n", (34103, 34126), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((34375, 34428), 'tests.fixtures.generate_dataset', 'generate_dataset', ([], {'cf_compliant': '(False)', 'has_bounds': '(True)'}), '(cf_compliant=False, has_bounds=True)\n', (34391, 34428), False, 'from tests.fixtures import generate_dataset\n'), ((34636, 34676), 'xcdat.dataset._preprocess_non_cf_dataset', '_preprocess_non_cf_dataset', (['ds', 'callable'], {}), '(ds, callable)\n', (34662, 34676), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((4256, 4274), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (4264, 4274), True, 'import numpy as np\n'), ((4797, 4822), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4820, 4822), False, 'import warnings\n'), ((4836, 4867), 
'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4857, 4867), False, 'import warnings\n'), ((5421, 5446), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5444, 5446), False, 'import warnings\n'), ((5460, 5491), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5481, 5491), False, 'import warnings\n'), ((9918, 9936), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (9926, 9936), True, 'import numpy as np\n'), ((10549, 10574), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10572, 10574), False, 'import warnings\n'), ((10588, 10619), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (10609, 10619), False, 'import warnings\n'), ((13608, 13636), 'pathlib.Path', 'pathlib.Path', (['self.file_path'], {}), '(self.file_path)\n', (13620, 13636), False, 'import pathlib\n'), ((15457, 15476), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (15465, 15476), True, 'import numpy as np\n'), ((15748, 15771), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (15761, 15771), False, 'import pytest\n'), ((15785, 15807), 'xcdat.dataset.decode_non_cf_time', 'decode_non_cf_time', (['ds'], {}), '(ds)\n', (15803, 15807), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((17072, 17090), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (17080, 17090), True, 'import numpy as np\n'), ((18713, 18731), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (18721, 18731), True, 'import numpy as np\n'), ((20351, 20369), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (20359, 20369), True, 'import numpy as np\n'), ((21980, 21998), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), 
'(np.int64)\n', (21988, 21998), True, 'import numpy as np\n'), ((23618, 23636), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (23626, 23636), True, 'import numpy as np\n'), ((25287, 25305), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (25295, 25305), True, 'import numpy as np\n'), ((27259, 27277), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (27267, 27277), True, 'import numpy as np\n'), ((29073, 29090), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (29081, 29090), True, 'import numpy as np\n'), ((29818, 29843), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (29831, 29843), False, 'import pytest\n'), ((29857, 29900), 'xcdat.dataset._postprocess_dataset', '_postprocess_dataset', (['ds'], {'center_times': '(True)'}), '(ds, center_times=True)\n', (29877, 29900), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((32855, 32880), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (32868, 32880), False, 'import pytest\n'), ((32894, 32939), 'xcdat.dataset._postprocess_dataset', '_postprocess_dataset', (['ds'], {'lon_orient': '(0, 360)'}), '(ds, lon_orient=(0, 360))\n', (32914, 32939), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((33327, 33352), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (33340, 33352), False, 'import pytest\n'), ((33366, 33396), 'xcdat.dataset._keep_single_var', '_keep_single_var', (['ds'], {'key': '"""ts"""'}), "(ds, key='ts')\n", (33382, 33396), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, 
decode_non_cf_time, open_dataset, open_mfdataset\n'), ((33514, 33539), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (33527, 33539), False, 'import pytest\n'), ((33553, 33592), 'xcdat.dataset._keep_single_var', '_keep_single_var', (['ds'], {'key': '"""nonexistent"""'}), "(ds, key='nonexistent')\n", (33569, 33592), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((33711, 33736), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (33724, 33736), False, 'import pytest\n'), ((33750, 33786), 'xcdat.dataset._keep_single_var', '_keep_single_var', (['ds'], {'key': '"""lat_bnds"""'}), "(ds, key='lat_bnds')\n", (33766, 33786), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((35430, 35453), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (35443, 35453), False, 'import pytest\n'), ((35467, 35495), 'xcdat.dataset._split_time_units_attr', '_split_time_units_attr', (['None'], {}), '(None)\n', (35489, 35495), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((35593, 35636), 'xcdat.dataset._split_time_units_attr', '_split_time_units_attr', (['"""months since 1800"""'], {}), "('months since 1800')\n", (35615, 35636), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((35674, 35723), 'xcdat.dataset._split_time_units_attr', '_split_time_units_attr', (['"""months since 1800-01-01"""'], {}), "('months 
since 1800-01-01')\n", (35696, 35723), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((35802, 35860), 'xcdat.dataset._split_time_units_attr', '_split_time_units_attr', (['"""months since 1800-01-01 00:00:00"""'], {}), "('months since 1800-01-01 00:00:00')\n", (35824, 35860), False, 'from xcdat.dataset import _has_cf_compliant_time, _keep_single_var, _postprocess_dataset, _preprocess_non_cf_dataset, _split_time_units_attr, decode_non_cf_time, open_dataset, open_mfdataset\n'), ((28536, 28734), 'xarray.DataArray', 'xr.DataArray', ([], {'name': '"""time"""', 'data': 'expected_time_data', 'coords': "{'time': expected_time_data}", 'dims': '"""time"""', 'attrs': "{'long_name': 'time', 'standard_name': 'time', 'axis': 'T', 'bounds':\n 'time_bnds'}"}), "(name='time', data=expected_time_data, coords={'time':\n expected_time_data}, dims='time', attrs={'long_name': 'time',\n 'standard_name': 'time', 'axis': 'T', 'bounds': 'time_bnds'})\n", (28548, 28734), True, 'import xarray as xr\n'), ((34812, 34856), 'numpy.array', 'np.array', (["['2000-01-01']"], {'dtype': '"""datetime64"""'}), "(['2000-01-01'], dtype='datetime64')\n", (34820, 34856), True, 'import numpy as np\n'), ((35035, 35095), 'numpy.array', 'np.array', (["[['1999-12-01', '2000-01-01']]"], {'dtype': '"""datetime64"""'}), "([['1999-12-01', '2000-01-01']], dtype='datetime64')\n", (35043, 35095), True, 'import numpy as np\n'), ((14430, 14458), 'pathlib.Path', 'pathlib.Path', (['self.file_path'], {}), '(self.file_path)\n', (14442, 14458), False, 'import pathlib\n'), ((16161, 16233), 'numpy.array', 'np.array', (["['2000-02-01', '2000-03-01', '2000-04-01']"], {'dtype': '"""datetime64"""'}), "(['2000-02-01', '2000-03-01', '2000-04-01'], dtype='datetime64')\n", (16169, 16233), True, 'import numpy as np\n'), ((16507, 16632), 'numpy.array', 'np.array', (["[['2000-01-01', 
'2000-02-01'], ['2000-02-01', '2000-03-01'], ['2000-03-01',\n '2000-04-01']]"], {'dtype': '"""datetime64"""'}), "([['2000-01-01', '2000-02-01'], ['2000-02-01', '2000-03-01'], [\n '2000-03-01', '2000-04-01']], dtype='datetime64')\n", (16515, 16632), True, 'import numpy as np\n'), ((17802, 17874), 'numpy.array', 'np.array', (["['2000-02-15', '2000-03-15', '2000-04-15']"], {'dtype': '"""datetime64"""'}), "(['2000-02-15', '2000-03-15', '2000-04-15'], dtype='datetime64')\n", (17810, 17874), True, 'import numpy as np\n'), ((18148, 18273), 'numpy.array', 'np.array', (["[['2000-01-15', '2000-02-15'], ['2000-02-15', '2000-03-15'], ['2000-03-15',\n '2000-04-15']]"], {'dtype': '"""datetime64"""'}), "([['2000-01-15', '2000-02-15'], ['2000-02-15', '2000-03-15'], [\n '2000-03-15', '2000-04-15']], dtype='datetime64')\n", (18156, 18273), True, 'import numpy as np\n'), ((19440, 19512), 'numpy.array', 'np.array', (["['2000-01-31', '2000-02-29', '2000-03-31']"], {'dtype': '"""datetime64"""'}), "(['2000-01-31', '2000-02-29', '2000-03-31'], dtype='datetime64')\n", (19448, 19512), True, 'import numpy as np\n'), ((19786, 19911), 'numpy.array', 'np.array', (["[['1999-12-31', '2000-01-31'], ['2000-01-31', '2000-02-29'], ['2000-02-29',\n '2000-03-31']]"], {'dtype': '"""datetime64"""'}), "([['1999-12-31', '2000-01-31'], ['2000-01-31', '2000-02-29'], [\n '2000-02-29', '2000-03-31']], dtype='datetime64')\n", (19794, 19911), True, 'import numpy as np\n'), ((21069, 21141), 'numpy.array', 'np.array', (["['2000-03-29', '2000-04-29', '2000-05-29']"], {'dtype': '"""datetime64"""'}), "(['2000-03-29', '2000-04-29', '2000-05-29'], dtype='datetime64')\n", (21077, 21141), True, 'import numpy as np\n'), ((21415, 21540), 'numpy.array', 'np.array', (["[['2000-02-29', '2000-03-29'], ['2000-03-29', '2000-04-29'], ['2000-04-29',\n '2000-05-29']]"], {'dtype': '"""datetime64"""'}), "([['2000-02-29', '2000-03-29'], ['2000-03-29', '2000-04-29'], [\n '2000-04-29', '2000-05-29']], dtype='datetime64')\n", (21423, 
21540), True, 'import numpy as np\n'), ((22707, 22779), 'numpy.array', 'np.array', (["['2001-06-01', '2002-06-01', '2003-06-01']"], {'dtype': '"""datetime64"""'}), "(['2001-06-01', '2002-06-01', '2003-06-01'], dtype='datetime64')\n", (22715, 22779), True, 'import numpy as np\n'), ((23053, 23178), 'numpy.array', 'np.array', (["[['2000-06-01', '2001-06-01'], ['2001-06-01', '2002-06-01'], ['2002-06-01',\n '2003-06-01']]"], {'dtype': '"""datetime64"""'}), "([['2000-06-01', '2001-06-01'], ['2001-06-01', '2002-06-01'], [\n '2002-06-01', '2003-06-01']], dtype='datetime64')\n", (23061, 23178), True, 'import numpy as np\n'), ((24678, 24803), 'numpy.array', 'np.array', (["[['2000-02-29', '2001-02-28'], ['2001-02-28', '2002-02-28'], ['2002-02-28',\n '2003-02-28']]"], {'dtype': '"""datetime64"""'}), "([['2000-02-29', '2001-02-28'], ['2001-02-28', '2002-02-28'], [\n '2002-02-28', '2003-02-28']], dtype='datetime64')\n", (24686, 24803), True, 'import numpy as np\n'), ((24360, 24387), 'numpy.datetime64', 'np.datetime64', (['"""2001-02-28"""'], {}), "('2001-02-28')\n", (24373, 24387), True, 'import numpy as np\n'), ((24413, 24440), 'numpy.datetime64', 'np.datetime64', (['"""2002-02-28"""'], {}), "('2002-02-28')\n", (24426, 24440), True, 'import numpy as np\n'), ((24466, 24493), 'numpy.datetime64', 'np.datetime64', (['"""2003-02-28"""'], {}), "('2003-02-28')\n", (24479, 24493), True, 'import numpy as np\n'), ((31780, 31828), 'numpy.array', 'np.array', (['[0.0, 1.0, 179.0, 180.0, 359.0, 360.0]'], {}), '([0.0, 1.0, 179.0, 180.0, 359.0, 360.0])\n', (31788, 31828), True, 'import numpy as np\n'), ((32113, 32210), 'numpy.array', 'np.array', (['[[0, 0.5], [0.5, 1.5], [1.5, 179.5], [179.5, 358.5], [358.5, 359.5], [359.5,\n 360]]'], {}), '([[0, 0.5], [0.5, 1.5], [1.5, 179.5], [179.5, 358.5], [358.5, 359.5\n ], [359.5, 360]])\n', (32121, 32210), True, 'import numpy as np\n'), ((30730, 30761), 'numpy.array', 'np.array', (['[-180, -1, 0, 1, 179]'], {}), '([-180, -1, 0, 1, 179])\n', (30738, 
30761), True, 'import numpy as np\n'), ((31046, 31125), 'numpy.array', 'np.array', (['[[-180.5, -1.5], [-1.5, -0.5], [-0.5, 0.5], [0.5, 1.5], [1.5, 179.5]]'], {}), '([[-180.5, -1.5], [-1.5, -0.5], [-0.5, 0.5], [0.5, 1.5], [1.5, 179.5]])\n', (31054, 31125), True, 'import numpy as np\n')] |
from functools import partial

import numpy as np

import jax
import jax.numpy as jnp
import jax.random as random
from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian

from . import utils
#class Lattice(seed, cell_params, sim_params,
def random_c0(subkeys, odds_c, n):
    """Draw random initial cell-type configurations.

    Cell-type counts are allocated proportionally to `odds_c` (any
    rounding remainder goes to type 0), then each key in `subkeys`
    yields an independent random permutation of that fixed composition.
    Returns the per-type counts and the permuted configuration(s).
    """
    counts = (n * odds_c / odds_c.sum()).astype(int)
    counts = counts.at[0].add(n - counts.sum())
    base = jnp.repeat(jnp.arange(len(odds_c)), counts)
    shuffle = lambda k: random.permutation(k, base)
    # Lift the shuffle over any leading batch dimensions of `subkeys`.
    for _ in range(np.ndim(subkeys) - 1):
        shuffle = vmap(shuffle)
    return counts, shuffle(subkeys)
@jit
def dE_swap(ij, c, W, AL):
    """Energy change from swapping the types of adjacent cells i and j.

    Only the bonds that touch i or j contribute, so the computation is
    local to the pair and its immediate neighborhoods.
    """
    c_after = c.at[ij].set(c[ij[::-1]])
    # Bond energy of the pair's neighborhoods before and after the swap.
    E_before = -W[c[ij, None], c[AL[ij]]].sum()
    E_after = -W[c_after[ij, None], c_after[AL[ij]]].sum()
    return E_after - E_before
@jit
def quadratic_form(a, G):
    """Evaluate the quadratic form a' G a of column vector `a` under `G`."""
    aG = a.T @ G
    return aG @ a
@jit
def P_swap(dE, beta):
    """Metropolis acceptance probability for a proposed i <-> j swap.

    Symmetric with respect to i and j. (A Glauber rule,
    1 / (1 + exp(beta * dE)), would be the natural alternative.)
    """
    boltzmann = jnp.exp(-beta * dE)
    return jnp.minimum(1.0, boltzmann)
@jit
def swap_ij(c, ij):
    """Return `c` with the entries at positions i and j exchanged."""
    return c.at[ij].set(c[ij][::-1])
@jit
def accept_swap(c, P, ij):
    """
    Return the cell-type state after accepting the swap i <-> j.

    `P` is unused; it is kept so the signature matches `reject_swap`
    for use as a `lax.cond` branch in `make_swap`.
    """
    return swap_ij(c, ij)
@jit
def reject_swap(c, P, ij):
    """
    Return the cell-type state after rejecting the swap i <-> j.

    The state is returned unchanged. `P` and `ij` are unused; they are
    kept so the signature matches `accept_swap` for use as a `lax.cond`
    branch in `make_swap`. (Previously this returned `(c, log(1 - P))`,
    whose pytree structure did not match `accept_swap`'s single-array
    return — `lax.cond` requires both branches to return the same
    structure, and `take_MC_step` treats the result as a single array.)
    """
    return c
@jit
def make_swap(c, P, ij, accept):
    """
    Branch on `accept`: apply `accept_swap` (perform the i <-> j swap)
    or `reject_swap` to the state via `lax.cond`.
    """
    on_accept = lambda operands: accept_swap(*operands)
    on_reject = lambda operands: reject_swap(*operands)
    return lax.cond(accept, on_accept, on_reject, (c, P, ij))
@jit
def get_random_pair(key, AL):
    """Draw a uniformly random cell index i and one of its neighbors j."""
    n_cells, n_nb = AL.shape
    lo = jnp.array([0, 0])
    hi = jnp.array([n_cells, n_nb])
    i, slot = random.randint(key=key, shape=(2,), minval=lo, maxval=hi)
    return jnp.array([i, AL[i, slot]])
@jit
def take_MC_step(key, c, beta, W, AL, n):
    """
    Perform one Metropolis Monte Carlo step: propose a swap of a random
    adjacent pair and accept or reject it.

    `key` is consumed; a fresh split key is returned. `c` is the
    cell-type state, `beta` the inverse temperature, `W` the cell-cell
    affinity matrix, `AL` the adjacency list. `n` is unused here; it is
    kept so the argument tuple matches the `MC_iteration` loop carry.

    Returns `(new_key, new_c, expected_dE)` where `expected_dE` is the
    acceptance probability times the proposed energy change.
    """
    key, sk1, sk2 = random.split(key, 3)
    # Pick random interface and acceptance threshold
    ij = get_random_pair(sk1, AL)
    thresh = random.uniform(key=sk2)
    # Take a Metropolis step
    dE = dE_swap(ij, c, W, AL)
    P = P_swap(dE, beta)
    accept = P > thresh
    new_c = make_swap(c, P, ij, accept)
    expected_dE = P * dE
    return key, new_c, expected_dE
@jit
def propose_swap(key, c, beta, W, AL):
    """Propose a random neighbor swap.

    Returns the chosen pair `ij`, the swapped state `c_swap`, the
    energy change `dE`, and the Metropolis acceptance probability `P`.
    """
    pair = get_random_pair(key, AL)
    swapped = swap_ij(c, pair)
    energy_diff = dE_swap(pair, c, W, AL)
    return pair, swapped, energy_diff, P_swap(energy_diff, beta)
@jit
def local_alignment(c, A, k, I, O):
    """Per-cell squared alignment of cell types within k-step neighborhoods.

    `c` is the integer cell-type vector, `A` the adjacency list, `I` the
    identity matrix and `O` the difference matrix mapping types to spins.

    NOTE(review): the original body referenced undefined names (`c_swap`,
    `A_k`, `diff_nb`, `n_diff_nb`) and returned nothing — it raised
    NameError on any call. Reconstructed here from the sibling
    `knn_alignment_per_cell`; confirm the intended semantics.
    """
    A_k = get_knn_adjacency_matrix(A, k)
    nnb = get_num_neighbors(k)
    s = I[c] @ O
    m = A_k @ s / nnb
    return (m ** 2).sum(axis=1)
@jit
def local_alignment_change(ij, c, c_swap, AL, k, I, O):
    """
    Change in summed squared local alignment caused by swapping the pair
    `ij`, restricted to cells that neighbor exactly one of the two
    swapped cells (all other neighborhoods are unaffected by the swap).

    `c` / `c_swap` are the states before and after the swap; `I` and `O`
    are the identity and difference matrices mapping types to spins.
    """
    A_k = get_knn_adjacency_matrix(AL, k)
    # cells that are neighbors (within k radii) of
    # `i` but not `j` and vice-versa - i.e. different neighbors
    diff_nb = jnp.expand_dims(jnp.logical_xor(*A_k[ij]), 1)
    # NOTE(review): 4 * k + 2 is presumably the size of that symmetric
    # difference for this lattice geometry -- confirm.
    n_diff_nb = 4 * k + 2
    s = I[c] @ O
    s_swap = I[c_swap] @ O
    # Mask rows outside the affected set: those cells contribute 0 to
    # both terms and cancel in the difference.
    m_diff_nb = (A_k * diff_nb) @ s / n_diff_nb
    m_diff_nb_swap = (A_k * diff_nb) @ s_swap / n_diff_nb
    return ((m_diff_nb_swap ** 2) - (m_diff_nb ** 2)).sum()
# Vectorize over `k` (axis 0 of the fifth argument) so the alignment
# change can be evaluated at several neighborhood radii in one call.
mapped_local_alignment_change = vmap(
    local_alignment_change, in_axes=(None, None, None, None, 0, None, None)
)
#@jit
def take_MC_step2(args, step):
    """
    One Metropolis step in `lax.scan` form: propose a swap at trajectory
    index `step`, accept/reject it, and record the expected alignment
    change of the proposal.

    `args` carries `(key, c_t, beta_t, W, AL, *align_args)`, where `c_t`
    stores the state trajectory (one row per step) and `beta_t` the
    inverse-temperature schedule. Returns the updated carry (new state
    written to row `step + 1`) and the scalar `expected_d_eta` output.
    """
    key, c_t, beta_t, W, AL, *align_args = args
    c = c_t[step]
    beta = beta_t[step]
    new_key, sk1, sk2 = random.split(key, 3)
    # Propose a random swap
    ij, c_swap, dE, P = propose_swap(sk1, c, beta, W, AL)
    # Expected alignment change, averaged over the mapped radii `k`.
    expected_d_eta = P * mapped_local_alignment_change(
        ij, c, c_swap, AL, *align_args
    ).mean()
    # Accept/reject
    thresh = random.uniform(key=sk2)
    do_swap = P > thresh
    new_c = lax.cond(do_swap, lambda: c_swap, lambda: c)
    return (
        new_key, c_t.at[step + 1].set(new_c), beta_t, W, AL, *align_args
    ), expected_d_eta
@partial(jit, static_argnums=(2, 3, 4))
def simulate(theta, args, nsweeps, n, n_ctypes):
    """
    Run `nsweeps` sweeps of `n` Metropolis proposals each via `lax.scan`.

    `theta[0]`/`theta[1]` shape the inverse-temperature schedule
    `beta_t = 10 ** -utils.map_linear(t, theta[0], theta[1])`
    (presumably a linear map of `t` -- confirm in `utils`); `theta[2]`
    scales the identity cell-cell affinity matrix `W`.
    `args` unpacks as `(key, c, t, _, *more_args)`; the fourth element
    is ignored. Returns the final scan carry and the per-step expected
    alignment changes.
    """
    key, c, t, _, *more_args = args
    beta_t = jnp.power(10., -utils.map_linear(t, theta[0], theta[1]))
    W = jnp.eye(n_ctypes) * theta[2]
    # Each sweep index is repeated n times: n proposals per sweep.
    new_args, expected_d_etas = lax.scan(
        take_MC_step2,
        (key, c, beta_t, W, *more_args),
        jnp.repeat(jnp.arange(nsweeps), n),
    )
    return new_args, expected_d_etas
@partial(jit, static_argnums=(2, 3, 4))
def simulate_loss(theta, args, nsweeps, n, n_ctypes):
    """Scalar loss: mean expected alignment change over a full simulation."""
    _, expected_d_etas = simulate(theta, args, nsweeps, n, n_ctypes)
    return expected_d_etas.mean()
@partial(jit, static_argnums=(2, 3))
def update(theta, args, nt, lr):
    """Perform one gradient-descent update step on `theta`.

    Parameters
    ----------
    theta : parameter vector differentiated through the simulation.
    args : simulation state tuple forwarded to `simulate_loss`.
    nt : static simulation sizes, assumed to be the
        `(nsweeps, n, n_ctypes)` triple that `simulate_loss` expects --
        TODO confirm against callers.
    lr : learning rate (static).

    Returns
    -------
    (new_theta, loss, grads)

    NOTE(review): the original body referenced names from `update_toy`
    (`T`, `key`, `l`, `lr_toy`, `loss`) that are undefined here; it now
    uses this function's own parameters.
    """
    loss, grads = jax.value_and_grad(simulate_loss)(theta, args, *nt)
    new_theta = theta - grads * lr
    return new_theta, loss, grads
@partial(jit, static_argnums=3)
def update_toy(T, key, l, nt, lr_toy):
    """Performs one gradient-descent update step on T.

    Returns `(new_T, loss, grads)`.

    NOTE(review): `simulate_loss` takes five positional arguments
    `(theta, args, nsweeps, n, n_ctypes)` but is called with four here
    -- verify the intended mapping of `(T, key, l, nt)` onto them.
    """
    # Compute the gradients on replicates
    loss, grads = jax.value_and_grad(
        simulate_loss,
    )(T, key, l, nt)
    new_T = T - grads * lr_toy
    return new_T, loss, grads
@jit
def MC_iteration(step, args):
    """
    `lax.fori_loop` body: advance the Monte Carlo state by one step.

    `step` (the loop index) is unused; `take_MC_step` draws its own
    randomness from the carried key. The `expected_dE` diagnostic is
    discarded, and the tail of `args` (beta, W, AL, n) is carried
    through unchanged.
    """
    key, c, *extra = args
    key, c, expected_dE = take_MC_step(*args)
    return key, c, *extra
@jit
def MC_sweep(key, c, beta, W, AL, n):
    """Run one sweep (`n` Metropolis proposals) via `MC_iteration`.

    Returns the final `(key, c, beta, W, AL, n)` carry tuple.
    """
    args = (key, c, beta, W, AL, n)
    return lax.fori_loop(0, n, MC_iteration, args)
@jit
def n_cmatch_t(c_t, AL):
    """Returns number of homotypic interfaces at each time-point.

    Each interface is seen from both endpoints, hence the division by 2.
    NOTE(review): relies on `cmatch_t`, which is not defined in this
    part of the module -- presumably a vmapped elementwise equality;
    confirm.
    """
    return cmatch_t(c_t, c_t[:, AL]).sum(axis=(1, 2)) // 2
@jit
def get_E_cell(c, W):
    """Mean interaction weight W[c_i, c_j] between each cell and its neighbors.

    NOTE(review): `AL` is read as a module-level global, not a
    parameter; under @jit it is baked in at trace time. Confirm that a
    global adjacency list is intended here.
    """
    return W[c[:, None], c[AL]].mean(axis=1)
#### sorting metrics
def get_identity(n_ctypes):
    """Integer identity matrix of size `n_ctypes`."""
    return jnp.identity(n_ctypes, dtype=int)
def get_difference_matrix(n_ctypes):
    """
    Matrix `O` of shape (n_ctypes, n_ctypes - 1) with -1 on the
    principal diagonal and +1 everywhere else; `O @ u` thus forms
    differences of the components of `u`.
    """
    eye_rect = jnp.eye(n_ctypes, n_ctypes - 1, dtype=int)
    return 1 - 2 * eye_rect
@jit
def get_num_neighbors(k):
    """Closed-form count 1 + 3k(k + 1) of lattice sites within distance
    `k` (the k-th centered hexagonal number, center included)."""
    return 3 * k * (k + 1) + 1
@jit
def pow_matrix(A, k):
    """k-th matrix power of `A` by repeated multiplication (k >= 1)."""
    def multiply_once(_, acc):
        return jnp.matmul(acc, A)
    return lax.fori_loop(1, k, multiply_once, A)
@jit
def get_knn_adjacency_matrix(AL, k):
    """Boolean reachability matrix within `k` lattice steps (self included)."""
    n_cells = AL.shape[0]
    # Adjacency with self-loops; its k-th power then marks every cell
    # reachable in at most k hops.
    adj = adjacency_matrix_from_adjacency_list(AL, dtype=bool)
    adj = adj | jnp.eye(n_cells, dtype=bool)
    return pow_matrix(adj, k)
# vmapped "outer" comparison/product helpers: the equal_outer_* variants
# broadcast == over all pairings of their arguments' leading axes, and
# the mult_outer_* variants do the same with elementwise multiplication.
equal_vec_scalar = vmap(lambda a, b: a == b, (0, None))
equal_outer_1d_1d = vmap(equal_vec_scalar, (None, 0))
equal_outer_1d_2d = vmap(equal_outer_1d_1d, (None, 0))
equal_outer_2d_1d = vmap(equal_outer_1d_1d, (0, None))
mult_vec_scalar = vmap(lambda a, b: a * b, (0, None))
mult_outer_1d_1d = vmap(mult_vec_scalar, (None, 0))
mult_outer_1d_2d = vmap(mult_outer_1d_1d, (None, 0))
mult_outer_2d_1d = vmap(mult_outer_1d_1d, (0, None))
@jit
def local_spin(c, AL, k):
    """Mean +/-1 spin over each cell's k-neighborhood (binary cell types)."""
    neighborhood = get_knn_adjacency_matrix(AL, k)
    spins = jnp.array([-1, 1])[c]
    return neighborhood @ spins / get_num_neighbors(k)
@jit
def knn_alignment_per_cell(c, AL, k, I, O):
    """Per-cell disorder of cell types within k-step neighborhoods.

    `c`  : (n,) integer cell-type vector.
    `AL` : (n, nnb) adjacency list of the lattice.
    `k`  : neighborhood radius in lattice steps.
    `I`  : (n_ctypes, n_ctypes) identity matrix.
    `O`  : (n_ctypes, n_ctypes - 1) difference matrix; `I[c] @ O` maps
           each type to a spin (difference) vector whose component sums
           lie in [-1, 1].

    Returns, for every cell, one minus the mean squared component of
    the neighborhood-averaged spin.
    """
    neighborhood = get_knn_adjacency_matrix(AL, k)
    n_neighbors = get_num_neighbors(k)
    spins = I[c] @ O
    mean_spin = neighborhood @ spins / n_neighbors
    return 1 - (mean_spin ** 2).mean(axis=1)
@jit
def knn_alignment_tissue(c, AL, k, I, O):
    """Tissue-level disorder of cell types, averaged over neighborhoods.

    Arguments are as in `knn_alignment_per_cell`; the result equals
    `knn_alignment_per_cell(c, AL, k, I, O).mean()`.
    """
    neighborhood = get_knn_adjacency_matrix(AL, k)
    n_neighbors = get_num_neighbors(k)
    spins = I[c] @ O
    mean_spin = neighborhood @ spins / n_neighbors
    return 1 - (mean_spin ** 2).mean()
#### Graph
def adjacency_matrix_from_adjacency_list(AL, dtype=bool):
    """Dense (n, n) adjacency matrix of an nnb-regular graph, built from
    its (n, nnb) adjacency list `AL`."""
    n_cells, n_nb = AL.shape
    rows = jnp.repeat(jnp.arange(n_cells), n_nb)
    cols = AL.flatten()
    empty = jnp.zeros((n_cells, n_cells), dtype=dtype)
    return empty.at[rows, cols].set(1)
def get_adjacency_matrix_periodic(rows, cols=0):
    """Construct the adjacency matrix for a periodic hexagonal lattice
    of dimensions rows x cols (square when `cols` is 0).

    Fix: the original forwarded `**kwargs`, a name that does not exist
    in this signature, so every call raised NameError.
    """
    AL = get_adjacency_list_periodic(rows, cols)
    return adjacency_matrix_from_adjacency_list(AL)
def get_adjacency_list_periodic(rows, cols=0):
    """Adjacency list (six neighbor indices per cell, flat index
    = rows * col + row) for a periodic hexagonal lattice of dimensions
    rows x cols; a square lattice is assumed when `cols` is 0."""
    if not cols:
        cols = rows
    grid_r, grid_c = np.meshgrid(np.arange(rows), np.arange(cols))
    flat_r, flat_c = grid_r.flatten(), grid_c.flatten()
    # Row offsets of the six hexagonal neighbors (wrapped periodically).
    row_offsets = np.array([0, 1, 1, 0, -1, -1])
    nb_rows = np.add.outer(flat_r, row_offsets) % rows
    # Column offsets; every other cell in flattened order is staggered
    # and uses the second offset set.
    col_offsets_even = np.array([1, 0, -1, -1, -1, 0])
    col_offsets_odd = np.array([1, 1, 0, -1, 0, 1])
    nb_cols = np.add.outer(flat_c, col_offsets_even)
    nb_cols[1::2] += col_offsets_odd - col_offsets_even
    nb_cols %= cols
    return rows * nb_cols + nb_rows
def hex_grid(rows, cols=0, r=1., sigma=0, **kwargs):
    """XY coordinates of a regular 2D hexagonal grid (rows x cols, edge
    length `r`), optionally jittered by Gaussian noise with standard
    deviation `sigma * r`. A square grid is assumed when `cols` is 0."""
    print("Deprecated: please use `cx.geom.hex_grid")
    if cols == 0:
        cols = rows
    xs = np.linspace(-r * (cols - 1) / 2, r * (cols - 1) / 2, cols)
    ys = np.linspace(-np.sqrt(3) * r * (rows - 1) / 4, np.sqrt(3) * r * (rows - 1) / 4, rows)
    # Every other grid row is shifted right by half an edge length.
    X = np.array(
        [[x + (j % 2) * r / 2, y] for x in xs for j, y in enumerate(ys)]
    )
    if sigma != 0:
        X = np.array([np.random.normal(loc=p, scale=sigma * r) for p in X])
    return X
def get_outer_idx(rows, cols):
    """Flat indices (index = rows * col + row) of cells on the border of
    a rows x cols lattice grid."""
    print("Deprecated: please use `cx.geom.get_outer_idx")
    border = []
    for c in range(cols):
        for r in range(rows):
            on_edge_row = r == 0 or r == rows - 1
            on_edge_col = c == 0 or c == cols - 1
            if on_edge_row or on_edge_col:
                border.append(rows * c + r)
    return np.array(border)
| [
"numpy.sqrt",
"jax.lax.fori_loop",
"jax.numpy.log",
"numpy.array",
"jax.numpy.matmul",
"numpy.arange",
"jax.random.split",
"jax.numpy.eye",
"numpy.ndim",
"numpy.linspace",
"numpy.random.normal",
"jax.random.uniform",
"numpy.add.outer",
"jax.numpy.logical_xor",
"jax.numpy.ones",
"jax.vm... | [((3941, 4018), 'jax.vmap', 'vmap', (['local_alignment_change'], {'in_axes': '(None, None, None, None, 0, None, None)'}), '(local_alignment_change, in_axes=(None, None, None, None, 0, None, None))\n', (3945, 4018), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((4794, 4832), 'functools.partial', 'partial', (['jit'], {'static_argnums': '(2, 3, 4)'}), '(jit, static_argnums=(2, 3, 4))\n', (4801, 4832), False, 'from functools import partial\n'), ((5243, 5281), 'functools.partial', 'partial', (['jit'], {'static_argnums': '(2, 3, 4)'}), '(jit, static_argnums=(2, 3, 4))\n', (5250, 5281), False, 'from functools import partial\n'), ((5405, 5440), 'functools.partial', 'partial', (['jit'], {'static_argnums': '(2, 3)'}), '(jit, static_argnums=(2, 3))\n', (5412, 5440), False, 'from functools import partial\n'), ((5704, 5734), 'functools.partial', 'partial', (['jit'], {'static_argnums': '(3)'}), '(jit, static_argnums=3)\n', (5711, 5734), False, 'from functools import partial\n'), ((7383, 7419), 'jax.vmap', 'vmap', (['(lambda a, b: a == b)', '(0, None)'], {}), '(lambda a, b: a == b, (0, None))\n', (7387, 7419), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7440, 7473), 'jax.vmap', 'vmap', (['equal_vec_scalar', '(None, 0)'], {}), '(equal_vec_scalar, (None, 0))\n', (7444, 7473), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7494, 7528), 'jax.vmap', 'vmap', (['equal_outer_1d_1d', '(None, 0)'], {}), '(equal_outer_1d_1d, (None, 0))\n', (7498, 7528), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7549, 7583), 'jax.vmap', 'vmap', (['equal_outer_1d_1d', '(0, None)'], {}), '(equal_outer_1d_1d, (0, None))\n', (7553, 7583), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7604, 7639), 'jax.vmap', 'vmap', (['(lambda a, b: a * b)', '(0, None)'], {}), '(lambda a, b: a * b, (0, 
None))\n', (7608, 7639), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7659, 7691), 'jax.vmap', 'vmap', (['mult_vec_scalar', '(None, 0)'], {}), '(mult_vec_scalar, (None, 0))\n', (7663, 7691), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7711, 7744), 'jax.vmap', 'vmap', (['mult_outer_1d_1d', '(None, 0)'], {}), '(mult_outer_1d_1d, (None, 0))\n', (7715, 7744), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((7764, 7797), 'jax.vmap', 'vmap', (['mult_outer_1d_1d', '(0, None)'], {}), '(mult_outer_1d_1d, (0, None))\n', (7768, 7797), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((2075, 2127), 'jax.lax.cond', 'lax.cond', (['accept', 'accept_swap', 'reject_swap', 'c', 'P', 'ij'], {}), '(accept, accept_swap, reject_swap, c, P, ij)\n', (2083, 2127), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((2374, 2391), 'jax.numpy.array', 'jnp.array', (['[i, j]'], {}), '([i, j])\n', (2383, 2391), True, 'import jax.numpy as jnp\n'), ((2598, 2618), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (2610, 2618), True, 'import jax.random as random\n'), ((2728, 2751), 'jax.random.uniform', 'random.uniform', ([], {'key': 'sk2'}), '(key=sk2)\n', (2742, 2751), True, 'import jax.random as random\n'), ((4319, 4339), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (4331, 4339), True, 'import jax.random as random\n'), ((4574, 4597), 'jax.random.uniform', 'random.uniform', ([], {'key': 'sk2'}), '(key=sk2)\n', (4588, 4597), True, 'import jax.random as random\n'), ((4637, 4683), 'jax.lax.cond', 'lax.cond', (['do_swap', '(lambda : c_swap)', '(lambda : c)'], {}), '(do_swap, lambda : c_swap, lambda : c)\n', (4645, 4683), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((6234, 6273), 'jax.lax.fori_loop', 'lax.fori_loop', 
(['(0)', 'n', 'MC_iteration', 'args'], {}), '(0, n, MC_iteration, args)\n', (6247, 6273), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((6631, 6659), 'jax.numpy.eye', 'jnp.eye', (['n_ctypes'], {'dtype': 'int'}), '(n_ctypes, dtype=int)\n', (6638, 6659), True, 'import jax.numpy as jnp\n'), ((10081, 10111), 'jax.numpy.zeros', 'jnp.zeros', (['(n, n)'], {'dtype': 'dtype'}), '((n, n), dtype=dtype)\n', (10090, 10111), True, 'import jax.numpy as jnp\n'), ((10866, 10896), 'numpy.array', 'np.array', (['[0, 1, 1, 0, -1, -1]'], {}), '([0, 1, 1, 0, -1, -1])\n', (10874, 10896), True, 'import numpy as np\n'), ((11016, 11047), 'numpy.array', 'np.array', (['[1, 0, -1, -1, -1, 0]'], {}), '([1, 0, -1, -1, -1, 0])\n', (11024, 11047), True, 'import numpy as np\n'), ((11061, 11090), 'numpy.array', 'np.array', (['[1, 1, 0, -1, 0, 1]'], {}), '([1, 1, 0, -1, 0, 1])\n', (11069, 11090), True, 'import numpy as np\n'), ((11106, 11128), 'numpy.add.outer', 'np.add.outer', (['col', 'dc1'], {}), '(col, dc1)\n', (11118, 11128), True, 'import numpy as np\n'), ((11642, 11700), 'numpy.linspace', 'np.linspace', (['(-r * (cols - 1) / 2)', '(r * (cols - 1) / 2)', 'cols'], {}), '(-r * (cols - 1) / 2, r * (cols - 1) / 2, cols)\n', (11653, 11700), True, 'import numpy as np\n'), ((11955, 11966), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (11963, 11966), True, 'import numpy as np\n'), ((492, 512), 'jax.numpy.arange', 'jnp.arange', (['n_ctypes'], {}), '(n_ctypes)\n', (502, 512), True, 'import jax.numpy as jnp\n'), ((535, 551), 'numpy.ndim', 'np.ndim', (['subkeys'], {}), '(subkeys)\n', (542, 551), True, 'import numpy as np\n'), ((578, 604), 'jax.random.permutation', 'random.permutation', (['sk', 'c0'], {}), '(sk, c0)\n', (596, 604), True, 'import jax.random as random\n'), ((645, 654), 'jax.vmap', 'vmap', (['fun'], {}), '(fun)\n', (649, 654), False, 'from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian\n'), ((1435, 1454), 'jax.numpy.exp', 
'jnp.exp', (['(-beta * dE)'], {}), '(-beta * dE)\n', (1442, 1454), True, 'import jax.numpy as jnp\n'), ((1875, 1889), 'jax.numpy.log', 'jnp.log', (['(1 - P)'], {}), '(1 - P)\n', (1882, 1889), True, 'import jax.numpy as jnp\n'), ((3624, 3649), 'jax.numpy.logical_xor', 'jnp.logical_xor', (['*A_k[ij]'], {}), '(*A_k[ij])\n', (3639, 3649), True, 'import jax.numpy as jnp\n'), ((5007, 5024), 'jax.numpy.eye', 'jnp.eye', (['n_ctypes'], {}), '(n_ctypes)\n', (5014, 5024), True, 'import jax.numpy as jnp\n'), ((7210, 7233), 'jax.numpy.ones', 'jnp.ones', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (7218, 7233), True, 'import jax.numpy as jnp\n'), ((7935, 7953), 'jax.numpy.array', 'jnp.array', (['[-1, 1]'], {}), '([-1, 1])\n', (7944, 7953), True, 'import jax.numpy as jnp\n'), ((10734, 10749), 'numpy.arange', 'np.arange', (['rows'], {}), '(rows)\n', (10743, 10749), True, 'import numpy as np\n'), ((10751, 10766), 'numpy.arange', 'np.arange', (['cols'], {}), '(cols)\n', (10760, 10766), True, 'import numpy as np\n'), ((10911, 10932), 'numpy.add.outer', 'np.add.outer', (['row', 'dr'], {}), '(row, dr)\n', (10923, 10932), True, 'import numpy as np\n'), ((2288, 2305), 'jax.numpy.array', 'jnp.array', (['[0, 0]'], {}), '([0, 0])\n', (2297, 2305), True, 'import jax.numpy as jnp\n'), ((2314, 2333), 'jax.numpy.array', 'jnp.array', (['AL.shape'], {}), '(AL.shape)\n', (2323, 2333), True, 'import jax.numpy as jnp\n'), ((5167, 5186), 'jax.numpy.arange', 'jnp.arange', (['nsweeps'], {}), '(nsweeps)\n', (5177, 5186), True, 'import jax.numpy as jnp\n'), ((6906, 6948), 'jax.numpy.eye', 'jnp.eye', (['n_ctypes', '(n_ctypes - 1)'], {'dtype': 'int'}), '(n_ctypes, n_ctypes - 1, dtype=int)\n', (6913, 6948), True, 'import jax.numpy as jnp\n'), ((7087, 7103), 'jax.numpy.matmul', 'jnp.matmul', (['M', 'A'], {}), '(M, A)\n', (7097, 7103), True, 'import jax.numpy as jnp\n'), ((11911, 11943), 'numpy.array', 'np.array', (['[x + j % 2 * r / 2, y]'], {}), '([x + j % 2 * r / 2, y])\n', (11919, 11943), True, 
'import numpy as np\n'), ((12054, 12094), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'x', 'scale': '(sigma * r)'}), '(loc=x, scale=sigma * r)\n', (12070, 12094), True, 'import numpy as np\n'), ((11762, 11772), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (11769, 11772), True, 'import numpy as np\n'), ((10139, 10152), 'jax.numpy.arange', 'jnp.arange', (['n'], {}), '(n)\n', (10149, 10152), True, 'import jax.numpy as jnp\n'), ((11729, 11739), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (11736, 11739), True, 'import numpy as np\n')] |
"""Prepare the VQA data splits and cache every resulting array into one .npz archive."""
import json
import argparse
import os

import numpy as np

from prepare_data import setup

# Support command-line options
parser = argparse.ArgumentParser()
parser.add_argument('--big-model', action='store_true', help='Use the bigger model with more conv layers')
parser.add_argument('--use-data-dir', action='store_true', help='Use custom data directory, at /data')
args = parser.parse_args()
if args.big_model:
  print('Using big model')
if args.use_data_dir:
  print('Using data directory')

# Prepare data; the trailing underscores discard extra values returned by setup()
# that are not needed for caching.
train_X_ims, train_X_seqs, train_Y, test_X_ims, test_X_seqs, test_Y, im_shape, vocab_size, num_answers, _, _, _ = setup(args.use_data_dir)

# Cache all arrays in a single archive so later runs can skip the expensive setup().
# NOTE(review): file name 'temp_arra.npz' kept as-is because other code may load it
# by that exact name.
np.savez('temp_arra.npz',
         train_X_ims=train_X_ims,
         train_X_seqs=train_X_seqs,
         train_Y=train_Y,
         test_X_ims=test_X_ims,
         test_X_seqs=test_X_seqs,
         test_Y=test_Y,
         im_shape=im_shape,
         vocab_size=vocab_size,
         num_answers=num_answers)
| [
"numpy.savez",
"argparse.ArgumentParser",
"prepare_data.setup"
] | [((100, 125), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (123, 125), False, 'import argparse\n'), ((594, 618), 'prepare_data.setup', 'setup', (['args.use_data_dir'], {}), '(args.use_data_dir)\n', (599, 618), False, 'from prepare_data import setup\n'), ((1072, 1310), 'numpy.savez', 'np.savez', (['"""temp_arra.npz"""'], {'train_X_ims': 'train_X_ims', 'train_X_seqs': 'train_X_seqs', 'train_Y': 'train_Y', 'test_X_ims': 'test_X_ims', 'test_X_seqs': 'test_X_seqs', 'test_Y': 'test_Y', 'im_shape': 'im_shape', 'vocab_size': 'vocab_size', 'num_answers': 'num_answers'}), "('temp_arra.npz', train_X_ims=train_X_ims, train_X_seqs=\n train_X_seqs, train_Y=train_Y, test_X_ims=test_X_ims, test_X_seqs=\n test_X_seqs, test_Y=test_Y, im_shape=im_shape, vocab_size=vocab_size,\n num_answers=num_answers)\n", (1080, 1310), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import gc
import pathlib
import sys
import tkinter as tk
from tkinter import filedialog
import numpy as np
from matplotlib import pyplot as plt
from numpy.fft import fftn, fftshift
from scipy.interpolate import interp1d
from scipy.ndimage.measurements import center_of_mass
import bcdi.graph.graph_utils as gu
import bcdi.utils.utilities as util
from bcdi.graph.colormap import ColormapFactory
# Free-text description of what this script computes; displayed by some front-ends.
helptext = """
Calculate the resolution of a forward CDI reconstruction using the phase retrieval
transfer function (PRTF). The diffraction pattern and reconstructions should be in
the orthogonal laboratory frame. Q values need to be provided.
For the laboratory frame, the CXI convention is used: z downstream, y vertical,
x outboard. For q, the usual convention is used: qx downstream, qz vertical, qy outboard
"""
scan = 22  # scan number, used to build the data directory and output file names
root_folder = "D:/data/P10_August2019/data/" # location of the .spec or log file
sample_name = "gold_2_2_2_000" # "SN" #
datadir = root_folder + sample_name + str(scan) + "/pynx/1000_2_debug/"
savedir = None  # output directory; defaults to datadir when left to None (see below)
comment = "_hotpixel" # appended to output file names, should start with _
binning = (
    1,
    1,
    1,
) # binning factor used during phasing: axis0=downstream,
# axis1=vertical up, axis2=outboard. Leave it to (1, 1, 1) if the binning factor is the
# same between the input data and the phasing output
original_shape = (
    500,
    500,
    500,
) # shape of the array used during phasing, before an eventual crop of the result
###########
# options #
###########
normalize_prtf = False # set to True when the solution is the first mode
# then the intensity needs to be normalized
debug = False # True to show more plots
q_max = None # in 1/nm, PRTF normalization using only points smaller than q_max.
# Leave it to None otherwise.
##########################
# end of user parameters #
##########################
###################
# define colormap #
###################
my_cmap = ColormapFactory().cmap
#########################
# check some parameters #
#########################
savedir = savedir or datadir  # fall back to the data directory for outputs
pathlib.Path(savedir).mkdir(parents=True, exist_ok=True)  # make sure the output dir exists
##############################
# load reciprocal space data #
##############################
# The diffraction pattern, mask and q values are selected interactively via file
# dialogs; all are expected in the orthogonal laboratory frame (see helptext).
print("\nScan", scan)
print("Datadir:", datadir)
plt.ion()
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(
    initialdir=datadir,
    title="Select the diffraction pattern",
    filetypes=[("NPZ", "*.npz")],
)
npzfile = np.load(file_path)
diff_pattern = npzfile[list(npzfile.files)[0]].astype(float)
nz, ny, nx = diff_pattern.shape
print("Data shape:", nz, ny, nx)
#############
# load mask #
#############
file_path = filedialog.askopenfilename(
    initialdir=datadir, title="Select the mask", filetypes=[("NPZ", "*.npz")]
)
npzfile = np.load(file_path)
mask = npzfile[list(npzfile.files)[0]]
if debug:
    gu.multislices_plot(
        diff_pattern,
        sum_frames=False,
        plot_colorbar=True,
        cmap=my_cmap,
        title="measured amplitude",
        scale="log",
        vmin=np.nan,
        vmax=np.nan,
        reciprocal_space=True,
        is_orthogonal=True,
    )
    gu.multislices_plot(
        mask,
        sum_frames=False,
        plot_colorbar=True,
        cmap=my_cmap,
        title="mask",
        scale="linear",
        vmin=0,
        vmax=1,
        reciprocal_space=True,
        is_orthogonal=True,
    )
#################
# load q values #
#################
file_path = filedialog.askopenfilename(
    initialdir=datadir, title="Select q values", filetypes=[("NPZ", "*.npz")]
)
npzfile = np.load(file_path)
qx = npzfile["qx"] # downstream
qz = npzfile["qz"] # vertical up
qy = npzfile["qy"] # outboard
###################################
# bin data and q values if needed #
###################################
if any(bin_factor != 1 for bin_factor in binning):
    diff_pattern = util.bin_data(array=diff_pattern, binning=binning, debugging=False)
    mask = util.bin_data(array=mask, binning=binning, debugging=False)
    mask[np.nonzero(mask)] = 1
    # Binning convention (matching bin_data above): axis0=downstream (qx),
    # axis1=vertical up (qz), axis2=outboard (qy).
    # Bug fix: qz and qy were previously binned with each other's factor
    # (qy used binning[1] and qz used binning[2]).
    qx = qx[:: binning[0]]
    qz = qz[:: binning[1]]
    qy = qy[:: binning[2]]
############################
# plot diffraction pattern #
############################
nz, ny, nx = diff_pattern.shape
print(
    "Data shape after binning=",
    nz,
    ny,
    nx,
    " Max(measured amplitude)=",
    np.sqrt(diff_pattern).max(),
    " at voxel # ",
    np.unravel_index(diff_pattern.argmax(), diff_pattern.shape),
)
# print(diff_pattern[434, 54, 462])
mask[
    diff_pattern < 1.0
] = 1 # do not use interpolated points with a low photon count in PRTF calculation.
# These points results in overshoots in the PRTF
diff_pattern[np.nonzero(mask)] = 0  # zero out every masked voxel before the COM
z0, y0, x0 = center_of_mass(diff_pattern)
z0, y0, x0 = [int(z0), int(y0), int(x0)]  # voxel indices must be integers
print(
    "COM of measured pattern after masking: ",
    z0,
    y0,
    x0,
    " Number of unmasked photons =",
    diff_pattern.sum(),
)
plt.figure()
plt.imshow(np.log10(np.sqrt(diff_pattern).sum(axis=0)), cmap=my_cmap, vmin=0)
plt.title("abs(binned measured amplitude).sum(axis=0)")
plt.colorbar()
plt.pause(0.1)
if debug:
    gu.multislices_plot(
        diff_pattern,
        sum_frames=False,
        plot_colorbar=True,
        cmap=my_cmap,
        title="abs(binned measured amplitude)",
        scale="log",
        vmin=0,
        reciprocal_space=True,
        is_orthogonal=True,
    )
    gu.multislices_plot(
        mask,
        sum_frames=False,
        plot_colorbar=True,
        cmap=my_cmap,
        title="binned mask",
        scale="linear",
        vmin=0,
        vmax=1,
        reciprocal_space=True,
        is_orthogonal=True,
    )
##########################################################
# calculate the distances in q space relative to the COM #
##########################################################
# q value at the center of mass of the diffraction pattern, one component per axis
qxCOM = qx[z0]
qzCOM = qz[y0]
qyCOM = qy[x0]
print("COM[qx, qz, qy] = ", qxCOM, qzCOM, qyCOM)
# reshape the three 1D q vectors so that they broadcast to the 3D data shape
qx = qx[:, np.newaxis, np.newaxis] # broadcast array
qy = qy[np.newaxis, np.newaxis, :] # broadcast array
qz = qz[np.newaxis, :, np.newaxis] # broadcast array
# Euclidean distance in q space from every voxel to the COM, in 1/nm
distances_q = np.sqrt((qx - qxCOM) ** 2 + (qy - qyCOM) ** 2 + (qz - qzCOM) ** 2)
del qx, qy, qz
gc.collect()
if debug:
    gu.multislices_plot(
        distances_q,
        sum_frames=False,
        plot_colorbar=True,
        cmap=my_cmap,
        title="distances_q",
        scale="linear",
        vmin=np.nan,
        vmax=np.nan,
        reciprocal_space=True,
        is_orthogonal=True,
    )
#############################
# load reconstructed object #
#############################
file_path = filedialog.askopenfilename(
    initialdir=datadir,
    title="Select the reconstructed object",
    filetypes=[("NPZ", "*.npz"), ("NPY", "*.npy"), ("CXI", "*.cxi"), ("HDF5", "*.h5")],
)
obj, extension = util.load_file(file_path)
print("Opening ", file_path)
if extension == ".h5":
    # an .h5 file holds the decomposition into modes, flag it in output names
    comment = comment + "_mode"
# check if the shape is the same as the measured diffraction pattern
if obj.shape != original_shape:
    print(
        "Reconstructed object shape = ",
        obj.shape,
        "different from the experimental diffraction pattern: crop/pad",
    )
    # account for the binning applied during phasing before cropping/padding
    new_shape = (
        int(original_shape[0] / binning[0]),
        int(original_shape[1] / binning[1]),
        int(original_shape[2] / binning[2]),
    )
    obj = util.crop_pad(array=obj, output_shape=new_shape, debugging=False)
if obj.shape != diff_pattern.shape:
    # shapes must match voxel-for-voxel for the PRTF ratio; abort otherwise
    print(
        "Reconstructed object shape = ",
        obj.shape,
        "different from diffraction pattern shape = ",
        diff_pattern.shape,
    )
    sys.exit()
#################################################
# calculate the retrieved diffraction amplitude #
#################################################
# the 1/sqrt(N) factor per axis keeps the FFT norm comparable to the measurement
phased_fft = fftshift(fftn(obj)) / (
    np.sqrt(nz) * np.sqrt(ny) * np.sqrt(nx)
) # complex amplitude
del obj
gc.collect()
plt.figure()
plt.imshow(np.log10(abs(phased_fft).sum(axis=0)), cmap=my_cmap, vmin=0)
plt.title("abs(retrieved amplitude).sum(axis=0)")
plt.colorbar()
plt.pause(0.1)
phased_fft[np.nonzero(mask)] = 0 # do not take mask voxels into account
print("Max(retrieved amplitude) =", abs(phased_fft).max())
print(
    "COM of the retrieved diffraction pattern after masking: ",
    center_of_mass(abs(phased_fft)),
)
del mask
gc.collect()
# side-by-side comparison of the measured and retrieved diffraction patterns
gu.combined_plots(
    tuple_array=(diff_pattern, phased_fft),
    tuple_sum_frames=False,
    tuple_sum_axis=(0, 0),
    tuple_width_v=None,
    tuple_width_h=None,
    tuple_colorbar=False,
    tuple_vmin=(-1, -1),
    tuple_vmax=np.nan,
    tuple_title=("measurement", "phased_fft"),
    tuple_scale="log",
)
###########################################
# check alignment of diffraction patterns #
###########################################
z1, y1, x1 = center_of_mass(diff_pattern)
z1, y1, x1 = [int(z1), int(y1), int(x1)]
print(
    "COM of retrieved pattern after masking: ",
    z1,
    y1,
    x1,
    " Number of unmasked photons =",
    abs(phased_fft).sum(),
)
#########################
# calculate the 3D PRTF #
#########################
diff_pattern[diff_pattern == 0] = np.nan # discard zero valued pixels
# PRTF = |retrieved amplitude| / measured amplitude, voxel by voxel
prtf_matrix = abs(phased_fft) / np.sqrt(diff_pattern)
del phased_fft # , diff_pattern
gc.collect()
# work on a NaN-free copy to locate the maximum of the 3D PRTF
copy_prtf = np.copy(prtf_matrix)
copy_prtf[np.isnan(copy_prtf)] = 0
piz, piy, pix = np.unravel_index(copy_prtf.argmax(), copy_prtf.shape)
print("Max(3D PRTF)=", copy_prtf.max(), " at voxel # ", (piz, piy, pix))
gu.multislices_plot(
    prtf_matrix,
    sum_frames=False,
    plot_colorbar=True,
    cmap=my_cmap,
    title="prtf_matrix",
    scale="linear",
    vmin=0,
    vmax=np.nan,
    reciprocal_space=True,
    is_orthogonal=True,
)
plt.figure()
plt.imshow(copy_prtf[piz, :, :], vmin=0)
plt.colorbar()
plt.title(
    "PRTF at max in Qx (frame "
    + str(piz)
    + ") \nMax in QyQz plane: vertical "
    + str(piy)
    + ", horizontal "
    + str(pix)
)
print(diff_pattern[piz, piy, pix])
if debug:
    # binary map of suspiciously large PRTF values (> 5), likely hot pixels
    copy_prtf = np.copy(prtf_matrix)
    copy_prtf[np.isnan(prtf_matrix)] = 0
    copy_prtf[copy_prtf < 5] = 0
    copy_prtf[np.nonzero(copy_prtf)] = 1
    gu.multislices_plot(
        copy_prtf,
        sum_frames=False,
        plot_colorbar=True,
        cmap=my_cmap,
        title="hotpix_prtf",
        scale="linear",
        vmin=0,
        vmax=1,
        reciprocal_space=True,
        is_orthogonal=True,
    )
del copy_prtf
gc.collect()
#################################
# average over spherical shells #
#################################
print(
    "Distance max:",
    distances_q.max(),
    "(1/nm) at: ",
    np.unravel_index(abs(distances_q).argmax(), distances_q.shape),
)
nb_bins = nz // 5
prtf_avg = np.zeros(nb_bins)
nb_points = np.zeros(nb_bins)
dq = distances_q.max() / nb_bins # in 1/nm
q_axis = np.linspace(0, distances_q.max(), endpoint=True, num=nb_bins + 1) # in 1/nm
# radial average: mean PRTF over each spherical shell [q_axis[i], q_axis[i+1])
for index in range(nb_bins):
    logical_array = np.logical_and(
        (distances_q < q_axis[index + 1]), (distances_q >= q_axis[index])
    )
    temp = prtf_matrix[logical_array]
    nb_points[index] = logical_array.sum()
    prtf_avg[index] = temp[~np.isnan(temp)].mean()
q_axis = q_axis[:-1]
plt.figure()
plt.plot(q_axis, nb_points, ".")
plt.xlabel("q (1/nm)")
plt.ylabel("nb of points in the average")
if q_max is None:
    q_max = q_axis.max() + 1
prtf_avg = prtf_avg[q_axis < q_max]
q_axis = q_axis[q_axis < q_max]
if normalize_prtf:
    print("Normalizing the PRTF to 1 ...")
    prtf_avg = prtf_avg / prtf_avg[~np.isnan(prtf_avg)].max() # normalize to 1
#############################
# plot and save the 1D PRTF #
#############################
defined_q = q_axis[~np.isnan(prtf_avg)]
# create a new variable 'arc_length' to predict q and prtf parametrically
# (because prtf is not monotonic)
arc_length = np.concatenate(
    (
        np.zeros(1),
        np.cumsum(
            np.diff(prtf_avg[~np.isnan(prtf_avg)]) ** 2 + np.diff(defined_q) ** 2
        ),
    ),
    axis=0,
) # cumulative linear arc length, used as the parameter
fit_prtf = interp1d(prtf_avg[~np.isnan(prtf_avg)], arc_length, kind="linear")
try:
    # resolution criterion: q value where the PRTF crosses 1/e
    arc_length_res = fit_prtf(1 / np.e)
    fit_q = interp1d(arc_length, defined_q, kind="linear")
    q_resolution = fit_q(arc_length_res)
except ValueError:
    # interp1d raises ValueError when 1/e lies outside the PRTF range
    if (prtf_avg[~np.isnan(prtf_avg)] > 1 / np.e).all():
        print("Resolution limited by the 1 photon counts only (min(prtf)>1/e)")
        print(f"min(PRTF) = {prtf_avg[~np.isnan(prtf_avg)].min()}")
        q_resolution = defined_q.max()
    else: # PRTF always below 1/e
        print("PRTF < 1/e for all q values, problem of normalization")
        q_resolution = np.nan
print(f"q resolution = {q_resolution:.5f} (1/nm)")
print(f"resolution d = {2*np.pi / q_resolution:.1f} nm")
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 9))
ax.plot(defined_q, prtf_avg[~np.isnan(prtf_avg)], "or") # q_axis in 1/nm
ax.axhline(
    y=1 / np.e, linestyle="dashed", color="k", linewidth=1
) # horizontal line at PRTF=1/e
ax.set_xlim(defined_q.min(), defined_q.max())
ax.set_ylim(0, 1.1)
gu.savefig(
    savedir=savedir,
    figure=fig,
    axes=ax,
    tick_width=2,
    tick_length=10,
    tick_labelsize=14,
    label_size=16,
    xlabels="q (1/nm)",
    ylabels="PRTF",
    filename=f"S{scan}_prtf" + comment,
    text={
        0: {"x": 0.15, "y": 0.30, "s": "Scan " + str(scan) + comment, "fontsize": 16},
        1: {
            "x": 0.15,
            "y": 0.25,
            "s": f"q at PRTF=1/e: {q_resolution:.5f} (1/nm)",
            "fontsize": 16,
        },
        2: {
            "x": 0.15,
            "y": 0.20,
            "s": f"resolution d = {2*np.pi / q_resolution:.3f} nm",
            "fontsize": 16,
        },
    },
)
plt.ioff()
plt.show()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"bcdi.graph.colormap.ColormapFactory",
"scipy.interpolate.interp1d",
"scipy.ndimage.measurements.center_of_mass",
"sys.exit",
"matplotlib.pyplot.imshow",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.fft.fftn",
"numpy.dif... | [((2623, 2632), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2630, 2632), True, 'from matplotlib import pyplot as plt\n'), ((2640, 2647), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2645, 2647), True, 'import tkinter as tk\n'), ((2676, 2797), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': 'datadir', 'title': '"""Select the diffraction pattern"""', 'filetypes': "[('NPZ', '*.npz')]"}), "(initialdir=datadir, title=\n 'Select the diffraction pattern', filetypes=[('NPZ', '*.npz')])\n", (2702, 2797), False, 'from tkinter import filedialog\n'), ((2818, 2836), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (2825, 2836), True, 'import numpy as np\n'), ((3018, 3123), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': 'datadir', 'title': '"""Select the mask"""', 'filetypes': "[('NPZ', '*.npz')]"}), "(initialdir=datadir, title='Select the mask',\n filetypes=[('NPZ', '*.npz')])\n", (3044, 3123), False, 'from tkinter import filedialog\n'), ((3136, 3154), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (3143, 3154), True, 'import numpy as np\n'), ((3817, 3922), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': 'datadir', 'title': '"""Select q values"""', 'filetypes': "[('NPZ', '*.npz')]"}), "(initialdir=datadir, title='Select q values',\n filetypes=[('NPZ', '*.npz')])\n", (3843, 3922), False, 'from tkinter import filedialog\n'), ((3935, 3953), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (3942, 3953), True, 'import numpy as np\n'), ((5067, 5095), 'scipy.ndimage.measurements.center_of_mass', 'center_of_mass', (['diff_pattern'], {}), '(diff_pattern)\n', (5081, 5095), False, 'from scipy.ndimage.measurements import center_of_mass\n'), ((5279, 5291), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5289, 5291), True, 'from matplotlib import pyplot as plt\n'), ((5370, 5425), 
'matplotlib.pyplot.title', 'plt.title', (['"""abs(binned measured amplitude).sum(axis=0)"""'], {}), "('abs(binned measured amplitude).sum(axis=0)')\n", (5379, 5425), True, 'from matplotlib import pyplot as plt\n'), ((5426, 5440), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5438, 5440), True, 'from matplotlib import pyplot as plt\n'), ((5441, 5455), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (5450, 5455), True, 'from matplotlib import pyplot as plt\n'), ((6455, 6521), 'numpy.sqrt', 'np.sqrt', (['((qx - qxCOM) ** 2 + (qy - qyCOM) ** 2 + (qz - qzCOM) ** 2)'], {}), '((qx - qxCOM) ** 2 + (qy - qyCOM) ** 2 + (qz - qzCOM) ** 2)\n', (6462, 6521), True, 'import numpy as np\n'), ((6537, 6549), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6547, 6549), False, 'import gc\n'), ((6946, 7126), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': 'datadir', 'title': '"""Select the reconstructed object"""', 'filetypes': "[('NPZ', '*.npz'), ('NPY', '*.npy'), ('CXI', '*.cxi'), ('HDF5', '*.h5')]"}), "(initialdir=datadir, title=\n 'Select the reconstructed object', filetypes=[('NPZ', '*.npz'), ('NPY',\n '*.npy'), ('CXI', '*.cxi'), ('HDF5', '*.h5')])\n", (6972, 7126), False, 'from tkinter import filedialog\n'), ((7151, 7176), 'bcdi.utils.utilities.load_file', 'util.load_file', (['file_path'], {}), '(file_path)\n', (7165, 7176), True, 'import bcdi.utils.utilities as util\n'), ((8224, 8236), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8234, 8236), False, 'import gc\n'), ((8238, 8250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8248, 8250), True, 'from matplotlib import pyplot as plt\n'), ((8323, 8372), 'matplotlib.pyplot.title', 'plt.title', (['"""abs(retrieved amplitude).sum(axis=0)"""'], {}), "('abs(retrieved amplitude).sum(axis=0)')\n", (8332, 8372), True, 'from matplotlib import pyplot as plt\n'), ((8373, 8387), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8385, 
8387), True, 'from matplotlib import pyplot as plt\n'), ((8388, 8402), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (8397, 8402), True, 'from matplotlib import pyplot as plt\n'), ((8655, 8667), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8665, 8667), False, 'import gc\n'), ((8669, 8950), 'bcdi.graph.graph_utils.combined_plots', 'gu.combined_plots', ([], {'tuple_array': '(diff_pattern, phased_fft)', 'tuple_sum_frames': '(False)', 'tuple_sum_axis': '(0, 0)', 'tuple_width_v': 'None', 'tuple_width_h': 'None', 'tuple_colorbar': '(False)', 'tuple_vmin': '(-1, -1)', 'tuple_vmax': 'np.nan', 'tuple_title': "('measurement', 'phased_fft')", 'tuple_scale': '"""log"""'}), "(tuple_array=(diff_pattern, phased_fft), tuple_sum_frames=\n False, tuple_sum_axis=(0, 0), tuple_width_v=None, tuple_width_h=None,\n tuple_colorbar=False, tuple_vmin=(-1, -1), tuple_vmax=np.nan,\n tuple_title=('measurement', 'phased_fft'), tuple_scale='log')\n", (8686, 8950), True, 'import bcdi.graph.graph_utils as gu\n'), ((9127, 9155), 'scipy.ndimage.measurements.center_of_mass', 'center_of_mass', (['diff_pattern'], {}), '(diff_pattern)\n', (9141, 9155), False, 'from scipy.ndimage.measurements import center_of_mass\n'), ((9579, 9591), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9589, 9591), False, 'import gc\n'), ((9605, 9625), 'numpy.copy', 'np.copy', (['prtf_matrix'], {}), '(prtf_matrix)\n', (9612, 9625), True, 'import numpy as np\n'), ((9804, 9998), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['prtf_matrix'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""prtf_matrix"""', 'scale': '"""linear"""', 'vmin': '(0)', 'vmax': 'np.nan', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(prtf_matrix, sum_frames=False, plot_colorbar=True, cmap\n =my_cmap, title='prtf_matrix', scale='linear', vmin=0, vmax=np.nan,\n reciprocal_space=True, is_orthogonal=True)\n", (9823, 9998), True, 'import bcdi.graph.graph_utils as 
gu\n'), ((10033, 10045), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10043, 10045), True, 'from matplotlib import pyplot as plt\n'), ((10046, 10086), 'matplotlib.pyplot.imshow', 'plt.imshow', (['copy_prtf[piz, :, :]'], {'vmin': '(0)'}), '(copy_prtf[piz, :, :], vmin=0)\n', (10056, 10086), True, 'from matplotlib import pyplot as plt\n'), ((10087, 10101), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10099, 10101), True, 'from matplotlib import pyplot as plt\n'), ((10737, 10749), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10747, 10749), False, 'import gc\n'), ((11021, 11038), 'numpy.zeros', 'np.zeros', (['nb_bins'], {}), '(nb_bins)\n', (11029, 11038), True, 'import numpy as np\n'), ((11051, 11068), 'numpy.zeros', 'np.zeros', (['nb_bins'], {}), '(nb_bins)\n', (11059, 11068), True, 'import numpy as np\n'), ((11499, 11511), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11509, 11511), True, 'from matplotlib import pyplot as plt\n'), ((11512, 11544), 'matplotlib.pyplot.plot', 'plt.plot', (['q_axis', 'nb_points', '"""."""'], {}), "(q_axis, nb_points, '.')\n", (11520, 11544), True, 'from matplotlib import pyplot as plt\n'), ((11545, 11567), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""q (1/nm)"""'], {}), "('q (1/nm)')\n", (11555, 11567), True, 'from matplotlib import pyplot as plt\n'), ((11568, 11609), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""nb of points in the average"""'], {}), "('nb of points in the average')\n", (11578, 11609), True, 'from matplotlib import pyplot as plt\n'), ((13098, 13145), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(12, 9)'}), '(nrows=1, ncols=1, figsize=(12, 9))\n', (13110, 13145), True, 'from matplotlib import pyplot as plt\n'), ((14051, 14061), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (14059, 14061), True, 'from matplotlib import pyplot as plt\n'), ((14062, 14072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (14070, 14072), True, 'from matplotlib import pyplot as plt\n'), ((2291, 2308), 'bcdi.graph.colormap.ColormapFactory', 'ColormapFactory', ([], {}), '()\n', (2306, 2308), False, 'from bcdi.graph.colormap import ColormapFactory\n'), ((3208, 3411), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['diff_pattern'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""measured amplitude"""', 'scale': '"""log"""', 'vmin': 'np.nan', 'vmax': 'np.nan', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(diff_pattern, sum_frames=False, plot_colorbar=True,\n cmap=my_cmap, title='measured amplitude', scale='log', vmin=np.nan,\n vmax=np.nan, reciprocal_space=True, is_orthogonal=True)\n", (3227, 3411), True, 'import bcdi.graph.graph_utils as gu\n'), ((3496, 3672), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['mask'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""mask"""', 'scale': '"""linear"""', 'vmin': '(0)', 'vmax': '(1)', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(mask, sum_frames=False, plot_colorbar=True, cmap=\n my_cmap, title='mask', scale='linear', vmin=0, vmax=1, reciprocal_space\n =True, is_orthogonal=True)\n", (3515, 3672), True, 'import bcdi.graph.graph_utils as gu\n'), ((4231, 4298), 'bcdi.utils.utilities.bin_data', 'util.bin_data', ([], {'array': 'diff_pattern', 'binning': 'binning', 'debugging': '(False)'}), '(array=diff_pattern, binning=binning, debugging=False)\n', (4244, 4298), True, 'import bcdi.utils.utilities as util\n'), ((4310, 4369), 'bcdi.utils.utilities.bin_data', 'util.bin_data', ([], {'array': 'mask', 'binning': 'binning', 'debugging': '(False)'}), '(array=mask, binning=binning, debugging=False)\n', (4323, 4369), True, 'import bcdi.utils.utilities as util\n'), ((5031, 5047), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (5041, 5047), True, 'import numpy as np\n'), ((5471, 5669), 
'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['diff_pattern'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""abs(binned measured amplitude)"""', 'scale': '"""log"""', 'vmin': '(0)', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(diff_pattern, sum_frames=False, plot_colorbar=True,\n cmap=my_cmap, title='abs(binned measured amplitude)', scale='log', vmin\n =0, reciprocal_space=True, is_orthogonal=True)\n", (5490, 5669), True, 'import bcdi.graph.graph_utils as gu\n'), ((5744, 5926), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['mask'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""binned mask"""', 'scale': '"""linear"""', 'vmin': '(0)', 'vmax': '(1)', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(mask, sum_frames=False, plot_colorbar=True, cmap=\n my_cmap, title='binned mask', scale='linear', vmin=0, vmax=1,\n reciprocal_space=True, is_orthogonal=True)\n", (5763, 5926), True, 'import bcdi.graph.graph_utils as gu\n'), ((6565, 6764), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['distances_q'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""distances_q"""', 'scale': '"""linear"""', 'vmin': 'np.nan', 'vmax': 'np.nan', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(distances_q, sum_frames=False, plot_colorbar=True, cmap\n =my_cmap, title='distances_q', scale='linear', vmin=np.nan, vmax=np.nan,\n reciprocal_space=True, is_orthogonal=True)\n", (6584, 6764), True, 'import bcdi.graph.graph_utils as gu\n'), ((7683, 7748), 'bcdi.utils.utilities.crop_pad', 'util.crop_pad', ([], {'array': 'obj', 'output_shape': 'new_shape', 'debugging': '(False)'}), '(array=obj, output_shape=new_shape, debugging=False)\n', (7696, 7748), True, 'import bcdi.utils.utilities as util\n'), ((7950, 7960), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7958, 7960), False, 'import 
sys\n'), ((8415, 8431), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (8425, 8431), True, 'import numpy as np\n'), ((9524, 9545), 'numpy.sqrt', 'np.sqrt', (['diff_pattern'], {}), '(diff_pattern)\n', (9531, 9545), True, 'import numpy as np\n'), ((9636, 9655), 'numpy.isnan', 'np.isnan', (['copy_prtf'], {}), '(copy_prtf)\n', (9644, 9655), True, 'import numpy as np\n'), ((10316, 10336), 'numpy.copy', 'np.copy', (['prtf_matrix'], {}), '(prtf_matrix)\n', (10323, 10336), True, 'import numpy as np\n'), ((10457, 10644), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['copy_prtf'], {'sum_frames': '(False)', 'plot_colorbar': '(True)', 'cmap': 'my_cmap', 'title': '"""hotpix_prtf"""', 'scale': '"""linear"""', 'vmin': '(0)', 'vmax': '(1)', 'reciprocal_space': '(True)', 'is_orthogonal': '(True)'}), "(copy_prtf, sum_frames=False, plot_colorbar=True, cmap=\n my_cmap, title='hotpix_prtf', scale='linear', vmin=0, vmax=1,\n reciprocal_space=True, is_orthogonal=True)\n", (10476, 10644), True, 'import bcdi.graph.graph_utils as gu\n'), ((11249, 11326), 'numpy.logical_and', 'np.logical_and', (['(distances_q < q_axis[index + 1])', '(distances_q >= q_axis[index])'], {}), '(distances_q < q_axis[index + 1], distances_q >= q_axis[index])\n', (11263, 11326), True, 'import numpy as np\n'), ((12491, 12537), 'scipy.interpolate.interp1d', 'interp1d', (['arc_length', 'defined_q'], {'kind': '"""linear"""'}), "(arc_length, defined_q, kind='linear')\n", (12499, 12537), False, 'from scipy.interpolate import interp1d\n'), ((2422, 2443), 'pathlib.Path', 'pathlib.Path', (['savedir'], {}), '(savedir)\n', (2434, 2443), False, 'import pathlib\n'), ((4379, 4395), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4389, 4395), True, 'import numpy as np\n'), ((8134, 8143), 'numpy.fft.fftn', 'fftn', (['obj'], {}), '(obj)\n', (8138, 8143), False, 'from numpy.fft import fftn, fftshift\n'), ((8181, 8192), 'numpy.sqrt', 'np.sqrt', (['nx'], {}), '(nx)\n', (8188, 8192), True, 
'import numpy as np\n'), ((10351, 10372), 'numpy.isnan', 'np.isnan', (['prtf_matrix'], {}), '(prtf_matrix)\n', (10359, 10372), True, 'import numpy as np\n'), ((10425, 10446), 'numpy.nonzero', 'np.nonzero', (['copy_prtf'], {}), '(copy_prtf)\n', (10435, 10446), True, 'import numpy as np\n'), ((11982, 12000), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (11990, 12000), True, 'import numpy as np\n'), ((12154, 12165), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (12162, 12165), True, 'import numpy as np\n'), ((4703, 4724), 'numpy.sqrt', 'np.sqrt', (['diff_pattern'], {}), '(diff_pattern)\n', (4710, 4724), True, 'import numpy as np\n'), ((8153, 8164), 'numpy.sqrt', 'np.sqrt', (['nz'], {}), '(nz)\n', (8160, 8164), True, 'import numpy as np\n'), ((8167, 8178), 'numpy.sqrt', 'np.sqrt', (['ny'], {}), '(ny)\n', (8174, 8178), True, 'import numpy as np\n'), ((12386, 12404), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (12394, 12404), True, 'import numpy as np\n'), ((13175, 13193), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (13183, 13193), True, 'import numpy as np\n'), ((5312, 5333), 'numpy.sqrt', 'np.sqrt', (['diff_pattern'], {}), '(diff_pattern)\n', (5319, 5333), True, 'import numpy as np\n'), ((11454, 11468), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (11462, 11468), True, 'import numpy as np\n'), ((12244, 12262), 'numpy.diff', 'np.diff', (['defined_q'], {}), '(defined_q)\n', (12251, 12262), True, 'import numpy as np\n'), ((11827, 11845), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (11835, 11845), True, 'import numpy as np\n'), ((12616, 12634), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (12624, 12634), True, 'import numpy as np\n'), ((12216, 12234), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (12224, 12234), True, 'import numpy as np\n'), ((12774, 12792), 'numpy.isnan', 'np.isnan', (['prtf_avg'], {}), '(prtf_avg)\n', (12782, 12792), True, 'import 
numpy as np\n')] |
from typing import Tuple
import numpy as np
class ParallelEnv:
    """Runs a list of single environments in lockstep as one batched env."""

    def __init__(self, envs):
        self.env = envs
        self.num_envs = len(envs)
        # Give every sub-environment a distinct, reproducible seed.
        self.seed(0)

    def seed(self, seed: int):
        """Seed sub-environment i with ``seed + i``."""
        for offset, single in enumerate(self.env):
            single.seed(seed + offset)

    def reset(self) -> np.ndarray:
        """Reset every sub-environment and stack their states along axis 0."""
        states = [single.reset() for single in self.env]
        return np.concatenate(states, axis=0)

    def step(
        self, action: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Step each sub-environment with its row of ``action``.

        Returns batched (state, reward, done) arrays, concatenated along axis 0.
        """
        results = [
            single.step(act[np.newaxis])
            for single, act in zip(self.env, action)
        ]
        states, rewards, dones = zip(*results)
        return (
            np.concatenate(states, axis=0),
            np.concatenate(rewards, axis=0),
            np.concatenate(dones, axis=0),
        )

    def render(self, index: int = 0):
        """Render a single sub-environment (the first one by default)."""
        self.env[index].render()
| [
"numpy.concatenate"
] | [((368, 393), 'numpy.concatenate', 'np.concatenate', (['s'], {'axis': '(0)'}), '(s, axis=0)\n', (382, 393), True, 'import numpy as np\n'), ((725, 750), 'numpy.concatenate', 'np.concatenate', (['s'], {'axis': '(0)'}), '(s, axis=0)\n', (739, 750), True, 'import numpy as np\n'), ((763, 788), 'numpy.concatenate', 'np.concatenate', (['r'], {'axis': '(0)'}), '(r, axis=0)\n', (777, 788), True, 'import numpy as np\n'), ((804, 832), 'numpy.concatenate', 'np.concatenate', (['done'], {'axis': '(0)'}), '(done, axis=0)\n', (818, 832), True, 'import numpy as np\n')] |
import time
import numpy as np
from PIL import Image as pil_image
from keras.preprocessing.image import save_img
from keras import layers
from keras.applications import vgg16
from keras import backend as K
import matplotlib.pyplot as plt
def normalize(x):
    """utility function to normalize a tensor.

    # Arguments
        x: An input tensor.

    # Returns
        The normalized input tensor.
    """
    # Scale by the root-mean-square magnitude; K.epsilon() guards against
    # division by zero for an all-zero tensor.
    magnitude = K.sqrt(K.mean(K.square(x)))
    return x / (magnitude + K.epsilon())
def deprocess_image(x):
    """utility function to convert a float array into a valid uint8 image.

    NOTE: the centering/scaling steps below use in-place operators, so the
    caller's array is mutated up until the first `np.clip` rebinds `x`.
    The caller in `visualize_layer` relies on reading the mutated buffer
    afterwards (as `former` in `process_image`), so keep these in place.

    # Arguments
        x: A numpy-array representing the generated image.

    # Returns
        A processed numpy-array, which could be used in e.g. imshow.
    """
    # normalize tensor: center on 0., ensure std is 0.25
    x -= x.mean()
    x /= (x.std() + K.epsilon())  # epsilon avoids division by zero std
    x *= 0.25

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        # move channels to the last axis for image display
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def process_image(x, former):
    """utility function to convert a valid uint8 image back into a float array.

    Reverses `deprocess_image`.

    # Arguments
        x: A numpy-array, which could be used in e.g. imshow.
        former: The former numpy-array.
            Need to determine the former mean and variance.

    # Returns
        A processed numpy-array representing the generated image.
    """
    if K.image_data_format() == 'channels_first':
        # undo the channels-last transpose applied by deprocess_image
        x = x.transpose((2, 0, 1))
    # invert: uint8 [0,255] -> centered float, then restore former statistics
    centered = (x / 255 - 0.5) * 4
    return centered * former.std() + former.mean()
def visualize_layer(model,
                    layer_name,
                    step=0.5,
                    epochs=25,
                    upscaling_steps=10,
                    upscaling_factor=1.1,
                    output_dim=(128, 128),
                    filter_range=(0, None),
                    grid_columns=8,
                    show_filters=True,
                    image_size_multiplier=2):
    """Visualizes the most relevant filters of one conv-layer in a certain model.

    # Arguments
        model: The model containing layer_name.
        layer_name: The name of the layer to be visualized.
            Has to be a part of model.
        step: step size for gradient ascent.
        epochs: Number of iterations for gradient ascent.
        upscaling_steps: Number of upscaling steps.
            Starting image is in this case (80, 80).
        upscaling_factor: Factor to which to slowly upgrade
            the image towards output_dim.
        output_dim: [img_width, img_height] The output image dimensions.
        filter_range: Tupel[lower, upper]
            Determines the to be computed filter numbers.
            If the second value is `None`,
            the last filter will be inferred as the upper boundary.
        grid_columns: Number of columns of the stitched output grid.
        show_filters: If True, also display the stitched grid with pyplot.
        image_size_multiplier: Inches per grid cell of the pyplot figure.
    """
    def _generate_filter_image(input_img,
                               layer_output,
                               filter_index,
                               channels=3):
        """Generates image for one particular filter.

        # Arguments
            input_img: The input-image Tensor.
            layer_output: The output-image Tensor.
            filter_index: The to be processed filter number.
                          Assumed to be valid.

        #Returns
            A tuple of the image (array) itself and the last loss.
            NOTE(review): despite the original docstring, this function
            always returns a tuple, never None.
        """
        # NOTE(review): s_time/e_time are captured but the elapsed time is
        # never reported.
        s_time = time.time()

        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])

        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        # we start from a gray image with some random noise
        intermediate_dim = tuple(
            int(x / (upscaling_factor ** upscaling_steps)) for x in output_dim)

        def _get_input_random_image():
            # Gray image (~128) with uniform noise in +-10 around it.
            if K.image_data_format() == 'channels_first':
                input_img_data = np.random.random(
                    (1, channels, intermediate_dim[0], intermediate_dim[1]))
            else:
                input_img_data = np.random.random(
                    (1, intermediate_dim[0], intermediate_dim[1], channels))
            input_img_data = (input_img_data - 0.5) * 20 + 128
            return input_img_data

        def _get_random_noise(array):
            # Small gaussian perturbation used to kick a stalled ascent.
            return np.random.randn(*array.shape) * 0.1

        input_img_data = _get_input_random_image()

        # Slowly upscaling towards the original size prevents
        # a dominating high-frequency of the to visualized structure
        # as it would occur if we directly compute the full-size image.
        # Behaves as a better starting point for each following dimension
        # and therefore avoids poor local minima
        # NOTE(review): this rebinds `grads` (the gradient tensor above) to a
        # plain list of gradient magnitudes; `iterate` already captured the
        # tensor, so this is safe but confusing.
        losses, grads = [], []
        reinit_enabled = True
        for up in reversed(range(upscaling_steps)):
            # we run gradient ascent for e.g. 20 steps
            for epoch in range(epochs):
                loss_value, grads_value = iterate([input_img_data])
                losses.append(loss_value)
                grads.append(np.mean(np.abs(grads_value)))
                input_img_data += grads_value * step
                # Re-initialize once with noise if the ascent stalls
                # (near-zero cumulative loss or a tiny last improvement).
                if reinit_enabled and (np.sum(losses) <= 1e-04 or (len(losses) > 1 and np.diff(losses)[-1] < 0.5)):
                    input_img_data = input_img_data + _get_random_noise(input_img_data)
                    reinit_enabled = False

            intermediate_dim = tuple(
                int(x / (upscaling_factor ** up)) for x in output_dim)
            # Upscale
            mode = "L" if channels == 1 else None  # PIL grayscale mode
            img = deprocess_image(input_img_data[0])
            if channels == 1:
                # PIL needs a 2-D array for grayscale; restore the channel
                # axis after resizing.
                img = img.reshape((img.shape[0], img.shape[1]))
                img = np.array(pil_image.fromarray(img, mode).resize(intermediate_dim,
                                                                 pil_image.BICUBIC))
                img = img.reshape((img.shape[0], img.shape[1], 1))
            else:
                img = np.array(pil_image.fromarray(img).resize(intermediate_dim,
                                                           pil_image.BICUBIC))
            input_img_data = [process_image(img, input_img_data[0])]

        # decode the resulting input image
        img = deprocess_image(input_img_data[0])
        e_time = time.time()
        print('{:3}'.format(filter_index,),end =" ")
        return img, loss_value

    def _draw_filters(filters, columns=8, show_filters=True, channels=3):
        """Draw the processed filters in a rows x columns grid.

        # Arguments
            filters: A List of generated images and their corresponding losses
                     for each processed filter.
            columns: Number of grid columns; the row count is derived from
                     len(filters).
        """
        rows = int(np.ceil(len(filters) / columns))

        output_dim = (filters[0][0].shape[0], filters[0][0].shape[1])

        # build a black picture with enough space for
        # all filters, with a MARGIN-pixel gap in between
        MARGIN = 1
        width = rows * output_dim[0] + (rows - 1) * MARGIN
        height = columns * output_dim[1] + (columns - 1) * MARGIN
        stitched_filters = np.zeros((width, height, channels), dtype='uint8')

        # fill the picture with our saved filters
        for i in range(rows):
            for j in range(columns):
                # Grid cells beyond the filter count get a black tile; the
                # clamped idx keeps the lookup in range.
                idx = min(i * columns + j, len(filters) - 1)
                if i * columns + j > len(filters) - 1:
                    img = np.zeros_like(filters[0][0])
                else:
                    img, _ = filters[idx]
                width_margin = (output_dim[0] + MARGIN) * i
                height_margin = (output_dim[1] + MARGIN) * j
                stitched_filters[
                    width_margin: width_margin + output_dim[0],
                    height_margin: height_margin + output_dim[1], :] = img

        if show_filters:
            fig_height = rows * image_size_multiplier
            fig_width = columns * image_size_multiplier
            fig = plt.figure(figsize=(fig_width, fig_height))
            plt.imshow(stitched_filters)
            plt.title('{0:}_{1:}x{2:}.png'.format(layer_name, rows, columns))
            plt.show()
        # save the result to disk
        save_img('{0:}_{1:}x{2:}.png'.format(layer_name, rows, columns), stitched_filters)

    # this is the placeholder for the input images
    assert len(model.inputs) == 1
    input_img = model.inputs[0]
    channels = K.int_shape(model.inputs[0])[-1]

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers[0:]])

    output_layer = layer_dict[layer_name]
    assert isinstance(output_layer, layers.Conv2D)

    # Compute to be processed filter range
    filter_lower = filter_range[0]
    filter_upper = (min(filter_range[1],len(output_layer.get_weights()[1]))
                    if filter_range[1] is not None
                    else len(output_layer.get_weights()[1]))
    assert (filter_lower >= 0
            and filter_upper <= len(output_layer.get_weights()[1])
            and filter_upper > filter_lower)
    print('Compute filters {:} to {:}'.format(filter_lower, filter_upper))

    # iterate through each filter and generate its corresponding image
    processed_filters = []
    for f in range(filter_lower, filter_upper):
        img_loss = _generate_filter_image(input_img, output_layer.output, f, channels)

        if img_loss is not None:
            processed_filters.append(img_loss)

    print('{} filter processed.'.format(len(processed_filters)))
    # Finally draw and store the best filters to disk
    print("Filter Losses: ", [loss for f, loss in processed_filters])
    _draw_filters(processed_filters, grid_columns, show_filters)
if __name__ == '__main__':
    # Layer to visualize; valid names are listed in the model definition at
    # keras/applications/vgg16.py.
    LAYER_NAME = 'block5_conv1'

    # Instantiate VGG16 with ImageNet weights, without the classifier head.
    model = vgg16.VGG16(weights='imagenet', include_top=False)
    print('Model loaded.')
    model.summary()

    # Example call: render the first four filters of the chosen layer.
    visualize_layer(model, LAYER_NAME, filter_range=(0, 4))
"numpy.clip",
"keras.applications.vgg16.VGG16",
"keras.backend.gradients",
"matplotlib.pyplot.imshow",
"keras.backend.image_data_format",
"numpy.random.random",
"keras.backend.square",
"numpy.diff",
"keras.backend.epsilon",
"numpy.abs",
"numpy.random.randn",
"time.time",
"matplotlib.pyplot.s... | [((903, 919), 'numpy.clip', 'np.clip', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (910, 919), True, 'import numpy as np\n'), ((10772, 10822), 'keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (10783, 10822), False, 'from keras.applications import vgg16\n'), ((833, 844), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (842, 844), True, 'from keras import backend as K\n'), ((968, 989), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (987, 989), True, 'from keras import backend as K\n'), ((1528, 1549), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1547, 1549), True, 'from keras import backend as K\n'), ((3624, 3635), 'time.time', 'time.time', ([], {}), '()\n', (3633, 3635), False, 'import time\n'), ((4251, 4289), 'keras.backend.function', 'K.function', (['[input_img]', '[loss, grads]'], {}), '([input_img], [loss, grads])\n', (4261, 4289), True, 'from keras import backend as K\n'), ((7001, 7012), 'time.time', 'time.time', ([], {}), '()\n', (7010, 7012), False, 'import time\n'), ((7914, 7964), 'numpy.zeros', 'np.zeros', (['(width, height, channels)'], {'dtype': '"""uint8"""'}), "((width, height, channels), dtype='uint8')\n", (7922, 7964), True, 'import numpy as np\n'), ((9201, 9229), 'keras.backend.int_shape', 'K.int_shape', (['model.inputs[0]'], {}), '(model.inputs[0])\n', (9212, 9229), True, 'from keras import backend as K\n'), ((456, 467), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (465, 467), True, 'from keras import backend as K\n'), ((1054, 1072), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (1061, 1072), True, 'import numpy as np\n'), ((3765, 3786), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3784, 3786), True, 'from keras import backend as K\n'), ((3827, 3870), 'keras.backend.mean', 'K.mean', 
(['layer_output[:, filter_index, :, :]'], {}), '(layer_output[:, filter_index, :, :])\n', (3833, 3870), True, 'from keras import backend as K\n'), ((3904, 3947), 'keras.backend.mean', 'K.mean', (['layer_output[:, :, :, filter_index]'], {}), '(layer_output[:, :, :, filter_index])\n', (3910, 3947), True, 'from keras import backend as K\n'), ((4034, 4062), 'keras.backend.gradients', 'K.gradients', (['loss', 'input_img'], {}), '(loss, input_img)\n', (4045, 4062), True, 'from keras import backend as K\n'), ((8757, 8800), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (8767, 8800), True, 'import matplotlib.pyplot as plt\n'), ((8813, 8841), 'matplotlib.pyplot.imshow', 'plt.imshow', (['stitched_filters'], {}), '(stitched_filters)\n', (8823, 8841), True, 'import matplotlib.pyplot as plt\n'), ((8932, 8942), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8940, 8942), True, 'import matplotlib.pyplot as plt\n'), ((4520, 4541), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4539, 4541), True, 'from keras import backend as K\n'), ((4596, 4669), 'numpy.random.random', 'np.random.random', (['(1, channels, intermediate_dim[0], intermediate_dim[1])'], {}), '((1, channels, intermediate_dim[0], intermediate_dim[1]))\n', (4612, 4669), True, 'import numpy as np\n'), ((4742, 4815), 'numpy.random.random', 'np.random.random', (['(1, intermediate_dim[0], intermediate_dim[1], channels)'], {}), '((1, intermediate_dim[0], intermediate_dim[1], channels))\n', (4758, 4815), True, 'import numpy as np\n'), ((4992, 5021), 'numpy.random.randn', 'np.random.randn', (['*array.shape'], {}), '(*array.shape)\n', (5007, 5021), True, 'import numpy as np\n'), ((440, 451), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (448, 451), True, 'from keras import backend as K\n'), ((8224, 8252), 'numpy.zeros_like', 'np.zeros_like', (['filters[0][0]'], {}), '(filters[0][0])\n', (8237, 8252), 
True, 'import numpy as np\n'), ((5758, 5777), 'numpy.abs', 'np.abs', (['grads_value'], {}), '(grads_value)\n', (5764, 5777), True, 'import numpy as np\n'), ((5869, 5883), 'numpy.sum', 'np.sum', (['losses'], {}), '(losses)\n', (5875, 5883), True, 'import numpy as np\n'), ((6428, 6458), 'PIL.Image.fromarray', 'pil_image.fromarray', (['img', 'mode'], {}), '(img, mode)\n', (6447, 6458), True, 'from PIL import Image as pil_image\n'), ((6689, 6713), 'PIL.Image.fromarray', 'pil_image.fromarray', (['img'], {}), '(img)\n', (6708, 6713), True, 'from PIL import Image as pil_image\n'), ((5917, 5932), 'numpy.diff', 'np.diff', (['losses'], {}), '(losses)\n', (5924, 5932), True, 'import numpy as np\n')] |
import random
from collections import deque

import networkx as nx
import numpy as np
import scipy as sp
import xarray as xr
from joblib import Parallel, delayed
from scipy.interpolate import interp1d
from skimage.morphology import cube, disk
from statsmodels.distributions.empirical_distribution import ECDF
def label_function(struct, pore_object, label, verbose = False):
    """Return the directed connections (label, neighbour) of one pore.

    The mask of `label` is dilated by one step of `struct(3)`; every other
    label found inside the dilated region counts as a neighbour.
    """
    region = pore_object == label
    found = deque()

    if verbose:
        print('Searching around {}'.format(label))

    region = sp.ndimage.binary_dilation(input = region, structure = struct(3))
    # unique()[1:] drops the background (0); the pore's own label is
    # filtered out explicitly below.
    for candidate in np.unique(pore_object[region])[1:]:
        if candidate == label:
            continue
        link = (label, candidate)
        if verbose:
            print('\t{} connects to {}'.format(link[1], link[0]))
        found.append(link)

    return found
class PNMStats:
    """Waiting-time statistics loaded from an xarray dataset on disk.

    Exposes one array per waiting-time bin (025/100/300) plus the combined
    `delta_t_all` array.
    """

    def __init__(self, path):
        # waiting time statistics, one array per bin
        self.delta_t_025 = np.array([])
        self.delta_t_100 = np.array([])
        self.delta_t_300 = np.array([])
        self.delta_t_all = np.array([])

        print('Reading the statistics dataset at {}'.format(path))
        stats_dataset = xr.load_dataset(path)
        for key in list(stats_dataset.coords):
            # only coordinates ending in '_t' carry waiting-time data
            if key[-2:] != '_t':
                continue
            tag = key[3:6]
            if tag == '025':
                self.delta_t_025 = np.concatenate([self.delta_t_025, stats_dataset[key].data])
            elif tag == '100':
                self.delta_t_100 = np.concatenate([self.delta_t_100, stats_dataset[key].data])
            elif tag == '300':
                self.delta_t_300 = np.concatenate([self.delta_t_300, stats_dataset[key].data])

        self.delta_t_all = stats_dataset['deltatall'].data
class PNM:
    """Pore network model built from experimental tomography data.

    Nodes of ``self.graph`` are pores and edges are throats. Pore geometry
    (radii, heights) and waiting times are sampled randomly unless the
    corresponding experimental datasets are supplied.
    """

    def __init__(self, stats_path,
        graph = None,
        exp_data_path = None,
        pore_data_path = None,
        inlets = None,
        R_inlet = 1E17,
        job_count = 4,
        verbose = False
    ):
        """Set up the network, pore properties, waiting times and inlets.

        # Arguments
            stats_path: Path to the waiting-time statistics dataset, or None
                to fall back to random waiting times.
            graph: Optional pre-built networkx graph of the pore network.
            exp_data_path: Optional path to the experimental dataset
                (must contain 'label_matrix' and 'label').
            pore_data_path: Optional path to the per-pore property dataset.
            inlets: Optional list of inlet pore labels. BUGFIX: the default
                used to be a mutable `[]`, shared between all instances;
                `None` is the sentinel now (backward compatible).
            R_inlet: Inlet resistance.
            job_count: Number of parallel jobs for throat extraction.
            verbose: If True, print progress details.
        """
        if inlets is None:
            inlets = []
        self.job_count = job_count
        self.verbose = verbose

        self.stats = None
        self.randomize_waiting_times = True
        self.waiting_times_data = None
        if stats_path is not None:
            self.stats = PNMStats(stats_path)
            self.waiting_times_data = self.stats.delta_t_all
            self.randomize_waiting_times = False

        self.graph = graph
        self.data = None
        self.waiting_times = np.array([])
        self.V = None           # water volume in the network
        self.filled = None      # boolean array marking filled nodes
        self.inlets = inlets    # inlet pores
        self.R0 = None          # effective inlet resistance seen by each pore
        self.R_full = None      # pore resistances when full
        self.R_inlet = R_inlet  # inlet resistance
        self.radi = None        # pore radii
        self.heights = None     # pore heights

        if exp_data_path is not None:
            print('Reading the experimental dataset at {}'.format(exp_data_path))
            self.data = xr.load_dataset(exp_data_path)
            self.generate_graph(self.data)

        if pore_data_path is not None:
            print('Reading the pore dataset at {}'.format(pore_data_path))
            pore_data = xr.load_dataset(pore_data_path)
            self.generate_pore_data(pore_data)
        else:
            self.generate_pore_data()

        self.generate_waiting_times()
        # TODO: the amount of inlets should be an adjustable argument or a
        # fraction of the total number of nodes.
        self.build_inlets()

    def extract_throat_list(self, label_matrix, labels):
        """
        inspired by GETNET
        extracts a list of directed throats connecting pores i->j including a few throat parameters
        undirected network i-j needs to be calculated in a second step
        """
        def extend_bounding_box(s, shape, pad=3):
            # Grow the slice tuple `s` by `pad` voxels per side, clamped to
            # the array bounds, so neighbouring labels are included.
            a = deque()
            for i, dim in zip(s, shape):
                start = 0
                stop = dim
                if i.start - pad >= 0:
                    start = i.start - pad
                if i.stop + pad < dim:
                    stop = i.stop + pad
                a.append(slice(start, stop, None))
            return tuple(a)

        im = label_matrix

        struct = cube  # FIXME: ball does not work as you would think (anisotropic expansion)
        if im.ndim == 2:
            # BUGFIX: `disk` was referenced here but never imported (see the
            # skimage.morphology import at the top of the file).
            struct = disk

        crude_pores = sp.ndimage.find_objects(im)

        # throw out None-entries (counterintuitive behavior of find_objects)
        # and pores whose padded bounding box contains no other label
        pores = deque()
        bounding_boxes = deque()
        for pore in crude_pores:
            if pore is None:
                continue
            bb = extend_bounding_box(pore, im.shape)
            # background + own label = 2 unique values; > 2 means neighbours
            if len(np.unique(im[bb])) > 2:
                pores.append(pore)
                bounding_boxes.append(bb)

        # NOTE(review): zip pairs the *filtered* bounding boxes with the full
        # label list in order — verify the two stay aligned when pores are
        # dropped above.
        connections_raw = Parallel(n_jobs = self.job_count)(
            delayed(label_function)(struct, im[bounding_box], label, self.verbose)
            for (bounding_box, label) in zip(bounding_boxes, labels)
        )

        # clear out empty objects
        connections = deque()
        for connection in connections_raw:
            if len(connection) > 0:
                connections.append(connection)

        return np.concatenate(connections, axis = 0)

    def generate_graph(self, exp_data):
        """Build the pore graph from the experiment's label matrix."""
        label_matrix = exp_data['label_matrix'].data
        labels = exp_data['label'].data
        if self.verbose:
            print('labels', labels)
            print('label matrix shape', label_matrix.shape)

        if self.graph is None:
            print('Generating the pore network graph from the experimental dataset')
            throats = self.extract_throat_list(label_matrix, labels)
            self.graph = nx.Graph()
            self.graph.add_edges_from(np.uint16(throats[:,:2]))

    def generate_pore_data(self, pore_data = None):
        """Fill `self.radi` and `self.heights`, randomly or from `pore_data`."""
        if pore_data is None:
            if self.verbose:
                print('Filling the graph with random pore data')
            size = len(self.graph.nodes)
            re = self.radi = np.random.rand(size)
            h0e = self.heights = np.random.rand(size)
            for i in range(size):
                # scale radii down by 1E5 and heights by 1E4
                re[i] /= 10**np.random.randint(5, 6)
                h0e[i] /= 10**np.random.randint(4, 5)
        else:
            if self.verbose:
                print('Using experimental pore data')
            px = pore_data.attrs['voxel'].data
            # radius of the circle with the pore's median cross-section area
            self.radi = px*np.sqrt(pore_data['value_properties'].sel(property = 'median_area').data/np.pi)
            self.heights = px*pore_data['value_properties'].sel(property = 'major_axis').data

    def generate_waiting_times(self):
        """Assign a waiting time to every pore.

        Samples from the experimental distribution (via its inverted ECDF)
        when data is available, otherwise draws random log-spread times.
        """
        size = np.array(np.unique(self.graph.nodes)).max() + 1
        data = self.waiting_times_data

        if self.randomize_waiting_times or data is None or len(data) == 0:
            wt = self.waiting_times = np.random.rand(size)
            for i in range(size):
                wt[i] *= 10**np.random.randint(-1, 3)
        else:
            # assign a random waiting time to every pore based on the experimental distribution
            ecdf = ECDF(data)
            func = interp1d(ecdf.y[1:], ecdf.x[1:], fill_value = 'extrapolate')
            self.waiting_times = func(np.random.rand(size))

    def build_inlets(self, amount = 5):
        """Validate the configured inlets or generate `amount` random ones."""
        # BUGFIX: `dtype=np.int` was removed in NumPy 1.24; plain `int` is
        # the documented replacement.
        inlets = np.array(self.inlets, dtype = int)
        if not np.any(inlets):
            self.generate_inlets(amount)
        else:
            # double-check if inlet pores are actually in the network
            temp_inlets = deque()
            print('Taking inlets from command-line arguments.')
            for inlet in inlets:
                if inlet in self.graph:
                    temp_inlets.append(inlet)
            self.inlets = np.array(temp_inlets)

    # TODO: Change this to start with one random inlet and some amount of distant neighbours
    def generate_inlets(self, amount):
        """Pick `amount` random pores as inlets."""
        print('Generating {} inlets'.format(amount))
        # BUGFIX: random.sample requires a sequence (not a set-like view)
        # since Python 3.11, hence the explicit list().
        self.inlets = random.sample(list(self.graph.nodes), amount)

    def outlet_resistances(self):
        """
        find your path through the filled network to calculate the inlet
        resistance imposed on the pores at the waterfront
        quick and dirty, this part makes the code slow and might even be wrong
        we have to check
        """
        # initialize pore resistances
        self.R0 = np.zeros(len(self.filled))

        # only filled pores contribute to the network permeability
        filled_inlets = deque()
        for inlet in self.inlets:
            if self.filled[inlet]:
                filled_inlets.append(inlet)

        if self.verbose:
            print('\nfilled inlets', filled_inlets)

        return self.outlet_resistances_r(filled_inlets)

    # this function recursively calculates the effective inlet resistance
    # for every pore with the same distance (layer) to the network inlet
    def outlet_resistances_r(self, layer, visited = None):
        """Recursive breadth-first pass accumulating `self.R0` layer by layer.

        # Arguments
            layer: Iterable of nodes at the current distance from the inlet.
            visited: Dict of already-processed nodes. BUGFIX: this used to be
                a mutable default `{}` shared between *separate* top-level
                calls, so a second invocation saw the first one's visited set.
        """
        if visited is None:
            visited = {}
        if len(layer) == 0:
            return self.R0
        if self.verbose:
            print('current layer', layer)

        next_layer = deque()
        for node in layer:
            neighbours = self.graph.neighbors(node)
            inv_R_eff = np.float64(0)
            if self.verbose:
                print('visiting node', node)

            for neighbour in neighbours:
                if neighbour in layer:
                    # NOTE(review): summing 1/(R0 + R_full) was flagged as
                    # questionable in the original code ("you sure about
                    # this?") — verify against the physics before trusting it.
                    inv_R_eff += 1/np.float64(self.R0[neighbour] + self.R_full[neighbour])
            self.R0[node] += 1/inv_R_eff

            if self.filled[node] and node not in visited:
                next_layer.append(node)
                visited[node] = True

        if self.verbose:
            print('next layer', next_layer)

        return self.outlet_resistances_r(next_layer, visited)
| [
"random.sample",
"collections.deque",
"numpy.unique",
"numpy.random.rand",
"numpy.float64",
"scipy.ndimage.find_objects",
"networkx.Graph",
"scipy.interpolate.interp1d",
"joblib.Parallel",
"numpy.array",
"numpy.any",
"statsmodels.distributions.empirical_distribution.ECDF",
"numpy.random.rand... | [((419, 426), 'collections.deque', 'deque', ([], {}), '()\n', (424, 426), False, 'from collections import deque\n'), ((587, 615), 'numpy.unique', 'np.unique', (['pore_object[mask]'], {}), '(pore_object[mask])\n', (596, 615), True, 'import numpy as np\n'), ((966, 978), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (974, 978), True, 'import numpy as np\n'), ((1006, 1018), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1014, 1018), True, 'import numpy as np\n'), ((1046, 1058), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1054, 1058), True, 'import numpy as np\n'), ((1086, 1098), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1094, 1098), True, 'import numpy as np\n'), ((1191, 1212), 'xarray.load_dataset', 'xr.load_dataset', (['path'], {}), '(path)\n', (1206, 1212), True, 'import xarray as xr\n'), ((2433, 2445), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2441, 2445), True, 'import numpy as np\n'), ((4328, 4355), 'scipy.ndimage.find_objects', 'sp.ndimage.find_objects', (['im'], {}), '(im)\n', (4351, 4355), True, 'import scipy as sp\n'), ((4450, 4457), 'collections.deque', 'deque', ([], {}), '()\n', (4455, 4457), False, 'from collections import deque\n'), ((4483, 4490), 'collections.deque', 'deque', ([], {}), '()\n', (4488, 4490), False, 'from collections import deque\n'), ((5044, 5051), 'collections.deque', 'deque', ([], {}), '()\n', (5049, 5051), False, 'from collections import deque\n'), ((5194, 5229), 'numpy.concatenate', 'np.concatenate', (['connections'], {'axis': '(0)'}), '(connections, axis=0)\n', (5208, 5229), True, 'import numpy as np\n'), ((7287, 7322), 'numpy.array', 'np.array', (['self.inlets'], {'dtype': 'np.int'}), '(self.inlets, dtype=np.int)\n', (7295, 7322), True, 'import numpy as np\n'), ((7954, 7993), 'random.sample', 'random.sample', (['self.graph.nodes', 'amount'], {}), '(self.graph.nodes, amount)\n', (7967, 7993), False, 'import random\n'), ((8463, 8470), 'collections.deque', 'deque', ([], {}), 
'()\n', (8468, 8470), False, 'from collections import deque\n'), ((9090, 9097), 'collections.deque', 'deque', ([], {}), '()\n', (9095, 9097), False, 'from collections import deque\n'), ((2925, 2955), 'xarray.load_dataset', 'xr.load_dataset', (['exp_data_path'], {}), '(exp_data_path)\n', (2940, 2955), True, 'import xarray as xr\n'), ((3138, 3169), 'xarray.load_dataset', 'xr.load_dataset', (['pore_data_path'], {}), '(pore_data_path)\n', (3153, 3169), True, 'import xarray as xr\n'), ((3789, 3796), 'collections.deque', 'deque', ([], {}), '()\n', (3794, 3796), False, 'from collections import deque\n'), ((4766, 4797), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.job_count'}), '(n_jobs=self.job_count)\n', (4774, 4797), False, 'from joblib import Parallel, delayed\n'), ((5699, 5709), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5707, 5709), True, 'import networkx as nx\n'), ((6022, 6042), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (6036, 6042), True, 'import numpy as np\n'), ((6076, 6096), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (6090, 6096), True, 'import numpy as np\n'), ((6840, 6860), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (6854, 6860), True, 'import numpy as np\n'), ((7078, 7088), 'statsmodels.distributions.empirical_distribution.ECDF', 'ECDF', (['data'], {}), '(data)\n', (7082, 7088), False, 'from statsmodels.distributions.empirical_distribution import ECDF\n'), ((7108, 7166), 'scipy.interpolate.interp1d', 'interp1d', (['ecdf.y[1:]', 'ecdf.x[1:]'], {'fill_value': '"""extrapolate"""'}), "(ecdf.y[1:], ecdf.x[1:], fill_value='extrapolate')\n", (7116, 7166), False, 'from scipy.interpolate import interp1d\n'), ((7340, 7354), 'numpy.any', 'np.any', (['inlets'], {}), '(inlets)\n', (7346, 7354), True, 'import numpy as np\n'), ((7507, 7514), 'collections.deque', 'deque', ([], {}), '()\n', (7512, 7514), False, 'from collections import deque\n'), ((7724, 7745), 'numpy.array', 'np.array', 
(['temp_inlets'], {}), '(temp_inlets)\n', (7732, 7745), True, 'import numpy as np\n'), ((9201, 9214), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (9211, 9214), True, 'import numpy as np\n'), ((1376, 1435), 'numpy.concatenate', 'np.concatenate', (['[self.delta_t_025, stats_dataset[key].data]'], {}), '([self.delta_t_025, stats_dataset[key].data])\n', (1390, 1435), True, 'import numpy as np\n'), ((1505, 1564), 'numpy.concatenate', 'np.concatenate', (['[self.delta_t_100, stats_dataset[key].data]'], {}), '([self.delta_t_100, stats_dataset[key].data])\n', (1519, 1564), True, 'import numpy as np\n'), ((1634, 1693), 'numpy.concatenate', 'np.concatenate', (['[self.delta_t_300, stats_dataset[key].data]'], {}), '([self.delta_t_300, stats_dataset[key].data])\n', (1648, 1693), True, 'import numpy as np\n'), ((5748, 5773), 'numpy.uint16', 'np.uint16', (['throats[:, :2]'], {}), '(throats[:, :2])\n', (5757, 5773), True, 'import numpy as np\n'), ((7207, 7227), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (7221, 7227), True, 'import numpy as np\n'), ((4813, 4836), 'joblib.delayed', 'delayed', (['label_function'], {}), '(label_function)\n', (4820, 4836), False, 'from joblib import Parallel, delayed\n'), ((6160, 6183), 'numpy.random.randint', 'np.random.randint', (['(5)', '(6)'], {}), '(5, 6)\n', (6177, 6183), True, 'import numpy as np\n'), ((6214, 6237), 'numpy.random.randint', 'np.random.randint', (['(4)', '(5)'], {}), '(4, 5)\n', (6231, 6237), True, 'import numpy as np\n'), ((6924, 6948), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(3)'], {}), '(-1, 3)\n', (6941, 6948), True, 'import numpy as np\n'), ((4638, 4655), 'numpy.unique', 'np.unique', (['im[bb]'], {}), '(im[bb])\n', (4647, 4655), True, 'import numpy as np\n'), ((6648, 6675), 'numpy.unique', 'np.unique', (['self.graph.nodes'], {}), '(self.graph.nodes)\n', (6657, 6675), True, 'import numpy as np\n'), ((9406, 9461), 'numpy.float64', 'np.float64', (['(self.R0[neighbour] + 
self.R_full[neighbour])'], {}), '(self.R0[neighbour] + self.R_full[neighbour])\n', (9416, 9461), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
import pylab as pl
import pandas
from ae_measure2 import *
from feature_extraction import *
import glob
import os
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import davies_bouldin_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.cluster import SpectralClustering
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Hyperparameters
    # ------------------------------------------------------------------
    sig_len = 1024
    n_clusters = 2  # NOTE: number of clusters
    clusterer = KMeans(n_clusters=n_clusters, n_init=20000)
    reference_index = 1
    test_index = 3
    scaler = StandardScaler()  # NOTE: normalize to unit variance

    # ------------------------------------------------------------------
    # Read-in and setup
    # ------------------------------------------------------------------
    mypath = 'C:/Research/Framework_Benchmarking/Data/PLB_data.json'
    data = load_PLB(mypath)
    waves = data['data']
    targets = data['target']
    angles = data['target_angle']
    energy = data['energy']

    # Masks selecting the reference and test classes.
    ref_mask = np.where(targets == reference_index)
    test_mask = np.where(targets == test_index)

    energy_set = np.hstack((energy[ref_mask], energy[test_mask]))
    wave_set = np.vstack((waves[ref_mask], waves[test_mask]))
    target_set = np.hstack((targets[ref_mask], targets[test_mask]))

    # ------------------------------------------------------------------
    # Cast the experiment as feature vectors and rescale
    # ------------------------------------------------------------------
    feature_matrix = [extract_Sause_vect(waveform=wave) for wave in wave_set]
    feature_matrix = scaler.fit_transform(feature_matrix)

    # ------------------------------------------------------------------
    # Cluster and score against the ground-truth labels
    # ------------------------------------------------------------------
    print('Beginning clustering')
    labels = clusterer.fit(feature_matrix).labels_
    print('ARI: ', ari(labels, target_set))
| [
"sklearn.cluster.KMeans",
"numpy.hstack",
"numpy.where",
"sklearn.metrics.adjusted_rand_score",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack"
] | [((607, 641), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'n_init': '(20000)'}), '(n_clusters=k, n_init=20000)\n', (613, 641), False, 'from sklearn.cluster import KMeans\n'), ((878, 894), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (892, 894), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1346, 1392), 'numpy.hstack', 'np.hstack', (['(reference_energies, test_energies)'], {}), '((reference_energies, test_energies))\n', (1355, 1392), True, 'import numpy as np\n'), ((1531, 1571), 'numpy.vstack', 'np.vstack', (['(reference_waves, test_waves)'], {}), '((reference_waves, test_waves))\n', (1540, 1571), True, 'import numpy as np\n'), ((1722, 1766), 'numpy.hstack', 'np.hstack', (['(reference_targets, test_targets)'], {}), '((reference_targets, test_targets))\n', (1731, 1766), True, 'import numpy as np\n'), ((1233, 1269), 'numpy.where', 'np.where', (['(targets == reference_index)'], {}), '(targets == reference_index)\n', (1241, 1269), True, 'import numpy as np\n'), ((1297, 1328), 'numpy.where', 'np.where', (['(targets == test_index)'], {}), '(targets == test_index)\n', (1305, 1328), True, 'import numpy as np\n'), ((1424, 1460), 'numpy.where', 'np.where', (['(targets == reference_index)'], {}), '(targets == reference_index)\n', (1432, 1460), True, 'import numpy as np\n'), ((1484, 1515), 'numpy.where', 'np.where', (['(targets == test_index)'], {}), '(targets == test_index)\n', (1492, 1515), True, 'import numpy as np\n'), ((1608, 1644), 'numpy.where', 'np.where', (['(targets == reference_index)'], {}), '(targets == reference_index)\n', (1616, 1644), True, 'import numpy as np\n'), ((1673, 1704), 'numpy.where', 'np.where', (['(targets == test_index)'], {}), '(targets == test_index)\n', (1681, 1704), True, 'import numpy as np\n'), ((2418, 2441), 'sklearn.metrics.adjusted_rand_score', 'ari', (['labels', 'target_set'], {}), '(labels, target_set)\n', (2421, 2441), True, 'from sklearn.metrics import 
adjusted_rand_score as ari\n')] |
"""
This file is based on dominant_invariant_subspace.m from the manopt MATLAB
package.
The optimization is performed on the Grassmann manifold, since only the
space spanned by the columns of X matters. The implementation is short to
show how Manopt can be used to quickly obtain a prototype. To make the
implementation more efficient, one might first try to use the caching
system, that is, use the optional 'store' arguments in the cost, grad and
hess functions. Furthermore, using egrad2rgrad and ehess2rhess is quick
and easy, but not always efficient. Having a look at the formulas
implemented in these functions can help rewrite the code without them,
possibly more efficiently.
See also: dominant_invariant_subspace_complex
Main author: <NAME>, July 5, 2013
Ported to pymanopt by <NAME>, Nov 24, 2015
"""
import theano.tensor as T
import numpy as np
from pymanopt import Problem
from pymanopt.solvers import TrustRegions
from pymanopt.manifolds import Grassmann
def dominant_invariant_subspace(A, p):
    """Return an orthonormal basis of the dominant invariant p-subspace of A.

    Parameters
    ----------
    A : numpy.ndarray
        Real, symmetric matrix of size n x n.
    p : int
        Dimension of the sought subspace, p <= n.

    Returns
    -------
    numpy.ndarray
        Real, orthonormal n x p matrix X maximizing trace(X'*A*X); its
        columns span the same space as the eigenvectors associated with
        the largest eigenvalues of A.  Sign matters: 2 is deemed a larger
        eigenvalue than -5.
    """
    # Input validation: real, square, symmetric matrix and a feasible p.
    n = A.shape[0]
    assert type(A) == np.ndarray, 'A must be a numpy array.'
    assert np.isreal(A).all(), 'A must be real.'
    assert A.shape[1] == n, 'A must be square.'
    assert np.linalg.norm(A-A.T) < n * np.spacing(1), 'A must be symmetric.'
    assert p <= n, 'p must be smaller than n.'

    # The cost -trace(X' A X) is minimized over the Grassmann manifold of
    # p-dimensional subspaces of R^n, since only span(X) matters.
    manifold = Grassmann(n, p)
    X = T.matrix()
    AX = T.dot(A, X)
    cost = -T.dot(X.T, AX).trace()

    problem = Problem(manifold=manifold, cost=cost, arg=X)
    solver = TrustRegions()
    # Delta_bar bounds the trust-region radius, scaled with the subspace size.
    return solver.solve(problem, Delta_bar=8*np.sqrt(p))
if __name__ == '__main__':
    """
    This demo script will generate a random 128 x 128 symmetric matrix and find
    the dominant invariant 3 dimensional subspace for this matrix, that is, it
    will find the subspace spanned by the three eigenvectors with the largest
    eigenvalues.
    """
    # Build a random symmetric test matrix, then extract its dominant
    # 3-dimensional invariant subspace.
    print('Generating random matrix...')
    test_matrix = np.random.randn(128, 128)
    test_matrix = 0.5 * (test_matrix + test_matrix.T)
    subspace_dim = 3
    dominant_invariant_subspace(test_matrix, subspace_dim)
| [
"numpy.sqrt",
"pymanopt.solvers.TrustRegions",
"pymanopt.Problem",
"theano.tensor.matrix",
"pymanopt.manifolds.Grassmann",
"numpy.isreal",
"numpy.linalg.norm",
"numpy.random.randn",
"numpy.spacing",
"theano.tensor.dot"
] | [((2022, 2037), 'pymanopt.manifolds.Grassmann', 'Grassmann', (['n', 'p'], {}), '(n, p)\n', (2031, 2037), False, 'from pymanopt.manifolds import Grassmann\n'), ((2046, 2056), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (2054, 2056), True, 'import theano.tensor as T\n'), ((2140, 2178), 'pymanopt.Problem', 'Problem', ([], {'manifold': 'Gr', 'cost': 'cost', 'arg': 'X'}), '(manifold=Gr, cost=cost, arg=X)\n', (2147, 2178), False, 'from pymanopt import Problem\n'), ((2222, 2236), 'pymanopt.solvers.TrustRegions', 'TrustRegions', ([], {}), '()\n', (2234, 2236), False, 'from pymanopt.solvers import TrustRegions\n'), ((2725, 2750), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (2740, 2750), True, 'import numpy as np\n'), ((1851, 1874), 'numpy.linalg.norm', 'np.linalg.norm', (['(A - A.T)'], {}), '(A - A.T)\n', (1865, 1874), True, 'import numpy as np\n'), ((1754, 1766), 'numpy.isreal', 'np.isreal', (['A'], {}), '(A)\n', (1763, 1766), True, 'import numpy as np\n'), ((1879, 1892), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (1889, 1892), True, 'import numpy as np\n'), ((2295, 2305), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (2302, 2305), True, 'import numpy as np\n'), ((2080, 2091), 'theano.tensor.dot', 'T.dot', (['A', 'X'], {}), '(A, X)\n', (2085, 2091), True, 'import theano.tensor as T\n')] |
"""
Implementation of pairwise ranking using scikit-learn LinearSVC
Reference: "Large Margin Rank Boundaries for Ordinal Regression", <NAME>,
<NAME>, <NAME>.
"""
import itertools
import numpy as np
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
In this method, all pairs are choosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Parameters
----------
X : array, shape (n_samples, n_features)
The data
y : array, shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns
-------
X_trans : array, shape (k, n_feaures)
Data as pairs
y_trans : array, shape (k,)
Output class labels, where classes have values {-1, +1}
"""
X_new = []
y_new = []
y = np.asarray(y)
if y.ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = - y_new[-1]
X_new[-1] = - X_new[-1]
return np.asarray(X_new), np.asarray(y_new).ravel() | [
"numpy.ones",
"numpy.asarray",
"numpy.sign"
] | [((1221, 1234), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1231, 1234), True, 'import numpy as np\n'), ((1753, 1770), 'numpy.asarray', 'np.asarray', (['X_new'], {}), '(X_new)\n', (1763, 1770), True, 'import numpy as np\n'), ((1573, 1599), 'numpy.sign', 'np.sign', (['(y[i, 0] - y[j, 0])'], {}), '(y[i, 0] - y[j, 0])\n', (1580, 1599), True, 'import numpy as np\n'), ((1276, 1295), 'numpy.ones', 'np.ones', (['y.shape[0]'], {}), '(y.shape[0])\n', (1283, 1295), True, 'import numpy as np\n'), ((1772, 1789), 'numpy.asarray', 'np.asarray', (['y_new'], {}), '(y_new)\n', (1782, 1789), True, 'import numpy as np\n')] |
## @ingroup Plots
# Mission_Plots.py
#
# Created: Mar 2020, <NAME>
# Apr 2020, <NAME>
# Sep 2020, <NAME>
# Apr 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import plotly.graph_objects as go
import matplotlib.ticker as ticker
# ------------------------------------------------------------------
# Altitude, SFC & Weight
# ------------------------------------------------------------------
## @ingroup Plots
def plot_altitude_sfc_weight(results, line_color = 'bo-', save_figure = False, save_filename = "Altitude_SFC_Weight" , file_type = ".png"):
    """This plots the altitude, specific fuel consumption and vehicle weight

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.
        freestream.altitude
        weights.total_mass
        weights.vehicle_mass_rate
        frames.body.thrust_force_vector

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(10, 8)
    for segment in results.segments.values():
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        mass = segment.conditions.weights.total_mass[:,0] / Units.lb
        altitude = segment.conditions.freestream.altitude[:,0] / Units.ft
        mdot = segment.conditions.weights.vehicle_mass_rate[:,0]
        thrust = segment.conditions.frames.body.thrust_force_vector[:,0]
        # Thrust-specific fuel consumption in lb/lbf-hr.
        sfc = (mdot / Units.lb) / (thrust /Units.lbf) * Units.hr
        axes = plt.subplot(3,1,1)
        axes.plot( time , altitude , line_color)
        axes.set_ylabel('Altitude (ft)',axis_font)
        set_axes(axes)
        axes = plt.subplot(3,1,3)
        axes.plot( time , sfc , line_color )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('sfc (lb/lbf-hr)',axis_font)
        set_axes(axes)
        axes = plt.subplot(3,1,2)
        # NOTE(review): weight is drawn with a hardcoded 'ro-' rather than
        # line_color — presumably intentional for contrast; confirm.
        axes.plot( time , mass , 'ro-' )
        axes.set_ylabel('Weight (lb)',axis_font)
        set_axes(axes)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Aircraft Velocities
# ------------------------------------------------------------------
## @ingroup Plots
def plot_aircraft_velocities(results, line_color = 'bo-', save_figure = False, save_filename = "Aircraft_Velocities", file_type = ".png"):
    """This plots aircraft velocity, Mach number, and equivalent airspeed

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.freestream.
        velocity
        density
        mach_number

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(10, 8)
    for segment in results.segments.values():
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        velocity = segment.conditions.freestream.velocity[:,0]
        density = segment.conditions.freestream.density[:,0]
        # Equivalent airspeed: true airspeed scaled by sqrt(rho/rho_sea_level),
        # with sea-level density 1.225 kg/m^3.
        EAS = velocity * np.sqrt(density/1.225)
        mach = segment.conditions.freestream.mach_number[:,0]
        axes = plt.subplot(3,1,1)
        axes.plot( time , velocity / Units.kts, line_color)
        axes.set_ylabel('velocity (kts)',axis_font)
        set_axes(axes)
        axes = plt.subplot(3,1,2)
        axes.plot( time , EAS / Units.kts, line_color)
        axes.set_ylabel('Equivalent Airspeed',axis_font)
        set_axes(axes)
        axes = plt.subplot(3,1,3)
        axes.plot( time , mach , line_color)
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('Mach',axis_font)
        set_axes(axes)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Disc and Power Loadings
# ------------------------------------------------------------------
## @ingroup Plots
def plot_disc_power_loading(results, line_color = 'bo-', save_figure = False, save_filename = "Disc_Power_Loading", file_type = ".png"):
    """This plots the propeller disc and power loadings

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion.
        disc_loading
        power_loading

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        DL = results.segments[i].conditions.propulsion.disc_loading
        PL = results.segments[i].conditions.propulsion.power_loading
        axes = plt.subplot(2,1,1)
        axes.plot(time, DL, line_color)
        axes.set_ylabel('lift disc power N/m^2',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,1,2)
        axes.plot(time, PL, line_color )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('lift power loading (N/W)',axis_font)
        set_axes(axes)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Aerodynamic Coefficients
# ------------------------------------------------------------------
## @ingroup Plots
def plot_aerodynamic_coefficients(results, line_color = 'bo-', save_figure = False, save_filename = "Aerodynamic_Coefficients", file_type = ".png"):
    """This plots the aerodynamic coefficients

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.aerodynamics.
        lift_coefficient
        drag_coefficient
        angle_of_attack

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    for segment in results.segments.values():
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        cl = segment.conditions.aerodynamics.lift_coefficient[:,0,None]
        cd = segment.conditions.aerodynamics.drag_coefficient[:,0,None]
        aoa = segment.conditions.aerodynamics.angle_of_attack[:,0] / Units.deg
        # Lift-to-drag ratio, element-wise per control point.
        l_d = cl/cd
        axes = plt.subplot(2,2,1)
        axes.plot( time , aoa , line_color )
        axes.set_ylabel('Angle of Attack (deg)',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,2,2)
        axes.plot( time , cl, line_color )
        axes.set_ylabel('CL',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,2,3)
        axes.plot( time , cd, line_color )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('CD',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,2,4)
        axes.plot( time , l_d, line_color )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('L/D',axis_font)
        set_axes(axes)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Aerodynamic Forces
# ------------------------------------------------------------------
## @ingroup Plots
def plot_aerodynamic_forces(results, line_color = 'bo-', save_figure = False, save_filename = "Aerodynamic_Forces", file_type = ".png"):
    """This plots the aerodynamic forces

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.frames
        body.thrust_force_vector
        wind.lift_force_vector
        wind.drag_force_vector

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    for segment in results.segments.values():
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        Thrust = segment.conditions.frames.body.thrust_force_vector[:,0]
        # Wind-frame lift/drag components are negated so positive values
        # correspond to lift up and drag aft.
        Lift = -segment.conditions.frames.wind.lift_force_vector[:,2]
        Drag = -segment.conditions.frames.wind.drag_force_vector[:,0]
        eta = segment.conditions.propulsion.throttle[:,0]
        axes = plt.subplot(2,2,1)
        axes.plot( time , eta , line_color )
        axes.set_ylabel('Throttle',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,2,2)
        axes.plot( time , Lift , line_color)
        axes.set_ylabel('Lift (N)',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,2,3)
        axes.plot( time , Thrust , line_color)
        axes.set_ylabel('Thrust (N)',axis_font)
        axes.set_xlabel('Time (min)',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,2,4)
        axes.plot( time , Drag , line_color)
        axes.set_ylabel('Drag (N)',axis_font)
        axes.set_xlabel('Time (min)',axis_font)
        set_axes(axes)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Drag Components
# ------------------------------------------------------------------
## @ingroup Plots
def plot_drag_components(results, line_color = 'bo-', save_figure = False, save_filename = "Drag_Components", file_type = ".png"):
    """This plots the drag components of the aircraft

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.aerodynamics.drag_breakdown
        parasite.total
        induced.total
        compressible.total
        miscellaneous.total

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    # Single axes: all drag components for all segments share one plot.
    axes = plt.subplot(1,1,1)
    for i, segment in enumerate(results.segments.values()):
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        drag_breakdown = segment.conditions.aerodynamics.drag_breakdown
        cdp = drag_breakdown.parasite.total[:,0]
        cdi = drag_breakdown.induced.total[:,0]
        cdc = drag_breakdown.compressible.total[:,0]
        cdm = drag_breakdown.miscellaneous.total[:,0]
        cd = drag_breakdown.total[:,0]
        # Label the curves only on the first segment so the legend is not
        # duplicated for every segment.
        if i == 0:
            axes.plot( time , cdp , 'ko-', label='CD parasite' )
            axes.plot( time , cdi , 'bo-', label='CD induced' )
            axes.plot( time , cdc , 'go-', label='CD compressibility' )
            axes.plot( time , cdm , 'yo-', label='CD miscellaneous' )
            axes.plot( time , cd , 'ro-', label='CD total' )
            axes.legend(loc='upper center')
        else:
            axes.plot( time , cdp , 'ko-' )
            axes.plot( time , cdi , 'bo-')
            axes.plot( time , cdc , 'go-')
            axes.plot( time , cdm , 'yo-')
            axes.plot( time , cd , 'ro-')
    axes.set_xlabel('Time (min)',axis_font)
    axes.set_ylabel('CD',axis_font)
    axes.grid(True)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Electronic Conditions
# ------------------------------------------------------------------
## @ingroup Plots
def plot_battery_pack_conditions(results, line_color = 'bo-', line_color2 = 'rs--', save_figure = False, save_filename = "Battery_Pack_Conditions", file_type = ".png"):
    """This plots the battery pack conditions of the network

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion.
        battery_power_draw
        battery_energy
        battery_voltage_under_load
        battery_voltage_open_circuit
        battery_current
        battery_state_of_charge

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    fig.suptitle('Battery Pack Conditions')
    for i in range(len(results.segments)):
        time          = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        pack_power    = results.segments[i].conditions.propulsion.battery_power_draw[:,0]
        pack_energy   = results.segments[i].conditions.propulsion.battery_energy[:,0]
        pack_volts    = results.segments[i].conditions.propulsion.battery_voltage_under_load[:,0]
        pack_volts_oc = results.segments[i].conditions.propulsion.battery_voltage_open_circuit[:,0]
        # FIX: battery_current was read twice in the original; once suffices.
        pack_current  = results.segments[i].conditions.propulsion.battery_current[:,0]
        pack_SOC      = results.segments[i].conditions.propulsion.battery_state_of_charge[:,0]

        # Pack capacity in amp-hours, then instantaneous and nominal C-rates.
        pack_battery_amp_hr = (pack_energy/ Units.Wh )/pack_volts
        pack_C_instant      = pack_current/pack_battery_amp_hr
        pack_C_nominal      = pack_current/np.max(pack_battery_amp_hr)

        axes = plt.subplot(3,3,1)
        axes.plot(time, pack_SOC , line_color)
        axes.set_ylabel('SOC',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,2)
        axes.plot(time, (pack_energy/Units.Wh)/1000, line_color)
        axes.set_ylabel('Energy (kW-hr)',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,3)
        # Power draw is stored negative for discharge; negate for display.
        axes.plot(time, -pack_power/1000, line_color)
        axes.set_ylabel('Power (kW)',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,4)
        axes.set_ylabel('Voltage (V)',axis_font)
        set_axes(axes)
        if i == 0:
            # Label and build the legend only once (first segment); the
            # original rebuilt the legend on every iteration.
            axes.plot(time, pack_volts, line_color,label='Under Load')
            axes.plot(time,pack_volts_oc, line_color2,label='Open Circuit')
            axes.legend(loc='upper right')
        else:
            axes.plot(time, pack_volts, line_color)
            axes.plot(time,pack_volts_oc,line_color2)

        axes = plt.subplot(3,3,5)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('C-Rate (C)',axis_font)
        set_axes(axes)
        if i == 0:
            axes.plot(time, pack_C_instant, line_color,label='Instantaneous')
            axes.plot(time, pack_C_nominal, line_color2,label='Nominal')
            axes.legend(loc='upper right')
        else:
            axes.plot(time, pack_C_instant, line_color)
            axes.plot(time, pack_C_nominal, line_color2)

        axes = plt.subplot(3,3,6)
        axes.plot(time, pack_current, line_color)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Current (A)',axis_font)
        set_axes(axes)

    # Force a zero floor on every subplot so traces share a common baseline.
    for i in range(1,7):
        ax = plt.subplot(3,3,i)
        y_lo, y_hi = ax.get_ylim()
        if y_lo>0: y_lo = 0
        y_hi = y_hi*1.1
        ax.set_ylim(y_lo,y_hi)

    plt.tight_layout()
    if save_figure:
        fig.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Electronic Conditions
# ------------------------------------------------------------------
## @ingroup Plots
def plot_battery_cell_conditions(results, line_color = 'bo-',line_color2 = 'rs--', save_figure = False, save_filename = "Battery_Cell_Conditions", file_type = ".png"):
    """This plots the battery cell conditions of the network

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion.
        battery_cell_power_draw
        battery_cell_energy
        battery_cell_voltage_under_load
        battery_cell_voltage_open_circuit
        battery_cell_current
        battery_state_of_charge
        battery_cell_temperature
        battery_cell_charge_throughput

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    fig.suptitle('Battery Cell Conditions')
    for i in range(len(results.segments)):
        time          = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        cell_power    = results.segments[i].conditions.propulsion.battery_cell_power_draw[:,0]
        cell_energy   = results.segments[i].conditions.propulsion.battery_cell_energy[:,0]
        cell_volts    = results.segments[i].conditions.propulsion.battery_cell_voltage_under_load[:,0]
        cell_volts_oc = results.segments[i].conditions.propulsion.battery_cell_voltage_open_circuit[:,0]
        # FIX: battery_cell_current was read twice in the original.
        cell_current  = results.segments[i].conditions.propulsion.battery_cell_current[:,0]
        cell_SOC      = results.segments[i].conditions.propulsion.battery_state_of_charge[:,0]
        cell_temp     = results.segments[i].conditions.propulsion.battery_cell_temperature[:,0]
        cell_charge   = results.segments[i].conditions.propulsion.battery_cell_charge_throughput[:,0]

        # Cell capacity in amp-hours (computed once; the original computed it
        # twice), then instantaneous and nominal C-rates.
        cell_battery_amp_hr = (cell_energy/ Units.Wh )/cell_volts
        cell_C_instant      = cell_current/cell_battery_amp_hr
        cell_C_nominal      = cell_current/np.max(cell_battery_amp_hr)

        axes = plt.subplot(3,3,1)
        axes.plot(time, cell_SOC, line_color)
        axes.set_ylabel('SOC',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,2)
        axes.plot(time, (cell_energy/Units.Wh), line_color)
        axes.set_ylabel('Energy (W-hr)',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,3)
        axes.plot(time, -cell_power, line_color)
        axes.set_ylabel('Power (W)',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,4)
        axes.set_ylabel('Voltage (V)',axis_font)
        set_axes(axes)
        if i == 0:
            axes.plot(time, cell_volts, line_color,label='Under Load')
            axes.plot(time,cell_volts_oc, line_color2,label='Open Circuit')
            axes.legend(loc='upper right')
        else:
            axes.plot(time, cell_volts, line_color)
            axes.plot(time,cell_volts_oc, line_color2)

        axes = plt.subplot(3,3,5)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('C-Rate (C)',axis_font)
        set_axes(axes)
        if i == 0:
            axes.plot(time, cell_C_instant, line_color,label='Instantaneous')
            axes.plot(time, cell_C_nominal, line_color2,label='Nominal')
            axes.legend(loc='upper right')
        else:
            axes.plot(time, cell_C_instant, line_color)
            axes.plot(time, cell_C_nominal, line_color2)

        axes = plt.subplot(3,3,6)
        # FIX: the original plotted cell_charge here although the axis is
        # labeled 'Current (A)'; plot the cell current instead.
        axes.plot(time, cell_current, line_color)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Current (A)',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,7)
        axes.plot(time, cell_charge, line_color)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Charge Throughput (Ah)',axis_font)
        set_axes(axes)

        axes = plt.subplot(3,3,8)
        axes.plot(time, cell_temp, line_color)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Temperature (K)',axis_font)
        set_axes(axes)

    # Force a zero floor on every subplot so traces share a common baseline.
    for i in range(1,9):
        ax = plt.subplot(3,3,i)
        y_lo, y_hi = ax.get_ylim()
        if y_lo>0: y_lo = 0
        y_hi = y_hi*1.1
        ax.set_ylim(y_lo,y_hi)

    plt.tight_layout()
    if save_figure:
        fig.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Battery Degradation
# ------------------------------------------------------------------
## @ingroup Plots
def plot_battery_degradation(results, line_color = 'bo-',line_color2 = 'rs--', save_figure = False, save_filename = "Battery_Cell_Conditions", file_type = ".png"):
    """This plots the battery cell degradation

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion
        battery_cycle_day                [unitless]
        battery_capacity_fade_factor     [-]
        battery_resistance_growth_factor [-]
        battery_cell_charge_throughput   [Ah]

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    # NOTE(review): the default save_filename "Battery_Cell_Conditions" looks
    # copied from plot_battery_cell_conditions; kept for backward
    # compatibility — confirm intent.
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    fig.suptitle('Battery Cell Degradation')

    # One sample per segment, taken at the segment's final control point.
    num_segs          = len(results.segments)
    time_hrs          = np.zeros(num_segs)
    capacity_fade     = np.zeros_like(time_hrs)
    resistance_growth = np.zeros_like(time_hrs)
    cycle_day         = np.zeros_like(time_hrs)
    charge_throughput = np.zeros_like(time_hrs)
    for i in range(num_segs):
        time_hrs[i]          = results.segments[i].conditions.frames.inertial.time[-1,0] / Units.hour
        cycle_day[i]         = results.segments[i].conditions.propulsion.battery_cycle_day
        capacity_fade[i]     = results.segments[i].conditions.propulsion.battery_capacity_fade_factor
        resistance_growth[i] = results.segments[i].conditions.propulsion.battery_resistance_growth_factor
        charge_throughput[i] = results.segments[i].conditions.propulsion.battery_cell_charge_throughput[-1,0]

    axes = plt.subplot(2,2,1)
    axes.plot(charge_throughput, capacity_fade, line_color)
    axes.plot(charge_throughput, resistance_growth, line_color2)
    axes.set_ylabel('% Capacity Fade/Resistance Growth',axis_font)
    # FIX: this subplot's x-axis is charge throughput, not time; the original
    # mislabeled it 'Time (hrs)'.
    axes.set_xlabel('Charge Throughput (Ah)',axis_font)
    set_axes(axes)

    axes = plt.subplot(2,2,2)
    axes.plot(time_hrs, capacity_fade, line_color)
    axes.plot(time_hrs, resistance_growth, line_color2)
    axes.set_ylabel('% Capacity Fade/Resistance Growth',axis_font)
    axes.set_xlabel('Time (hrs)',axis_font)
    set_axes(axes)

    axes = plt.subplot(2,2,3)
    axes.plot(cycle_day, capacity_fade, line_color)
    axes.plot(cycle_day, resistance_growth, line_color2)
    axes.set_ylabel('% Capacity Fade/Resistance Growth',axis_font)
    axes.set_xlabel('Time (days)',axis_font)
    set_axes(axes)

    plt.tight_layout()
    if save_figure:
        fig.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Flight Conditions
# ------------------------------------------------------------------
## @ingroup Plots
def plot_flight_conditions(results, line_color = 'bo-', save_figure = False, save_filename = "Flight_Conditions", file_type = ".png"):
    """This plots the flight conditions

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.
        frames
            body.inertial_rotations
            inertial.position_vector
        freestream.velocity
        freestream.altitude

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    for segment in results.segments.values():
        time     = segment.conditions.frames.inertial.time[:,0] / Units.min
        airspeed = segment.conditions.freestream.velocity[:,0] / Units['mph']
        theta    = segment.conditions.frames.body.inertial_rotations[:,1,None] / Units.deg
        # Downrange distance in nautical miles.  (The original also read the
        # unused y/z position components; dropped.)
        x        = segment.conditions.frames.inertial.position_vector[:,0]/ Units.nmi
        altitude = segment.conditions.freestream.altitude[:,0]/Units.feet

        axes = plt.subplot(2,2,1)
        axes.plot(time, altitude, line_color)
        axes.set_ylabel('Altitude (ft)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,2,2)
        axes.plot( time , airspeed , line_color )
        axes.set_ylabel('Airspeed (mph)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,2,3)
        axes.plot( time , theta, line_color )
        axes.set_ylabel('Pitch Angle (deg)',axis_font)
        axes.set_xlabel('Time (min)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,2,4)
        # FIX: the range trace hardcoded 'bo-' and ignored the line_color
        # argument; honor the caller's style (default is unchanged).
        axes.plot( time , x, line_color)
        axes.set_ylabel('Range (nmi)',axis_font)
        axes.set_xlabel('Time (min)',axis_font)
        set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Propulsion Conditions
# ------------------------------------------------------------------
## @ingroup Plots
def plot_propeller_conditions(results, line_color = 'bo-', save_figure = False, save_filename = "Propeller", file_type = ".png"):
    """This plots the propeller performance

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.
        frames.inertial.time
        propulsion.propeller_rpm
        frames.body.thrust_force_vector
        propulsion.propeller_motor_torque
        propulsion.propeller_tip_mach
        propulsion.propeller_power_coefficient
        propulsion.throttle

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)
    for segment in results.segments.values():
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        rpm = segment.conditions.propulsion.propeller_rpm[:,0]
        # Thrust magnitude from the full body-frame force vector.
        thrust = np.linalg.norm(segment.conditions.frames.body.thrust_force_vector[:,:],axis=1)
        torque = segment.conditions.propulsion.propeller_motor_torque[:,0]
        tm = segment.conditions.propulsion.propeller_tip_mach[:,0]
        Cp = segment.conditions.propulsion.propeller_power_coefficient[:,0]
        eta = segment.conditions.propulsion.throttle[:,0]
        axes = plt.subplot(2,3,1)
        axes.plot(time, thrust, line_color)
        axes.set_ylabel('Thrust (N)',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,3,2)
        axes.plot(time, rpm, line_color)
        axes.set_ylabel('RPM',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,3,3)
        axes.plot(time, torque, line_color )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Torque (N-m)',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,3,4)
        axes.plot( time , eta , line_color )
        axes.set_ylabel('Throttle',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,3,5)
        axes.plot(time, Cp, line_color )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Power Coefficient',axis_font)
        set_axes(axes)
        axes = plt.subplot(2,3,6)
        axes.plot(time, tm, line_color )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Tip Mach',axis_font)
        set_axes(axes)
    # Set limits
    # Force a zero floor on every subplot so traces share a common baseline.
    for i in range(1,7):
        ax = plt.subplot(2,3,i)
        y_lo, y_hi = ax.get_ylim()
        if y_lo>0: y_lo = 0
        y_hi = y_hi*1.1
        ax.set_ylim(y_lo,y_hi)
    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Electric Propulsion efficiencies
# ------------------------------------------------------------------
## @ingroup Plots
def plot_eMotor_Prop_efficiencies(results, line_color = 'bo-', save_figure = False, save_filename = "eMotor_Prop_Propulsor", file_type = ".png"):
    """This plots the propeller and motor efficiencies of an electric driven
    network over every mission segment.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion.
        propeller_efficiency
        propeller_motor_efficiency

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size': '14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)

    # (column, propulsion attribute, y-axis label) for the two panels
    panels = ((1, 'propeller_efficiency',       r'Propeller Efficiency ($\eta_p$)'),
              (2, 'propeller_motor_efficiency', r'Motor Efficiency ($\eta_m$)'))

    for segment in results.segments.values():
        minutes = segment.conditions.frames.inertial.time[:, 0] / Units.min
        for col, attr, label in panels:
            axes = plt.subplot(1, 2, col)
            axes.plot(minutes, getattr(segment.conditions.propulsion, attr)[:, 0], line_color)
            axes.set_xlabel('Time (mins)', axis_font)
            axes.set_ylabel(label, axis_font)
            set_axes(axes)
            plt.ylim((0, 1))  # efficiencies are bounded on [0, 1]

    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Stability Coefficients
# ------------------------------------------------------------------
## @ingroup Plots
def plot_stability_coefficients(results, line_color = 'bo-', save_figure = False, save_filename = "Stability_Coefficients", file_type = ".png"):
    """This plots the static stability characteristics of an aircraft:
    angle of attack, pitching-moment coefficient, pitch stiffness and
    static margin versus mission time.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.stability.
        static
            CM
            Cm_alpha
            static_margin
    results.segments.conditions.aerodynamics.
        angle_of_attack

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size': '14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(12, 10)

    for segment in results.segments.values():
        minutes = segment.conditions.frames.inertial.time[:, 0] / Units.min
        static  = segment.conditions.stability.static
        # (column, series, label, xlabel on the bottom row only)
        panels = ((1, segment.conditions.aerodynamics.angle_of_attack[:, 0] / Units.deg, r'$AoA$',             False),
                  (2, static.CM[:, 0],                                                   r'$C_M$',             False),
                  (3, static.Cm_alpha[:, 0],                                             r'$C_M\alpha$',       True),
                  (4, static.static_margin[:, 0],                                        'Static Margin (%)',  True))
        for col, series, label, bottom_row in panels:
            axes = plt.subplot(2, 2, col)
            axes.plot(minutes, series, line_color)
            if bottom_row:
                axes.set_xlabel('Time (min)', axis_font)
            axes.set_ylabel(label, axis_font)
            set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Solar Flux
# ------------------------------------------------------------------
## @ingroup Plots
def plot_solar_flux(results, line_color = 'bo-', save_figure = False, save_filename = "Solar_Flux", file_type = ".png"):
    """This plots the solar flux and power train performance of a solar
    powered aircraft over the mission.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion
        solar_flux
        battery_power_draw
        battery_energy

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size': '14'}
    fig = plt.figure(save_filename)
    fig.set_size_inches(8, 6)

    for segment in results.segments.values():
        minutes    = segment.conditions.frames.inertial.time[:, 0] / Units.min
        propulsion = segment.conditions.propulsion
        # (row, series, y-axis label); x-label only on the bottom row
        rows = ((1, propulsion.solar_flux[:, 0],                 'Solar Flux (W/m$^2$)'),
                (2, propulsion.battery_power_draw[:, 0],         'Charging Power (W)'),
                (3, propulsion.battery_energy[:, 0] / Units.MJ,  'Battery Energy (MJ)'))
        for row, series, label in rows:
            axes = plt.subplot(3, 1, row)
            axes.plot(minutes, series, line_color)
            if row == 3:
                axes.set_xlabel('Time (min)', axis_font)
            axes.set_ylabel(label, axis_font)
            set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig(save_filename + file_type)
    return
# ------------------------------------------------------------------
# Lift-Cruise Network
# ------------------------------------------------------------------
## @ingroup Plots
def plot_lift_cruise_network(results, line_color = 'bo-',line_color2 = 'r^-', save_figure = False, save_filename = "Lift_Cruise_Network", file_type = ".png"):
    """This plots the electronic and propulsor performance of a vehicle with a lift cruise network.
    Five figures are produced: battery pack conditions, the combined
    prop-rotor network, the lift rotor alone, the cruise propeller alone,
    and the blade tip Mach numbers.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.propulsion
        throttle
        throttle_lift
        battery_energy
        battery_specfic_power
        battery_voltage_under_load
        battery_voltage_open_circuit
        propeller_rpm / lift_rotor_rpm and the matching torque, efficiency,
        power-coefficient and tip-Mach histories

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    axis_font = {'size':'14'}
    # ------------------------------------------------------------------
    # Electronic Conditions
    # ------------------------------------------------------------------
    fig = plt.figure("Lift_Cruise_Battery_Pack_Conditions")
    fig.set_size_inches(16, 8)
    for i in range(len(results.segments)):
        # unpack this segment's histories (time plotted in minutes)
        time           = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        eta            = results.segments[i].conditions.propulsion.throttle[:,0]
        eta_l          = results.segments[i].conditions.propulsion.throttle_lift[:,0]
        energy         = results.segments[i].conditions.propulsion.battery_energy[:,0]/ Units.Wh
        specific_power = results.segments[i].conditions.propulsion.battery_specfic_power[:,0]
        volts          = results.segments[i].conditions.propulsion.battery_voltage_under_load[:,0]
        volts_oc       = results.segments[i].conditions.propulsion.battery_voltage_open_circuit[:,0]

        plt.title('Battery Pack Conditions')

        # throttle of both motor groups
        axes = plt.subplot(2,2,1)
        axes.set_ylabel('Throttle',axis_font)
        set_axes(axes)
        plt.ylim((0,1))
        # attach legend labels only on the first segment so the legend
        # does not accumulate duplicate entries
        if i == 0:
            axes.plot(time, eta, line_color,label='Propeller Motor')
            axes.plot(time, eta_l, line_color2,label='Lift Rotor Motor')
            axes.legend(loc='upper center')
        else:
            axes.plot(time, eta, line_color)
            axes.plot(time, eta_l, line_color2)

        # battery energy
        axes = plt.subplot(2,2,2)
        axes.plot(time, energy, line_color)
        axes.set_ylabel('Battery Energy (W-hr)',axis_font)
        set_axes(axes)

        # battery voltage, under load and open circuit
        axes = plt.subplot(2,2,3)
        axes.set_ylabel('Battery Voltage (Volts)',axis_font)
        axes.set_xlabel('Time (mins)',axis_font)
        set_axes(axes)
        if i == 0:
            axes.plot(time, volts, line_color,label='Under Load')
            axes.plot(time,volts_oc, line_color2,label='Open Circuit')
            axes.legend(loc='upper center')
        else:
            axes.plot(time, volts, line_color)
            axes.plot(time,volts_oc,line_color2)

        # battery specific power
        axes = plt.subplot(2,2,4)
        axes.plot(time, specific_power, line_color)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Specific Power',axis_font)
        set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig("Lift_Cruise_Battery_Pack_Conditions" + file_type)

    # ------------------------------------------------------------------
    # Propulsion Conditions
    # ------------------------------------------------------------------
    fig = plt.figure("Prop-Rotor Network")
    fig.set_size_inches(16, 8)
    for i in range(len(results.segments)):
        # propeller curves use line_color, lift rotor curves use line_color2
        time              = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        prop_rpm          = results.segments[i].conditions.propulsion.propeller_rpm[:,0]
        prop_thrust       = results.segments[i].conditions.frames.body.thrust_force_vector[:,0]
        prop_torque       = results.segments[i].conditions.propulsion.propeller_motor_torque[:,0]
        prop_effp         = results.segments[i].conditions.propulsion.propeller_efficiency[:,0]
        prop_effm         = results.segments[i].conditions.propulsion.propeller_motor_efficiency[:,0]
        prop_Cp           = results.segments[i].conditions.propulsion.propeller_power_coefficient[:,0]
        lift_rotor_rpm    = results.segments[i].conditions.propulsion.lift_rotor_rpm[:,0]
        # z-component of body thrust, negated so positive means upward lift
        lift_rotor_thrust = -results.segments[i].conditions.frames.body.thrust_force_vector[:,2]
        lift_rotor_torque = results.segments[i].conditions.propulsion.lift_rotor_motor_torque[:,0]
        lift_rotor_effp   = results.segments[i].conditions.propulsion.lift_rotor_efficiency[:,0]
        lift_rotor_effm   = results.segments[i].conditions.propulsion.lift_rotor_motor_efficiency[:,0]
        lift_rotor_Cp     = results.segments[i].conditions.propulsion.lift_rotor_power_coefficient[:,0]

        # title
        plt.title("Prop-Rotor Network")
        # plots
        axes = plt.subplot(2,3,1)
        axes.plot(time, prop_rpm, line_color)
        axes.plot(time, lift_rotor_rpm, line_color2)
        axes.set_ylabel('RPM',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,2)
        axes.plot(time, prop_thrust,line_color)
        axes.plot(time, lift_rotor_thrust, line_color2)
        axes.set_ylabel('Thrust (N)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,3)
        axes.plot(time, prop_torque, line_color)
        axes.plot(time, lift_rotor_torque, line_color2)
        axes.set_ylabel('Torque (N-m)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,4)
        axes.plot(time, prop_effp, line_color )
        axes.plot(time, lift_rotor_effp, line_color2)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel(r'Propeller Efficiency, $\eta_{propeller}$',axis_font)
        set_axes(axes)
        plt.ylim((0,1))

        axes = plt.subplot(2,3,5)
        axes.plot(time, prop_effm, line_color )
        axes.plot(time, lift_rotor_effm,line_color2)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel(r'Motor Efficiency, $\eta_{motor}$',axis_font)
        set_axes(axes)
        plt.ylim((0,1))

        axes = plt.subplot(2,3,6)
        axes.plot(time, prop_Cp,line_color )
        axes.plot(time, lift_rotor_Cp, line_color2 )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Power Coefficient, $C_{P}$',axis_font)
        set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig("Propulsor_Network" + file_type)

    # ------------------------------------------------------------------
    # Propulsion Conditions
    # ------------------------------------------------------------------
    fig = plt.figure("Lift_Rotor")
    fig.set_size_inches(16, 8)
    for i in range(len(results.segments)):
        time   = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        rpm    = results.segments[i].conditions.propulsion.lift_rotor_rpm [:,0]
        thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,2]
        # NOTE(review): unlike the other histories no [:,0] slice is taken,
        # so `torque` keeps its original 2-D shape — confirm this is intended
        torque = results.segments[i].conditions.propulsion.lift_rotor_motor_torque
        effp   = results.segments[i].conditions.propulsion.lift_rotor_efficiency[:,0]
        effm   = results.segments[i].conditions.propulsion.lift_rotor_motor_efficiency[:,0]
        Cp     = results.segments[i].conditions.propulsion.lift_rotor_power_coefficient[:,0]

        # title
        plt.title("Lift Rotor")
        # plots
        axes = plt.subplot(2,3,1)
        axes.plot(time, rpm, line_color2)
        axes.set_ylabel('RPM',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,2)
        # negated so the downward body-z thrust is shown as positive lift
        axes.plot(time, -thrust, line_color2)
        axes.set_ylabel('Thrust (N)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,3)
        axes.plot(time, torque, line_color2 )
        axes.set_ylabel('Torque (N-m)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,4)
        axes.plot(time, effp, line_color2,label= r'$\eta_{lift rotor}$' )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel(r'Propeller Efficiency, $\eta_{lift rotor}$',axis_font)
        set_axes(axes)
        plt.ylim((0,1))

        axes = plt.subplot(2,3,5)
        axes.plot(time, effm, line_color2 )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel(r'Motor Efficiency, $\eta_{mot}$',axis_font)
        set_axes(axes)
        plt.ylim((0,1))

        axes = plt.subplot(2,3,6)
        axes.plot(time, Cp , line_color2 )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Power Coefficient, $C_{P}$',axis_font)
        set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig("Lift_Rotor" + file_type)

    # ------------------------------------------------------------------
    # Propulsion Conditions
    # ------------------------------------------------------------------
    fig = plt.figure("Propeller")
    fig.set_size_inches(16, 8)
    for i in range(len(results.segments)):
        time   = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        rpm    = results.segments[i].conditions.propulsion.propeller_rpm [:,0]
        thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,0]
        torque = results.segments[i].conditions.propulsion.propeller_motor_torque[:,0]
        effp   = results.segments[i].conditions.propulsion.propeller_efficiency[:,0]
        effm   = results.segments[i].conditions.propulsion.propeller_motor_efficiency[:,0]
        Cp     = results.segments[i].conditions.propulsion.propeller_power_coefficient[:,0]

        # title
        plt.title("Propeller")
        # plots
        axes = plt.subplot(2,3,1)
        axes.plot(time, rpm,line_color)
        axes.set_ylabel('RPM')
        set_axes(axes)

        axes = plt.subplot(2,3,2)
        axes.plot(time, thrust,line_color)
        axes.set_ylabel('Thrust (N)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,3)
        axes.plot(time, torque, line_color)
        axes.set_ylabel('Torque (N-m)',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,4)
        axes.plot(time, effp,line_color)
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel(r'Propeller Efficiency $\eta_{propeller}$',axis_font)
        set_axes(axes)
        plt.ylim((0,1))

        axes = plt.subplot(2,3,5)
        axes.plot(time, effm,line_color )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel(r'Motor Efficiency $\eta_{motor}$',axis_font)
        set_axes(axes)

        axes = plt.subplot(2,3,6)
        axes.plot(time, Cp, line_color )
        axes.set_xlabel('Time (mins)',axis_font)
        axes.set_ylabel('Power Coefficient',axis_font)
        set_axes(axes)

    plt.tight_layout()
    if save_figure:
        plt.savefig("Cruise_Propulsor" + file_type)

    # ------------------------------------------------------------------
    # Propulsion Conditions
    # ------------------------------------------------------------------
    fig = plt.figure("Tip_Mach")
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        rtm  = results.segments[i].conditions.propulsion.lift_rotor_tip_mach[:,0]
        ptm  = results.segments[i].conditions.propulsion.propeller_tip_mach[:,0]

        # title
        plt.title("Tip Mach Number")
        # plots
        axes = plt.subplot(1,1,1)
        axes.set_ylabel('Mach',axis_font)
        set_axes(axes)
        # label only the first segment so the legend has one entry per curve
        if i == 0:
            axes.plot(time, ptm, line_color,label='Propeller')
            axes.plot(time, rtm, line_color2,label='Lift Rotor')
            axes.legend(loc='upper center')
        else:
            axes.plot(time, ptm, line_color )
            axes.plot(time, rtm, line_color2 )

    plt.tight_layout()
    if save_figure:
        plt.savefig("Tip_Mach" + file_type)
    return
# ------------------------------------------------------------------
# Pressure Coefficient
# ------------------------------------------------------------------
def plot_surface_pressure_contours(results,vehicle, save_figure = False, save_filename = "Surface_Pressure", file_type = ".png"):
    """This plots the surface pressure distribution at all control points
    on all lifting surfaces of the aircraft (one figure per mission
    control point, numbered sequentially).

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.aerodynamics.
        pressure_coefficient
    vehicle.vortex_distribution.
        n_cw
        n_sw
        n_w

    Outputs:
    Plots (saved as <save_filename>_<k><file_type> when save_figure is True)

    Properties Used:
    N/A
    """
    VD   = vehicle.vortex_distribution
    n_cw = VD.n_cw   # chordwise panel count per wing (was redundantly assigned twice)
    n_sw = VD.n_sw   # spanwise panel count per wing
    n_w  = VD.n_w    # number of wing surfaces

    # cumulative offsets marking where each wing's control points start
    # in the flat panel arrays
    b_pts = np.concatenate(([0],np.cumsum(VD.n_sw*VD.n_cw)))

    # Create a boolean for not plotting vertical wings
    idx       = 0
    plot_flag = np.ones(n_w)
    for wing in vehicle.wings:
        if wing.vertical:
            plot_flag[idx] = 0
            idx += 1
        else:
            idx += 1
        # a vertical symmetric wing contributes a second (mirrored) surface
        if wing.vertical and wing.symmetric:
            plot_flag[idx] = 0
            idx += 1
        else:
            idx += 1

    img_idx = 1
    for segment in results.segments.values():
        num_ctrl_pts = len(segment.conditions.frames.inertial.time)
        for ti in range(num_ctrl_pts):
            CP   = segment.conditions.aerodynamics.pressure_coefficient[ti]
            fig  = plt.figure()
            axes = plt.subplot(1, 1, 1)
            x_max = max(VD.XC) + 2
            y_max = max(VD.YC) + 2
            axes.set_ylim(x_max, 0)
            axes.set_xlim(-y_max, y_max)
            fig.set_size_inches(8,8)
            for i in range(n_w):
                n_pts  = (n_sw[i] + 1) * (n_cw[i]+ 1)
                xc_pts = VD.X[i*(n_pts):(i+1)*(n_pts)]
                x_pts  = np.reshape(np.atleast_2d(VD.XC[b_pts[i]:b_pts[i+1]]).T, (n_sw[i],-1))
                y_pts  = np.reshape(np.atleast_2d(VD.YC[b_pts[i]:b_pts[i+1]]).T, (n_sw[i],-1))
                z_pts  = np.reshape(np.atleast_2d(CP[b_pts[i]:b_pts[i+1]]).T, (n_sw[i],-1))
                # shift panel x locations so they line up with the leading edge
                x_pts_p = x_pts*((n_cw[i]+1)/n_cw[i]) - x_pts[0,0]*((n_cw[i]+1)/n_cw[i]) + xc_pts[0]
                # build non-uniform contour levels clustered around zero
                points  = np.linspace(0.001,1,50)
                A       = np.cumsum(np.sin(np.pi/2*points))
                levals  = -(np.concatenate([-A[::-1],A[1:]])/(2*A[-1]) + A[-1]/(2*A[-1]) )[::-1]*0.015
                color_map = plt.cm.get_cmap('jet')
                rev_cm    = color_map.reversed()
                if plot_flag[i] == 1:
                    CS = axes.contourf(y_pts,x_pts_p, z_pts, cmap = rev_cm,levels=levals,extend='both')
            # Set Color bar
            # NOTE(review): `CS` comes from the last non-vertical wing drawn;
            # if every surface were vertical this would raise a NameError
            cbar = fig.colorbar(CS, ax=axes)
            cbar.ax.set_ylabel('$C_{P}$', rotation = 0)
            plt.axis('off')
            plt.grid(None)
            if save_figure:
                plt.savefig( save_filename + '_' + str(img_idx) + file_type)
            img_idx += 1
    return
# ------------------------------------------------------------------
# Sectional Lift Distribution
# ------------------------------------------------------------------
def plot_lift_distribution(results,vehicle, save_figure = False, save_filename = "Sectional_Lift", file_type = ".png"):
    """This plots the sectional lift distribution at all control points
    on all lifting surfaces of the aircraft (one figure per control point).

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.aerodynamics.
        inviscid_wings_sectional_lift
    vehicle.vortex_distribution.
        n_sw
        n_w

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    VD        = vehicle.vortex_distribution
    # cumulative spanwise-strip offsets marking where each wing starts
    b_sw      = np.concatenate(([0], np.cumsum(VD.n_sw)))
    axis_font = {'size': '12'}
    # fixed per-wing line styles: main wing pair blue, next pair red, tail black
    wing_styles = ['-b', '-b', '-r', '-r', '-k']

    img_idx = 1
    for segment in results.segments.values():
        for ti in range(len(segment.conditions.frames.inertial.time)):
            cl_y = segment.conditions.aerodynamics.lift_breakdown.inviscid_wings_sectional[ti]
            fig  = plt.figure()
            fig.set_size_inches(8, 8)
            axes = plt.subplot(1, 1, 1)
            # one curve per wing, sliced out of the flat spanwise arrays
            for wing_idx in range(VD.n_w):
                span_slice = slice(b_sw[wing_idx], b_sw[wing_idx + 1])
                axes.plot(VD.Y_SW[span_slice], cl_y[span_slice], wing_styles[wing_idx])
            axes.set_xlabel("Spanwise Location (m)", axis_font)
            axes.set_title('$C_{Ly}$', axis_font)
            if save_figure:
                plt.savefig(save_filename + '_' + str(img_idx) + file_type)
            img_idx += 1
    return
# ------------------------------------------------------------------
# VLM Video
# ------------------------------------------------------------------
def create_video_frames(results,vehicle, save_figure = True ,flight_profile = True, save_filename = "Flight_Mission_Frame", file_type = ".png"):
    """This creates video frames of the aerodynamic conditions of the vehicle as well as the
    surface pressure coefficient throughout a mission.  Each frame shows the
    planform CP contour plus (optionally) a side column of mini plots with
    the mission history accumulated up to that frame.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.
        aerodynamics.
            lift_coefficient
            drag_coefficient
        conditions.
            freestream.altitude
            weights.total_mass
    vehicle.vortex_distribution.
        n_cp
        n_cw
        n_sw
        n_w
        n_fus

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    VD    = vehicle.vortex_distribution
    n_cw  = VD.n_cw
    n_sw  = VD.n_sw
    n_w   = VD.n_w
    n_fus = VD.n_fus
    # cumulative offsets of each wing's control points in the flat arrays
    b_pts = np.concatenate(([0],np.cumsum(VD.n_sw*VD.n_cw)))

    # Create a boolean for not plotting vertical wings
    idx       = 0
    plot_flag = np.ones(n_w)
    for wing in vehicle.wings:
        if wing.vertical:
            plot_flag[idx] = 0
            idx += 1
        else:
            idx += 1
        # a vertical symmetric wing contributes a second (mirrored) surface
        if wing.vertical and wing.symmetric:
            plot_flag[idx] = 0
            idx += 1
        else:
            idx += 1

    axis_font = {'size':'16'}
    img_idx = 1
    seg_idx = 1
    for segment in results.segments.values():
        num_ctrl_pts = len(segment.conditions.frames.inertial.time)
        for ti in range(num_ctrl_pts):
            CP  = segment.conditions.aerodynamics.pressure_coefficient[ti]
            fig = plt.figure(constrained_layout=True)
            fig.set_size_inches(12, 6.75)
            # 4x4 grid: left 3 columns for the CP contour, last column for mini plots
            gs   = fig.add_gridspec(4, 4)
            axes = plt.subplot(gs[:, :-1])

            x_max = max(VD.XC) + 2
            y_max = max(VD.YC) + 2
            axes.set_ylim(x_max, -2)
            axes.set_xlim(-y_max, y_max)

            # plot wing CP distribution
            for i in range(n_w):
                n_pts   = (n_sw[i] + 1) * (n_cw[i]+ 1)
                xc_pts  = VD.X[i*(n_pts):(i+1)*(n_pts)]
                x_pts   = np.reshape(np.atleast_2d(VD.XC[b_pts[i]:b_pts[i+1]]).T, (n_sw[i],-1))
                y_pts   = np.reshape(np.atleast_2d(VD.YC[b_pts[i]:b_pts[i+1]]).T, (n_sw[i],-1))
                z_pts   = np.reshape(np.atleast_2d(CP[b_pts[i]:b_pts[i+1]]).T, (n_sw[i],-1))
                # shift panel x coordinates so they line up with the leading edge
                x_pts_p = x_pts*((n_cw[i]+1)/n_cw[i]) - x_pts[0,0]*((n_cw[i]+1)/n_cw[i]) + xc_pts[0]
                # build non-uniform contour levels clustered around zero
                points  = np.linspace(0.001,1,50)
                A       = np.cumsum(np.sin(np.pi/2*points))
                levals  = -(np.concatenate([-A[::-1],A[1:]])/(2*A[-1]) + A[-1]/(2*A[-1]) )[::-1]*0.015
                color_map = plt.cm.get_cmap('jet')
                rev_cm    = color_map.reversed()
                if plot_flag[i] == 1:
                    CS = axes.contourf( y_pts,x_pts_p, z_pts, cmap = rev_cm,levels=levals,extend='both')

            # Set Color bar
            # (the first formatter assignment is immediately overwritten;
            # the '%.3f' fixed format is what the colorbar actually uses)
            sfmt = ticker.ScalarFormatter(useMathText=True)
            sfmt = ticker.FormatStrFormatter('%.3f')
            cbar = fig.colorbar(CS, ax=axes , format= sfmt )
            cbar.ax.set_ylabel('$C_{P}$', labelpad  =  20, rotation =  0, fontsize =16)

            # plot fuselage
            # NOTE(review): these reshaped fuselage grids are computed but never
            # drawn below — confirm whether a plotting call is missing here
            for i in range(n_fus):
                n_pts = (n_sw + 1) * (n_cw + 1)
                j     = n_w + i
                x_pts = np.reshape(np.atleast_2d(VD.X[j*(n_pts):(j+1)*(n_pts)]).T, (n_sw+1,n_cw+1))
                y_pts = np.reshape(np.atleast_2d(VD.Y[j*(n_pts):(j+1)*(n_pts)]).T, (n_sw+1,n_cw+1))
                z_pts = np.reshape(np.atleast_2d(VD.Z[j*(n_pts):(j+1)*(n_pts)]).T, (n_sw+1,n_cw+1))

            plt.axis('off')
            plt.grid(None)

            if flight_profile:
                # accumulate the mission history flown so far, up to this frame
                time_vec     = np.empty(shape=[0,1])
                cl_vec       = np.empty(shape=[0,1])
                cd_vec       = np.empty(shape=[0,1])
                l_d_vec      = np.empty(shape=[0,1])
                altitude_vec = np.empty(shape=[0,1])
                mass_vec     = np.empty(shape=[0,1])
                for seg_i in range(seg_idx):
                    if seg_i == seg_idx-1:
                        # current segment: take history only up to the current control point
                        t_vals   = results.segments[seg_i].conditions.frames.inertial.time[0:ti+1] / Units.min
                        cl_vals  = results.segments[seg_i].conditions.aerodynamics.lift_coefficient[0:ti+1]
                        cd_vals  = results.segments[seg_i].conditions.aerodynamics.drag_coefficient[0:ti+1]
                        l_d_vals = cl_vals/cd_vals
                        alt_vals = results.segments[seg_i].conditions.freestream.altitude[0:ti+1] / Units.ft
                        m_vals   = results.segments[seg_i].conditions.weights.total_mass[0:ti+1] * 0.001
                    else:
                        # completed segments: take the whole history
                        t_vals   = results.segments[seg_i].conditions.frames.inertial.time / Units.min
                        cl_vals  = results.segments[seg_i].conditions.aerodynamics.lift_coefficient
                        cd_vals  = results.segments[seg_i].conditions.aerodynamics.drag_coefficient
                        l_d_vals = cl_vals/cd_vals
                        alt_vals = results.segments[seg_i].conditions.freestream.altitude / Units.ft
                        m_vals   = results.segments[seg_i].conditions.weights.total_mass * 0.001

                    time_vec     = np.append(time_vec ,t_vals[:,0])
                    cl_vec       = np.append(cl_vec ,cl_vals[:,0])
                    cd_vec       = np.append(cd_vec ,cd_vals[:,0])
                    l_d_vec      = np.append(l_d_vec , l_d_vals[:,0])
                    altitude_vec = np.append(altitude_vec ,alt_vals[:,0])
                    mass_vec     = np.append(mass_vec ,m_vals[:,0])

                # side column of mini plots: altitude, weight, CL and L/D
                mini_axes1 = plt.subplot(gs[0:1, -1])
                mini_axes1.plot(time_vec, altitude_vec , 'ko-')
                mini_axes1.set_ylabel('Altitude (ft)',axis_font)
                mini_axes1.set_xlim(-10,420)
                mini_axes1.set_ylim(0,36000)
                mini_axes1.grid(False)

                mini_axes2 = plt.subplot(gs[1:2, -1])
                mini_axes2.plot(time_vec, mass_vec , 'ro-' )
                mini_axes2.set_ylabel('Weight (tons)',axis_font)
                mini_axes2.grid(False)
                mini_axes2.set_xlim(-10,420)
                mini_axes2.set_ylim(60,80)

                mini_axes3 = plt.subplot(gs[2:3, -1])
                mini_axes3.plot( time_vec, cl_vec, 'bo-' )
                mini_axes3.set_ylabel('$C_{L}$',axis_font)
                mini_axes3.set_xlim(-10,420)
                mini_axes3.set_ylim(0.3,0.9)
                mini_axes3.grid(False)

                mini_axes4 = plt.subplot(gs[3:4, -1])
                mini_axes4.plot(time_vec , l_d_vec ,'go-' )
                mini_axes4.set_ylabel('L/D',axis_font)
                mini_axes4.set_xlabel('Time (mins)',axis_font)
                mini_axes4.set_xlim(-10,420)
                mini_axes4.set_ylim(15,20)
                mini_axes4.grid(False)

            if save_figure:
                plt.savefig(save_filename + '_' + str(img_idx) + file_type)
            img_idx += 1
        seg_idx +=1
# ------------------------------------------------------------------
# Rotor/Propeller Acoustics
# ------------------------------------------------------------------
def plot_ground_noise_levels(results, line_color = 'bo-', save_figure = False, save_filename = "Sideline Noise Levels"):
    """This plots the A-weighted Sound Pressure Level as a function of time at various aximuthal angles
    on the ground: one curve per spanwise microphone row, each showing the
    maximum SPL recorded over the whole mission at that range station.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.
        frames.inertial.position_vector  - position vector of aircraft
        noise.
            total_SPL_dBA                - total SPL (dbA)
            total_microphone_locations   - microphone locations

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    # unpack
    dim_segs     = len(results.segments)
    dim_gm       = results.segments[0].conditions.noise.number_ground_microphones
    dim_ctrl_pts = len(results.segments[0].conditions.frames.inertial.time[:,0])
    N_gm_x       = results.segments[0].analyses.noise.settings.level_ground_microphone_x_resolution
    N_gm_y       = results.segments[0].analyses.noise.settings.level_ground_microphone_y_resolution
    # reshape the flat microphone list into the (x, y, xyz) ground grid
    gm   = results.segments[0].conditions.noise.ground_microphone_locations[0].reshape(N_gm_x,N_gm_y,3)
    # sign flip on the stored coordinates — presumably converts to the
    # plotting frame's positive directions; confirm against the noise model
    gm_x = -gm[:,:,0]
    gm_y = -gm[:,:,1]
    # one distinct color per spanwise microphone row
    colors = cm.jet(np.linspace(0, 1,N_gm_y))

    # figure parameters
    axis_font = {'size':'14'}
    fig  = plt.figure(save_filename)
    fig.set_size_inches(10, 8)
    axes = fig.add_subplot(1,1,1)

    SPL = np.zeros((dim_segs,dim_ctrl_pts,N_gm_x,N_gm_y))
    # loop through control points
    for i in range(dim_segs):
        for j in range(dim_ctrl_pts):
            # charging segments record no flight noise; leave those rows zero
            if results.segments[i].battery_discharge == False:
                pass
            else:
                SPL[i,j,:] = results.segments[i].conditions.noise.total_SPL_dBA[j,:dim_gm].reshape(N_gm_x,N_gm_y)

    # worst-case SPL over all segments and control points at each microphone
    max_SPL = np.max(np.max(SPL,axis=0),axis=0)
    for k in range(N_gm_y):
        axes.plot(gm_x[:,0]/Units.nmi, max_SPL[:,k], marker = 'o', color = colors[k], label= r'mic at y = ' + str(round(gm_y[0,k],1)) + r' m' )
    axes.set_ylabel('SPL (dBA)',axis_font)
    axes.set_xlabel('Range (nmi)',axis_font)
    set_axes(axes)
    axes.legend(loc='upper right')
    if save_figure:
        plt.savefig(save_filename + ".png")
    return
def plot_flight_profile_noise_contours(results, line_color = 'bo-', save_figure = False, save_filename = "Noise_Contour",show_figure = True):
    """This plots two contour surfaces of the maximum A-weighted Sound Pressure Level in the defined computational domain.
    The first contour is that of radiated noise on level ground only (a
    matplotlib figure) while the second (an interactive plotly figure)
    contains radiated noise on buildings as well as the aircraft trajectory.

    Assumptions:
    None

    Source:
    None

    Inputs:
    results.segments.conditions.
        frames.inertial.position_vector  - position vector of aircraft
        noise.
            total_SPL_dBA                - total SPL (dbA)
            total_microphone_locations   - microphone locations

    Outputs:
    Plots

    Properties Used:
    N/A
    """
    # unpack
    dim_segs     = len(results.segments)
    dim_ctrl_pts = len(results.segments[0].conditions.frames.inertial.time[:,0])
    dim_gm       = results.segments[0].conditions.noise.number_ground_microphones
    gm_N_x       = results.segments[0].analyses.noise.settings.level_ground_microphone_x_resolution
    gm_N_y       = results.segments[0].analyses.noise.settings.level_ground_microphone_y_resolution
    dim_bm       = results.segments[0].conditions.noise.number_building_microphones
    # one row per (segment, control point) pair
    dim_mat      = dim_segs*dim_ctrl_pts

    SPL_contour_gm = np.zeros((dim_mat,dim_gm))
    Range          = np.zeros((dim_mat,dim_gm))
    Span           = np.zeros((dim_mat,dim_gm))
    SPL_contour_bm = np.zeros((dim_mat,dim_bm))
    Aircraft_pos   = np.zeros((dim_mat,3))
    plot_data      = []

    for i in range(dim_segs):
        # charging segments record no flight noise; their rows stay zero
        if results.segments[i].battery_discharge == False:
            pass
        else:
            for j in range(dim_ctrl_pts):
                idx = i*dim_ctrl_pts + j
                Aircraft_pos[idx ,0]  = results.segments[i].conditions.frames.inertial.position_vector[j,0]
                # negate inertial z so altitude plots positive up
                Aircraft_pos[idx ,2]  = -results.segments[i].conditions.frames.inertial.position_vector[j,2]
                SPL_contour_gm[idx,:] = results.segments[i].conditions.noise.total_SPL_dBA[j,:dim_gm]
                if dim_bm > 0:
                    # building microphones occupy the tail of the SPL array
                    SPL_contour_bm[idx,:] = results.segments[i].conditions.noise.total_SPL_dBA[j,-dim_bm:]

    # Level Ground Noise Contour
    gm_mic_loc         = results.segments[0].analyses.noise.settings.ground_microphone_locations
    Range              = gm_mic_loc[:,0].reshape(gm_N_x,gm_N_y)
    Span               = gm_mic_loc[:,1].reshape(gm_N_x,gm_N_y)
    ground_surface     = np.zeros(Range.shape)
    # worst-case (maximum over the mission) SPL at each ground microphone
    max_SPL_contour_gm = np.max(SPL_contour_gm,axis=0)
    SPL_gm             = max_SPL_contour_gm.reshape(gm_N_x,gm_N_y)

    # ---------------------------------------------------------------------------
    # Level ground contour
    # ---------------------------------------------------------------------------
    filename_1 = 'Level_Ground_' + save_filename
    fig   = plt.figure(filename_1)
    fig.set_size_inches(10 ,10)
    levs  = np.linspace(40,120,25)
    axes  = fig.add_subplot(1,1,1)
    Range = Range/Units.nmi
    Span  = Span/Units.nmi
    CS    = axes.contourf(Range , Span,SPL_gm, levels  = levs, cmap=plt.cm.jet, extend='both')
    cbar  = fig.colorbar(CS)
    cbar.ax.set_ylabel('SPL (dBA)', rotation =  90)
    axes.set_ylabel('Spanwise $x_{fp}$ (nmi)',labelpad = 15)
    axes.set_xlabel('Streamwise $x_{fp}$ (nmi)')

    # ---------------------------------------------------------------------------
    # Comprehensive contour including buildings
    # ---------------------------------------------------------------------------
    ground_contour = contour_surface_slice(Range,Span, ground_surface , SPL_gm)
    plot_data.append(ground_contour)

    # Aircraft Trajectory
    aircraft_trajectory = go.Scatter3d(x=Aircraft_pos[:,0], y=Aircraft_pos[:,1], z=Aircraft_pos[:,2],
                                mode='markers',
                                marker=dict(size=6,color='black',opacity=0.8),
                                line=dict(color='black',width=2))
    plot_data.append(aircraft_trajectory)

    # Define Colorbar Bounds
    min_gm_SPL = np.min(SPL_contour_gm)
    max_gm_SPL = np.max(SPL_contour_gm)
    min_SPL    = min_gm_SPL
    max_SPL    = max_gm_SPL
    min_alt    = 0
    max_alt    = np.max(Aircraft_pos[:,2])

    # Adjust Plot Camera
    camera = dict(up=dict(x=0, y=0, z=1), center=dict(x=0, y=0, z=0), eye=dict(x=-1., y=-1., z=.25))

    building_loc  = results.segments[0].analyses.noise.settings.urban_canyon_building_locations
    num_buildings = len( building_loc)
    if num_buildings >0:
        # grow bounds to cover the tallest building and the building SPLs
        max_alt    = np.maximum(max_alt, max((np.array(building_loc))[:,2]))
        min_bm_SPL = np.min(SPL_contour_bm)
        max_bm_SPL = np.max(SPL_contour_bm)
        min_SPL    = np.minimum(min_bm_SPL,min_SPL)
        max_SPL    = np.maximum(max_bm_SPL,max_SPL)

        # Get SPL aon Building Surfaces
        max_SPL_contour_bm  = np.max(SPL_contour_bm,axis=0)
        building_dimensions = results.segments[0].analyses.noise.settings.urban_canyon_building_dimensions
        N_x                 = results.segments[0].analyses.noise.settings.urban_canyon_microphone_x_resolution
        bldg_mic_loc        = results.segments[0].analyses.noise.settings.urban_canyon_microphone_locations
        N_y                 = results.segments[0].analyses.noise.settings.urban_canyon_microphone_y_resolution
        N_z                 = results.segments[0].analyses.noise.settings.urban_canyon_microphone_z_resolution
        num_mics_on_xz_surface = N_x*N_z
        num_mics_on_yz_surface = N_y*N_z
        num_mics_on_xy_surface = N_x*N_y
        # four walls plus the roof per building
        num_mics_per_building  = 2*(num_mics_on_xz_surface + num_mics_on_yz_surface) + num_mics_on_xy_surface

        # get surfaces of buildings
        for bldg_idx in range(num_buildings):
            # front (y-z plane)
            side_1_start = bldg_idx*num_mics_per_building
            side_1_end   = bldg_idx*num_mics_per_building + num_mics_on_yz_surface
            surf_1_x     = np.ones((N_y,N_z))*(building_loc[bldg_idx][0] - building_dimensions[bldg_idx][0]/2)
            surf_1_y     = bldg_mic_loc[side_1_start:side_1_end,1].reshape(N_y,N_z)
            surf_1_z     = bldg_mic_loc[side_1_start:side_1_end,2].reshape(N_y,N_z)
            SPL_vals_1   = max_SPL_contour_bm[side_1_start:side_1_end].reshape(N_y,N_z)
            bldg_surf_1  = contour_surface_slice(surf_1_x ,surf_1_y ,surf_1_z ,SPL_vals_1)
            plot_data.append(bldg_surf_1)
            # right (x-z plane)
            side_2_start = side_1_end
            side_2_end   = side_2_start + num_mics_on_xz_surface
            surf_2_x     = bldg_mic_loc[side_2_start:side_2_end,0].reshape(N_x,N_z)
            surf_2_y     = np.ones((N_x,N_z))*building_loc[bldg_idx][1] + building_dimensions[bldg_idx][1]/2
            surf_2_z     = bldg_mic_loc[side_2_start:side_2_end,2].reshape(N_x,N_z)
            SPL_vals_2   = max_SPL_contour_bm[side_2_start:side_2_end].reshape(N_x,N_z)
            bldg_surf_2  = contour_surface_slice(surf_2_x ,surf_2_y ,surf_2_z ,SPL_vals_2)
            plot_data.append(bldg_surf_2)
            # back (y-z plane)
            side_3_start = side_2_end
            side_3_end   = side_3_start + num_mics_on_yz_surface
            surf_3_x     = np.ones((N_y,N_z))*(building_loc[bldg_idx][0] + building_dimensions[bldg_idx][0]/2)
            surf_3_y     = bldg_mic_loc[side_3_start:side_3_end,1].reshape(N_y,N_z)
            surf_3_z     = bldg_mic_loc[side_3_start:side_3_end,2].reshape(N_y,N_z)
            SPL_vals_3   = max_SPL_contour_bm[side_3_start:side_3_end].reshape(N_y,N_z)
            bldg_surf_3  = contour_surface_slice(surf_3_x ,surf_3_y ,surf_3_z ,SPL_vals_3)
            plot_data.append(bldg_surf_3)
            # left (x-z plane)
            side_4_start = side_3_end
            side_4_end   = side_4_start + num_mics_on_xz_surface
            surf_4_x     = bldg_mic_loc[side_4_start:side_4_end,0].reshape(N_x,N_z)
            surf_4_y     = np.ones((N_x,N_z))*(building_loc[bldg_idx][1] - building_dimensions[bldg_idx][1]/2)
            surf_4_z     = bldg_mic_loc[side_4_start:side_4_end,2].reshape(N_x,N_z)
            SPL_vals_4   = max_SPL_contour_bm[side_4_start:side_4_end].reshape(N_x,N_z)
            bldg_surf_4  = contour_surface_slice(surf_4_x ,surf_4_y ,surf_4_z ,SPL_vals_4)
            plot_data.append(bldg_surf_4)
            # top (x-y plane)
            side_5_start = side_4_end
            side_5_end   = (bldg_idx+1)*num_mics_per_building
            surf_5_x     = bldg_mic_loc[side_5_start:side_5_end,0].reshape(N_x,N_y)
            surf_5_y     = bldg_mic_loc[side_5_start:side_5_end,1].reshape(N_x,N_y)
            surf_5_z     = np.ones((N_x,N_y))*(building_dimensions[bldg_idx][2])
            SPL_vals_5   = max_SPL_contour_bm[side_5_start:side_5_end].reshape(N_x,N_y)
            bldg_surf_5  = contour_surface_slice(surf_5_x ,surf_5_y ,surf_5_z ,SPL_vals_5)
            plot_data.append(bldg_surf_5)

    fig = go.Figure(data=plot_data)
    fig.update_layout(
             title_text= 'Flight_Profile_' + save_filename,
             title_x = 0.5,
             width   = 750,
             height  = 750,
             font_family = "Times New Roman",
             font_size=18,
             scene_zaxis_range=[min_alt,max_alt],
             coloraxis=dict(colorscale='Jet',
                            colorbar_thickness=50,
                            colorbar_nticks=20,
                            colorbar_title_text = 'SPL (dBA)',
                            colorbar_tickfont_size=18,
                            colorbar_title_side="right",
                            colorbar_ypad=60,
                            colorbar_len= 0.75,
                            **colorax(min_SPL, max_SPL)),
             scene_camera=camera)
    if show_figure:
        fig.show()
    return
def contour_surface_slice(x, y, z, values):
    """Create one plotly surface trace colored by *values*.

    The trace is bound to the figure-level 'coloraxis' so that every
    surface in the figure shares a single colorscale and colorbar.

    Parameters
    ----------
    x, y, z : array-like
        2-D coordinate grids of the surface patch.
    values : array-like
        2-D array of scalar values (e.g. SPL in dBA) used for coloring.

    Returns
    -------
    plotly.graph_objects.Surface
        Surface trace ready to be appended to the figure data list.
    """
    trace_options = {'x': x,
                     'y': y,
                     'z': z,
                     'surfacecolor': values,
                     'coloraxis': 'coloraxis'}
    return go.Surface(**trace_options)
def colorax(vmin, vmax):
    """Return the color-limit keyword arguments for a plotly coloraxis.

    Parameters
    ----------
    vmin : float
        Lower bound of the color scale.
    vmax : float
        Upper bound of the color scale.

    Returns
    -------
    dict
        Mapping with keys 'cmin' and 'cmax', suitable for unpacking into
        a plotly ``coloraxis`` definition.
    """
    return {'cmin': vmin, 'cmax': vmax}
# ------------------------------------------------------------------
# Set Axis Parameters
# ------------------------------------------------------------------
## @ingroup Plots
def set_axes(axes):
    """Apply the standard grid and tick formatting to a matplotlib axes.

    Turns on minor ticks, draws grey grid lines (solid for major ticks,
    dotted for minor ticks), and forces plain y-axis tick labels with no
    scientific notation and no offset.

    Parameters
    ----------
    axes : matplotlib.axes.Axes
        Axes instance to format; modified in place.

    Returns
    -------
    None
    """
    axes.minorticks_on()
    # Grey grid: solid lines on major ticks, dotted lines on minor ticks.
    for tick_level, line_style in (('major', '-'), ('minor', ':')):
        axes.grid(which=tick_level, linestyle=line_style, linewidth=0.5, color='grey')
    axes.grid(True)
    # Keep y tick labels as plain numbers (no 1e-notation, no offset term).
    y_formatter = axes.get_yaxis().get_major_formatter()
    y_formatter.set_scientific(False)
    y_formatter.set_useOffset(False)
    return
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"numpy.array",
"matplotlib.ticker.ScalarFormatter",
"numpy.linalg.norm",
"numpy.sin",
"plotly.graph_objects.Surface",
"numpy.atleast_2d",
"numpy.zeros_like",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"numpy.concatenate",
"numpy.min",
"matplotli... | [((1230, 1255), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (1240, 1255), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2311), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2309, 2311), True, 'import matplotlib.pyplot as plt\n'), ((3052, 3077), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (3062, 3077), True, 'import matplotlib.pyplot as plt\n'), ((4036, 4054), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4052, 4054), True, 'import matplotlib.pyplot as plt\n'), ((4793, 4818), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (4803, 4818), True, 'import matplotlib.pyplot as plt\n'), ((5533, 5551), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5549, 5551), True, 'import matplotlib.pyplot as plt\n'), ((6334, 6359), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (6344, 6359), True, 'import matplotlib.pyplot as plt\n'), ((7493, 7511), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7509, 7511), True, 'import matplotlib.pyplot as plt\n'), ((8300, 8325), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (8310, 8325), True, 'import matplotlib.pyplot as plt\n'), ((9503, 9521), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9519, 9521), True, 'import matplotlib.pyplot as plt\n'), ((10336, 10361), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (10346, 10361), True, 'import matplotlib.pyplot as plt\n'), ((10410, 10430), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (10421, 10430), True, 'import matplotlib.pyplot as plt\n'), ((11672, 11690), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11688, 11690), True, 'import 
matplotlib.pyplot as plt\n'), ((12590, 12615), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (12600, 12615), True, 'import matplotlib.pyplot as plt\n'), ((15693, 15711), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15709, 15711), True, 'import matplotlib.pyplot as plt\n'), ((16602, 16627), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (16612, 16627), True, 'import matplotlib.pyplot as plt\n'), ((20488, 20506), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20504, 20506), True, 'import matplotlib.pyplot as plt\n'), ((21438, 21463), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (21448, 21463), True, 'import matplotlib.pyplot as plt\n'), ((21619, 21637), 'numpy.zeros', 'np.zeros', (['num_segs'], {}), '(num_segs)\n', (21627, 21637), True, 'import numpy as np\n'), ((21662, 21685), 'numpy.zeros_like', 'np.zeros_like', (['time_hrs'], {}), '(time_hrs)\n', (21675, 21685), True, 'import numpy as np\n'), ((21710, 21733), 'numpy.zeros_like', 'np.zeros_like', (['time_hrs'], {}), '(time_hrs)\n', (21723, 21733), True, 'import numpy as np\n'), ((21758, 21781), 'numpy.zeros_like', 'np.zeros_like', (['time_hrs'], {}), '(time_hrs)\n', (21771, 21781), True, 'import numpy as np\n'), ((21806, 21829), 'numpy.zeros_like', 'np.zeros_like', (['time_hrs'], {}), '(time_hrs)\n', (21819, 21829), True, 'import numpy as np\n'), ((22413, 22433), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (22424, 22433), True, 'import matplotlib.pyplot as plt\n'), ((22706, 22726), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (22717, 22726), True, 'import matplotlib.pyplot as plt\n'), ((22980, 23000), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (22991, 23000), True, 'import matplotlib.pyplot as plt\n'), 
((23254, 23272), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23270, 23272), True, 'import matplotlib.pyplot as plt\n'), ((24173, 24198), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (24183, 24198), True, 'import matplotlib.pyplot as plt\n'), ((25608, 25626), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25624, 25626), True, 'import matplotlib.pyplot as plt\n'), ((26477, 26502), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (26487, 26502), True, 'import matplotlib.pyplot as plt\n'), ((28448, 28466), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (28464, 28466), True, 'import matplotlib.pyplot as plt\n'), ((29226, 29251), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (29236, 29251), True, 'import matplotlib.pyplot as plt\n'), ((30076, 30094), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30092, 30094), True, 'import matplotlib.pyplot as plt\n'), ((30930, 30955), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (30940, 30955), True, 'import matplotlib.pyplot as plt\n'), ((32154, 32172), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (32170, 32172), True, 'import matplotlib.pyplot as plt\n'), ((32951, 32976), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (32961, 32976), True, 'import matplotlib.pyplot as plt\n'), ((33930, 33948), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (33946, 33948), True, 'import matplotlib.pyplot as plt\n'), ((35041, 35090), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Lift_Cruise_Battery_Pack_Conditions"""'], {}), "('Lift_Cruise_Battery_Pack_Conditions')\n", (35051, 35090), True, 'import matplotlib.pyplot as plt\n'), ((37237, 37255), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (37253, 37255), True, 'import matplotlib.pyplot as plt\n'), ((37547, 37579), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Prop-Rotor Network"""'], {}), "('Prop-Rotor Network')\n", (37557, 37579), True, 'import matplotlib.pyplot as plt\n'), ((40558, 40576), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (40574, 40576), True, 'import matplotlib.pyplot as plt\n'), ((40854, 40878), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Lift_Rotor"""'], {}), "('Lift_Rotor')\n", (40864, 40878), True, 'import matplotlib.pyplot as plt\n'), ((42898, 42916), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (42914, 42916), True, 'import matplotlib.pyplot as plt\n'), ((43181, 43204), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Propeller"""'], {}), "('Propeller')\n", (43191, 43204), True, 'import matplotlib.pyplot as plt\n'), ((45132, 45150), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (45148, 45150), True, 'import matplotlib.pyplot as plt\n'), ((45421, 45443), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Tip_Mach"""'], {}), "('Tip_Mach')\n", (45431, 45443), True, 'import matplotlib.pyplot as plt\n'), ((46247, 46265), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (46263, 46265), True, 'import matplotlib.pyplot as plt\n'), ((47353, 47365), 'numpy.ones', 'np.ones', (['n_w'], {}), '(n_w)\n', (47360, 47365), True, 'import numpy as np\n'), ((52689, 52701), 'numpy.ones', 'np.ones', (['n_w'], {}), '(n_w)\n', (52696, 52701), True, 'import numpy as np\n'), ((60989, 61014), 'matplotlib.pyplot.figure', 'plt.figure', (['save_filename'], {}), '(save_filename)\n', (60999, 61014), True, 'import matplotlib.pyplot as plt\n'), ((61105, 61155), 'numpy.zeros', 'np.zeros', (['(dim_segs, dim_ctrl_pts, N_gm_x, N_gm_y)'], {}), '((dim_segs, dim_ctrl_pts, N_gm_x, N_gm_y))\n', (61113, 61155), True, 'import numpy as np\n'), ((63392, 63419), 'numpy.zeros', 
'np.zeros', (['(dim_mat, dim_gm)'], {}), '((dim_mat, dim_gm))\n', (63400, 63419), True, 'import numpy as np\n'), ((63441, 63468), 'numpy.zeros', 'np.zeros', (['(dim_mat, dim_gm)'], {}), '((dim_mat, dim_gm))\n', (63449, 63468), True, 'import numpy as np\n'), ((63490, 63517), 'numpy.zeros', 'np.zeros', (['(dim_mat, dim_gm)'], {}), '((dim_mat, dim_gm))\n', (63498, 63517), True, 'import numpy as np\n'), ((63539, 63566), 'numpy.zeros', 'np.zeros', (['(dim_mat, dim_bm)'], {}), '((dim_mat, dim_bm))\n', (63547, 63566), True, 'import numpy as np\n'), ((63589, 63611), 'numpy.zeros', 'np.zeros', (['(dim_mat, 3)'], {}), '((dim_mat, 3))\n', (63597, 63611), True, 'import numpy as np\n'), ((64649, 64670), 'numpy.zeros', 'np.zeros', (['Range.shape'], {}), '(Range.shape)\n', (64657, 64670), True, 'import numpy as np\n'), ((64698, 64728), 'numpy.max', 'np.max', (['SPL_contour_gm'], {'axis': '(0)'}), '(SPL_contour_gm, axis=0)\n', (64704, 64728), True, 'import numpy as np\n'), ((65076, 65098), 'matplotlib.pyplot.figure', 'plt.figure', (['filename_1'], {}), '(filename_1)\n', (65086, 65098), True, 'import matplotlib.pyplot as plt\n'), ((65162, 65186), 'numpy.linspace', 'np.linspace', (['(40)', '(120)', '(25)'], {}), '(40, 120, 25)\n', (65173, 65186), True, 'import numpy as np\n'), ((66400, 66422), 'numpy.min', 'np.min', (['SPL_contour_gm'], {}), '(SPL_contour_gm)\n', (66406, 66422), True, 'import numpy as np\n'), ((66442, 66464), 'numpy.max', 'np.max', (['SPL_contour_gm'], {}), '(SPL_contour_gm)\n', (66448, 66464), True, 'import numpy as np\n'), ((66562, 66588), 'numpy.max', 'np.max', (['Aircraft_pos[:, 2]'], {}), '(Aircraft_pos[:, 2])\n', (66568, 66588), True, 'import numpy as np\n'), ((71694, 71719), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'plot_data'}), '(data=plot_data)\n', (71703, 71719), True, 'import plotly.graph_objects as go\n'), ((72625, 72694), 'plotly.graph_objects.Surface', 'go.Surface', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'surfacecolor': 'values', 
'coloraxis': '"""coloraxis"""'}), "(x=x, y=y, z=z, surfacecolor=values, coloraxis='coloraxis')\n", (72635, 72694), True, 'import plotly.graph_objects as go\n'), ((1790, 1810), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1801, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1948, 1968), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1959, 1968), True, 'import matplotlib.pyplot as plt\n'), ((2152, 2172), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (2163, 2172), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2383), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (2356, 2383), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3515), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (3506, 3515), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3685), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3676, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3848, 3868), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3859, 3868), True, 'import matplotlib.pyplot as plt\n'), ((4088, 4126), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (4099, 4126), True, 'import matplotlib.pyplot as plt\n'), ((5151, 5171), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5162, 5171), True, 'import matplotlib.pyplot as plt\n'), ((5316, 5336), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (5327, 5336), True, 'import matplotlib.pyplot as plt\n'), ((5581, 5619), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (5592, 5619), True, 'import 
matplotlib.pyplot as plt\n'), ((6783, 6803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (6794, 6803), True, 'import matplotlib.pyplot as plt\n'), ((6945, 6965), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (6956, 6965), True, 'import matplotlib.pyplot as plt\n'), ((7097, 7117), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (7108, 7117), True, 'import matplotlib.pyplot as plt\n'), ((7297, 7317), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (7308, 7317), True, 'import matplotlib.pyplot as plt\n'), ((7553, 7591), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (7564, 7591), True, 'import matplotlib.pyplot as plt\n'), ((8789, 8809), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (8800, 8809), True, 'import matplotlib.pyplot as plt\n'), ((8940, 8960), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (8951, 8960), True, 'import matplotlib.pyplot as plt\n'), ((9097, 9117), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (9108, 9117), True, 'import matplotlib.pyplot as plt\n'), ((9306, 9326), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (9317, 9326), True, 'import matplotlib.pyplot as plt\n'), ((9551, 9589), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (9562, 9589), True, 'import matplotlib.pyplot as plt\n'), ((11720, 11758), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (11731, 11758), True, 'import matplotlib.pyplot as plt\n'), ((13784, 13804), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', 
(13795, 13804), True, 'import matplotlib.pyplot as plt\n'), ((13934, 13954), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(2)'], {}), '(3, 3, 2)\n', (13945, 13954), True, 'import matplotlib.pyplot as plt\n'), ((14127, 14147), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(3)'], {}), '(3, 3, 3)\n', (14138, 14147), True, 'import matplotlib.pyplot as plt\n'), ((14302, 14322), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(4)'], {}), '(3, 3, 4)\n', (14313, 14322), True, 'import matplotlib.pyplot as plt\n'), ((14748, 14768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(5)'], {}), '(3, 3, 5)\n', (14759, 14768), True, 'import matplotlib.pyplot as plt\n'), ((15253, 15273), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(6)'], {}), '(3, 3, 6)\n', (15264, 15273), True, 'import matplotlib.pyplot as plt\n'), ((15527, 15547), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', 'i'], {}), '(3, 3, i)\n', (15538, 15547), True, 'import matplotlib.pyplot as plt\n'), ((18122, 18142), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', (18133, 18142), True, 'import matplotlib.pyplot as plt\n'), ((18270, 18290), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(2)'], {}), '(3, 3, 2)\n', (18281, 18290), True, 'import matplotlib.pyplot as plt\n'), ((18466, 18486), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(3)'], {}), '(3, 3, 3)\n', (18477, 18486), True, 'import matplotlib.pyplot as plt\n'), ((18636, 18656), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(4)'], {}), '(3, 3, 4)\n', (18647, 18656), True, 'import matplotlib.pyplot as plt\n'), ((19092, 19112), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(5)'], {}), '(3, 3, 5)\n', (19103, 19112), True, 'import matplotlib.pyplot as plt\n'), ((19610, 19630), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(6)'], {}), '(3, 3, 6)\n', (19621, 19630), True, 
'import matplotlib.pyplot as plt\n'), ((19835, 19855), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(7)'], {}), '(3, 3, 7)\n', (19846, 19855), True, 'import matplotlib.pyplot as plt\n'), ((20058, 20078), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(8)'], {}), '(3, 3, 8)\n', (20069, 20078), True, 'import matplotlib.pyplot as plt\n'), ((20333, 20353), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', 'i'], {}), '(3, 3, i)\n', (20344, 20353), True, 'import matplotlib.pyplot as plt\n'), ((24870, 24890), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (24881, 24890), True, 'import matplotlib.pyplot as plt\n'), ((25037, 25057), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (25048, 25057), True, 'import matplotlib.pyplot as plt\n'), ((25197, 25217), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (25208, 25217), True, 'import matplotlib.pyplot as plt\n'), ((25415, 25435), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (25426, 25435), True, 'import matplotlib.pyplot as plt\n'), ((25659, 25697), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (25670, 25697), True, 'import matplotlib.pyplot as plt\n'), ((26748, 26833), 'numpy.linalg.norm', 'np.linalg.norm', (['segment.conditions.frames.body.thrust_force_vector[:, :]'], {'axis': '(1)'}), '(segment.conditions.frames.body.thrust_force_vector[:, :], axis=1\n )\n', (26762, 26833), True, 'import numpy as np\n'), ((27132, 27152), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (27143, 27152), True, 'import matplotlib.pyplot as plt\n'), ((27291, 27311), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (27302, 27311), True, 'import matplotlib.pyplot as plt\n'), ((27439, 27459), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (27450, 27459), True, 'import matplotlib.pyplot as plt\n'), ((27651, 27671), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (27662, 27671), True, 'import matplotlib.pyplot as plt\n'), ((27810, 27830), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (27821, 27830), True, 'import matplotlib.pyplot as plt\n'), ((28024, 28044), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (28035, 28044), True, 'import matplotlib.pyplot as plt\n'), ((28274, 28294), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', 'i'], {}), '(2, 3, i)\n', (28285, 28294), True, 'import matplotlib.pyplot as plt\n'), ((28499, 28537), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (28510, 28537), True, 'import matplotlib.pyplot as plt\n'), ((29582, 29602), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (29593, 29602), True, 'import matplotlib.pyplot as plt\n'), ((29803, 29819), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (29811, 29819), True, 'import matplotlib.pyplot as plt\n'), ((29843, 29863), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (29854, 29863), True, 'import matplotlib.pyplot as plt\n'), ((30051, 30067), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (30059, 30067), True, 'import matplotlib.pyplot as plt\n'), ((30128, 30166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (30139, 30166), True, 'import matplotlib.pyplot as plt\n'), ((31429, 31449), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (31440, 31449), True, 'import matplotlib.pyplot as plt\n'), ((31587, 31607), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (31598, 31607), True, 'import matplotlib.pyplot as plt\n'), ((31742, 31762), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (31753, 31762), True, 'import matplotlib.pyplot as plt\n'), ((31957, 31977), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (31968, 31977), True, 'import matplotlib.pyplot as plt\n'), ((32203, 32241), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (32214, 32241), True, 'import matplotlib.pyplot as plt\n'), ((33382, 33402), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (33393, 33402), True, 'import matplotlib.pyplot as plt\n'), ((33548, 33568), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (33559, 33568), True, 'import matplotlib.pyplot as plt\n'), ((33714, 33734), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (33725, 33734), True, 'import matplotlib.pyplot as plt\n'), ((33978, 34016), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_filename + file_type)'], {}), '(save_filename + file_type)\n', (33989, 34016), True, 'import matplotlib.pyplot as plt\n'), ((35859, 35895), 'matplotlib.pyplot.title', 'plt.title', (['"""Battery Pack Conditions"""'], {}), "('Battery Pack Conditions')\n", (35868, 35895), True, 'import matplotlib.pyplot as plt\n'), ((35911, 35931), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (35922, 35931), True, 'import matplotlib.pyplot as plt\n'), ((36012, 36028), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (36020, 36028), True, 'import matplotlib.pyplot as plt\n'), ((36364, 36384), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (36375, 36384), True, 'import 
matplotlib.pyplot as plt\n'), ((36529, 36549), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (36540, 36549), True, 'import matplotlib.pyplot as plt\n'), ((37030, 37050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (37041, 37050), True, 'import matplotlib.pyplot as plt\n'), ((37289, 37351), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Lift_Cruise_Battery_Pack_Conditions' + file_type)"], {}), "('Lift_Cruise_Battery_Pack_Conditions' + file_type)\n", (37300, 37351), True, 'import matplotlib.pyplot as plt\n'), ((38945, 38976), 'matplotlib.pyplot.title', 'plt.title', (['"""Prop-Rotor Network"""'], {}), "('Prop-Rotor Network')\n", (38954, 38976), True, 'import matplotlib.pyplot as plt\n'), ((39018, 39038), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (39029, 39038), True, 'import matplotlib.pyplot as plt\n'), ((39226, 39246), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (39237, 39246), True, 'import matplotlib.pyplot as plt\n'), ((39442, 39462), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (39453, 39462), True, 'import matplotlib.pyplot as plt\n'), ((39660, 39680), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (39671, 39680), True, 'import matplotlib.pyplot as plt\n'), ((39946, 39962), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (39954, 39962), True, 'import matplotlib.pyplot as plt\n'), ((39982, 40002), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (39993, 40002), True, 'import matplotlib.pyplot as plt\n'), ((40260, 40276), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (40268, 40276), True, 'import matplotlib.pyplot as plt\n'), ((40296, 40316), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 
6)\n', (40307, 40316), True, 'import matplotlib.pyplot as plt\n'), ((40610, 40654), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Propulsor_Network' + file_type)"], {}), "('Propulsor_Network' + file_type)\n", (40621, 40654), True, 'import matplotlib.pyplot as plt\n'), ((41604, 41627), 'matplotlib.pyplot.title', 'plt.title', (['"""Lift Rotor"""'], {}), "('Lift Rotor')\n", (41613, 41627), True, 'import matplotlib.pyplot as plt\n'), ((41669, 41689), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (41680, 41689), True, 'import matplotlib.pyplot as plt\n'), ((41820, 41840), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (41831, 41840), True, 'import matplotlib.pyplot as plt\n'), ((41976, 41996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (41987, 41996), True, 'import matplotlib.pyplot as plt\n'), ((42135, 42155), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (42146, 42155), True, 'import matplotlib.pyplot as plt\n'), ((42393, 42409), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (42401, 42409), True, 'import matplotlib.pyplot as plt\n'), ((42429, 42449), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (42440, 42449), True, 'import matplotlib.pyplot as plt\n'), ((42641, 42657), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (42649, 42657), True, 'import matplotlib.pyplot as plt\n'), ((42679, 42699), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (42690, 42699), True, 'import matplotlib.pyplot as plt\n'), ((42946, 42983), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Lift_Rotor' + file_type)"], {}), "('Lift_Rotor' + file_type)\n", (42957, 42983), True, 'import matplotlib.pyplot as plt\n'), ((43921, 43943), 'matplotlib.pyplot.title', 'plt.title', (['"""Propeller"""'], {}), 
"('Propeller')\n", (43930, 43943), True, 'import matplotlib.pyplot as plt\n'), ((43989, 44009), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (44000, 44009), True, 'import matplotlib.pyplot as plt\n'), ((44129, 44149), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (44140, 44149), True, 'import matplotlib.pyplot as plt\n'), ((44285, 44305), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (44296, 44305), True, 'import matplotlib.pyplot as plt\n'), ((44444, 44464), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (44455, 44464), True, 'import matplotlib.pyplot as plt\n'), ((44674, 44690), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (44682, 44690), True, 'import matplotlib.pyplot as plt\n'), ((44710, 44730), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (44721, 44730), True, 'import matplotlib.pyplot as plt\n'), ((44934, 44954), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (44945, 44954), True, 'import matplotlib.pyplot as plt\n'), ((45183, 45226), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Cruise_Propulsor' + file_type)"], {}), "('Cruise_Propulsor' + file_type)\n", (45194, 45226), True, 'import matplotlib.pyplot as plt\n'), ((45773, 45801), 'matplotlib.pyplot.title', 'plt.title', (['"""Tip Mach Number"""'], {}), "('Tip Mach Number')\n", (45782, 45801), True, 'import matplotlib.pyplot as plt\n'), ((45851, 45871), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (45862, 45871), True, 'import matplotlib.pyplot as plt\n'), ((46295, 46330), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Tip_Mach' + file_type)"], {}), "('Tip_Mach' + file_type)\n", (46306, 46330), True, 'import matplotlib.pyplot as plt\n'), ((60878, 60903), 'numpy.linspace', 'np.linspace', 
(['(0)', '(1)', 'N_gm_y'], {}), '(0, 1, N_gm_y)\n', (60889, 60903), True, 'import numpy as np\n'), ((61498, 61517), 'numpy.max', 'np.max', (['SPL'], {'axis': '(0)'}), '(SPL, axis=0)\n', (61504, 61517), True, 'import numpy as np\n'), ((61886, 61921), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_filename + '.png')"], {}), "(save_filename + '.png')\n", (61897, 61921), True, 'import matplotlib.pyplot as plt\n'), ((66999, 67021), 'numpy.min', 'np.min', (['SPL_contour_bm'], {}), '(SPL_contour_bm)\n', (67005, 67021), True, 'import numpy as np\n'), ((67045, 67067), 'numpy.max', 'np.max', (['SPL_contour_bm'], {}), '(SPL_contour_bm)\n', (67051, 67067), True, 'import numpy as np\n'), ((67093, 67124), 'numpy.minimum', 'np.minimum', (['min_bm_SPL', 'min_SPL'], {}), '(min_bm_SPL, min_SPL)\n', (67103, 67124), True, 'import numpy as np\n'), ((67146, 67177), 'numpy.maximum', 'np.maximum', (['max_bm_SPL', 'max_SPL'], {}), '(max_bm_SPL, max_SPL)\n', (67156, 67177), True, 'import numpy as np\n'), ((67261, 67291), 'numpy.max', 'np.max', (['SPL_contour_bm'], {'axis': '(0)'}), '(SPL_contour_bm, axis=0)\n', (67267, 67291), True, 'import numpy as np\n'), ((3390, 3414), 'numpy.sqrt', 'np.sqrt', (['(density / 1.225)'], {}), '(density / 1.225)\n', (3397, 3414), True, 'import numpy as np\n'), ((13727, 13754), 'numpy.max', 'np.max', (['pack_battery_amp_hr'], {}), '(pack_battery_amp_hr)\n', (13733, 13754), True, 'import numpy as np\n'), ((18053, 18080), 'numpy.max', 'np.max', (['cell_battery_amp_hr'], {}), '(cell_battery_amp_hr)\n', (18059, 18080), True, 'import numpy as np\n'), ((47228, 47256), 'numpy.cumsum', 'np.cumsum', (['(VD.n_sw * VD.n_cw)'], {}), '(VD.n_sw * VD.n_cw)\n', (47237, 47256), True, 'import numpy as np\n'), ((47995, 48007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (48005, 48007), True, 'import matplotlib.pyplot as plt\n'), ((48034, 48054), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (48045, 48054), True, 
'import matplotlib.pyplot as plt\n'), ((49451, 49466), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (49459, 49466), True, 'import matplotlib.pyplot as plt\n'), ((49480, 49494), 'matplotlib.pyplot.grid', 'plt.grid', (['None'], {}), '(None)\n', (49488, 49494), True, 'import matplotlib.pyplot as plt\n'), ((50478, 50496), 'numpy.cumsum', 'np.cumsum', (['VD.n_sw'], {}), '(VD.n_sw)\n', (50487, 50496), True, 'import numpy as np\n'), ((50897, 50909), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (50907, 50909), True, 'import matplotlib.pyplot as plt\n'), ((50973, 50993), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (50984, 50993), True, 'import matplotlib.pyplot as plt\n'), ((52564, 52592), 'numpy.cumsum', 'np.cumsum', (['(VD.n_sw * VD.n_cw)'], {}), '(VD.n_sw * VD.n_cw)\n', (52573, 52592), True, 'import numpy as np\n'), ((53357, 53392), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (53367, 53392), True, 'import matplotlib.pyplot as plt\n'), ((53518, 53541), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:, :-1]'], {}), '(gs[:, :-1])\n', (53529, 53541), True, 'import matplotlib.pyplot as plt\n'), ((54848, 54888), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (54870, 54888), True, 'import matplotlib.ticker as ticker\n'), ((54909, 54942), 'matplotlib.ticker.FormatStrFormatter', 'ticker.FormatStrFormatter', (['"""%.3f"""'], {}), "('%.3f')\n", (54934, 54942), True, 'import matplotlib.ticker as ticker\n'), ((55588, 55603), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (55596, 55603), True, 'import matplotlib.pyplot as plt\n'), ((55617, 55631), 'matplotlib.pyplot.grid', 'plt.grid', (['None'], {}), '(None)\n', (55625, 55631), True, 'import matplotlib.pyplot as plt\n'), ((48848, 48873), 'numpy.linspace', 'np.linspace', (['(0.001)', 
'(1)', '(50)'], {}), '(0.001, 1, 50)\n', (48859, 48873), True, 'import numpy as np\n'), ((49070, 49092), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (49085, 49092), True, 'import matplotlib.pyplot as plt\n'), ((54340, 54365), 'numpy.linspace', 'np.linspace', (['(0.001)', '(1)', '(50)'], {}), '(0.001, 1, 50)\n', (54351, 54365), True, 'import numpy as np\n'), ((54562, 54584), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (54577, 54584), True, 'import matplotlib.pyplot as plt\n'), ((55712, 55734), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (55720, 55734), True, 'import numpy as np\n'), ((55767, 55789), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (55775, 55789), True, 'import numpy as np\n'), ((55822, 55844), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (55830, 55844), True, 'import numpy as np\n'), ((55877, 55899), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (55885, 55899), True, 'import numpy as np\n'), ((55932, 55954), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (55940, 55954), True, 'import numpy as np\n'), ((55987, 56009), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (55995, 56009), True, 'import numpy as np\n'), ((57862, 57886), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:1, -1]'], {}), '(gs[0:1, -1])\n', (57873, 57886), True, 'import matplotlib.pyplot as plt\n'), ((58205, 58229), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1:2, -1]'], {}), '(gs[1:2, -1])\n', (58216, 58229), True, 'import matplotlib.pyplot as plt\n'), ((58556, 58580), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2:3, -1]'], {}), '(gs[2:3, -1])\n', (58567, 58580), True, 'import matplotlib.pyplot as plt\n'), ((58883, 58907), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[3:4, -1]'], {}), '(gs[3:4, -1])\n', (58894, 
58907), True, 'import matplotlib.pyplot as plt\n'), ((68430, 68449), 'numpy.ones', 'np.ones', (['(N_y, N_z)'], {}), '((N_y, N_z))\n', (68437, 68449), True, 'import numpy as np\n'), ((69804, 69823), 'numpy.ones', 'np.ones', (['(N_y, N_z)'], {}), '((N_y, N_z))\n', (69811, 69823), True, 'import numpy as np\n'), ((70607, 70626), 'numpy.ones', 'np.ones', (['(N_x, N_z)'], {}), '((N_x, N_z))\n', (70614, 70626), True, 'import numpy as np\n'), ((71383, 71402), 'numpy.ones', 'np.ones', (['(N_x, N_y)'], {}), '((N_x, N_y))\n', (71390, 71402), True, 'import numpy as np\n'), ((48910, 48936), 'numpy.sin', 'np.sin', (['(np.pi / 2 * points)'], {}), '(np.pi / 2 * points)\n', (48916, 48936), True, 'import numpy as np\n'), ((54402, 54428), 'numpy.sin', 'np.sin', (['(np.pi / 2 * points)'], {}), '(np.pi / 2 * points)\n', (54408, 54428), True, 'import numpy as np\n'), ((57400, 57433), 'numpy.append', 'np.append', (['time_vec', 't_vals[:, 0]'], {}), '(time_vec, t_vals[:, 0])\n', (57409, 57433), True, 'import numpy as np\n'), ((57474, 57506), 'numpy.append', 'np.append', (['cl_vec', 'cl_vals[:, 0]'], {}), '(cl_vec, cl_vals[:, 0])\n', (57483, 57506), True, 'import numpy as np\n'), ((57549, 57581), 'numpy.append', 'np.append', (['cd_vec', 'cd_vals[:, 0]'], {}), '(cd_vec, cd_vals[:, 0])\n', (57558, 57581), True, 'import numpy as np\n'), ((57624, 57658), 'numpy.append', 'np.append', (['l_d_vec', 'l_d_vals[:, 0]'], {}), '(l_d_vec, l_d_vals[:, 0])\n', (57633, 57658), True, 'import numpy as np\n'), ((57701, 57740), 'numpy.append', 'np.append', (['altitude_vec', 'alt_vals[:, 0]'], {}), '(altitude_vec, alt_vals[:, 0])\n', (57710, 57740), True, 'import numpy as np\n'), ((57777, 57810), 'numpy.append', 'np.append', (['mass_vec', 'm_vals[:, 0]'], {}), '(mass_vec, m_vals[:, 0])\n', (57786, 57810), True, 'import numpy as np\n'), ((66946, 66968), 'numpy.array', 'np.array', (['building_loc'], {}), '(building_loc)\n', (66954, 66968), True, 'import numpy as np\n'), ((69202, 69221), 'numpy.ones', 'np.ones', 
(['(N_x, N_z)'], {}), '((N_x, N_z))\n', (69209, 69221), True, 'import numpy as np\n'), ((48462, 48505), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.XC[b_pts[i]:b_pts[i + 1]]'], {}), '(VD.XC[b_pts[i]:b_pts[i + 1]])\n', (48475, 48505), True, 'import numpy as np\n'), ((48560, 48603), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.YC[b_pts[i]:b_pts[i + 1]]'], {}), '(VD.YC[b_pts[i]:b_pts[i + 1]])\n', (48573, 48603), True, 'import numpy as np\n'), ((48658, 48698), 'numpy.atleast_2d', 'np.atleast_2d', (['CP[b_pts[i]:b_pts[i + 1]]'], {}), '(CP[b_pts[i]:b_pts[i + 1]])\n', (48671, 48698), True, 'import numpy as np\n'), ((53951, 53994), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.XC[b_pts[i]:b_pts[i + 1]]'], {}), '(VD.XC[b_pts[i]:b_pts[i + 1]])\n', (53964, 53994), True, 'import numpy as np\n'), ((54049, 54092), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.YC[b_pts[i]:b_pts[i + 1]]'], {}), '(VD.YC[b_pts[i]:b_pts[i + 1]])\n', (54062, 54092), True, 'import numpy as np\n'), ((54147, 54187), 'numpy.atleast_2d', 'np.atleast_2d', (['CP[b_pts[i]:b_pts[i + 1]]'], {}), '(CP[b_pts[i]:b_pts[i + 1]])\n', (54160, 54187), True, 'import numpy as np\n'), ((55291, 55337), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.X[j * n_pts:(j + 1) * n_pts]'], {}), '(VD.X[j * n_pts:(j + 1) * n_pts])\n', (55304, 55337), True, 'import numpy as np\n'), ((55392, 55438), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.Y[j * n_pts:(j + 1) * n_pts]'], {}), '(VD.Y[j * n_pts:(j + 1) * n_pts])\n', (55405, 55438), True, 'import numpy as np\n'), ((55493, 55539), 'numpy.atleast_2d', 'np.atleast_2d', (['VD.Z[j * n_pts:(j + 1) * n_pts]'], {}), '(VD.Z[j * n_pts:(j + 1) * n_pts])\n', (55506, 55539), True, 'import numpy as np\n'), ((48964, 48997), 'numpy.concatenate', 'np.concatenate', (['[-A[::-1], A[1:]]'], {}), '([-A[::-1], A[1:]])\n', (48978, 48997), True, 'import numpy as np\n'), ((54456, 54489), 'numpy.concatenate', 'np.concatenate', (['[-A[::-1], A[1:]]'], {}), '([-A[::-1], A[1:]])\n', (54470, 54489), True, 'import numpy as np\n')] 
|
import torch
import numpy as np
# check if CUDA is available
# Flag whether a CUDA device is available; used only for the printout below.
train_on_gpu = torch.cuda.is_available()
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if not train_on_gpu:
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available! Training on GPU ...')
import torchvision
import torchvision.transforms as transforms
# Mini-batch size for the train/validation loaders.
batch_size=1024
# NOTE(review): these mean/std values are the ImageNet statistics, not
# CIFAR10's own -- presumably intentional; confirm before changing.
transform=transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Download the CIFAR10 training set into ./data.
full_dataset = torchvision.datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=transform)
# 80/20 random split of the training set into train and validation subsets.
train_length = int(full_dataset.__len__()*0.8)
val_length = full_dataset.__len__() - train_length
train_dataset, val_dataset = torch.utils.data.random_split(full_dataset, [train_length, val_length])
# The held-out CIFAR10 test split.
test_dataset = torchvision.datasets.CIFAR10(
    root='./data',
    train=False,
    download=True,
    transform=transform)
trainloader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0)
valloader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0)
# The test loader deliberately uses batch_size 1 (per-sample evaluation below).
testloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=1,
    shuffle=False,
    num_workers=0)
# Human-readable CIFAR10 class names, indexed by label id.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
import torch
import torch.nn as nn
class Net(nn.Module):
    """Fully-connected classifier: Linear layers with Tanh activations
    and a final LogSoftmax over the class dimension.

    Args:
        hidden_size: list of layer widths, e.g. [in, h1, ..., out].
            Must contain at least the input and output sizes.

    Raises:
        ValueError: if hidden_size is None or has fewer than two entries
            (the original code crashed with TypeError on the default).
    """
    def __init__(self, hidden_size=None):
        super(Net, self).__init__()
        if hidden_size is None or len(hidden_size) < 2:
            raise ValueError("hidden_size must list at least the input and output sizes")
        self.model = nn.Sequential()
        for i in range(len(hidden_size)-1):
            self.model.add_module(name='lin{}'.format(i + 1), module=nn.Linear(in_features=hidden_size[i], out_features=hidden_size[i + 1], bias=True))
            if i == len(hidden_size) - 2:
                # Fix: the training criterion is nn.NLLLoss, which expects
                # log-probabilities, so the head must be LogSoftmax (the old
                # plain Softmax made the loss mathematically wrong).
                self.model.add_module(name='sft', module=nn.LogSoftmax(dim=-1))
            else:
                self.model.add_module(name='tanh{}'.format(i+1), module=nn.Tanh())
    def forward(self, x):
        # x: (batch, hidden_size[0]) -> (batch, hidden_size[-1]) log-probabilities.
        return self.model(x)
# Build the MLP: 3072 flattened pixels -> three 1000-unit hidden layers -> 10 classes.
model = Net([3072, 1000, 1000, 1000, 10])
print(model)
import torch.optim as optim
criterion = nn.NLLLoss()
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fix: the parameters must live on the target device before wrapping in
# DataParallel; the old code left the model on the CPU while moving the
# inputs to CUDA, which fails on GPU machines.
model = model.to(device)
model = nn.DataParallel(model)
# Create the optimizer after the move so it sees the final parameter set.
optimizer = optim.Adam(params=model.parameters(), lr=0.001)
# Train for 30 epochs, tracking loss and accuracy on train and validation splits.
for epoch in range(30):
    train_loss = 0.0
    train_correct_count = 0
    train_seen = 0  # samples actually processed this epoch
    model.train()
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs = inputs.to(device)
        # Flatten the 3x32x32 images into 3072-dim vectors for the MLP.
        inputs=inputs.view([inputs.shape[0],3072])
        labels=labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        train_correct_count += (torch.argmax(outputs,dim=1)==labels).sum().cpu().item()
        train_seen += labels.size(0)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.cpu().item()
    train_loss=train_loss/(i+1)
    # Fix: divide by the samples actually seen. The old denominator
    # (num_batches * batch_size) over-counts whenever the final batch is
    # smaller than batch_size, deflating the reported accuracy.
    train_acc = train_correct_count / train_seen
    val_loss = 0.0
    val_correct_count = 0
    val_seen = 0
    model.eval()
    with torch.no_grad():  # no gradients needed for validation
        for i, data in enumerate(valloader, 0):
            inputs, labels = data
            inputs = inputs.to(device)
            inputs=inputs.view([inputs.shape[0],3072])
            labels=labels.to(device)
            outputs = model(inputs)
            val_correct_count += (torch.argmax(outputs,dim=1)==labels).sum().cpu().item()
            val_seen += labels.size(0)
            loss = criterion(outputs, labels)
            val_loss += loss.cpu().item()
    val_loss=val_loss/(i+1)
    val_acc = val_correct_count / val_seen
    print('Epoch %d|Train_loss:%.3f Eval_loss:%.3f Train_acc:%.3f Eval_acc:%.3f' % (epoch + 1, train_loss, val_loss, train_acc, val_acc))
# Per-class and overall accuracy on the test set (testloader has batch_size 1).
class_count=np.zeros(10,dtype=int)
correct_count=np.zeros(10,dtype=int)
model.eval()
with torch.no_grad():
    for i, data in enumerate(testloader, 0):
        inputs, labels = data
        inputs = inputs.to(device)
        inputs=inputs.view([inputs.shape[0],3072])
        prediction = torch.argmax(model(inputs),dim=1).cpu().item()
        label = labels.item()
        class_count[label] += 1
        if prediction == label:
            correct_count[label] += 1
for i in range(10):
    print('Test|Class{}('.format(i+1)+classes[i]+')-acc={}'.format(correct_count[i]/class_count[i]))
# Fix: derive the total from the actual counts instead of hard-coding 10000,
# so the figure stays correct if the test split ever changes size.
print('\nTest|Overall-acc={}'.format(correct_count.sum()/class_count.sum()))
"torch.nn.Tanh",
"torch.nn.Softmax",
"torch.utils.data.random_split",
"torch.nn.Sequential",
"torch.nn.DataParallel",
"numpy.sum",
"torchvision.datasets.CIFAR10",
"torch.cuda.is_available",
"torch.nn.NLLLoss",
"numpy.zeros",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
... | [((81, 106), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (104, 106), False, 'import torch\n'), ((567, 662), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (595, 662), False, 'import torchvision\n'), ((810, 881), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['full_dataset', '[train_length, val_length]'], {}), '(full_dataset, [train_length, val_length])\n', (839, 881), False, 'import torch\n'), ((898, 994), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (926, 994), False, 'import torchvision\n'), ((1029, 1128), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, num_workers=0)\n', (1056, 1128), False, 'import torch\n'), ((1158, 1255), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(val_dataset, batch_size=batch_size, shuffle=\n True, num_workers=0)\n', (1185, 1255), False, 'import torch\n'), ((1286, 1375), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=1, shuffle=False,\n num_workers=0)\n', (1313, 1375), False, 'import torch\n'), ((2251, 2263), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (2261, 2263), True, 'import torch.nn as nn\n'), ((2405, 2427), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (2420, 2427), True, 
'import torch.nn as nn\n'), ((3844, 3867), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (3852, 3867), True, 'import numpy as np\n'), ((3882, 3905), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (3890, 3905), True, 'import numpy as np\n'), ((138, 163), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (161, 163), False, 'import torch\n'), ((442, 463), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (461, 463), True, 'import torchvision.transforms as transforms\n'), ((471, 546), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (491, 546), True, 'import torchvision.transforms as transforms\n'), ((1649, 1664), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1662, 1664), True, 'import torch.nn as nn\n'), ((2358, 2383), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2381, 2383), False, 'import torch\n'), ((4378, 4407), 'numpy.sum', 'np.sum', (['(correct_count / 10000)'], {}), '(correct_count / 10000)\n', (4384, 4407), True, 'import numpy as np\n'), ((1780, 1866), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'hidden_size[i]', 'out_features': 'hidden_size[i + 1]', 'bias': '(True)'}), '(in_features=hidden_size[i], out_features=hidden_size[i + 1], bias\n =True)\n', (1789, 1866), True, 'import torch.nn as nn\n'), ((1964, 1982), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1974, 1982), True, 'import torch.nn as nn\n'), ((2076, 2085), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2083, 2085), True, 'import torch.nn as nn\n'), ((2845, 2873), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (2857, 2873), False, 'import torch\n'), ((3473, 3501), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, 
dim=1)\n', (3485, 3501), False, 'import torch\n')] |
import sympy
from cached_property import cached_property
from devito import Dimension
from devito.types import SparseTimeFunction
from devito.logger import error
import numpy as np
__all__ = ['PointSource', 'Receiver', 'Shot', 'RickerSource', 'GaborSource', 'TimeAxis']
class TimeAxis(object):
    """
    Data object to store the TimeAxis. Exactly three of the four key arguments
    must be prescribed. Because of remainder values it is not possible to create
    a TimeAxis that exactly adheres to the inputs; therefore start, stop, step
    and num values should be taken from the TimeAxis object rather than relying
    upon the input values.
    The four possible cases are:
    * start is None: start = step*(1 - num) + stop
    * step is None: step = (stop - start)/(num - 1)
    * num is None: num = ceil((stop - start + step)/step) and
                   because of remainder stop = step*(num - 1) + start
    * stop is None: stop = step*(num - 1) + start
    Parameters
    ----------
    start : float, optional
        Start of time axis.
    step : float, optional
        Time interval.
    num : int, optional
        Number of values (Note: this is the number of intervals + 1).
        Stop value is reset to correct for remainder.
    stop : float, optional
        End time.
    """
    def __init__(self, start=None, step=None, num=None, stop=None):
        try:
            if start is None:
                start = step*(1 - num) + stop
            elif step is None:
                step = (stop - start)/(num - 1)
            elif num is None:
                num = int(np.ceil((stop - start + step)/step))
                # Recompute stop so the axis lands exactly on the grid.
                stop = step*(num - 1) + start
            elif stop is None:
                stop = step*(num - 1) + start
            else:
                # All four were supplied: the axis is over-determined.
                raise ValueError("Only three of start, step, num and stop may be set")
        except TypeError:
            # Fix: only catch the TypeError raised by arithmetic on None
            # (i.e. fewer than three arguments supplied). The old bare
            # `except:` also swallowed the explicit ValueError above and
            # re-raised it with the wrong message.
            raise ValueError("Three of args start, step, num and stop may be set")
        if not isinstance(num, int):
            raise TypeError("input argument must be of type int")
        self.start = start
        self.stop = stop
        self.step = step
        self.num = num
    def __str__(self):
        return "TimeAxis: start=%g, stop=%g, step=%g, num=%g" % \
               (self.start, self.stop, self.step, self.num)
    def _rebuild(self):
        # Reconstruct from start/stop/num; step is re-derived in __init__.
        return TimeAxis(start=self.start, stop=self.stop, num=self.num)
    @cached_property
    def time_values(self):
        # Evenly spaced sample times, computed once and cached.
        return np.linspace(self.start, self.stop, self.num)
class PointSource(SparseTimeFunction):
    """
    Symbolic data object for a set of sparse point sources
    Parameters
    ----------
    name: String
        Name of the symbol representing this source
    grid: Grid
        Grid object defining the computational domain.
    coordinates: Array
        Point coordinates for this source
    data: (Optional) Data
        values to initialise point data
    ntime: Int (Optional)
        Number of timesteps for which to allocate data
    npoint: Int (Optional)
        Number of sparse points represented by this source
    dimension: Dimension (Optional)
        object for representing the number of points in this source
    Note, either the dimensions `ntime` and `npoint` or the fully
    initialised `data` array need to be provided.
    """
    def __new__(cls, *args, **kwargs):
        options = kwargs.get('options', {})
        # Reuse the cached symbolic object for this class if one exists.
        key = cls
        obj = cls._cache_get(key)
        if obj is not None:
            newobj = sympy.Function.__new__(cls, *args, **options)
            newobj.__init_cached__(key)
            return newobj
        # Sparse dimension over the points, named after the source symbol.
        p_dim = kwargs.get('dimension', Dimension('p_%s' % kwargs.get("name")))
        npoint = kwargs.get("npoint")
        coords = kwargs.get("coordinates")
        if npoint is None:
            if coords is None:
                raise TypeError("Need either `npoint` or `coordinates`")
            else:
                # One point per coordinate row.
                npoint = coords.shape[0]
        grid = kwargs.get("grid")
        ntime = kwargs.get("ntime")
        if kwargs.get("data") is None:
            if ntime is None:
                # Fix: the two string literals were concatenated without a
                # separating space ("...toinitialise...").
                # NOTE(review): error() appears to log rather than raise, so
                # execution continues with ntime=None here -- confirm intended.
                error('Either data or ntime are required to '
                      'initialise source/receiver objects')
        else:
            ntime = kwargs.get("ntime") or kwargs.get("data").shape[0]
        # Create the underlying SparseTimeFunction object
        kwargs["nt"] = ntime
        kwargs['npoint'] = npoint
        obj = SparseTimeFunction.__new__(cls, dimensions=[grid.time_dim, p_dim], **kwargs)
        # If provided, copy initial data into the allocated buffer
        if kwargs.get("data") is not None:
            obj.data[:] = kwargs.get("data")
        return obj
# Receivers and shots are point sources too; they share the implementation.
Receiver = PointSource
Shot = PointSource
class WaveletSource(PointSource):
    """
    Abstract base class for symbolic objects that encapsulate a set of
    sources with a pre-defined source signal wavelet.
    name: Name for the resulting symbol
    grid: :class:`Grid` object defining the computational domain.
    f0: Peak frequency for Ricker wavelet in kHz
    time: Discretized values of time in ms
    """
    def __new__(cls, *args, **kwargs):
        options = kwargs.get('options', {})
        # Reuse the cached symbolic object for this class if one exists.
        key = cls
        obj = cls._cache_get(key)
        if obj is not None:
            newobj = sympy.Function.__new__(cls, *args, **options)
            newobj.__init_cached__(key)
            return newobj
        time = kwargs.get('time')
        npoint = kwargs.get('npoint', 1)
        # Allocate one timestep per time sample.
        kwargs['ntime'] = len(time)
        kwargs['npoint'] = npoint
        obj = PointSource.__new__(cls, *args, **kwargs)
        obj.time = time
        obj.f0 = kwargs.get('f0')
        # Fill every point with the subclass-defined wavelet signal.
        for p in range(npoint):
            obj.data[:, p] = obj.wavelet(obj.f0, obj.time)
        return obj
    def wavelet(self, f0, t):
        """
        Defines a wavelet with a peak frequency f0 at time t.
        f0: Peak frequency in kHz
        t: Discretized values of time in ms
        """
        # Subclasses must override this hook with an actual wavelet formula.
        raise NotImplementedError('Wavelet not defined')
class RickerSource(WaveletSource):
    """
    Set of sources whose signal is the classic Ricker wavelet:
    http://subsurfwiki.org/wiki/Ricker_wavelet
    name: Name for the resulting symbol
    grid: :class:`Grid` object defining the computational domain.
    f0: Peak frequency for Ricker wavelet in kHz
    time: Discretized values of time in ms
    """
    def wavelet(self, f0, t):
        """
        Evaluate a Ricker wavelet with peak frequency ``f0`` at times ``t``.
        f0: Peak frequency in kHz
        t: Discretized values of time in ms
        """
        # Scaled, shifted argument: zero at the wavelet peak (t = 1/f0).
        r = np.pi * f0 * (t - 1./f0)
        r_sq = r ** 2
        return (1 - 2. * r_sq) * np.exp(-r_sq)
class GaborSource(WaveletSource):
    """
    Set of sources whose signal is a Gabor wavelet:
    https://en.wikipedia.org/wiki/Gabor_wavelet
    name: Name for the resulting symbol
    grid: :class:`Grid` object defining the computational domain.
    f0: Peak frequency for Ricker wavelet in kHz
    time: Discretized values of time in ms
    """
    def wavelet(self, f0, t):
        """
        Evaluate a Gabor wavelet with peak frequency ``f0`` at times ``t``.
        f0: Peak frequency in kHz
        t: Discretized values of time in ms
        """
        agauss = 0.5 * f0            # Gaussian width parameter
        tcut = 1.5 / agauss          # time shift of the envelope centre
        s = (t - tcut) * agauss
        envelope = np.exp(-2 * s ** 2)
        carrier = np.cos(2 * np.pi * s)
        return envelope * carrier
| [
"numpy.ceil",
"sympy.Function.__new__",
"devito.logger.error",
"numpy.exp",
"numpy.linspace",
"numpy.cos",
"devito.types.SparseTimeFunction.__new__"
] | [((2466, 2510), 'numpy.linspace', 'np.linspace', (['self.start', 'self.stop', 'self.num'], {}), '(self.start, self.stop, self.num)\n', (2477, 2510), True, 'import numpy as np\n'), ((4445, 4521), 'devito.types.SparseTimeFunction.__new__', 'SparseTimeFunction.__new__', (['cls'], {'dimensions': '[grid.time_dim, p_dim]'}), '(cls, dimensions=[grid.time_dim, p_dim], **kwargs)\n', (4471, 4521), False, 'from devito.types import SparseTimeFunction\n'), ((3499, 3544), 'sympy.Function.__new__', 'sympy.Function.__new__', (['cls', '*args'], {}), '(cls, *args, **options)\n', (3521, 3544), False, 'import sympy\n'), ((5304, 5349), 'sympy.Function.__new__', 'sympy.Function.__new__', (['cls', '*args'], {}), '(cls, *args, **options)\n', (5326, 5349), False, 'import sympy\n'), ((6698, 6713), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (6704, 6713), True, 'import numpy as np\n'), ((7402, 7421), 'numpy.exp', 'np.exp', (['(-2 * s ** 2)'], {}), '(-2 * s ** 2)\n', (7408, 7421), True, 'import numpy as np\n'), ((7420, 7441), 'numpy.cos', 'np.cos', (['(2 * np.pi * s)'], {}), '(2 * np.pi * s)\n', (7426, 7441), True, 'import numpy as np\n'), ((4119, 4198), 'devito.logger.error', 'error', (['"""Either data or ntime are required toinitialise source/receiver objects"""'], {}), "('Either data or ntime are required toinitialise source/receiver objects')\n", (4124, 4198), False, 'from devito.logger import error\n'), ((1586, 1623), 'numpy.ceil', 'np.ceil', (['((stop - start + step) / step)'], {}), '((stop - start + step) / step)\n', (1593, 1623), True, 'import numpy as np\n')] |
import numpy as np

# Reduced to a 2-parameter problem so the sorting logic is easy to follow.

## Build fake data
# an N x 2 array: a regular mesh of the stimulus parameters
stim_params = np.mgrid[10:25, 20:22].reshape(2, -1).T
# an N x 3 array: the output values recorded for each simulation run
stimnum = 15 * 2
output_vals = np.arange(stimnum * 3).reshape(stimnum, 3)

# shuffle the rows for a bit of added realism
shuffle_idx = np.random.permutation(stim_params.shape[0])
stim_params = stim_params[shuffle_idx]
output_vals = output_vals[shuffle_idx]

## stim_params now holds the independent variables (the T and mu values),
## and output_vals holds the corresponding measurements.
## A lexical sort yields the row order that sorts the independent variables;
## the same index array is then applied to the measurement array.

# number of unique values of each stimulus parameter
# (with real float data you may have to round to a fixed decimal first)
params_shape = tuple(np.unique(col).size for col in stim_params.T)

# row indices that sort the stimulus parameters in ascending order,
# starting with the final column
indx = np.lexsort(stim_params[:, ::-1].T)

# sort and reshape the stimulus parameters:
sorted_params = stim_params[indx].T.reshape((2,) + params_shape)
# sort and reshape the output values
sorted_output = output_vals[indx].T.reshape((3,) + params_shape)
###What do the dimensions mean?
## array of stimulus parameters, with dimensions (n_params, p1, p2, p3, p4, p5)
#print(sorted_params.shape)
#
## to check that the sorting worked as expected, we can look at the values of the
## 5th parameter when all the others are held constant at 0:
#print(sorted_params[4,0,0,0,0,:])
#
## ... and the 1st parameter when we hold all the others constant:
#print(sorted_params[0,:,0,0,0,0])
#
## ... now let the 1st and 2nd parameters covary:
#print(sorted_params[:2, :, :, 0, 0, 0])
#
###The same indexing logic applies to the sorted simulation outputs:
## array of outputs, with dimensions (n_outputs, p1, p2, p3, p4, p5)
#print(sorted_output.shape)
#
## the first output variable whilst holding the first 4 simulation parameters
## constant at 0:
#print(sorted_output[0, 0, 0, 0, 0, :])
| [
"numpy.lexsort",
"numpy.unique",
"numpy.arange",
"numpy.random.permutation"
] | [((429, 472), 'numpy.random.permutation', 'np.random.permutation', (['stim_params.shape[0]'], {}), '(stim_params.shape[0])\n', (450, 472), True, 'import numpy as np\n'), ((1200, 1234), 'numpy.lexsort', 'np.lexsort', (['stim_params[:, ::-1].T'], {}), '(stim_params[:, ::-1].T)\n', (1210, 1234), True, 'import numpy as np\n'), ((337, 359), 'numpy.arange', 'np.arange', (['(stimnum * 3)'], {}), '(stimnum * 3)\n', (346, 359), True, 'import numpy as np\n'), ((1023, 1037), 'numpy.unique', 'np.unique', (['col'], {}), '(col)\n', (1032, 1037), True, 'import numpy as np\n')] |
import numpy as np
from math import pi
from numpy import linalg as LA
def project_point(vector, point):
    """Project *point* onto the line segment *vector*, clamping to its
    endpoints, and return the closest point on the segment.

    Args:
        vector: A 2D array of points in the form [[x1, y1], [x2, y2]]
        point: A 2D point in the form [x, y]

    Returns:
        closest_point: A 2D point [x, y] lying on the given segment.
    """
    start, end = vector[0], vector[1]
    to_point = np.subtract(point, start)
    segment = np.subtract(end, start)
    # Normalized projection parameter: 0 at the start, 1 at the end.
    t = np.dot(to_point, segment) / np.power(LA.norm(segment), 2)
    if t < 0.0:
        return start
    if t > 1.0:
        return end
    return start + t * segment
def next_carrot(vector, pose_2d, lookahead_dis):
    """Return the next carrot (target) point: the projection of *pose_2d*
    onto *vector*, advanced along the line by *lookahead_dis*.

    Args:
        vector: A 2D array of points in the form [[x1, y1], [x2, y2]]
        pose_2d: A 2D point in the form [x, y]
        lookahead_dis: A float distance determining how far ahead
            we want to look.

    Returns:
        carrot: A 2D point in the form [x, y].
    """
    anchor = project_point(vector, pose_2d)
    # Unit direction of the trajectory segment.
    direction = np.subtract(vector[1], vector[0])
    unit = direction / LA.norm(direction)
    return anchor + lookahead_dis * unit
def calculate_delta(position, carrot, delta_max):
    """Compute the steering angle from *position* toward *carrot*.

    The angle is constrained by `delta_max`, which depends on the vehicle
    model (e.g. Ackermann steering limits for a car).

    Args:
        position: A pose in the form [x, y, theta]
        carrot: A 2D point in the form [x, y]
        delta_max: A float bound on the steering angle magnitude.

    Returns:
        delta: A float representing the steering angle in unit radians.
    """
    heading = position[2]
    # Bearing from the vehicle to the carrot point.
    bearing = np.arctan2(carrot[1] - position[1], carrot[0] - position[0])
    # Heading error, wrapped into [-pi, pi).
    delta = np.mod(bearing - heading + pi, 2 * pi) - pi
    # Saturate at the physical steering limit.
    if delta > delta_max:
        return delta_max
    if delta < -delta_max:
        return -delta_max
    return delta
def update_waypoint_trajectory(waypoints, waypoint_counter):
    """Given a list of waypoints and a counter, pick the next trajectory pair.

    Args:
        waypoints: An array of waypoints [wp1, wp2, ..., wpn], each a 2D
            point [x, y].
        waypoint_counter: Index of the current waypoint.

    Returns:
        wp1: First waypoint of the updated trajectory.
        wp2: Second waypoint of the updated trajectory.
        update_trajectory: A flag indicating whether to continue.
    """
    total = len(waypoints)
    if waypoint_counter >= total:
        print('Ran out of waypoints.')
        return None, None, False
    if waypoint_counter == total - 1:
        # Last waypoint: pair it with the first one to close the loop.
        return waypoints[waypoint_counter], waypoints[0], True
    return waypoints[waypoint_counter], waypoints[waypoint_counter + 1], True
def calculate_distance(point1, point2):
    """Euclidean distance between two 2D points.

    Args:
        point1: A 2D array in the form [x, y]
        point2: A 2D array in the form [x, y]

    Returns:
        distance: A float distance between the points.
    """
    dy_sq = np.power(point2[1] - point1[1], 2)
    dx_sq = np.power(point2[0] - point1[0], 2)
    return np.sqrt(dy_sq + dx_sq)
| [
"numpy.power",
"numpy.subtract",
"numpy.dot",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.mod"
] | [((533, 555), 'numpy.subtract', 'np.subtract', (['point', 'p0'], {}), '(point, p0)\n', (544, 555), True, 'import numpy as np\n'), ((565, 584), 'numpy.subtract', 'np.subtract', (['p1', 'p0'], {}), '(p1, p0)\n', (576, 584), True, 'import numpy as np\n'), ((1417, 1436), 'numpy.subtract', 'np.subtract', (['p1', 'p0'], {}), '(p1, p0)\n', (1428, 1436), True, 'import numpy as np\n'), ((2424, 2440), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (2434, 2440), True, 'import numpy as np\n'), ((601, 615), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (607, 615), True, 'import numpy as np\n'), ((1463, 1480), 'numpy.linalg.norm', 'LA.norm', (['vec_diff'], {}), '(vec_diff)\n', (1470, 1480), True, 'from numpy import linalg as LA\n'), ((2522, 2548), 'numpy.mod', 'np.mod', (['(delta + pi)', '(2 * pi)'], {}), '(delta + pi, 2 * pi)\n', (2528, 2548), True, 'import numpy as np\n'), ((627, 638), 'numpy.linalg.norm', 'LA.norm', (['v2'], {}), '(v2)\n', (634, 638), True, 'from numpy import linalg as LA\n'), ((4241, 4275), 'numpy.power', 'np.power', (['(point2[1] - point1[1])', '(2)'], {}), '(point2[1] - point1[1], 2)\n', (4249, 4275), True, 'import numpy as np\n'), ((4303, 4337), 'numpy.power', 'np.power', (['(point2[0] - point1[0])', '(2)'], {}), '(point2[0] - point1[0], 2)\n', (4311, 4337), True, 'import numpy as np\n')] |
import numpy as np
"""
Q:
Write a binomial tree program to calculate the put prices of Bermuda options.
For such options, early exercise is allowed only on specific dates.
Inputs:
S (stock price)
X (strike price)
r (continuously compounded annual interest rate in percentage)
s (annual volatility in percentage)
T (time to maturity in days, which of course is also an exercise date)
E (set of early exercise dates from now)
m (number of periods per day for the tree)
Output:
11.2486
Long put:MAX{X-ST,0}
X (Strike Price)
ST (represent the price of the underlying at maturity)
"""
class node:
    """A single node of the binomial price tree."""

    def __init__(self, price):
        self.price = price  # underlying asset price at this node
        self.value = 0      # option value, filled in during backward induction
        # Links to neighbouring tree nodes (wired up while the tree is built).
        self.u_parent = self.d_parent = None
        self.u_child = self.d_child = None
def price(S,X,r,s,T,E,m):
    """Price a Bermuda put option with a CRR binomial tree.

    Early exercise is allowed only on the dates in E (and at maturity T).

    Args:
        S: current stock price.
        X: strike price.
        r: continuously compounded annual interest rate (decimal, e.g. 0.03).
        s: annual volatility (decimal, e.g. 0.30).
        T: time to maturity in days.
        E: iterable of early-exercise dates, in days from now.
        m: number of tree periods per day.

    Returns:
        The put price at the root of the tree.
    """
    # Fix: work on a copy so the caller's exercise-date list is not mutated
    # (the old version appended T to E as a side effect).
    exercise_days = set(E)
    exercise_days.add(T)
    n = T * m                            # total number of tree periods
    deltaT = (T/365)/n                   # years per period (= 1/(365*m))
    R = np.exp(r*deltaT)                 # per-period growth factor
    u = np.exp(s*np.sqrt(deltaT))        # CRR up-move factor
    d = 1/u
    p = (R - d) / (u-d)                  # risk-neutral up probability
    # Option values at maturity (period n); maturity is always exercisable.
    # Index j counts the number of down moves, so the price is S*u^(n-j)*d^j.
    # This array-based backward induction replaces the old explicit tree of
    # node objects, cutting memory from O(n^2) linked nodes to O(n) floats.
    values = [max(0, X - S * u ** (n - j) * d ** j) for j in range(n + 1)]
    for period in range(n - 1, -1, -1):
        # A period can exercise if its calendar day is an exercise date.
        exercisable = int(period / m) in exercise_days
        new_values = []
        for j in range(period + 1):
            # Discounted expected value under the risk-neutral measure.
            cont = (1/R) * (p * values[j] + (1 - p) * values[j + 1])
            if exercisable:
                cont = max(cont, max(0, X - S * u ** (period - j) * d ** j))
            new_values.append(cont)
        values = new_values
    return values[0]
if __name__ == "__main__":
S = float(input("輸入Stock Price ex:100\n"))
X = float(input("輸入Strike price ex:110\n"))
r = float(input("輸入Continuously compounded annual interest rate in percentage(%) ex:3\n"))/100
s = float(input("輸入Annual volatility in percentage(%) ex:30\n"))/100
T = int(input("輸入time to maturity in days,which of course is also an exercise date ex:60\n"))
Temp = input("輸入set of early exercise dates from now ex:10 20 30 40 50\n").split(' ')
E = []
for temp in Temp:
E.append(int(temp))
m = int(input("輸入number of periods per day for the tree ex:5\n"))
print("S= %f" %S)
print("X= %f" %X)
print("r= %f" %r)
print("s= %f" %s)
print("T= %d" %T)
print("E=",E)
print("m= %d\n" %m)
output = price(S,X,r,s,T,E,m)
print(output) | [
"numpy.exp",
"numpy.sqrt"
] | [((932, 950), 'numpy.exp', 'np.exp', (['(r * deltaT)'], {}), '(r * deltaT)\n', (938, 950), True, 'import numpy as np\n'), ((967, 982), 'numpy.sqrt', 'np.sqrt', (['deltaT'], {}), '(deltaT)\n', (974, 982), True, 'import numpy as np\n')] |
import itertools
from collections import defaultdict
import numpy as np
from scipy import stats
def max_accuracy(y_true, y_pred):
names_true, names_pred, max_result = list(set(y_true)), list(set(y_pred)), 0
for perm in itertools.permutations(names_pred):
acc = np.average([1. if names_true.index(ti) == perm.index(pi) else 0. for ti, pi in zip(y_true, y_pred)])
if acc > max_result:
max_result = acc
return max_result
def rand_index(y_true, y_pred):
good, all = 0, 0
for i in range(len(y_true)):
for j in range(i + 1, len(y_pred)):
if (y_true[i] == y_true[j]) == (y_pred[i] == y_pred[j]):
good += 1
all += 1
return good / all
def triplet_measure(y_true, D_pred):
good, all = 0, 0
for i in range(len(y_true)):
for j in range(i + 1, len(y_true)):
for k in range(j + 1, len(y_true)):
items_by_class = defaultdict(list)
for item in [i, j, k]:
items_by_class[y_true[item]].append(item)
if len(items_by_class.keys()) == 2: # seems to two items in one class
key1, key2 = items_by_class.keys()
if len(items_by_class[key1]) == 2:
same_class_pair, another_class_item = items_by_class[key1], items_by_class[key2][0]
else:
same_class_pair, another_class_item = items_by_class[key2], items_by_class[key1][0]
# check d(s1, s2) < d(s1, a) and d(s1, s2) < d(s2, a)
d_s1_s2 = D_pred[same_class_pair[0], same_class_pair[1]]
d_s1_a = D_pred[same_class_pair[0], another_class_item]
d_s2_a = D_pred[same_class_pair[1], another_class_item]
if d_s1_s2 < d_s1_a:
good += 1
if d_s1_s2 < d_s2_a:
good += 1
all += 2
return good / all
def ranking(measure1_ari, measure2_ari):
assert measure1_ari.shape == measure2_ari.shape
n = measure1_ari.shape[0]
# 1. генерируем ранги
measure1_rank = stats.rankdata(-measure1_ari)
measure2_rank = stats.rankdata(-measure2_ari)
# 2. Для каждой пары мер считаем сумму квадратов разностей
sum_sq_delta = np.sum(np.power(measure1_rank - measure2_rank, 2))
# 3. По формуле Спирмена считаем элементы матрицы корреляций
return 1 - (6 * sum_sq_delta) / ((n - 1) * n * (n + 1))
def copeland(results):
scores = defaultdict(lambda: 0)
for a, b in list(itertools.combinations(results, 2)):
if a[1] > b[1]:
scores[a[0]] += 1
scores[b[0]] -= 1
elif a[1] < b[1]:
scores[a[0]] -= 1
scores[b[0]] += 1
return scores
def _getMs(comms1, comms2):
if len(comms1) != len(comms2):
raise ValueError
l = len(comms1)
m1 = max(comms1) + 1
m2 = max(comms2) + 1
M = [[sum(1 for v in range(l) if comms1[v] == i and comms2[v] == j) for j in range(m2)] for i in range(m1)]
return np.array(M)
def _getMatch(M, perm):
return sum(M[i, j] if i < M.shape[0] and j < M.shape[1] else 0 for i, j in enumerate(perm))
def FC(comms1, comms2):
l = len(comms1)
M = _getMs(comms1, comms2)
return 1 - 1 / l * max(_getMatch(M, perm) for perm in itertools.permutations(range(max(M.shape))))
def modularity(A: np.array, partition):
"""
Simplified version only for undirected graphs
"""
n_edges = np.sum(A)
degrees = np.sum(A, axis=1, keepdims=True)
Q_items = A + np.diagonal(A) - degrees.dot(degrees.T) / n_edges
Q = 0
for class_name in set(partition):
mask = np.array(partition) == class_name
Q += np.sum(Q_items[mask][:, mask])
return Q / n_edges
def _create_krondecker(partition):
n = len(partition)
kron_mask = np.tile(partition, n) == np.repeat(partition, n)
return np.reshape(kron_mask, (n, n))
def modularity2(AIJ, partition):
# reverse = {}
# for comm_idx, comm in enumerate(partition):
# for item in comm.tolist():
# reverse[item] = comm_idx + 1
# partition = [reverse[x] for x in range(len(reverse))]
n = len(AIJ)
m = np.sum(AIJ) # no of edges
k = np.sum(AIJ, axis=1)
expectation = np.reshape(np.tile(k, n) * np.repeat(k, n), (n, n)) / m
kron = _create_krondecker(partition)
# Q = (1 / 2m) * SUM(AIJ - (ki.kj / 2m)) ∂(ci, cj)
return (1.0 / m) * np.sum(kron * (AIJ - expectation)) | [
"numpy.tile",
"numpy.diagonal",
"numpy.reshape",
"numpy.repeat",
"scipy.stats.rankdata",
"numpy.power",
"itertools.combinations",
"numpy.array",
"numpy.sum",
"collections.defaultdict",
"itertools.permutations"
] | [((230, 264), 'itertools.permutations', 'itertools.permutations', (['names_pred'], {}), '(names_pred)\n', (252, 264), False, 'import itertools\n'), ((2183, 2212), 'scipy.stats.rankdata', 'stats.rankdata', (['(-measure1_ari)'], {}), '(-measure1_ari)\n', (2197, 2212), False, 'from scipy import stats\n'), ((2233, 2262), 'scipy.stats.rankdata', 'stats.rankdata', (['(-measure2_ari)'], {}), '(-measure2_ari)\n', (2247, 2262), False, 'from scipy import stats\n'), ((2563, 2586), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (2574, 2586), False, 'from collections import defaultdict\n'), ((3116, 3127), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (3124, 3127), True, 'import numpy as np\n'), ((3552, 3561), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (3558, 3561), True, 'import numpy as np\n'), ((3576, 3608), 'numpy.sum', 'np.sum', (['A'], {'axis': '(1)', 'keepdims': '(True)'}), '(A, axis=1, keepdims=True)\n', (3582, 3608), True, 'import numpy as np\n'), ((3979, 4008), 'numpy.reshape', 'np.reshape', (['kron_mask', '(n, n)'], {}), '(kron_mask, (n, n))\n', (3989, 4008), True, 'import numpy as np\n'), ((4279, 4290), 'numpy.sum', 'np.sum', (['AIJ'], {}), '(AIJ)\n', (4285, 4290), True, 'import numpy as np\n'), ((4315, 4334), 'numpy.sum', 'np.sum', (['AIJ'], {'axis': '(1)'}), '(AIJ, axis=1)\n', (4321, 4334), True, 'import numpy as np\n'), ((2355, 2397), 'numpy.power', 'np.power', (['(measure1_rank - measure2_rank)', '(2)'], {}), '(measure1_rank - measure2_rank, 2)\n', (2363, 2397), True, 'import numpy as np\n'), ((2607, 2641), 'itertools.combinations', 'itertools.combinations', (['results', '(2)'], {}), '(results, 2)\n', (2629, 2641), False, 'import itertools\n'), ((3788, 3818), 'numpy.sum', 'np.sum', (['Q_items[mask][:, mask]'], {}), '(Q_items[mask][:, mask])\n', (3794, 3818), True, 'import numpy as np\n'), ((3918, 3939), 'numpy.tile', 'np.tile', (['partition', 'n'], {}), '(partition, n)\n', (3925, 3939), True, 'import numpy as np\n'), 
((3943, 3966), 'numpy.repeat', 'np.repeat', (['partition', 'n'], {}), '(partition, n)\n', (3952, 3966), True, 'import numpy as np\n'), ((4529, 4563), 'numpy.sum', 'np.sum', (['(kron * (AIJ - expectation))'], {}), '(kron * (AIJ - expectation))\n', (4535, 4563), True, 'import numpy as np\n'), ((3628, 3642), 'numpy.diagonal', 'np.diagonal', (['A'], {}), '(A)\n', (3639, 3642), True, 'import numpy as np\n'), ((3741, 3760), 'numpy.array', 'np.array', (['partition'], {}), '(partition)\n', (3749, 3760), True, 'import numpy as np\n'), ((949, 966), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (960, 966), False, 'from collections import defaultdict\n'), ((4364, 4377), 'numpy.tile', 'np.tile', (['k', 'n'], {}), '(k, n)\n', (4371, 4377), True, 'import numpy as np\n'), ((4380, 4395), 'numpy.repeat', 'np.repeat', (['k', 'n'], {}), '(k, n)\n', (4389, 4395), True, 'import numpy as np\n')] |
import os
import numpy as np
import pytest
import tensorflow as tf
from bentoml.tensorflow import TensorflowModel
from tests._internal.frameworks.tensorflow_utils import (
KerasSequentialModel,
NativeModel,
NativeRaggedModel,
)
native_data = [[1, 2, 3, 4, 5]]
native_tensor = tf.constant(np.asfarray(native_data))
ragged_data = [[15], [7, 8], [1, 2, 3, 4, 5]]
ragged_tensor = tf.ragged.constant(ragged_data, dtype=tf.float64)
def predict__model(model, tensor):
return model(tensor)
@pytest.mark.parametrize(
"model_class, input_type, predict_fn",
[
(KerasSequentialModel(), native_tensor, predict__model),
(NativeModel(), native_tensor, predict__model),
(NativeRaggedModel(), ragged_tensor, predict__model),
],
)
def test_tensorflow_v2_save_load(model_class, input_type, predict_fn, tmpdir):
TensorflowModel(model_class).save(tmpdir)
assert os.path.exists(os.path.join(tmpdir, "saved_model.pb"))
tf2_loaded = TensorflowModel.load(tmpdir)
comparison = predict_fn(tf2_loaded, input_type) == predict_fn(
model_class, input_type
)
assert all(comparison)
| [
"tensorflow.ragged.constant",
"bentoml.tensorflow.TensorflowModel",
"bentoml.tensorflow.TensorflowModel.load",
"os.path.join",
"numpy.asfarray",
"tests._internal.frameworks.tensorflow_utils.KerasSequentialModel",
"tests._internal.frameworks.tensorflow_utils.NativeModel",
"tests._internal.frameworks.te... | [((392, 441), 'tensorflow.ragged.constant', 'tf.ragged.constant', (['ragged_data'], {'dtype': 'tf.float64'}), '(ragged_data, dtype=tf.float64)\n', (410, 441), True, 'import tensorflow as tf\n'), ((303, 327), 'numpy.asfarray', 'np.asfarray', (['native_data'], {}), '(native_data)\n', (314, 327), True, 'import numpy as np\n'), ((981, 1009), 'bentoml.tensorflow.TensorflowModel.load', 'TensorflowModel.load', (['tmpdir'], {}), '(tmpdir)\n', (1001, 1009), False, 'from bentoml.tensorflow import TensorflowModel\n'), ((924, 962), 'os.path.join', 'os.path.join', (['tmpdir', '"""saved_model.pb"""'], {}), "(tmpdir, 'saved_model.pb')\n", (936, 962), False, 'import os\n'), ((856, 884), 'bentoml.tensorflow.TensorflowModel', 'TensorflowModel', (['model_class'], {}), '(model_class)\n', (871, 884), False, 'from bentoml.tensorflow import TensorflowModel\n'), ((590, 612), 'tests._internal.frameworks.tensorflow_utils.KerasSequentialModel', 'KerasSequentialModel', ([], {}), '()\n', (610, 612), False, 'from tests._internal.frameworks.tensorflow_utils import KerasSequentialModel, NativeModel, NativeRaggedModel\n'), ((655, 668), 'tests._internal.frameworks.tensorflow_utils.NativeModel', 'NativeModel', ([], {}), '()\n', (666, 668), False, 'from tests._internal.frameworks.tensorflow_utils import KerasSequentialModel, NativeModel, NativeRaggedModel\n'), ((711, 730), 'tests._internal.frameworks.tensorflow_utils.NativeRaggedModel', 'NativeRaggedModel', ([], {}), '()\n', (728, 730), False, 'from tests._internal.frameworks.tensorflow_utils import KerasSequentialModel, NativeModel, NativeRaggedModel\n')] |
#import open3d
import os, sys
import cv2
import numpy as np
import argparse
imgs_path = 'result/kitti_tracking/0019/'
target_size = (1920, 1080)
target_fps = 8.0
# 输出文件名
target_video = 'out.mp4'
# 是否保存 resize 的中间图像
saveResizeFlag = False
img_types = ('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')
import cv2
import imutils
import numpy as np
def contract_and_bright(img,c,b):
# after test ,c =11 b=6 is better
cnum = c
bnum = b
cimg = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
for j in range(img.shape[1]):
lst = 0.1*cnum*img[i, j] + bnum
cimg[i, j] = [int(ele) if ele < 255 else 255 for ele in lst]
return cimg
def s_and_b(hlsImg,l,s):
lsImg = np.zeros(hlsImg.shape, np.float32)
hlsCopy = np.copy(hlsImg)
l = l
s = s
MAX_VALUE = 100
# 1.调整亮度饱和度(线性变换)、 2.将hlsCopy[:,:,1]和hlsCopy[:,:,2]中大于1的全部截取
hlsCopy[:, :, 1] = (1.0 + l / float(MAX_VALUE)) * hlsCopy[:, :, 1]
hlsCopy[:, :, 1][hlsCopy[:, :, 1] > 1] = 1
# HLS空间通道2是饱和度,对饱和度进行线性变换,且最大值在255以内,这一归一化了,所以应在1以内
hlsCopy[:, :, 2] = (1.0 + s / float(MAX_VALUE)) * hlsCopy[:, :, 2]
hlsCopy[:, :, 2][hlsCopy[:, :, 2] > 1] = 1
# HLS2BGR
lsImg = cv2.cvtColor(hlsCopy, cv2.COLOR_HLS2BGR)
return lsImg
def imgs2video(imgs_path,ps =True):
output_path = imgs_path + 'out/'
output_ps_path = imgs_path + 'out_ps/'
if not os.path.exists(output_path):
os.mkdir(output_path)
if not os.path.exists(output_ps_path):
os.mkdir(output_ps_path)
target_path = output_path + target_video
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
vw = cv2.VideoWriter(target_path, fourcc, target_fps, target_size)
images = os.listdir(imgs_path)
images = sorted(images)
count = 0
for image in images:
if not (image.lower().endswith(img_types)):
continue
try:
print(image)
frame = cv2.imdecode(np.fromfile(imgs_path + image, dtype=np.uint8),
cv2.IMREAD_COLOR) # , cv2.IMREAD_UNCHANGED
if ps:
frame = contract_and_bright(frame,11,10)
# 图像归一化,且转换为浮点型, 颜色空间转换 BGR转为HLS
fImg = frame.astype(np.float32)
fImg = fImg / 255.0
# HLS空间,三个通道分别是: Hue色相、lightness亮度、saturation饱和度
# 通道0是色相、通道1是亮度、通道2是饱和度
hlsImg = cv2.cvtColor(fImg, cv2.COLOR_BGR2HLS)
frame = s_and_b(hlsImg,20,50)
cv2.imwrite(output_ps_path+image,frame*255)
# 写入视频
vw.write(frame)
count += 1
except Exception as exc:
print(image, exc)
vw.release()
print('\r\nConvert Success! Total ' + str(count) + ' images be combined into the video at: ' + target_path + '\r\n')
imgs_path = 'result/kitti_tracking/'
seq = ['0020']
for s in seq:
imgs_path_seq = imgs_path + s +'/out_ps/'
imgs2video(imgs_path_seq,ps=False) | [
"numpy.copy",
"os.path.exists",
"os.listdir",
"numpy.fromfile",
"numpy.ones",
"cv2.imwrite",
"cv2.VideoWriter",
"numpy.zeros",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor"
] | [((490, 546), 'numpy.ones', 'np.ones', (['(img.shape[0], img.shape[1], 3)'], {'dtype': 'np.uint8'}), '((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n', (497, 546), True, 'import numpy as np\n'), ((790, 824), 'numpy.zeros', 'np.zeros', (['hlsImg.shape', 'np.float32'], {}), '(hlsImg.shape, np.float32)\n', (798, 824), True, 'import numpy as np\n'), ((839, 854), 'numpy.copy', 'np.copy', (['hlsImg'], {}), '(hlsImg)\n', (846, 854), True, 'import numpy as np\n'), ((1278, 1318), 'cv2.cvtColor', 'cv2.cvtColor', (['hlsCopy', 'cv2.COLOR_HLS2BGR'], {}), '(hlsCopy, cv2.COLOR_HLS2BGR)\n', (1290, 1318), False, 'import cv2\n'), ((1658, 1689), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (1680, 1689), False, 'import cv2\n'), ((1699, 1760), 'cv2.VideoWriter', 'cv2.VideoWriter', (['target_path', 'fourcc', 'target_fps', 'target_size'], {}), '(target_path, fourcc, target_fps, target_size)\n', (1714, 1760), False, 'import cv2\n'), ((1775, 1796), 'os.listdir', 'os.listdir', (['imgs_path'], {}), '(imgs_path)\n', (1785, 1796), False, 'import os, sys\n'), ((1464, 1491), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1478, 1491), False, 'import os, sys\n'), ((1501, 1522), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (1509, 1522), False, 'import os, sys\n'), ((1534, 1564), 'os.path.exists', 'os.path.exists', (['output_ps_path'], {}), '(output_ps_path)\n', (1548, 1564), False, 'import os, sys\n'), ((1574, 1598), 'os.mkdir', 'os.mkdir', (['output_ps_path'], {}), '(output_ps_path)\n', (1582, 1598), False, 'import os, sys\n'), ((2008, 2054), 'numpy.fromfile', 'np.fromfile', (['(imgs_path + image)'], {'dtype': 'np.uint8'}), '(imgs_path + image, dtype=np.uint8)\n', (2019, 2054), True, 'import numpy as np\n'), ((2472, 2509), 'cv2.cvtColor', 'cv2.cvtColor', (['fImg', 'cv2.COLOR_BGR2HLS'], {}), '(fImg, cv2.COLOR_BGR2HLS)\n', (2484, 2509), False, 'import cv2\n'), ((2572, 2620), 'cv2.imwrite', 
'cv2.imwrite', (['(output_ps_path + image)', '(frame * 255)'], {}), '(output_ps_path + image, frame * 255)\n', (2583, 2620), False, 'import cv2\n')] |
'''
Function:
load the train data.
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import os
import glob
import torch
import random
import numpy as np
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset
from skimage.transform import resize
'''load data'''
class ImageFolder(Dataset):
def __init__(self, imagespath, labpath, shape=(350, 350), is_shuffle=True, mode='train'):
self.img_shape = shape
self.imagepaths = sorted(glob.glob(os.path.join(imagespath, '*.*')))
if mode == 'train':
self.imagepaths = self.imagepaths[:int(len(self.imagepaths) * 0.8)]
elif mode == 'test':
self.imagepaths = self.imagepaths[int(len(self.imagepaths) * 0.8):]
else:
raise ValueError('ImageFolder --> mode should be <train> or <test>, not %s...' % mode)
if is_shuffle:
random.shuffle(self.imagepaths)
ratings = pd.read_excel(labpath)
filenames = ratings.groupby('Filename').size().index.tolist()
self.labels = []
for filename in filenames:
score = round(ratings[ratings['Filename'] == filename]['Rating'].mean(), 2)
self.labels.append({'Filename': filename, 'score': score})
self.labels = pd.DataFrame(self.labels)
def __getitem__(self, index):
# Image
img_path = self.imagepaths[index % len(self.imagepaths)]
img = np.array(Image.open(img_path)) / 255.
input_img = resize(img, (*self.img_shape, 3), mode='reflect')
input_img = np.transpose(input_img, (2, 0, 1))
input_img = torch.from_numpy(input_img).float()
# Label
filename = img_path.split('/')[-1]
label = self.labels[self.labels.Filename == filename].score.values
return img_path, input_img, label
def __len__(self):
return len(self.imagepaths)
| [
"PIL.Image.open",
"random.shuffle",
"os.path.join",
"torch.from_numpy",
"pandas.read_excel",
"pandas.DataFrame",
"numpy.transpose",
"skimage.transform.resize"
] | [((843, 865), 'pandas.read_excel', 'pd.read_excel', (['labpath'], {}), '(labpath)\n', (856, 865), True, 'import pandas as pd\n'), ((1135, 1160), 'pandas.DataFrame', 'pd.DataFrame', (['self.labels'], {}), '(self.labels)\n', (1147, 1160), True, 'import pandas as pd\n'), ((1321, 1370), 'skimage.transform.resize', 'resize', (['img', '(*self.img_shape, 3)'], {'mode': '"""reflect"""'}), "(img, (*self.img_shape, 3), mode='reflect')\n", (1327, 1370), False, 'from skimage.transform import resize\n'), ((1385, 1419), 'numpy.transpose', 'np.transpose', (['input_img', '(2, 0, 1)'], {}), '(input_img, (2, 0, 1))\n', (1397, 1419), True, 'import numpy as np\n'), ((799, 830), 'random.shuffle', 'random.shuffle', (['self.imagepaths'], {}), '(self.imagepaths)\n', (813, 830), False, 'import random\n'), ((460, 491), 'os.path.join', 'os.path.join', (['imagespath', '"""*.*"""'], {}), "(imagespath, '*.*')\n", (472, 491), False, 'import os\n'), ((1278, 1298), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1288, 1298), False, 'from PIL import Image\n'), ((1434, 1461), 'torch.from_numpy', 'torch.from_numpy', (['input_img'], {}), '(input_img)\n', (1450, 1461), False, 'import torch\n')] |
# Author: <NAME>, <NAME>, 2008
# Clustering of position weight matrices based on
# "A Novel Bayesian DNA Motif Comparison Method for Clustering and Retrieval"
# Habib et al, 2008
# PLOS Computational Biology, Volume 4, Issue 2
import math
import numpy
import copy
import os,sys
SENSE = 0
ANTISENSE = 1
CUTOFF = {0.25:lambda x:0.1127*x+0.4485, 0.05:lambda x:0.2461*x+0.7060, \
0.01:lambda x:0.4260*x+0.5751, 0.001:lambda x:0.6706*x+0.1165, \
0.0001:lambda x:0.7301*x+0.2404}
def pwm2ic(pwm):
"""Judge the information content of bps in a given pwm, return the number
of ic bigger than 0.85[0.49,0.49,0.01,0.01]."""
count = 0
for pos in pwm:
ic = 2
for bp in pos:
ic += bp*math.log(bp+0.0000001,2)
if ic > 0.85:
count += 1
return count
def log_E_p_standard(n):
"""Log of the standard dirichlet prior."""
E_p = n + 1
E_p /= numpy.tile(E_p.sum(1), (4,1)).T
return numpy.log(E_p)
def BLiC_score(M1, M2, cutoff=0.05):
"""
if flag_r is ANTISENSE the reverse complement is the better match
returns alignment score shift and orientation of M1 in the match
matrix A is shifted before reversing
"""
n1 = M1.shape[0]
n2 = M2.shape[0]
if n1 >= n2: # A the dame length with B, no changes
A,B = M1,M2 # make sure A is longer, or the same length.
else:
A,B = M2,M1
n1,n2 = n2,n1
max_score, i_max= -999, -999
flag_strand, flag_merge = False, False
Brev = B[::-1,::-1]
#align B to A, cut the edge of unaligned sequence.
#i<0: B:xxxxx
# A: xxxxxxxx
#i<=n1-n2: B:xxxxx
# A:xxxxxxxx
#i>n1-n2: B: xxxxx
# A:xxxxxxxx
for i in range(1-n2, n1):
if i<0:
Bsub = B[-i:, :]
Brev_sub = Brev[-i:, :]
Asub = A[:n2+i, :]
#ii = n1-i
elif i <= n1-n2:
Bsub = B
Brev_sub = Brev
Asub = A[i:i+n2, :]
#ii = n1
elif n1-i < n2:
Bsub = B[:n1-i, :] #B: xCATCGCxxx
Brev_sub = Brev[:n1-i, :]
Asub = A[i:, :] #A: xxxxxxTCGC
#ii = n2+i
score = BLiC_score_aligned( Asub, Bsub )
score_r = BLiC_score_aligned( Asub, Brev_sub )
if score_r > score:
flag, score = ANTISENSE, score_r
else:
flag = SENSE
if score > max_score:
max_score = score
flag_strand = flag
i_max = i
cutoff_len = max(pwm2ic(A), pwm2ic(B))
if max_score >= CUTOFF[cutoff](cutoff_len):
flag_merge = True
return max_score, i_max, flag_strand, flag_merge
def BLiC_score_aligned(M1, M2):
sum_M1_i = M1.sum(axis=1)
sum_M1_i = 1.0*(sum_M1_i==0) + sum_M1_i
A1 = M1.transpose()/sum_M1_i
A1 = A1.transpose()
sum_M2_i = M2[0:M1.shape[0],].sum(axis=1)
sum_M2_i = 1.0*(sum_M2_i==0) + sum_M2_i
A2 = M2[0:M1.shape[0],].transpose()/sum_M2_i
A2 = A2.transpose()
A12 = A1 + A2
log_p1 = log_E_p_standard(A1)
log_p2 = log_E_p_standard(A2)
log_p12 = log_E_p_standard(A1*A2)
log_pBG = log_E_p_standard(numpy.ones(A1.shape))
s = 2 * (A12 * log_p12).sum() - (A1 * log_p1 + A2 * log_p2 + A12 * log_pBG).sum()
return s
| [
"numpy.log",
"numpy.ones",
"math.log"
] | [((919, 933), 'numpy.log', 'numpy.log', (['E_p'], {}), '(E_p)\n', (928, 933), False, 'import numpy\n'), ((3223, 3243), 'numpy.ones', 'numpy.ones', (['A1.shape'], {}), '(A1.shape)\n', (3233, 3243), False, 'import numpy\n'), ((707, 730), 'math.log', 'math.log', (['(bp + 1e-07)', '(2)'], {}), '(bp + 1e-07, 2)\n', (715, 730), False, 'import math\n')] |
'''
Created on Feb 8, 2017
@author: julien
'''
from inspect import isfunction, isclass
from random import Random
import numpy
rand = Random()
class ParameterConstraint(object):
levels = ['row', 'block', 'layer']
def __init__(self, namespace_id, name, value, **kwargs):
self.namespace_id = namespace_id
self.name = name
self.value = value
for level in self.levels:
if level in kwargs:
setattr(self, level, kwargs[level])
class Parameter(object):
def __init__(self, param_type, values=None,
lo=None, hi=None, default=None, optional=False, mutable=True):
self.param_type = param_type
self.values = values
self.lo = lo
self.hi = hi
self.default = default
self.optional = optional
self.mutable = mutable
class Function(object):
def __init__(self, func, params):
self.func = func
self.params = params
def str_param_name(element):
if isinstance(element, str):
return element
if isclass(element) or isfunction(element):
return element.__name__
return str(element)
def param_id(path):
return '.'.join([str_param_name(e) for e in path])
def param_path(_id):
return _id.split('.')
def expand_param_path(path):
path = [
e
for p in path
for e in p.split('.')]
return path
def boolean_param(default=None, optional=False):
return Parameter(
bool,
(True, False),
default=default,
optional=optional)
def float_param(lo=0., hi=1., default=None, values=None, optional=False):
return Parameter(
float,
lo=lo,
hi=hi,
values=values,
default=default,
optional=optional)
def int_param(lo=0, hi=100, default=None, values=None, optional=False):
return Parameter(
int,
lo=lo,
hi=hi,
values=values,
default=default,
optional=optional)
def string_param(values, default=None, optional=False, mutable=True):
return Parameter(
str,
values=values,
default=default,
optional=optional,
mutable=mutable)
def param(values, default=None, optional=False):
return Parameter(
dict,
values=values,
default=default,
optional=optional)
def func_param(function_definitions, default=None, optional=False):
return Parameter(
Function,
values=function_definitions,
default=default)
def is_valid_param_value(param, value):
if not isinstance(value, param.param_type):
return False
if param.values is not None and len(param.values) > 0:
return value in param.values
if param.lo is not None and value < param.lo:
return False
if param.hi is not None and value > param.hi:
return False
return True
def mutate_param(param, value, mutation_std=0.1):
if isinstance(param, dict):
return {
name: mutate_param(nested, value.get(name, None))
for name, nested in param.items()}
if not isinstance(param, Parameter):
return value
if param.optional and value is not None and rand.random() < 0.5:
return None
if param.values:
if len(set(param.values)) == 1:
return param.values[0]
element = random_list_element(param.values)
while element == value:
element = random_list_element(param.values)
return element
elif param.lo == param.hi:
return value
value = value or 0
std = mutation_std * (param.hi - param.lo)
if param.param_type == int:
std = max(1, std)
new_value = value + param.param_type(numpy.random.normal(0, std, 1)[0])
while not is_valid_param_value(param, new_value) or new_value == value:
new_value = value + param.param_type(numpy.random.normal(0, std, 1)[0])
return new_value
def _is_optional_param(param):
return isinstance(param, Parameter) and param.optional
def _random_dict_param_value(param):
keys = [
key
for key in param.keys()
if not _is_optional_param(param[key])
or rand.random() < 0.5]
return {
key: random_param_value(param[key])
for key in keys}
def _random_list_param_value(param):
if len(param) == 0:
return None
if len(param) == 1:
return param[0]
if len(param) == 2:
param = Parameter(type(param[0]), lo=param[0], hi=param[1])
elif len(param) > 2:
param = Parameter(type(param[0]), values=param)
return random_param_value(param)
def random_param_value(param):
if isinstance(param, dict):
return _random_dict_param_value(param)
if isinstance(param, list) or isinstance(param, tuple):
return _random_list_param_value(param)
if not isinstance(param, Parameter):
return param
value = 0
if param.values:
value = random_list_element(param.values)
elif param.lo is not None and param.hi is not None:
if param.param_type == int:
value = rand.randint(param.lo, param.hi)
else:
value = param.lo + (rand.random() * (param.hi - param.lo))
elif param.lo is not None:
value = param.lo * (1 + rand.random())
elif param.hi is not None:
value = rand.random() * param.hi
return param.param_type(value)
def random_initial_param_value(param, mutation_std=0.1):
if isinstance(param, dict):
return _random_dict_param_value(param)
if isinstance(param, list) or isinstance(param, tuple):
return _random_list_param_value(param)
if not isinstance(param, Parameter):
return param
if param.values:
return random_list_element(param.values)
initial_value = None
if param.default is not None:
initial_value = param.default
elif param.lo is not None:
initial_value = param.lo
else:
initial_value = 0
return mutate_param(param, initial_value, mutation_std)
def random_list_element(elements):
if len(elements) == 0:
return None
if len(elements) == 1:
return elements[0]
return elements[rand.randint(0, len(elements) - 1)]
def default_param_value(param):
if isinstance(param, dict):
return {
name: default_param_value(value)
for name, value in param.items()}
elif isinstance(param, Parameter):
if param.default is not None or param.optional:
return param.default
else:
return param
| [
"random.Random",
"inspect.isclass",
"inspect.isfunction",
"numpy.random.normal"
] | [((138, 146), 'random.Random', 'Random', ([], {}), '()\n', (144, 146), False, 'from random import Random\n'), ((1069, 1085), 'inspect.isclass', 'isclass', (['element'], {}), '(element)\n', (1076, 1085), False, 'from inspect import isfunction, isclass\n'), ((1089, 1108), 'inspect.isfunction', 'isfunction', (['element'], {}), '(element)\n', (1099, 1108), False, 'from inspect import isfunction, isclass\n'), ((3756, 3786), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 'std', '(1)'], {}), '(0, std, 1)\n', (3775, 3786), False, 'import numpy\n'), ((3912, 3942), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 'std', '(1)'], {}), '(0, std, 1)\n', (3931, 3942), False, 'import numpy\n')] |
# """
# The code will split the training set into k-fold for cross-validation
# """
# import os
# import numpy as np
# from sklearn.model_selection import StratifiedKFold
# root = './data/2018/MICCAI_BraTS_2018_Data_Training'
# valid_data_dir = './data/2018/MICCAI_BraTS_2018_Data_Validation'
# def write(data, fname, root=root):
# fname = os.path.join(root, fname)
# with open(fname, 'w') as f:
# f.write('\n'.join(data))
# hgg = os.listdir(os.path.join(root, 'HGG'))
# hgg = [os.path.join('HGG', f) for f in hgg]
# lgg = os.listdir(os.path.join(root, 'LGG'))
# lgg = [os.path.join('LGG', f) for f in lgg]
# X = hgg + lgg
# Y = [1] * len(hgg) + [0] * len(lgg)
# write(X, 'all.txt')
# X, Y = np.array(X), np.array(Y)
# skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)
# for k, (train_index, valid_index) in enumerate(skf.split(Y, Y)):
# train_list = list(X[train_index])
# valid_list = list(X[valid_index])
# write(train_list, 'train_{}.txt'.format(k))
# write(valid_list, 'valid_{}.txt'.format(k))
# valid = os.listdir(os.path.join(valid_data_dir))
# valid = [f for f in valid if not (f.endswith('.csv') or f.endswith('.txt'))]
# write(valid, 'valid.txt', root=valid_data_dir)
"""
The code will split the training set into k-fold for cross-validation
"""
import os
import sys
import numpy as np
from sklearn.model_selection import StratifiedKFold
import shutil
root = './data/2018/MICCAI_BraTS_2018_Data_Training'
valid_data_dir = './data/2018/MICCAI_BraTS_2018_Data_Validation'
backup = './2018/datasets'
backup_files = os.listdir(backup)
if len(backup_files) != 0:
print("Copy from backup")
for file in backup_files:
shutil.copy(os.path.join(backup, file), os.path.join(root, file))
count=0
with open(os.path.join(root, file), 'r') as f:
for line in f:
count += 1
print("File {} has {} lines.".format(file, count))
sys.exit()
def write(data, fname, root=root):
fname = os.path.join(root, fname)
with open(fname, 'w') as f:
f.write('\n'.join(data))
limit = float(sys.argv[1])
hgg = os.listdir(os.path.join(root, 'HGG'))
hgg = [os.path.join('HGG', f) for f in hgg]
lgg = os.listdir(os.path.join(root, 'LGG'))
lgg = [os.path.join('LGG', f) for f in lgg]
print("Original size: HGG:{}, LGG:{}, Total:{}".format(len(hgg), len(lgg), len(hgg) + len(lgg)))
hgg = hgg[:int(limit*len(hgg))]
lgg = lgg[:int(limit*len(lgg))]
print("Limited size: HGG:{}, LGG:{}, Total:{}".format(len(hgg), len(lgg), len(hgg) + len(lgg)))
X = hgg + lgg
Y = [1] * len(hgg) + [0] * len(lgg)
write(X, 'all.txt')
shutil.copy(os.path.join(root,'all.txt'), os.path.join(backup, 'all.txt'))
X, Y = np.array(X), np.array(Y)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)
for k, (train_index, valid_index) in enumerate(skf.split(Y, Y)):
train_list = list(X[train_index])
valid_list = list(X[valid_index])
write(train_list, 'train_{}.txt'.format(k))
write(valid_list, 'valid_{}.txt'.format(k))
shutil.copy(os.path.join(root,'train_{}.txt'.format(k)),
os.path.join(backup, 'train_{}.txt'.format(k)))
shutil.copy(os.path.join(root,'valid_{}.txt'.format(k)),
os.path.join(backup, 'valid_{}.txt'.format(k)))
valid = os.listdir(os.path.join(valid_data_dir))
valid = [f for f in valid if not (f.endswith('.csv') or f.endswith('.txt'))]
write(valid, 'valid.txt', root=valid_data_dir) | [
"os.listdir",
"os.path.join",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sys.exit"
] | [((1652, 1670), 'os.listdir', 'os.listdir', (['backup'], {}), '(backup)\n', (1662, 1670), False, 'import os\n'), ((2857, 2917), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(2018)'}), '(n_splits=5, shuffle=True, random_state=2018)\n', (2872, 2917), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2034, 2044), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2042, 2044), False, 'import sys\n'), ((2096, 2121), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (2108, 2121), False, 'import os\n'), ((2239, 2264), 'os.path.join', 'os.path.join', (['root', '"""HGG"""'], {}), "(root, 'HGG')\n", (2251, 2264), False, 'import os\n'), ((2274, 2296), 'os.path.join', 'os.path.join', (['"""HGG"""', 'f'], {}), "('HGG', f)\n", (2286, 2296), False, 'import os\n'), ((2329, 2354), 'os.path.join', 'os.path.join', (['root', '"""LGG"""'], {}), "(root, 'LGG')\n", (2341, 2354), False, 'import os\n'), ((2364, 2386), 'os.path.join', 'os.path.join', (['"""LGG"""', 'f'], {}), "('LGG', f)\n", (2376, 2386), False, 'import os\n'), ((2752, 2781), 'os.path.join', 'os.path.join', (['root', '"""all.txt"""'], {}), "(root, 'all.txt')\n", (2764, 2781), False, 'import os\n'), ((2782, 2813), 'os.path.join', 'os.path.join', (['backup', '"""all.txt"""'], {}), "(backup, 'all.txt')\n", (2794, 2813), False, 'import os\n'), ((2823, 2834), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2831, 2834), True, 'import numpy as np\n'), ((2836, 2847), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (2844, 2847), True, 'import numpy as np\n'), ((3467, 3495), 'os.path.join', 'os.path.join', (['valid_data_dir'], {}), '(valid_data_dir)\n', (3479, 3495), False, 'import os\n'), ((1782, 1808), 'os.path.join', 'os.path.join', (['backup', 'file'], {}), '(backup, file)\n', (1794, 1808), False, 'import os\n'), ((1810, 1834), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1822, 
1834), False, 'import os\n'), ((1872, 1896), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1884, 1896), False, 'import os\n')] |
# Copyright (c) 2012-2018, University of Strathclyde
# Authors: <NAME>
# License: BSD-3-Clause
"""
This is an examplar script to produce a plot of the cycle-averaged magnitude
and phase of the fields
"""
import sys
import numpy as np
from numpy import pi
from numpy import arange
import matplotlib.pyplot as plt
import tables
from puffdata import fdata
from puffdata import puffData
from retrieve import getPow
# can maybe use argparse for more complex plotting options...
def plotPowVsZ2(h5fname, cfr=None, dfr=None, gav=3):
    """Plot the temporal power profile stored in a Puffin HDF5 field file.

    Reads the field metadata with :class:`fdata`, retrieves the SI power via
    :func:`getPow`, plots it against ct-z (in microns) and saves the figure
    as '<basename>-powvsz2-step-<step>-filt-<cfr>-<dfr>-yfield.png'.

    :param h5fname: path to the Puffin HDF5 field file.
    :param cfr: filter parameter forwarded to getPow (presumably the central
        filter frequency -- confirm against getPow's documentation).
    :param dfr: filter parameter forwarded to getPow (presumably the filter
        width -- confirm against getPow's documentation).
    :param gav: value passed as getPow's `irtype` argument (averaging mode).
    """
    mdata = fdata(h5fname)
    # Scaled temporal mesh z2, converted to microns via the scaling length lc.
    # (The original computed sampleFreq, lenz2, xaxis, yaxis, fcount and
    # plotLab as well, but never used them -- removed.)
    z2axis = np.arange(0, mdata.vars.nz2) * mdata.vars.dz2
    saxis = z2axis * mdata.vars.lc * 1e6
    pows = getPow(h5fname, cfr, dfr, irtype=gav, qScale=0)
    z = mdata.vars.z
    ax1 = plt.subplot(111)
    plt.plot(saxis, pows)
    plt.xlabel(r'$ct-z (\mu m)$')
    plt.ylabel('Power (W)')
    ax1.set_title('z = ' + str(z) + 'm')
    # Output file name is derived from the part of h5fname before the
    # first underscore.
    basename = h5fname.split('_')[0]
    plt.savefig(basename + "-powvsz2-step-" + str(mdata.vars.step) + "-filt-" \
       + str(cfr) + '-' + str(dfr) + "-yfield.png")
if __name__ == '__main__':
    # Usage: script.py <h5file> [<cfr> <dfr>]
    h5fname = sys.argv[1]
    cfr = dfr = None
    if len(sys.argv) == 4:
        cfr, dfr = float(sys.argv[2]), float(sys.argv[3])
    plotPowVsZ2(h5fname, cfr=cfr, dfr=dfr)
# plot(xaxis,magxrms);
# hold on;
# plot(xaxis,phasex,'r');
# hold off;
| [
"matplotlib.pyplot.ylabel",
"puffdata.fdata",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"retrieve.getPow",
"matplotlib.pyplot.subplot",
"numpy.arange"
] | [((548, 562), 'puffdata.fdata', 'fdata', (['h5fname'], {}), '(h5fname)\n', (553, 562), False, 'from puffdata import fdata\n'), ((901, 948), 'retrieve.getPow', 'getPow', (['h5fname', 'cfr', 'dfr'], {'irtype': 'gav', 'qScale': '(0)'}), '(h5fname, cfr, dfr, irtype=gav, qScale=0)\n', (907, 948), False, 'from retrieve import getPow\n'), ((1038, 1054), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1049, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1085), 'matplotlib.pyplot.plot', 'plt.plot', (['saxis', 'pows'], {}), '(saxis, pows)\n', (1072, 1085), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1119), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$ct-z (\\\\mu m)$"""'], {}), "('$ct-z (\\\\mu m)$')\n", (1100, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1141), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['axLab'], {}), '(axLab)\n', (1134, 1141), True, 'import matplotlib.pyplot as plt\n'), ((665, 693), 'numpy.arange', 'np.arange', (['(0)', 'mdata.vars.nz2'], {}), '(0, mdata.vars.nz2)\n', (674, 693), True, 'import numpy as np\n'), ((766, 793), 'numpy.arange', 'np.arange', (['(0)', 'mdata.vars.nx'], {}), '(0, mdata.vars.nx)\n', (775, 793), True, 'import numpy as np\n'), ((826, 853), 'numpy.arange', 'np.arange', (['(0)', 'mdata.vars.ny'], {}), '(0, mdata.vars.ny)\n', (835, 853), True, 'import numpy as np\n')] |
# Load a pre-trained binary image classifier and run it on one image.
from keras.models import load_model
import cv2
import numpy as np

# NOTE(review): the path contains a literal backslash
# ('models\model_final_bin.h5'), which only resolves on Windows --
# confirm the intended platform.
model = load_model('models\model_final_bin.h5')
# The weights are already trained; compile only attaches loss/metrics
# so the model object is fully usable.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
img = cv2.imread('2.jpg')
# Resize to the network's expected input resolution.
img = cv2.resize(img,(256,256))
# Add the batch dimension: (1, 256, 256, 3).
img = np.reshape(img,[1,256,256,3])
classes = model.predict_classes(img)
print (classes)
| [
"cv2.resize",
"keras.models.load_model",
"numpy.reshape",
"cv2.imread"
] | [((79, 119), 'keras.models.load_model', 'load_model', (['"""models\\\\model_final_bin.h5"""'], {}), "('models\\\\model_final_bin.h5')\n", (89, 119), False, 'from keras.models import load_model\n'), ((246, 265), 'cv2.imread', 'cv2.imread', (['"""2.jpg"""'], {}), "('2.jpg')\n", (256, 265), False, 'import cv2\n'), ((273, 300), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {}), '(img, (256, 256))\n', (283, 300), False, 'import cv2\n'), ((306, 339), 'numpy.reshape', 'np.reshape', (['img', '[1, 256, 256, 3]'], {}), '(img, [1, 256, 256, 3])\n', (316, 339), True, 'import numpy as np\n')] |
import numpy as np
import random
def heightmap_1D(iter, smoothing, seed, init):
    """Create a 2^iter + 1 point linear heightmap via midpoint displacement.

    :param iter: number of subdivision passes (parameter name shadows the
        builtin `iter`; kept for interface compatibility).
    :param smoothing: decay rate of the random jitter per pass.
    :param seed: string used to seed the `random` module (deterministic runs).
    :param init: optional starting array of heights; when None two random
        endpoint values are drawn.
    :return: 1-D numpy array normalized to the range [0, 1].
    """
    # `init == None` would do an elementwise comparison on a numpy array and
    # break the truth test, so use an identity check instead.
    if init is None:
        random.seed(seed + "init")
        heightmap = np.array([random.random(), random.random()])
    else:
        heightmap = init
    random.seed(seed + "iterate")
    for i in range(iter):
        refined = []
        for j in range(2**i):
            refined.append(heightmap[j])
            # Midpoint of each segment, displaced by jitter that shrinks
            # geometrically with the pass number.
            refined.append((heightmap[j] + heightmap[j+1])/2
                           + random.uniform(-1, 1)*2**(-smoothing*(i+1)))
        refined.append(heightmap[-1])
        heightmap = np.array(refined)
    # Normalize to [0, 1]. Bug fix: the original *added* the minimum
    # (`+=`), which does not anchor the range at zero; subtract it, as the
    # sibling functions (diamond_square, erode) do.
    heightmap -= heightmap.min()
    heightmap /= heightmap.max()
    return heightmap
def diamond_square(iter, smoothing, seed, init):
    """Create 2^iter + 1 square heightmap via diamond square algorithm.
    """
    # Start from a 2x2 random grid (or a caller-provided seed grid) and
    # repeatedly double the resolution: each pass fills diamond centres and
    # square edge midpoints with jittered averages of their neighbours.
    if init == None:
        random.seed(seed + "init")
        heightmap = np.array([[random.random(), random.random()],
                              [random.random(), random.random()]])
    else:
        heightmap = np.array(init)
    random.seed(seed + "iterate")
    for n in range(iter):
        rows, cols = heightmap.shape
        # The refined grid doubles the resolution (shared edges counted once).
        temp_map = np.zeros((2*rows - 1, 2*cols - 1))
        # Jitter amplitudes shrink geometrically with the pass number;
        # `smoothing` controls how fast the noise decays.
        jitter_diamond = 2**(-smoothing*(2*n + 1))
        jitter_square = 2**(-smoothing*(2*n + 2))
        for (i, j), value_ij in np.ndenumerate(heightmap):
            north_exists = i > 0
            south_exists = i < rows - 1
            west_exists = j > 0
            east_exists = j < cols - 1
            # Existing corner values are copied straight through.
            temp_map[2*i, 2*j] = heightmap[i, j]
            if east_exists and south_exists:
                # Diamond step: centre of the cell spanned by
                # (i, j)..(i+1, j+1).
                diamond_center = (heightmap[i, j] + heightmap[i, j+1]
                                  + heightmap[i+1, j] + heightmap[i+1, j+1])
                diamond_center /= 4
                diamond_center += random.uniform(-1,1)*jitter_diamond
                temp_map[2*i + 1, 2*j + 1] = diamond_center
                # Square step: midpoint of this cell's top edge.
                square_top = (heightmap[i, j] + heightmap[i, j+1]
                              + diamond_center)
                if north_exists:
                    square_top += temp_map[2*i - 1, 2*j + 1]
                    square_top /= 4
                else:
                    # Border midpoints only have three neighbours.
                    square_top /= 3
                square_top += random.uniform(-1,1)*jitter_square
                temp_map[2*i, 2*j + 1] = square_top
                # Square step: midpoint of this cell's left edge.
                square_left = (heightmap[i, j] + heightmap[i+1, j]
                               + diamond_center)
                if west_exists:
                    square_left += temp_map[2*i + 1, 2*j - 1]
                    square_left /= 4
                else:
                    square_left /= 3
                square_left += random.uniform(-1,1)*jitter_square
                temp_map[2*i + 1, 2*j] = square_left
            elif east_exists and not south_exists:
                # Bottom row: only the top-edge midpoint is left to fill.
                square_top = (heightmap[i, j] + heightmap[i, j+1]
                              + temp_map[2*i - 1, 2*j + 1])/3
                square_top += random.uniform(-1,1)*jitter_square
                temp_map[2*i, 2*j + 1] = square_top
            elif not east_exists and south_exists:
                # Rightmost column: only the left-edge midpoint is left.
                square_left = (heightmap[i, j] + heightmap[i+1, j]
                               + temp_map[2*i + 1, 2*j - 1])/3
                square_left += random.uniform(-1,1)*jitter_square
                temp_map[2*i + 1, 2*j] = square_left
        heightmap = temp_map
        # noise = 2*np.random.random(heightmap.shape) - 1
        # noise /= 2**(n+5)
        # heightmap += noise
        # heightmap_to_png(heightmap, seed + ' ' + str(n))
    # normalize
    heightmap -= heightmap.min()
    heightmap /= heightmap.max()
    return(heightmap)
def map_interp(heightmap):
    """Downsample a 2-D heightmap by averaging each 2x2 neighbourhood.

    :param heightmap: 2-D array of shape (rows, cols).
    :return: new float array of shape (rows - 1, cols - 1) where each cell
        is the mean of the four surrounding input cells.
    """
    # Vectorized replacement for the original per-cell Python loop:
    # the four shifted views cover every 2x2 window exactly once.
    return (heightmap[:-1, :-1] + heightmap[:-1, 1:]
            + heightmap[1:, :-1] + heightmap[1:, 1:]) / 4
def trim_and_flatten(heightmap, rows=1, cols=1):
    """Drop the last `rows` rows and last `cols` columns, then flatten.

    :param heightmap: 2-D array.
    :param rows: number of trailing rows to remove (default 1).
    :param cols: number of trailing columns to remove (default 1).
    :return: a new, flattened 1-D copy of the trimmed array.
    """
    # One slice replaces the original repeated np.delete calls, each of
    # which copied the whole array.
    n_rows, n_cols = heightmap.shape
    trimmed = heightmap[:n_rows - rows, :n_cols - cols]
    return trimmed.flatten()
def entrywise_product(heightmap0, heightmap1, normalize=True):
    """Element-wise (Hadamard) product of two same-shaped heightmaps.

    :param heightmap0: first 2-D array.
    :param heightmap1: second 2-D array of the same shape.
    :param normalize: when True, rescale the product to span [0, 1] via
        heightmap_normalize (which operates in place on the new array).
    :return: a new float array; the inputs are left untouched.
    """
    # Vectorized replacement for the original per-cell loop; the astype
    # keeps the float64 output the original got from its np.zeros buffer.
    output = np.multiply(heightmap0, heightmap1).astype(float)
    if normalize:
        output = heightmap_normalize(output)
    return output
def heightmap_normalize(heightmap):
    """Rescale `heightmap` in place so its values span [0, 1].

    The array is shifted so its minimum becomes 0, then divided by its
    (new) maximum. The same (mutated) array is returned for convenience.
    """
    low = heightmap.min()
    heightmap -= low
    heightmap /= heightmap.max()
    return heightmap
def mean_normalize(heightmap, new_mean):
    """Rescale `heightmap` so that its mean value becomes `new_mean`.

    :param heightmap: numeric array.
    :param new_mean: target mean of the returned array.
    :return: a new array equal to heightmap * new_mean / heightmap.mean().
    """
    # Bug fix: the original divided the total by `heightmap.shape` (a
    # tuple), which numpy broadcasts per-axis instead of dividing by the
    # element count -- use the array mean directly.
    return heightmap * new_mean / heightmap.mean()
def heightmap_radar_list(heightmap, r_step, theta_step,
                         init_angle=0, sweep=np.pi):
    """Read and interpolate a square heightmap radially from the center.
    """
    # Sweeps `theta_step` evenly spaced angles over `sweep` radians; along
    # each ray it samples `r_step` unit-spaced points by bilinear
    # interpolation and appends them to a flat output list.
    # NOTE(review): `init_angle` is accepted but never used -- rays always
    # start at angle 0; confirm whether it should offset `angle` below.
    N = heightmap.shape[0]
    # NOTE: `list` shadows the builtin; name kept unchanged here.
    list = []
    for turn in range(theta_step):
        angle = sweep*turn/theta_step
        for length in range(r_step):
            x, y = length*np.cos(angle), length*np.sin(angle)
            # correct for orientation
            i, j = int(np.floor(y + N/2)), int(np.floor(x + N/2))
            # (u, v) are the fractional offsets inside cell (i, j).
            u, v = y + N/2 - i, x + N/2 - j
            # Bilinear interpolation between the four surrounding samples.
            # NOTE(review): i+1/j+1 can reach one past the last row/column
            # when a sample lands on the grid border (depends on r_step vs
            # N) -- verify callers keep the rays inside the grid.
            A, B = heightmap[ i, j], heightmap[ i, j+1]
            C, D = heightmap[i+1, j], heightmap[i+1, j+1]
            interp = (A*(1-v) + B*v)*(1-u) + (C*(1-v) + D*v)*u
            list.append(interp)
    list = np.array(list)
    return(list)
def erode(heightmap, seed, iter):
rows, cols = heightmap.shape
random.seed(seed + "rain")
for n in range(iter):
i = random.randint(0, rows-1)
j = random.randint(0, cols-1)
droplet_volume = 1
while droplet_volume > 0:
north_exists = i > 0
south_exists = i < rows - 1
west_exists = j > 0
east_exists = j < cols - 1
current_min = heightmap[i, j]
choices = [(0, 0)]
if north_exists:
new_height = heightmap[i - 1, j]
if new_height < current_min:
min = heightmap
choices = [(i - 1, j)]
elif new_height == current_min:
choices.append((i - 1, j))
if south_exists:
new_height = heightmap[i + 1, j]
if new_height < current_min:
min = heightmap
choices = [(i + 1, j)]
elif new_height == current_min:
choices.append((i + 1, j))
if west_exists:
new_height = heightmap[i, j - 1]
if new_height < current_min:
min = heightmap
choices = [(i, j - 1)]
elif new_height == current_min:
choices.append((i, j - 1))
if east_exists:
new_height = heightmap[i, j + 1]
if new_height < current_min:
min = heightmap
choices = [(i, j + 1)]
elif new_height == current_min:
choices.append((i, j + 1))
if len(choices) == 1:
new_i, new_j = choices[0]
else:
random.seed(seed + "choose")
new_i, new_j = random.choice(choices)
if (i, j) == (new_i, new_j):
droplet_volume = -1
else:
average = (heightmap[i, j] + heightmap[new_i, new_j])/2
heightmap[i, j] = average
heightmap[new_i, new_j] = average
droplet_volume -= random.random()/8
i, j = new_i, new_j
# normalize
heightmap -= heightmap.min()
heightmap /= heightmap.max()
return(heightmap)
def heightmap_to_png(heightmap, filename):
    """Render `heightmap` as a 3D surface plot and save '<filename>.png'.

    :param heightmap: 2-D array of heights (z values).
    :param filename: base name; also used as the plot title.
    """
    # Bug fix: `plt` was referenced but never imported anywhere in this
    # module (only numpy and random are imported at the top); import it
    # locally so the rest of the module works without matplotlib.
    import matplotlib.pyplot as plt
    rows, cols = heightmap.shape
    x = [i for i in range(cols)]
    # correct for orientation
    y = [-i for i in range(rows)]
    # prepare for plot_surface
    x, y = np.meshgrid(x, y)
    z = heightmap
    px = 1/plt.rcParams['figure.dpi']  # pixel in inches
    plt.figure(figsize=(800*px, 800*px))
    ax = plt.axes(projection='3d')
    my_cmap = plt.get_cmap('cool')
    stride = 1
    # (The original bound the unused names `fig` and `surf` -- removed.)
    ax.plot_surface(x, y, z, cmap=my_cmap, rstride=stride, cstride=stride,
                    antialiased=False)
    ax.view_init(elev=20, azim=290)
    ax.set_title(filename)
    plt.gca().axes.get_xaxis().set_ticks([])
    plt.gca().axes.get_yaxis().set_ticks([])
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.savefig(filename + '.png')
    plt.close('all')
| [
"random.uniform",
"random.choice",
"numpy.delete",
"numpy.floor",
"numpy.ndenumerate",
"random.seed",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"random.random",
"random.randint"
] | [((319, 348), 'random.seed', 'random.seed', (["(seed + 'iterate')"], {}), "(seed + 'iterate')\n", (330, 348), False, 'import random\n'), ((1161, 1190), 'random.seed', 'random.seed', (["(seed + 'iterate')"], {}), "(seed + 'iterate')\n", (1172, 1190), False, 'import random\n'), ((3847, 3877), 'numpy.zeros', 'np.zeros', (['(rows - 1, cols - 1)'], {}), '((rows - 1, cols - 1))\n', (3855, 3877), True, 'import numpy as np\n'), ((3906, 3930), 'numpy.ndenumerate', 'np.ndenumerate', (['temp_map'], {}), '(temp_map)\n', (3920, 3930), True, 'import numpy as np\n'), ((4438, 4464), 'numpy.zeros', 'np.zeros', (['heightmap0.shape'], {}), '(heightmap0.shape)\n', (4446, 4464), True, 'import numpy as np\n'), ((4490, 4516), 'numpy.ndenumerate', 'np.ndenumerate', (['heightmap0'], {}), '(heightmap0)\n', (4504, 4516), True, 'import numpy as np\n'), ((5741, 5755), 'numpy.array', 'np.array', (['list'], {}), '(list)\n', (5749, 5755), True, 'import numpy as np\n'), ((5845, 5871), 'random.seed', 'random.seed', (["(seed + 'rain')"], {}), "(seed + 'rain')\n", (5856, 5871), False, 'import random\n'), ((8306, 8323), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (8317, 8323), True, 'import numpy as np\n'), ((187, 213), 'random.seed', 'random.seed', (["(seed + 'init')"], {}), "(seed + 'init')\n", (198, 213), False, 'import random\n'), ((667, 686), 'numpy.array', 'np.array', (['temp_list'], {}), '(temp_list)\n', (675, 686), True, 'import numpy as np\n'), ((951, 977), 'random.seed', 'random.seed', (["(seed + 'init')"], {}), "(seed + 'init')\n", (962, 977), False, 'import random\n'), ((1141, 1155), 'numpy.array', 'np.array', (['init'], {}), '(init)\n', (1149, 1155), True, 'import numpy as np\n'), ((1273, 1311), 'numpy.zeros', 'np.zeros', (['(2 * rows - 1, 2 * cols - 1)'], {}), '((2 * rows - 1, 2 * cols - 1))\n', (1281, 1311), True, 'import numpy as np\n'), ((1442, 1467), 'numpy.ndenumerate', 'np.ndenumerate', (['heightmap'], {}), '(heightmap)\n', (1456, 1467), True, 'import numpy 
as np\n'), ((4201, 4228), 'numpy.delete', 'np.delete', (['heightmap', '(-1)', '(0)'], {}), '(heightmap, -1, 0)\n', (4210, 4228), True, 'import numpy as np\n'), ((4275, 4302), 'numpy.delete', 'np.delete', (['heightmap', '(-1)', '(1)'], {}), '(heightmap, -1, 1)\n', (4284, 4302), True, 'import numpy as np\n'), ((5911, 5938), 'random.randint', 'random.randint', (['(0)', '(rows - 1)'], {}), '(0, rows - 1)\n', (5925, 5938), False, 'import random\n'), ((5949, 5976), 'random.randint', 'random.randint', (['(0)', '(cols - 1)'], {}), '(0, cols - 1)\n', (5963, 5976), False, 'import random\n'), ((244, 259), 'random.random', 'random.random', ([], {}), '()\n', (257, 259), False, 'import random\n'), ((261, 276), 'random.random', 'random.random', ([], {}), '()\n', (274, 276), False, 'import random\n'), ((7554, 7582), 'random.seed', 'random.seed', (["(seed + 'choose')"], {}), "(seed + 'choose')\n", (7565, 7582), False, 'import random\n'), ((7614, 7636), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (7627, 7636), False, 'import random\n'), ((1009, 1024), 'random.random', 'random.random', ([], {}), '()\n', (1022, 1024), False, 'import random\n'), ((1026, 1041), 'random.random', 'random.random', ([], {}), '()\n', (1039, 1041), False, 'import random\n'), ((1075, 1090), 'random.random', 'random.random', ([], {}), '()\n', (1088, 1090), False, 'import random\n'), ((1092, 1107), 'random.random', 'random.random', ([], {}), '()\n', (1105, 1107), False, 'import random\n'), ((1928, 1949), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1942, 1949), False, 'import random\n'), ((2356, 2377), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2370, 2377), False, 'import random\n'), ((2780, 2801), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2794, 2801), False, 'import random\n'), ((5335, 5348), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5341, 5348), True, 'import numpy as np\n'), ((5357, 
5370), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5363, 5370), True, 'import numpy as np\n'), ((5432, 5451), 'numpy.floor', 'np.floor', (['(y + N / 2)'], {}), '(y + N / 2)\n', (5440, 5451), True, 'import numpy as np\n'), ((5456, 5475), 'numpy.floor', 'np.floor', (['(x + N / 2)'], {}), '(x + N / 2)\n', (5464, 5475), True, 'import numpy as np\n'), ((7930, 7945), 'random.random', 'random.random', ([], {}), '()\n', (7943, 7945), False, 'import random\n'), ((563, 584), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (577, 584), False, 'import random\n'), ((3076, 3097), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3090, 3097), False, 'import random\n'), ((3373, 3394), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3387, 3394), False, 'import random\n')] |
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from baselines.common import tf_util as U
from baselines.ppo1 import mlp_policy
from codes.envs.utils import make_env
from codes.model.expert_policy.normal_mlp import NormalMLPPolicy
from codes.model.expert_policy.utils import convert_tf_variable
def convert_tf_to_pytorch(model_file, env_id, seed, num_cpu=1, hid_size=64, num_hid_layers=2):
    """Convert a baselines TF MLP-policy checkpoint into a PyTorch state dict.

    :param model_file: path of the TensorFlow checkpoint to restore.
    :param env_id: gym environment id, used to recover the observation and
        action spaces the policy was built for.
    :param seed: seed forwarded to make_env.
    :param num_cpu: number of CPUs for the temporary TF session.
    :param hid_size: hidden-layer width of the MLP policy.
    :param num_hid_layers: number of hidden layers of the MLP policy.
    :return: tuple (state_dict, pi_th) -- the converted OrderedDict and a
        NormalMLPPolicy instance that has successfully loaded it.
    """
    def policy_fn(name, ob_space, ac_space):
        # Rebuild the same MLP architecture the checkpoint was trained with.
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
            hid_size=hid_size, num_hid_layers=num_hid_layers)
    env = make_env(env_id, seed)
    with U.make_session(num_cpu=num_cpu) as sess:
        pi_tf = policy_fn("pi", env.observation_space, env.action_space)
        graph = tf.get_default_graph()
        # Load the TensorFlow model
        saver = tf.train.Saver(pi_tf.get_variables())
        saver.restore(sess, model_file)
        # Convert layers
        state_dict = OrderedDict()
        for i in range(num_hid_layers):
            # TF dense kernels are stored (in, out); torch.nn.Linear expects
            # (out, in), hence the .t() transpose on each kernel.
            state_dict['layers.{0}.weight'.format(2 * i)] = convert_tf_variable(
                graph, sess, 'pi/pol/fc{0}/kernel:0'.format(i + 1)).t()
            state_dict['layers.{0}.bias'.format(2 * i)] = convert_tf_variable(
                graph, sess, 'pi/pol/fc{0}/bias:0'.format(i + 1))
        # Convert last layer
        state_dict['mean.weight'] = convert_tf_variable(graph, sess, 'pi/pol/final/kernel:0').t()
        state_dict['mean.bias'] = convert_tf_variable(graph, sess, 'pi/pol/final/bias:0')
        # Convert log std
        state_dict['logstd'] = convert_tf_variable(graph, sess, 'pi/pol/logstd:0')[0]
        # Convert observation normalization
        state_dict['obs_rms.sum'] = convert_tf_variable(graph, sess, 'pi/obfilter/runningsum:0')
        state_dict['obs_rms.sumsq'] = convert_tf_variable(graph, sess, 'pi/obfilter/runningsumsq:0')
        count_tf = graph.get_tensor_by_name('pi/obfilter/count:0')
        state_dict['obs_rms.count'] = torch.tensor(sess.run(count_tf), dtype=torch.float64)
    # Check if the state dict can be loaded
    pi_th = NormalMLPPolicy(int(np.prod(env.observation_space.shape)), int(np.prod(env.action_space.shape)),
                           hid_size, num_hid_layers, nonlinearity=nn.Tanh)
    pi_th.load_state_dict(state_dict)
    return state_dict, pi_th
def main(config_dict):
    """Convert the configured TF expert-policy checkpoint to PyTorch and save it."""
    policy_cfg = config_dict.model.expert_policy
    base = '{0}__{1}'.format(config_dict.env.name, policy_cfg.name)
    ckpt_path = os.path.join(policy_cfg.save_dir, '{0}.ckpt'.format(base))
    state_dict, model = convert_tf_to_pytorch(
        ckpt_path, config_dict.env.name, config_dict.general.seed,
        num_cpu=policy_cfg.num_cpu,
        hid_size=policy_cfg.hidden_size,
        num_hid_layers=policy_cfg.num_layers)
    # Persist the converted weights next to the original checkpoint.
    out_path = os.path.join(policy_cfg.save_dir, '{0}.th.pt'.format(base))
    torch.save(state_dict, out_path)
    return model
def test_conversion(config_dict):
    """Sanity-check a converted policy by comparing rollouts.

    Rolls out one deterministic episode with the original TF policy and one
    with its converted PyTorch copy, then prints the maximum absolute
    difference between the two observation trajectories.

    NOTE(review): the TF rollout does not reseed the environment (the
    `env.seed` call is commented out) while the PyTorch rollout does; with
    a stochastic environment the trajectories may differ for that reason
    alone -- confirm this is intended.
    """
    expert_policy_config = config_dict.model.expert_policy
    name = '{0}__{1}'.format(config_dict.env.name, expert_policy_config.name)
    model_file_tf = os.path.join(expert_policy_config.save_dir, '{0}.ckpt'.format(name))
    model_file_th = os.path.join(expert_policy_config.save_dir, '{0}.th.pt'.format(name))
    env = make_env(config_dict.env.name, config_dict.general.seed)
    pi_tf = mlp_policy.MlpPolicy(name='pi', ob_space=env.observation_space,
        ac_space=env.action_space, hid_size=expert_policy_config.hidden_size,
        num_hid_layers=expert_policy_config.num_layers)
    observations_tf = []
    with U.make_session(num_cpu=expert_policy_config.num_cpu) as sess:
        # Load TF model
        saver = tf.train.Saver(pi_tf.get_variables())
        saver.restore(tf.get_default_session(), model_file_tf)
        # Sample trajectory
        # env.seed(config_dict.general.seed)
        observation, done = env.reset(), False
        observations_tf.append(observation)
        while not done:
            # stochastic=False: use the policy mean, not a sampled action.
            action = pi_tf.act(stochastic=False, ob=observation)[0]
            observation, _, done, _ = env.step(action)
            observations_tf.append(observation)
    pi_th = NormalMLPPolicy(int(np.prod(env.observation_space.shape)),
        int(np.prod(env.action_space.shape)), expert_policy_config.hidden_size,
        expert_policy_config.num_layers, nonlinearity=nn.Tanh)
    observations_th = []
    # Load Pytorch model
    with open(model_file_th, 'rb') as f:
        state_dict = torch.load(f)
        pi_th.load_state_dict(state_dict)
    # Sample trajectory
    env.seed(config_dict.general.seed)
    observation, done = env.reset(), False
    observations_th.append(observation)
    while not done:
        observation_tensor = torch.from_numpy(observation).unsqueeze(0).float()
        # Use the distribution mean to mirror the TF deterministic rollout.
        action_tensor = pi_th(observation_tensor).mean[0]
        action = action_tensor.detach().cpu().numpy()
        observation, _, done, _ = env.step(action)
        observations_th.append(observation)
    # Compare the trajectories
    linf_norm = np.max(np.abs(np.asarray(observations_tf) - np.asarray(observations_th)))
    print('Maximum absolute difference between observations: {0}'.format(linf_norm))
| [
"numpy.prod",
"collections.OrderedDict",
"baselines.ppo1.mlp_policy.MlpPolicy",
"codes.envs.utils.make_env",
"baselines.common.tf_util.make_session",
"torch.load",
"tensorflow.get_default_session",
"numpy.asarray",
"torch.from_numpy",
"torch.save",
"tensorflow.get_default_graph",
"codes.model.... | [((672, 694), 'codes.envs.utils.make_env', 'make_env', (['env_id', 'seed'], {}), '(env_id, seed)\n', (680, 694), False, 'from codes.envs.utils import make_env\n'), ((3029, 3062), 'torch.save', 'torch.save', (['state_dict', 'file_name'], {}), '(state_dict, file_name)\n', (3039, 3062), False, 'import torch\n'), ((3442, 3498), 'codes.envs.utils.make_env', 'make_env', (['config_dict.env.name', 'config_dict.general.seed'], {}), '(config_dict.env.name, config_dict.general.seed)\n', (3450, 3498), False, 'from codes.envs.utils import make_env\n'), ((3513, 3703), 'baselines.ppo1.mlp_policy.MlpPolicy', 'mlp_policy.MlpPolicy', ([], {'name': '"""pi"""', 'ob_space': 'env.observation_space', 'ac_space': 'env.action_space', 'hid_size': 'expert_policy_config.hidden_size', 'num_hid_layers': 'expert_policy_config.num_layers'}), "(name='pi', ob_space=env.observation_space, ac_space=\n env.action_space, hid_size=expert_policy_config.hidden_size,\n num_hid_layers=expert_policy_config.num_layers)\n", (3533, 3703), False, 'from baselines.ppo1 import mlp_policy\n'), ((529, 652), 'baselines.ppo1.mlp_policy.MlpPolicy', 'mlp_policy.MlpPolicy', ([], {'name': 'name', 'ob_space': 'ob_space', 'ac_space': 'ac_space', 'hid_size': 'hid_size', 'num_hid_layers': 'num_hid_layers'}), '(name=name, ob_space=ob_space, ac_space=ac_space,\n hid_size=hid_size, num_hid_layers=num_hid_layers)\n', (549, 652), False, 'from baselines.ppo1 import mlp_policy\n'), ((704, 735), 'baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': 'num_cpu'}), '(num_cpu=num_cpu)\n', (718, 735), True, 'from baselines.common import tf_util as U\n'), ((834, 856), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (854, 856), True, 'import tensorflow as tf\n'), ((1035, 1048), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1046, 1048), False, 'from collections import OrderedDict\n'), ((1548, 1603), 'codes.model.expert_policy.utils.convert_tf_variable', 
'convert_tf_variable', (['graph', 'sess', '"""pi/pol/final/bias:0"""'], {}), "(graph, sess, 'pi/pol/final/bias:0')\n", (1567, 1603), False, 'from codes.model.expert_policy.utils import convert_tf_variable\n'), ((1796, 1856), 'codes.model.expert_policy.utils.convert_tf_variable', 'convert_tf_variable', (['graph', 'sess', '"""pi/obfilter/runningsum:0"""'], {}), "(graph, sess, 'pi/obfilter/runningsum:0')\n", (1815, 1856), False, 'from codes.model.expert_policy.utils import convert_tf_variable\n'), ((1895, 1957), 'codes.model.expert_policy.utils.convert_tf_variable', 'convert_tf_variable', (['graph', 'sess', '"""pi/obfilter/runningsumsq:0"""'], {}), "(graph, sess, 'pi/obfilter/runningsumsq:0')\n", (1914, 1957), False, 'from codes.model.expert_policy.utils import convert_tf_variable\n'), ((3745, 3797), 'baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': 'expert_policy_config.num_cpu'}), '(num_cpu=expert_policy_config.num_cpu)\n', (3759, 3797), True, 'from baselines.common import tf_util as U\n'), ((4634, 4647), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (4644, 4647), False, 'import torch\n'), ((1661, 1712), 'codes.model.expert_policy.utils.convert_tf_variable', 'convert_tf_variable', (['graph', 'sess', '"""pi/pol/logstd:0"""'], {}), "(graph, sess, 'pi/pol/logstd:0')\n", (1680, 1712), False, 'from codes.model.expert_policy.utils import convert_tf_variable\n'), ((2194, 2230), 'numpy.prod', 'np.prod', (['env.observation_space.shape'], {}), '(env.observation_space.shape)\n', (2201, 2230), True, 'import numpy as np\n'), ((2237, 2268), 'numpy.prod', 'np.prod', (['env.action_space.shape'], {}), '(env.action_space.shape)\n', (2244, 2268), True, 'import numpy as np\n'), ((3907, 3931), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3929, 3931), True, 'import tensorflow as tf\n'), ((4340, 4376), 'numpy.prod', 'np.prod', (['env.observation_space.shape'], {}), '(env.observation_space.shape)\n', (4347, 4376), True, 
'import numpy as np\n'), ((4391, 4422), 'numpy.prod', 'np.prod', (['env.action_space.shape'], {}), '(env.action_space.shape)\n', (4398, 4422), True, 'import numpy as np\n'), ((1452, 1509), 'codes.model.expert_policy.utils.convert_tf_variable', 'convert_tf_variable', (['graph', 'sess', '"""pi/pol/final/kernel:0"""'], {}), "(graph, sess, 'pi/pol/final/kernel:0')\n", (1471, 1509), False, 'from codes.model.expert_policy.utils import convert_tf_variable\n'), ((5205, 5232), 'numpy.asarray', 'np.asarray', (['observations_tf'], {}), '(observations_tf)\n', (5215, 5232), True, 'import numpy as np\n'), ((5235, 5262), 'numpy.asarray', 'np.asarray', (['observations_th'], {}), '(observations_th)\n', (5245, 5262), True, 'import numpy as np\n'), ((4885, 4914), 'torch.from_numpy', 'torch.from_numpy', (['observation'], {}), '(observation)\n', (4901, 4914), False, 'import torch\n')] |
from unittest import TestCase
from dexpy.simplex_centroid import build_simplex_centroid
from dexpy.eval import det_xtxi
from dexpy.model import make_quadratic_model
import numpy as np
import patsy
class TestSimplexCentroid(TestCase):
    """Checks for the simplex-centroid design builder."""

    @classmethod
    def test_d_optimality(cls):
        """Determinant of (X'X)^-1 for 3..8 factor designs matches references."""
        expected = [2.513455e3, 2.197654e6, 5.52777e9,
                    1.85905e13, 3.447727e16, 1.275709e19]
        computed = []
        for factor_count in range(3, 9):
            design = build_simplex_centroid(factor_count)
            # Quadratic mixture model without an intercept or squared terms.
            formula = "-1 + " + make_quadratic_model(design.columns,
                                                     include_squared=False)
            x_matrix = patsy.dmatrix(formula,
                                     design,
                                     return_type="dataframe")
            computed.append(det_xtxi(x_matrix, use_log=False))
        np.testing.assert_allclose(expected, computed, rtol=1e-5)
| [
"numpy.testing.assert_allclose",
"dexpy.simplex_centroid.build_simplex_centroid",
"patsy.dmatrix",
"dexpy.eval.det_xtxi",
"dexpy.model.make_quadratic_model"
] | [((865, 923), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['answer_d', 'actual_d'], {'rtol': '(1e-05)'}), '(answer_d, actual_d, rtol=1e-05)\n', (891, 923), True, 'import numpy as np\n'), ((475, 500), 'dexpy.simplex_centroid.build_simplex_centroid', 'build_simplex_centroid', (['i'], {}), '(i)\n', (497, 500), False, 'from dexpy.simplex_centroid import build_simplex_centroid\n'), ((665, 718), 'patsy.dmatrix', 'patsy.dmatrix', (['model', 'design'], {'return_type': '"""dataframe"""'}), "(model, design, return_type='dataframe')\n", (678, 718), False, 'import patsy\n'), ((531, 590), 'dexpy.model.make_quadratic_model', 'make_quadratic_model', (['design.columns'], {'include_squared': '(False)'}), '(design.columns, include_squared=False)\n', (551, 590), False, 'from dexpy.model import make_quadratic_model\n'), ((821, 854), 'dexpy.eval.det_xtxi', 'det_xtxi', (['x_matrix'], {'use_log': '(False)'}), '(x_matrix, use_log=False)\n', (829, 854), False, 'from dexpy.eval import det_xtxi\n')] |
"""
This script define the helper function for the agent to use
"""
import numpy as np
from collections import deque
import torch.nn as nn
import cv2
from src.params import *
# from params import *
def init_weight(layers):
    """Initialize the given torch layers in place.

    Conv2d/Linear layers get Xavier-uniform weights and zero biases;
    LSTMCell layers get both of their bias vectors zeroed. Other layer
    types are left untouched.

    :param layers: iterable of torch.nn modules.
    """
    for layer in layers:
        # isinstance replaces the original `type(layer) == ...` checks: it is
        # the idiomatic form and also covers subclasses of these layers.
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_uniform_(layer.weight)  # initialize weights
            nn.init.constant_(layer.bias, 0)  # bias set to 0
        elif isinstance(layer, nn.LSTMCell):
            nn.init.constant_(layer.bias_ih, 0)
            nn.init.constant_(layer.bias_hh, 0)
def preprocess(frame):
    """Turn an RGB frame into a normalized (1, 84, 84) grayscale array.

    Returns an all-zero array of the same shape when no frame is given.
    """
    if frame is None:
        return np.zeros((1, 84, 84))
    # Grayscale conversion improves training speed (single channel).
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    small = cv2.resize(gray, (84, 84))
    # Add the leading channel axis and scale pixel values into [0, 1].
    return small[None, :, :] / 255.
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"numpy.zeros",
"cv2.cvtColor",
"cv2.resize"
] | [((651, 690), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (663, 690), False, 'import cv2\n'), ((846, 867), 'numpy.zeros', 'np.zeros', (['(1, 84, 84)'], {}), '((1, 84, 84))\n', (854, 867), True, 'import numpy as np\n'), ((327, 364), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['layer.weight'], {}), '(layer.weight)\n', (350, 364), True, 'import torch.nn as nn\n'), ((398, 430), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias', '(0)'], {}), '(layer.bias, 0)\n', (415, 430), True, 'import torch.nn as nn\n'), ((502, 537), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias_ih', '(0)'], {}), '(layer.bias_ih, 0)\n', (519, 537), True, 'import torch.nn as nn\n'), ((549, 584), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias_hh', '(0)'], {}), '(layer.bias_hh, 0)\n', (566, 584), True, 'import torch.nn as nn\n'), ((759, 786), 'cv2.resize', 'cv2.resize', (['frame', '(84, 84)'], {}), '(frame, (84, 84))\n', (769, 786), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
:mod:`channel.worker` -- Multi-device sync API for a single computation device
==============================================================================
.. module:: worker
:platform: Unix
:synopsis: Provide methods for single device Theano code that enable
homogeneous operations across multiple devices.
Contains :class:`Worker` which provides Platoon's basic API for multi-device
operations. Upon creation, a Worker will initiate connections with its node's
:class:`Controller` process (ZMQ) and get access to intra-node lock. A worker
process is meant to have only one Worker instance to manage a corresponding
computation device, e.g. GPU. Thus, Worker is a singleton class.
Worker's available API depends on available backend frameworks. Currently, there
are two ways to use a Worker for global operations on parameters:
1. Through :meth:`Worker.sync_params`, which is its default interface.
2. Or :meth:`Worker.all_reduce` which is a multi-node/GPU collective
operation.
For detailed information about these methods please check their corresponding
documentation, as well as the brief table which compares the two in project's
:file:`README.md`.
Worker also has :meth:`Worker.recv_mb` interface for collecting mini-batches to
work on from Controller.
"""
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import signal
import base64
import numpy
import posix_ipc
import six
import zmq
try:
import pygpu
from pygpu import collectives as gpucoll
from theano import gpuarray as theanoga
from theano import config as theanoconf
except ImportError:
pygpu = None
from ..util import (mmap, PlatoonError, PlatoonWarning, SingletonType)
# Python 2/3 compatibility: expose a zero-copy buffer type under one name
# (`memoryview` on Py3, the removed builtin `buffer` on Py2).
if six.PY3:
    buffer_ = memoryview
else:
    buffer_ = buffer  # noqa
@six.add_metaclass(SingletonType)
class Worker(object):
    """
    Interface for multi-device operations.

    This class handles communication/synchronization with other processes.
    The features to do so (control channel, mini-batch channel and shared
    parameters) are all independent and optional so you don't have to use all
    of them.

    Parameters
    ----------
    control_port : int
        The tcp port number for control (ZMQ).
    data_port : int, optional
        The tcp port number for data (ZMQ).
    socket_timeout : int, optional
        Timeout in ms for both control and data sockets. Default: 5 min
    data_hwm : int, optional
        High water mark (see pyzmq docs) for data transfer.
    port : int, optional
        Deprecated alias of `data_port`; passing it raises RuntimeError.

    Attributes
    ----------
    shared_params : list of :class:`numpy.ndarray`
        This will have `numpy.ndarray` in the same order as `params_descr`
        (see :meth:`init_shared_params`). These arrays are backed by shared
        memory. Used by :meth:`sync_params` interface.
    shared_arrays : dict of str to :class:`numpy.ndarray`
        Maps size in bytes to a ndarray in shared memory. Needed in multi-node
        operations. Used by :meth:`all_reduce` interface.
    """
    def __init__(self, control_port, data_port=None, socket_timeout=300000,
                 data_hwm=10, port=None):
        # `port` is kept only to give old callers a clear migration error.
        if port is not None:
            raise RuntimeError(
                "The port parameter of Worker was renamed to data_port"
                " (as in the Controller)")
        self.context = zmq.Context()
        self._socket_timeout = socket_timeout
        # PID doubles as this worker's unique id on the local host.
        self._worker_id = os.getpid()
        if data_port:
            self.init_mb_sock(data_port, data_hwm)
        self._init_control_socket(control_port)
        self._job_uid = self.send_req("platoon-get_job_uid")
        print("JOB UID received from the controler {}".format(self._job_uid))
        # Named POSIX semaphore shared by all workers of this job; see lock().
        self._lock = posix_ipc.Semaphore("{}_lock".format(self._job_uid))
        # Controller sends SIGINT to force an orderly shutdown on failure.
        signal.signal(signal.SIGINT, self._handle_force_close)
        try:
            self._register_to_platoon()
        except Exception as exc:
            print(PlatoonWarning("Failed to register in a local GPU comm world.", exc),
                  file=sys.stderr)
            print(PlatoonWarning("Platoon `all_reduce` interface will not be functional."),
                  file=sys.stderr)
            # Sentinel checked by all_reduce() to report unavailability.
            self._local_comm = None
        self._shmem_names = dict()
        self._shmrefs = dict()
        self.shared_arrays = dict()

    ################################################################################
    #                           Basic Control Interface                            #
    ################################################################################

    def send_req(self, req, info=None):
        """
        Sends a control request to node's :class:`Controller`.

        Parameters
        ----------
        req : object
            Json-encodable object (usually Python string) that represents the
            type of request being sent to Controller.
        info : object, optional
            Json-encodable object used as input for this Worker's request to
            Controller.

        Returns
        -------
        object
            Json-decoded object.
        """
        query = {'worker_id': self._worker_id, 'req': req, 'req_info': info}
        self.csocket.send_json(query)
        # Poll with a timeout so a dead Controller raises instead of blocking
        # this worker forever.
        socks = dict(self.cpoller.poll(self._socket_timeout))
        if socks and socks.get(self.csocket) == zmq.POLLIN:
            return self.csocket.recv_json()
        else:
            raise PlatoonError("Control Socket: recv timeout")

    def lock(self, timeout=None):
        """
        Acquire intra-node lock.

        This is advisory and does not prevent concurrent access. This method
        will subtracts 1 in underlying POSIX semaphore, will block the rest
        calls at 0. The underlying semaphore, :attr:`_lock`, starts at 1.

        Parameters
        ----------
        timeout : int, optional
            Amount of time to wait for the lock to be available. A timeout of 0
            will raise an error immediately if the lock is not available.
            Default: None, which will block until the lock is released.

        .. versionchanged:: 0.6.0
            This method used to be called `lock_params`.
        """
        self._lock.acquire(timeout)

    def unlock(self):
        """
        Release intra-node lock.

        The current implementation does not ensure that the process
        that locked :attr:`shared_params` is also the one that unlocks them.
        It also does not prevent one process from unlocking more than once
        (which will allow more than one process to hold the lock). This method
        will add 1 in underlying POSIX semaphore, :attr:`_lock`.

        Make sure you follow proper lock/unlock logic in your program
        to avoid these problems.

        .. versionchanged:: 0.6.0
            This method used to be called `unlock_params`.
        """
        self._lock.release()

    @property
    def local_size(self):
        "Number of workers assigned to local host's controller."
        return self._local_size

    @property
    def local_rank(self):
        "Worker's rank in respect to local host's controller (NCCL comm world)."
        return self._local_rank

    @property
    def global_size(self):
        "Number of workers spawned across all hosts in total."
        return self._global_size

    @property
    def global_rank(self):
        "Worker's rank in respect to all hosts' controllers in total."
        return self._global_rank

    ################################################################################
    #                    Initialization and Finalization Methods                   #
    ################################################################################

    def _handle_force_close(self, signum, frame):
        """Handle SIGINT signals from Controller.

        This is expected to happen when something abnormal has happened in other
        workers which implies that training procedure should stop and fail.

        .. versionadded:: 0.6.0
        """
        self.close()
        sys.exit(1)  # Exit normally with non success value.

    def close(self):
        """
        Closes ZMQ connections, POSIX semaphores and shared memory.
        """
        print("Closing connections and unlinking memory...", file=sys.stderr)
        if hasattr(self, 'asocket'):
            self.asocket.close()
        if hasattr(self, 'csocket'):
            self.csocket.close()
        self.context.term()
        self._lock.close()
        # Unlinking may fail if another process already removed the object;
        # that is fine during shutdown.
        try:
            self._lock.unlink()
        except posix_ipc.ExistentialError:
            pass
        if hasattr(self, '_shmref'):
            try:
                self._shmref.unlink()
            except posix_ipc.ExistentialError:
                pass
        for shmref in self._shmrefs.values():
            try:
                shmref.unlink()
            except posix_ipc.ExistentialError:
                pass

    def _register_to_platoon(self):
        """
        Asks Controller for configuration information and creates a NCCL
        communicator that participate in the local node's workers world.

        For this it is needed that Theano is imported. Through Theano, this
        methods gets access to the single GPU context of this worker process.
        This context is to be used in all computations done by a worker's
        process.

        .. note::
            It is necessary that this initialization method is called
            successfully before :meth:`all_reduce` in order to be available
            and functional.

        .. versionadded:: 0.6.0
        """
        if pygpu:
            self.ctx_name = None
            self.gpuctx = theanoga.get_context(self.ctx_name)
            self.device = theanoconf.device
            self._local_id = gpucoll.GpuCommCliqueId(context=self.gpuctx)
            # Ask controller for local's info to participate in
            # (NCCL clique ids are raw bytes, hence the base64 round-trip
            # through the JSON control channel).
            lid = base64.b64encode(self._local_id.comm_id).decode('ascii')
            response = self.send_req("platoon-get_platoon_info",
                                     info={'device': self.device,
                                           'local_id': lid})
            nlid = base64.b64decode(response['local_id'].encode('ascii'))
            self._local_id.comm_id = bytearray(nlid)
            self._local_size = response['local_size']
            self._local_rank = response['local_rank']
            self._local_comm = gpucoll.GpuComm(self._local_id,
                                               self._local_size,
                                               self._local_rank)
            self._multinode = response['multinode']
            self._global_size = response['global_size']
            self._global_rank = response['global_rank']
        else:
            raise AttributeError("pygpu or theano is not imported")

    def init_mb_sock(self, port, data_hwm=10):
        """
        Initialize the mini-batch data socket.

        Parameters
        ----------
        port : int
            The tcp port to reach the mini-batch server on.
        data_hwm : int, optional
            High water mark, see pyzmq docs.

        .. note::
            This must be called before using :meth:`recv_mb`.
        """
        self.asocket = self.context.socket(zmq.PULL)
        self.asocket.setsockopt(zmq.LINGER, 0)
        self.asocket.set_hwm(data_hwm)
        self.asocket.connect("tcp://localhost:{}".format(port))
        self.apoller = zmq.Poller()
        self.apoller.register(self.asocket, zmq.POLLIN)

    def _init_control_socket(self, port):
        """
        Initialize control socket.

        Parameters
        ---------
        port : int
            The tcp port where the control master is listening at.

        .. note::
            This must be called before using :meth:`send_req`.
        """
        self.csocket = self.context.socket(zmq.REQ)
        self.csocket.setsockopt(zmq.LINGER, 0)
        self.csocket.connect('tcp://localhost:{}'.format(port))
        self.cpoller = zmq.Poller()
        self.cpoller.register(self.csocket, zmq.POLLIN)

    ################################################################################
    #                           Collectives Interface                              #
    ################################################################################

    def shared(self, array):
        """Creates a new POSIX shared memory buffer to be shared among Workers
        and their Controller and maps the size of `array` to that buffer.

        Controller is requested to create a new shared memory buffer with the
        same size as `array` in order to be used in multi-GPU/node Platoon
        collective operations through :meth:`all_reduce` interface. All
        participants in the same node have access to that memory.

        :param array: This array's size in bytes will be mapped to a shared
                      memory buffer in host with the same size.
        :type array: :ref:`pygpu.gpuarray.GpuArray`

        Returns
        -------
        shared_array : :ref:`numpy.ndarray`
            A newly created shared memory buffer with the same size or an already
            allocated one.

        Notes
        -----
        *For internal implementation*: There should probably be a barrier across
        nodes' Workers to ensure that, so far, each Controller has serviced
        a new shared memory's name to all Workers. This is due to the fact that
        Controller can service one Worker at a time and a Platoon collective
        service is a blocking one across Controllers. Current implementation
        is valid because calls to `pygpu.collectives` interface are synchronous
        across workers.

        .. versionadded:: 0.6.0
        """
        if not isinstance(array, pygpu.gpuarray.GpuArray):
            raise TypeError("`array` input is not pygpu.gpuarray.GpuArray.")
        # This is not a problem, unless we have concurrent calls in
        # :meth:`all_reduce` in the same worker-process and we are running in
        # multi-node. This due to the fact that :attr:`shared_arrays` are being
        # used as temporary buffers for the internal inter-node MPI collective
        # operation. We only need a shared buffer with Controller in order to
        # execute multi-node operation, so a mapping with size in bytes
        # suffices. See:
        # https://github.com/mila-udem/platoon/pull/66#discussion_r74988680
        bytesize = array.size * array.itemsize
        if bytesize in self.shared_arrays:
            return self.shared_arrays[bytesize]
        else:
            if array.flags['F']:
                order = 'F'
            else:
                order = 'C'
            try:
                shared_mem_name = self.send_req("platoon-init_new_shmem",
                                                info={'size': bytesize})
                shmref = posix_ipc.SharedMemory(shared_mem_name)
                shm = mmap(fd=shmref.fd, length=bytesize)
                shmref.close_fd()
            except Exception as exc:
                # `shmref` is unbound if SharedMemory itself raised, hence
                # the NameError guard below.
                try:
                    shmref.unlink()
                except (NameError, posix_ipc.ExistentialError):
                    pass
                raise PlatoonError("Failed to get access to shared memory buffer.", exc)
            shared_array = numpy.ndarray(array.shape, dtype=array.dtype,
                                         buffer=shm, offset=0, order=order)
            self._shmem_names[bytesize] = shared_mem_name  # Keep for common ref with Controller
            self._shmrefs[bytesize] = shmref  # Keep for unlinking when closing
            self.shared_arrays[bytesize] = shared_array
            return shared_array

    def all_reduce(self, src, op, dest=None):
        """
        AllReduce collective operation for workers in a multi-node/GPU Platoon.

        Parameters
        ----------
        src : :ref:`pygpu.gpuarray.GpuArray`
            Array to be reduced.
        op : str
            Reference name to reduce operation type.
            See :ref:`pygpu.collectives.TO_RED_OP`.
        dest : :ref:`pygpu.gpuarray.GpuArray`, optional
            Array to collect reduce operation result.

        Returns
        -------
        result: None or :ref:`pygpu.gpuarray.GpuArray`
            New Theano gpu shared variable which contains operation result
            if `dest` is None, else nothing.

        .. warning::
            Repeated unnecessary calls with no `dest`, where a logically valid
            pygpu GpuArray exists, should be avoided for optimal performance.

        .. versionadded:: 0.6.0
        """
        if self._local_comm is None:
            raise PlatoonError("`all_reduce` interface is not available. Check log.")
        if not isinstance(src, pygpu.gpuarray.GpuArray):
            raise TypeError("`src` input is not pygpu.gpuarray.GpuArray.")
        if dest is not None:
            if not isinstance(dest, pygpu.gpuarray.GpuArray):
                raise TypeError("`dest` input is not pygpu.gpuarray.GpuArray.")
        try:
            # Execute collective operation in local NCCL communicator world
            res = self._local_comm.all_reduce(src, op, dest)
        except Exception as exc:
            raise PlatoonError("Failed to execute pygpu all_reduce", exc)
        if dest is not None:
            res = dest
        res.sync()
        # If running with multi-node mode
        if self._multinode:
            # Create new shared buffer which corresponds to result GpuArray buffer
            res_array = self.shared(res)
            self.lock()
            # Only the first worker to arrive copies the local result out and
            # triggers the inter-node reduce; the rest reuse the outcome.
            first = self.send_req("platoon-am_i_first")
            if first:
                # Copy from GpuArray to shared memory buffer
                res.read(res_array)
                res.sync()
                # Request from controller to perform the same collective operation
                # in MPI communicator world using shared memory buffer
                self.send_req("platoon-all_reduce", info={'shmem': self._shmem_names[res.size * res.itemsize],
                                                          'dtype': str(res.dtype),
                                                          'op': op})
            self.unlock()
            # Concurrently copy from shared memory back to result GpuArray
            # after Controller has finished global collective operation
            res.write(res_array)
            res.sync()
        if dest is None:
            return res

    ################################################################################
    #                             Param Sync Interface                             #
    ################################################################################

    def _get_descr_size(self, dtype, shape):
        """Return the size in bytes of an array with this dtype and shape."""
        size = dtype.itemsize
        for s in shape:
            size *= s
        return size

    def init_shared_params(self, params, param_sync_rule):
        """
        Initialize shared memory parameters.

        This must be called before accessing the params attribute
        and/or calling :meth:`sync_params`.

        Parameters
        ----------
        params : list of :ref:`theano.compile.SharedVariable`
            Theano shared variables representing the weights of your model.
        param_sync_rule : :class:`param_sync.ParamSyncRule`
            Update rule for the parameters

        """
        self.update_fn = param_sync_rule.make_update_function(params)
        self.local_params = params
        params_descr = [(numpy.dtype(p.dtype), p.get_value(borrow=True).shape)
                        for p in params]
        params_size = sum(self._get_descr_size(*d) for d in params_descr)
        shared_mem_name = "{}_params".format(self._job_uid)
        # Acquire lock to decide who will init the shared memory
        self.lock()
        need_init = self.send_req("platoon-need_init")
        if need_init:
            # The ExistentialError is apparently the only way to verify
            # if the shared_memory exists.
            try:
                posix_ipc.unlink_shared_memory(shared_mem_name)
            except posix_ipc.ExistentialError:
                pass
            self._shmref = posix_ipc.SharedMemory(shared_mem_name,
                                                  posix_ipc.O_CREAT,
                                                  size=params_size)
        else:
            self._shmref = posix_ipc.SharedMemory(shared_mem_name)
        self._shm = mmap(fd=self._shmref.fd, length=params_size)
        self._shmref.close_fd()
        self.shared_params = []
        off = 0
        # Carve one ndarray view per parameter out of the single mmap,
        # laid out back-to-back.
        for dtype, shape in params_descr:
            self.shared_params.append(numpy.ndarray(shape, dtype=dtype,
                                                    buffer=self._shm,
                                                    offset=off))
            off += self._get_descr_size(dtype, shape)
        if need_init:
            self.copy_to_global(synchronous=False)
        self.unlock()

    def sync_params(self, synchronous=True):
        """
        Update the worker's parameters and the central parameters according
        to the provided parameter update rule.

        Parameters
        ----------
        synchronous : bool
            If false, the lock won't be acquired before touching the
            shared weights.

        """
        if synchronous:
            self.lock()
        self.update_fn(self.shared_params)
        if synchronous:
            self.unlock()

    def copy_to_local(self, synchronous=True):
        """
        Copy the global params to the local ones.

        Parameters
        ----------
        synchronous : bool
            If False, the lock won't be acquired before touching the
            shared weights.

        """
        if synchronous:
            self.lock()
        for p, v in zip(self.local_params, self.shared_params):
            p.set_value(v)
        if synchronous:
            self.unlock()

    def copy_to_global(self, synchronous=True):
        """
        Copy the local params to the global (shared) ones.

        Parameters
        ----------
        synchronous : bool
            If False, the lock won't be acquired before touching the
            shared weights.

        """
        if synchronous:
            self.lock()
        for p, v in zip(self.local_params, self.shared_params):
            v[:] = p.get_value(borrow=True)
        if synchronous:
            self.unlock()

    ################################################################################
    #                           Distribute Data Batches                            #
    ################################################################################

    def recv_mb(self):
        """
        Receive a mini-batch for processing.

        A mini-batch is composed of a number of numpy arrays.

        Returns
        -------
        list
            The list of numpy arrays for the mini-batch

        """
        socks = dict(self.apoller.poll(self._socket_timeout))
        # NOTE(review): if the poll returns events for a socket other than
        # `asocket`, `headers` below is never bound and the loop raises
        # NameError instead of a clear error.
        if socks:
            if socks.get(self.asocket) == zmq.POLLIN:
                headers = self.asocket.recv_json()
        else:
            raise Exception("Batch socket: recv timeout")
        arrays = []
        for header in headers:
            data = self.asocket.recv(copy=False)
            buf = buffer_(data)
            array = numpy.ndarray(
                buffer=buf, shape=header['shape'],
                dtype=numpy.dtype(header['descr']),
                order='F' if header['fortran_order'] else 'C')
            arrays.append(array)
        return arrays

    @staticmethod
    def default_parser():
        """
        Returns base :class:`Worker`'s class parser for its arguments.

        This parser can be augmented with more arguments, if it is needed, in
        case a class which inherits :class:`Worker` exists.

        .. versionadded:: 0.6.1
        """
        parser = argparse.ArgumentParser(
            description="Base Platoon Worker process.")
        parser.add_argument('--control-port', default=5567, type=int, required=False, help='The control port number.')
        parser.add_argument('--data-port', type=int, required=False, help='The data port number.')
        parser.add_argument('--data-hwm', default=10, type=int, required=False, help='The data port high water mark')
        return parser

    @staticmethod
    def default_arguments(args):
        """
        Static method which returns the correct arguments for a base
        :class:`Worker` class.

        :param args:
            Object returned by calling :meth:`argparse.ArgumentParser.parse_args`
            to a parser returned by :func:`default_parser`.

        .. versionadded:: 0.6.0
        """
        DEFAULT_KEYS = ['control_port', 'data_hwm', 'data_port']
        d = args.__dict__
        return dict((k, d[k]) for k in six.iterkeys(d) if k in DEFAULT_KEYS)
| [
"signal.signal",
"pygpu.collectives.GpuCommCliqueId",
"posix_ipc.unlink_shared_memory",
"argparse.ArgumentParser",
"six.add_metaclass",
"base64.b64encode",
"posix_ipc.SharedMemory",
"pygpu.collectives.GpuComm",
"theano.gpuarray.get_context",
"zmq.Poller",
"numpy.ndarray",
"os.getpid",
"sys.e... | [((1847, 1879), 'six.add_metaclass', 'six.add_metaclass', (['SingletonType'], {}), '(SingletonType)\n', (1864, 1879), False, 'import six\n'), ((3355, 3368), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (3366, 3368), False, 'import zmq\n'), ((3443, 3454), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3452, 3454), False, 'import os\n'), ((3801, 3855), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self._handle_force_close'], {}), '(signal.SIGINT, self._handle_force_close)\n', (3814, 3855), False, 'import signal\n'), ((8031, 8042), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8039, 8042), False, 'import sys\n'), ((11433, 11445), 'zmq.Poller', 'zmq.Poller', ([], {}), '()\n', (11443, 11445), False, 'import zmq\n'), ((11995, 12007), 'zmq.Poller', 'zmq.Poller', ([], {}), '()\n', (12005, 12007), False, 'import zmq\n'), ((23963, 24030), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Base Platoon Worker process."""'}), "(description='Base Platoon Worker process.')\n", (23986, 24030), False, 'import argparse\n'), ((9653, 9688), 'theano.gpuarray.get_context', 'theanoga.get_context', (['self.ctx_name'], {}), '(self.ctx_name)\n', (9673, 9688), True, 'from theano import gpuarray as theanoga\n'), ((9762, 9806), 'pygpu.collectives.GpuCommCliqueId', 'gpucoll.GpuCommCliqueId', ([], {'context': 'self.gpuctx'}), '(context=self.gpuctx)\n', (9785, 9806), True, 'from pygpu import collectives as gpucoll\n'), ((10404, 10471), 'pygpu.collectives.GpuComm', 'gpucoll.GpuComm', (['self._local_id', 'self._local_size', 'self._local_rank'], {}), '(self._local_id, self._local_size, self._local_rank)\n', (10419, 10471), True, 'from pygpu import collectives as gpucoll\n'), ((15305, 15390), 'numpy.ndarray', 'numpy.ndarray', (['array.shape'], {'dtype': 'array.dtype', 'buffer': 'shm', 'offset': '(0)', 'order': 'order'}), '(array.shape, dtype=array.dtype, buffer=shm, offset=0, order=order\n )\n', (15318, 15390), False, 'import numpy\n'), ((20204, 
20280), 'posix_ipc.SharedMemory', 'posix_ipc.SharedMemory', (['shared_mem_name', 'posix_ipc.O_CREAT'], {'size': 'params_size'}), '(shared_mem_name, posix_ipc.O_CREAT, size=params_size)\n', (20226, 20280), False, 'import posix_ipc\n'), ((20422, 20461), 'posix_ipc.SharedMemory', 'posix_ipc.SharedMemory', (['shared_mem_name'], {}), '(shared_mem_name)\n', (20444, 20461), False, 'import posix_ipc\n'), ((14874, 14913), 'posix_ipc.SharedMemory', 'posix_ipc.SharedMemory', (['shared_mem_name'], {}), '(shared_mem_name)\n', (14896, 14913), False, 'import posix_ipc\n'), ((19518, 19538), 'numpy.dtype', 'numpy.dtype', (['p.dtype'], {}), '(p.dtype)\n', (19529, 19538), False, 'import numpy\n'), ((20060, 20107), 'posix_ipc.unlink_shared_memory', 'posix_ipc.unlink_shared_memory', (['shared_mem_name'], {}), '(shared_mem_name)\n', (20090, 20107), False, 'import posix_ipc\n'), ((20689, 20752), 'numpy.ndarray', 'numpy.ndarray', (['shape'], {'dtype': 'dtype', 'buffer': 'self._shm', 'offset': 'off'}), '(shape, dtype=dtype, buffer=self._shm, offset=off)\n', (20702, 20752), False, 'import numpy\n'), ((9889, 9929), 'base64.b64encode', 'base64.b64encode', (['self._local_id.comm_id'], {}), '(self._local_id.comm_id)\n', (9905, 9929), False, 'import base64\n'), ((23477, 23505), 'numpy.dtype', 'numpy.dtype', (["header['descr']"], {}), "(header['descr'])\n", (23488, 23505), False, 'import numpy\n'), ((24908, 24923), 'six.iterkeys', 'six.iterkeys', (['d'], {}), '(d)\n', (24920, 24923), False, 'import six\n')] |
import matplotlib.pyplot as plt
import numpy as np
import statistics as st
from scipy import stats
# drain-time measurements: one row of repeated trials per pipe height
data = [[73, 65, 70], [64, 61, 67], [72, 63, 66],
        [58, 71, 56], [54, 70, 61], [57, 48, 56]]
# mean time per height
Averaget = [st.mean(trial) for trial in data]
# define global vars
eoverD = 0.0063          # relative roughness e/D
vis = 0.001002           # dynamic viscosity of water, Pa * s
D = float(0.0127)        # pipe diameter, m
rho = 1000               # water density, kg/m^3
vol = 5 * 10**-4         # collected volume per trial, m^3
height = [1, 1.5, 2, 2.5, 3, 3.5]          # head heights, ft
heightM = [ft * 0.3048 for ft in height]   # converted to meters
# define Re
def calcRe(v, vis, D, rho):
    """Reynolds number for pipe flow: Re = D * v * rho / mu."""
    reynolds = (D * v * rho) / vis
    return reynolds
# calc averageV
# cross-sectional area of the pipe, A = pi * (D/2)^2.
# Fix: use np.pi instead of the hard-coded low-precision constant 3.1415.
areaXS = np.pi * (D / 2)**2
def flow_rate(vol, t):
    """Volumetric flow rate Q = V / t."""
    rate = vol / t
    return rate
def calcV(flow, area):
    """Mean flow velocity v = Q / A, in m/s.

    Parameters
    ----------
    flow : float
        Volumetric flow rate (m^3/s).
    area : float
        Cross-sectional area (m^2); must be positive.

    Raises
    ------
    ValueError
        If `area` is not positive. (The original silently fell through and
        returned None, which would surface later as an obscure TypeError.)
    """
    if area <= 0:
        raise ValueError("cross-sectional area must be positive")
    return flow / area  # m/s
# mean velocity for each trial: v = Q / A
averageV = [calcV(flow_rate(vol, t), areaXS) for t in Averaget]
# friction-factor correlation (13-15a): Fanning f_f as a function of Re
def ff_turb(Re, eoverD):
    """Fanning friction factor for turbulent pipe flow (Haaland-type fit)."""
    argument = 6.9/Re + (eoverD*1/3.7)**(10/9)
    recip_root = -3.6 * np.log10(argument)
    # take the reciprocal and square it to recover f_f
    return (1/recip_root)**2
# find the Re of the data
Redata = [calcRe(v, vis, D, rho) for v in averageV]
# plot the data: bounds of the measured velocity / Re range
minV = min(averageV)
maxV = max(averageV)
minRe = calcRe(minV, vis, D, rho)
maxRe = calcRe(maxV, vis, D, rho)
# calc ff for data
ffdata = [ff_turb(Re_i, eoverD) for Re_i in Redata]
# smooth Re axis over the measured range for the correlation curve
X = np.linspace(minRe,maxRe,100)
Y = []
for i in X:
    Y.append(ff_turb(i,eoverD))
X_tick = np.linspace(minRe,maxRe, 3)  # NOTE(review): computed but never used
# laminar reference line, f_f = 16/Re
plt.plot(X,16/X)
plt.plot(X,Y, label = r'$Formula$')
plt.plot(Redata,ffdata, label = r'$Data$', marker = 'o')
plt.title(r'$f_f \: compared \: to \: Re$')
plt.xlabel(r'$Re$')
plt.xticks(np.arange(minRe,maxRe, step = 25))
plt.ylabel(r'$f_f$')
plt.legend()
plt.show()
| [
"statistics.mean",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1372, 1402), 'numpy.linspace', 'np.linspace', (['minRe', 'maxRe', '(100)'], {}), '(minRe, maxRe, 100)\n', (1383, 1402), True, 'import numpy as np\n'), ((1462, 1490), 'numpy.linspace', 'np.linspace', (['minRe', 'maxRe', '(3)'], {}), '(minRe, maxRe, 3)\n', (1473, 1490), True, 'import numpy as np\n'), ((1491, 1510), 'matplotlib.pyplot.plot', 'plt.plot', (['X', '(16 / X)'], {}), '(X, 16 / X)\n', (1499, 1510), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1541), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'label': '"""$Formula$"""'}), "(X, Y, label='$Formula$')\n", (1516, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1596), 'matplotlib.pyplot.plot', 'plt.plot', (['Redata', 'ffdata'], {'label': '"""$Data$"""', 'marker': '"""o"""'}), "(Redata, ffdata, label='$Data$', marker='o')\n", (1552, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1646), 'matplotlib.pyplot.title', 'plt.title', (['"""$f_f \\\\: compared \\\\: to \\\\: Re$"""'], {}), "('$f_f \\\\: compared \\\\: to \\\\: Re$')\n", (1610, 1646), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1663), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Re$"""'], {}), "('$Re$')\n", (1655, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1730), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$f_f$"""'], {}), "('$f_f$')\n", (1721, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1744), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1755), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1753, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1708), 'numpy.arange', 'np.arange', (['minRe', 'maxRe'], {'step': '(25)'}), '(minRe, maxRe, step=25)\n', (1685, 1708), True, 'import numpy as np\n'), ((225, 235), 'statistics.mean', 'st.mean', (['i'], {}), '(i)\n', (232, 235), True, 'import statistics as st\n'), ((968, 984), 'numpy.log10', 'np.log10', (['inside'], {}), 
'(inside)\n', (976, 984), True, 'import numpy as np\n')] |
import os
from fnmatch import fnmatch
import pickle
# General Processing
import numpy as np
import pandas as pd
import collections
# DECOMPOSITION
from sklearn.decomposition import NMF
from scipy.linalg import svd
# NLU
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1 as NLUV1
# from ibm_watson.natural_language_understanding_v1 import \
# Features, ConceptsOptions, EntitiesOptions, KeywordsOptions
# Presentation / apps
import seaborn as sns
# GENERAL FUNCTIONS
# SELECTION
def random_split(lst, split=0.5):
    '''
    Shuffle `lst` and split it into two disjoint numpy arrays.

    Parameters
    ----------
    lst : sequence
        Items to split; the input is not modified (a shuffled copy is used).
    split : float
        Fraction (0-1) of items to place in the FIRST returned part.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        (first_part, rest) with len(first_part) == int(split * len(lst)).

    Bug fix: the original returned ``shuffled[-n:], shuffled[:-n]`` which,
    when ``n == 0`` (small `split` or short list), put ALL items in the
    first part instead of none, because ``a[-0:]`` is the whole array.
    '''
    shuffled = np.array(lst)
    np.random.shuffle(shuffled)
    n = int(split * len(shuffled))
    cut = len(shuffled) - n
    return shuffled[cut:], shuffled[:cut]
# NORMALIZATION
def norm_stat(vec, weights=False):
    '''
    Statistical (z-score) normalization: (v - v.mean()) / v.std().
    With weights=True, returns the mean absolute deviation instead.
    '''
    if weights:
        return np.mean(abs(vec - vec.mean()))
    centered = vec - vec.mean()
    return centered / vec.std()
# Algebraic normalization - dot product
def norm_dot(vec, weights=False):
    '''
    Unit-length (L2) normalization so that v @ v == 1.
    With weights=True, returns the L2 length itself.
    '''
    length = np.sqrt(vec @ vec)
    return length if weights else vec / length
# Algebraic normalization - sum
def norm_sum(vec, weights=False):
    '''
    Sum normalization so that v.sum() == 1.
    With weights=True, returns the sum itself.
    '''
    total = vec.sum()
    return total if weights else vec / total
# Scaled Normalization -
def scale(vec, weights=False):
    '''
    Min-max scaling: v.min -> 0, v.max -> 1.
    With weights=True, returns the (epsilon-padded) range instead.
    '''
    eps = 0.00000001  # keeps constant vectors from dividing by zero
    span = vec.max() - vec.min() + eps
    if weights:
        return span
    return (vec - vec.min()) / span
def cleanup_chars(string, char_list=('\n', ' ')):
    '''Return `string` with every substring in `char_list` removed.'''
    for unwanted in char_list:
        string = string.replace(unwanted, '')
    return string
# Matrix dot product
def dotdf(df1, df2):
    '''
    Computes df1 @ df2 restricted to the labels common to df1.columns and
    df2.index, so mismatched labels never raise.
    '''
    shared = list(set(df1.columns) & set(df2.index))
    return df1[shared] @ df2.loc[shared]
# OS system commands
def ls(search, name_only=False, cos=None):
    '''
    Emulates unix ls (without flags). Accepts wildcard/'*'.

    Looks in the local filesystem by default; if `cos` is given, lists the
    cloud object store bucket instead. With name_only=True the directory
    path prefix is not prepended (filesystem mode only).
    '''
    # Split into directory part and the trailing glob pattern.
    pieces = search.replace('/', '/ ').split()
    pattern = pieces[-1]
    path = ''.join(pieces[:-1])
    if cos is not None:
        # look in cloud object store
        candidates = np.array(cos.get_bucket_contents())
    else:
        # look in filesystem; numpy array enables Boolean-mask selection
        candidates = np.array(os.listdir(path))
        if not name_only:
            # prepend the directory path to each entry
            candidates = np.array([path + entry for entry in candidates])
    keep = [fnmatch(entry, pattern) for entry in candidates]
    return candidates[keep]
# MATRIX-FACTORIZATION: DIMENSIONALITY REDUCTION & ARCHETYPING
# CLUSTER FEATURES INTO OCCUPATION CATEGORIES
# Use non-zero matrix factorization for clustering
# Use singular value decomposition first state for determining overall
# similarity
class Archetypes:
    '''
    Archetypes: Performs NMF of order n on X and stores the result as
    attributes.
    Archetypes are normalized: cosine similarity a(i) @ a(i) = 1.

    Attributes:
        my_archetypes.n        - order / number of archetypes
        my_archetypes.X        - input matrix

        my_archetypes.model    - NMF model
        my_archetypes.w        - NMF w-matrix
        my_archetypes.h        - NMF h-matrix

        my_archetypes.f        - features x archetypes matrix (from h-matrix)
        my_archetypes.fn       - Dot-Normalized archetypes
        my_archetypes.o        - documents x archetypes matrix (from w-matrix)
        my_archetypes.on       - Sum-Normalized documents
    '''
    def __init__(self, X, n,
                 norm=norm_dot,
                 bootstrap=False, bootstrap_frac=0.5,
                 random_state=None):
        # X : pandas.DataFrame, documents x features, to be factorized.
        # n : number of archetypes (NMF components).
        # norm : NOTE(review) - stored but unused below: `fn`/`on` always
        #     apply norm_dot / norm_sum regardless of this argument.
        # bootstrap : False, or the number of bootstrap resamples to run.
        # random_state : seeds NMF init only; NOTE(review) - X.sample() is
        #     NOT seeded, so bootstrap resampling stays nondeterministic.
        self.n = n
        self.X = X
        self.norm = norm
        self.random_state = random_state
        if bootstrap:
            self.bootstrap_n = bootstrap
            self.bootstrap_frac = bootstrap_frac
        else:
            # no bootstrapping: a single pass over the full matrix
            self.bootstrap_n = 1
            self.bootstrap_frac = 1
        self.model = NMF(
            n_components=n,
            init='random',
            random_state=self.random_state,
            max_iter=1000,
            tol=0.0000001
        )
        # Per-resample results keyed by resample index j.
        self.w_dic = {}
        self.o_dic = {}
        self.h_dic = {}
        self.f_dic = {}
        for j in range(self.bootstrap_n):
            # Resample rows (all of them when bootstrap_frac == 1).
            XX = self.X.sample(int(len(self.X) * self.bootstrap_frac))
            self.w_dic[j] = self.model.fit_transform(XX)
            self.o_dic[j] = pd.DataFrame(self.w_dic[j], index=XX.index)
            self.h_dic[j] = self.model.components_
            self.f_dic[j] = pd.DataFrame(self.h_dic[j], columns=XX.columns)
        # Convenience aliases exposing only the first resample's result.
        self.w = self.w_dic[0]  # TEMPORARY
        self.o = self.o_dic[0]  # TEMPORARY
        self.h = self.h_dic[0]  # TEMPORARY
        self.f = self.f_dic[0]  # TEMPORARY
        self.fn = self.f.T.apply(norm_dot).T
        self.on = self.o.T.apply(norm_sum).T
class Svd:
    '''
    Wraps a singular value decomposition as an object.

    my_svd = Svd(X) exposes
        my_svd.u/.s/.vt - the raw U, S and VT factors of X
        my_svd.f  - pandas.DataFrame: f=original features x svd_features
        my_svd.o  - pandas.DataFrame: o=occupations x svd_features
        my_svd.volume(keep_volume)
                  - collections.namedtuple ('dotted dictionary'):
                    dimensionality reduction keeping 'keep_volume' of
                    total variance
    '''
    def __init__(self, X):
        self.u, self.s, self.vt = svd(np.array(X))
        self.f = pd.DataFrame(self.vt, columns=X.columns)
        self.o = pd.DataFrame(self.u, columns=X.index)

    def volume(self, keep_volume):
        '''
        Dimensionality reduction: keep only as many singular components as
        needed to retain the `keep_volume` proportion of total variance.

        Returns a collections.namedtuple with fields s, f, o holding the
        truncated eigenvalues, features and occupations respectively, e.g.
            my_svd.volume(0.9).s - eigenvalues for 90% variance
            my_svd.volume(0.8).f - features for 80% variance
            my_svd.volume(0.5).o - occupations for 50% variance
        '''
        dotted_dic = collections.namedtuple('dotted_dic', 's f o')
        cumulative = self.s.cumsum()
        fraction = cumulative / cumulative[-1]
        n_max = np.argmin(np.square(fraction - keep_volume))
        return dotted_dic(s=self.s[:n_max],
                          f=self.f.iloc[:n_max],
                          o=self.o.iloc[:n_max])
class WatsonDocumentArchetypes:
    '''
    WatsonDocumentArchetypes performs Archetypal Analysis on a corpus
    consisting of a set of documents, for example a set
    of articles, books, news stories or medical dictations.

    Input parameters:
    PATH - Dictionary with paths to I/O
        PATH['data'] - Directory for input text files.
                       Example: './data/input_texts/'
        PATH['results'] - Directory for output.
                          Example: './data/output_nlu/'
    NLU - Dictionary with information for running Watson NLU
        NLU['apikey'] - apikey for running Watson NLU
        NLU['apiurl'] - URL for Watson NLU API
        NLU['version'] - Watson NLU version, e.g. '2019-07-12'
        NLU['features'] - Features requested from Watson NLU for each
                          document in the set, e.g.
                          Features(
                              categories= CategoriesOptions(),
                              concepts = ConceptsOptions(),
                              entities = EntitiesOptions(),
                              keywords = KeywordsOptions(),
                              relations = RelationsOptions(),
                              syntax = SyntaxOptions()
                          )

    Attributes:
    self.PATH - the I/O path dictionary given at construction
    '''

    def __init__(self, PATH, NLU,
                 train_test=False,
                 random_state=None,
                 use_cloud_store=False):
        """Load the corpus, run (or re-load) Watson NLU on every document,
        and store the per-document NLU results as pandas DataFrames.

        train_test -- falsy, or a fraction (0..1) of documents to hold out
                      as a test set.
        random_state -- forwarded to downstream archetype fits.
        use_cloud_store -- if True, documents and pickled NLU results are
                           read/written via IBM Cloud Object Storage buckets
                           instead of the local filesystem.
        """
        from cloud_object_store import CloudObjectStore
        self.PATH = PATH
        self.NLU = NLU
        self.random_state = random_state
        # To random partition documents into train/test-sets,
        # choose relative size of test-set, train_test (1 = 100%)
        self.train_test = train_test
        self.use_cloud_store = use_cloud_store
        # Create clients to interface Watson and Cloud services
        authenticator = IAMAuthenticator(NLU['apikey'])
        self.nlu_model = NLUV1(
            version=NLU['version'], authenticator=authenticator
        )
        self.nlu_model.set_service_url(NLU['apiurl'])
        if self.use_cloud_store:
            # Separate buckets for raw dictations and cached NLU output
            self.cos_dictations = CloudObjectStore(
                PATH['dictation_bucket'],
                PATH['cos_dictation_apikey'],
                PATH['cos_dictation_crn'],
                PATH['cos_dictation_endpoint']
            )
            self.cos_nlu = CloudObjectStore(
                PATH['nlu_bucket'],
                PATH['cos_nlu_apikey'],
                PATH['cos_nlu_crn'],
                PATH['cos_nlu_endpoint']
            )
        # Initiate X_matrix dictionaries (caches keyed by feature type)
        self.X_matrix_dic = {}
        self.X_matrix_train_dic = {}
        self.X_matrix_test_dic = {}
        self.archetypes_dic = {}
        self.svd_dic = {}
        # PREPARE DATA
        if self.use_cloud_store:
            # load from cloud storage bucket
            self.filenames = ls(
                '*.txt', name_only=True, cos=self.cos_dictations
            )
        else:
            # load from local file system
            # all filenames ending with '.txt'
            self.filenames = ls(self.PATH['data']+'*.txt', name_only=True)
        self.names = [name.replace('.txt', '') for name in self.filenames]
        # if train_test - self.names will be set to self.names_train
        self.all_names = self.names * 1
        # dictionary for dictation files (name -> full document text)
        self.dictation_dic = {}
        for name in self.filenames:
            if (self.use_cloud_store):
                self.dictation_dic[name.replace('.txt', '')] = \
                    self.cos_dictations.get_item(name).decode('utf-8')
            else:
                self.dictation_dic[name.replace('.txt', '')] = \
                    open(self.PATH['data']+name, encoding="utf-8").read()
        self.dictation_df = pd.Series(self.dictation_dic)
        # TRAIN-TEST SPLIT
        if self.train_test:
            # 0<train_test<1 - the proportion of names to save as 'test'
            # (rounded downwards)
            self.names_test, self.names_train = random_split(
                self.all_names, self.train_test
            )
            self.names = self.names_train
        # PERFORM WATSON NLU ANALYSIS
        # IF DICTATION ALREADY HAS PKL WITH Watson NLU:
        # READ EXISTING PKL. SKIP NEW WATSON CALC.
        # Dictionary with Watson-NLU results for each dictation
        self.watson = {}
        if self.use_cloud_store:
            # Check in Cloud storage bucket
            self.watson_pkl = 'all_dictations_nlu.pkl'
            pkl_exists = self.watson_pkl in self.cos_nlu.get_bucket_contents()
        else:
            # Check in local filesystem
            self.watson_pkl = PATH['results']+'all_dictations_nlu.pkl'
            pkl_exists = os.path.exists(self.watson_pkl)
        if pkl_exists:
            if self.use_cloud_store:
                # load previous result from Cloud storage
                self.watson = pickle.loads(
                    self.cos_nlu.get_item(self.watson_pkl)
                )
            else:
                # load previous result from local filesystem
                self.watson = pickle.load(open(self.watson_pkl, "rb"))
        else:
            # perform nlu-analysis on dictations
            for item in list(self.dictation_dic.items()):
                lbl = item[0]
                text = item[1]
                self.watson[lbl] = self.nlu_model.analyze(
                    text=text, features=NLU['features']
                )
                if self.use_cloud_store:
                    # save result to Cloud storage
                    self.cos_nlu.create_item(
                        str(lbl)+'_nlu.pkl',
                        pickle.dumps(self.watson[lbl])
                    )
                else:
                    # save result to local filesystem
                    f = open(PATH['results']+str(lbl)+'_nlu.pkl', 'wb')
                    pickle.dump(self.watson[lbl], f)
                    f.close()
            # Additionally persist the full result dictionary in one pickle
            if self.use_cloud_store:
                # save result to Cloud storage
                self.cos_nlu.create_item(
                    self.watson_pkl, pickle.dumps(self.watson)
                )
            else:
                f = open(self.watson_pkl, 'wb')
                pickle.dump(self.watson, f)
                f.close()
        # Copy Watson NLU results to Pandas Dataframes:
        # watson_nlu[dictation][feature] -> DataFrame of that feature's rows
        self.watson_nlu = {}
        for dctn in self.watson.items():
            self.watson_nlu[dctn[0]] = {}
            for item in list(dctn[1].result.items()):
                self.watson_nlu[dctn[0]][item[0]] = \
                    pd.DataFrame(list(item[1]))

    # ARCHETYPAL ANALYSIS
    # CONSTRUCT X- MATRIX
    def X_matrix(self, typ='entities'):
        '''
        Construct the archetypal analysis X-matrix by pivoting the dataframe
        in the dictionary my_wda.watson_nlu that contains the Watson NLU
        analysis in question.

        X_matrix(typ)
            rows : Dictations
            columns: Variables; keywords/entities/concepts, from Watson NLU
                     analysis
            values : Weights, from Watson NLU analysis

        The constructed X_matrix(typ) is saved as X_matrix_dic[typ].
        If my_wda.train_test has a value (not False), X_matrix_train_dic[typ]
        and X_matrix_test_dic[typ] are computed and added to their
        respective dictionaries.
        '''
        if typ not in self.X_matrix_dic.keys():
            df = pd.DataFrame()
            for key in self.names:
                dfx = self.watson_nlu[key][typ].copy()
                dfx['dictation'] = key
                df = df.append(dfx, sort=True)
            if typ == 'entities':
                # Only health conditions; weight = relevance * confidence
                df = df[df['type'] == 'HealthCondition']
                df.rename({'relevance': 'rel0'}, axis=1, inplace=True)
                df['relevance'] = df['rel0'] * df['confidence']
            self.X_matrix_dic[typ] = df.pivot_table(
                index='dictation', columns='text', values='relevance'
            ).fillna(0)
            if self.train_test:
                self.X_matrix_train_dic[typ] = self.X_matrix_dic[typ]
                # Build the analogous matrix for the held-out test names
                df = pd.DataFrame()
                for key in self.names_test:
                    dfx = self.watson_nlu[key][typ].copy()
                    dfx['dictation'] = key
                    df = df.append(dfx, sort=True)
                if typ == 'entities':
                    df = df[df['type'] == 'HealthCondition']
                    df.rename({'relevance': 'rel0'}, axis=1, inplace=True)
                    df['relevance'] = df['rel0'] * df['confidence']
                self.X_matrix_test_dic[typ] = df.pivot_table(
                    index='dictation', columns='text', values='relevance'
                ).fillna(0)
        return self.X_matrix_dic[typ]

    # CALCULATE ARCHETYPES
    def archetypes(self, typ='entities',
                   n_archs=6, bootstrap=False,
                   bootstrap_frac=0.5,
                   random_state=False,
                   norm=norm_sum):
        """Fit (and cache) an Archetypes decomposition of X_matrix(typ).

        Results are cached per (typ, hyperparameter) combination; note that
        the fit is re-run on every call even when the combination is cached.
        random_state=False means "use the instance default".
        """
        if random_state is False:
            random_state = self.random_state
        if typ not in self.archetypes_dic.keys():
            self.archetypes_dic[typ] = {}
        hyperparam = (n_archs, bootstrap, bootstrap_frac, random_state, norm)
        self.X_matrix(typ)
        self.archetypes_dic[typ][hyperparam] = Archetypes(
            self.X_matrix(typ), n_archs, bootstrap=bootstrap,
            bootstrap_frac=bootstrap_frac, random_state=random_state,
            norm=norm
        )
        return self.archetypes_dic[typ][hyperparam]

    def display_archetype(self, arch_nr=-1, typ='entities',
                          n_archs=6, var='variables',
                          threshold=0.10, norm=scale):
        """Show one archetype (or a clustermap of all of them).

        arch_nr -- archetype index; -1 returns a seaborn clustermap of all.
        var -- 'variables' (features) or 'dictations' (documents).
        threshold -- cut-off for which entries of the archetype to keep;
                     its interpretation depends on `norm` (see below).
        """
        # NOTE(review): eval() on internally defined strings only - not
        # user input - but still a code smell worth refactoring.
        fun = {
            'variables': 'self.archetypes(typ = typ,n_archs = n_archs).f.T ',
            'dictations': 'self.archetypes(typ = typ,n_archs = n_archs).o'
        }
        f = eval(fun[var])
        fn = f.apply(norm)
        if arch_nr == -1:
            return sns.clustermap(f).data2d
        else:
            arc = fn.sort_values(by=arch_nr, ascending=False)
            # normalized over sum: threshold is ignored volume
            if norm is norm_sum:
                arc_cs = arc[arch_nr].cumsum()
                thresh_idx = abs(arc_cs - (1 - threshold)).values.argmin()
                result = arc.iloc[:thresh_idx]
            if norm is scale:
                # scale-normalized: keep entries within threshold of the top
                result = arc[
                    arc[arch_nr] >= (threshold * arc[arch_nr][0])
                ]
            return result

    # CALCULATE SVD
    def svd(self, typ='entities'):
        """Compute and cache an Svd decomposition of X_matrix(typ)."""
        self.X_matrix(typ)
        self.svd_dic[typ] = Svd(self.X_matrix(typ))
        return

    # ANALYZE A TEXT
    def analyze(self, text, typ='entities'):
        # TODO: not implemented yet
        pass
| [
"pandas.Series",
"sklearn.decomposition.NMF",
"os.path.exists",
"collections.namedtuple",
"numpy.sqrt",
"os.listdir",
"pickle.dump",
"pickle.dumps",
"seaborn.clustermap",
"cloud_object_store.CloudObjectStore",
"numpy.square",
"numpy.array",
"ibm_watson.NaturalLanguageUnderstandingV1",
"fnm... | [((605, 618), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (613, 618), True, 'import numpy as np\n'), ((623, 650), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled'], {}), '(shuffled)\n', (640, 650), True, 'import numpy as np\n'), ((1124, 1142), 'numpy.sqrt', 'np.sqrt', (['(vec @ vec)'], {}), '(vec @ vec)\n', (1131, 1142), True, 'import numpy as np\n'), ((1161, 1179), 'numpy.sqrt', 'np.sqrt', (['(vec @ vec)'], {}), '(vec @ vec)\n', (1168, 1179), True, 'import numpy as np\n'), ((2708, 2755), 'numpy.array', 'np.array', (['[(path + name) for name in all_names]'], {}), '([(path + name) for name in all_names])\n', (2716, 2755), True, 'import numpy as np\n'), ((2764, 2786), 'fnmatch.fnmatch', 'fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (2771, 2786), False, 'from fnmatch import fnmatch\n'), ((4323, 4420), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'n', 'init': '"""random"""', 'random_state': 'self.random_state', 'max_iter': '(1000)', 'tol': '(1e-07)'}), "(n_components=n, init='random', random_state=self.random_state, max_iter\n =1000, tol=1e-07)\n", (4326, 4420), False, 'from sklearn.decomposition import NMF\n'), ((5916, 5956), 'pandas.DataFrame', 'pd.DataFrame', (['self.vt'], {'columns': 'X.columns'}), '(self.vt, columns=X.columns)\n', (5928, 5956), True, 'import pandas as pd\n'), ((5974, 6011), 'pandas.DataFrame', 'pd.DataFrame', (['self.u'], {'columns': 'X.index'}), '(self.u, columns=X.index)\n', (5986, 6011), True, 'import pandas as pd\n'), ((6482, 6527), 'collections.namedtuple', 'collections.namedtuple', (['"""dotted_dic"""', '"""s f o"""'], {}), "('dotted_dic', 's f o')\n", (6504, 6527), False, 'import collections\n'), ((8780, 8811), 'ibm_cloud_sdk_core.authenticators.IAMAuthenticator', 'IAMAuthenticator', (["NLU['apikey']"], {}), "(NLU['apikey'])\n", (8796, 8811), False, 'from ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n'), ((8837, 8895), 'ibm_watson.NaturalLanguageUnderstandingV1', 'NLUV1', 
([], {'version': "NLU['version']", 'authenticator': 'authenticator'}), "(version=NLU['version'], authenticator=authenticator)\n", (8842, 8895), True, 'from ibm_watson import NaturalLanguageUnderstandingV1 as NLUV1\n'), ((10716, 10745), 'pandas.Series', 'pd.Series', (['self.dictation_dic'], {}), '(self.dictation_dic)\n', (10725, 10745), True, 'import pandas as pd\n'), ((2497, 2513), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2507, 2513), False, 'import os\n'), ((4785, 4828), 'pandas.DataFrame', 'pd.DataFrame', (['self.w_dic[j]'], {'index': 'XX.index'}), '(self.w_dic[j], index=XX.index)\n', (4797, 4828), True, 'import pandas as pd\n'), ((4908, 4955), 'pandas.DataFrame', 'pd.DataFrame', (['self.h_dic[j]'], {'columns': 'XX.columns'}), '(self.h_dic[j], columns=XX.columns)\n', (4920, 4955), True, 'import pandas as pd\n'), ((5886, 5897), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5894, 5897), True, 'import numpy as np\n'), ((6606, 6633), 'numpy.square', 'np.square', (['(a2 - keep_volume)'], {}), '(a2 - keep_volume)\n', (6615, 6633), True, 'import numpy as np\n'), ((9040, 9175), 'cloud_object_store.CloudObjectStore', 'CloudObjectStore', (["PATH['dictation_bucket']", "PATH['cos_dictation_apikey']", "PATH['cos_dictation_crn']", "PATH['cos_dictation_endpoint']"], {}), "(PATH['dictation_bucket'], PATH['cos_dictation_apikey'],\n PATH['cos_dictation_crn'], PATH['cos_dictation_endpoint'])\n", (9056, 9175), False, 'from cloud_object_store import CloudObjectStore\n'), ((9277, 9389), 'cloud_object_store.CloudObjectStore', 'CloudObjectStore', (["PATH['nlu_bucket']", "PATH['cos_nlu_apikey']", "PATH['cos_nlu_crn']", "PATH['cos_nlu_endpoint']"], {}), "(PATH['nlu_bucket'], PATH['cos_nlu_apikey'], PATH[\n 'cos_nlu_crn'], PATH['cos_nlu_endpoint'])\n", (9293, 9389), False, 'from cloud_object_store import CloudObjectStore\n'), ((11677, 11708), 'os.path.exists', 'os.path.exists', (['self.watson_pkl'], {}), '(self.watson_pkl)\n', (11691, 11708), False, 'import os\n'), 
((14402, 14416), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14414, 14416), True, 'import pandas as pd\n'), ((15079, 15093), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15091, 15093), True, 'import pandas as pd\n'), ((13196, 13223), 'pickle.dump', 'pickle.dump', (['self.watson', 'f'], {}), '(self.watson, f)\n', (13207, 13223), False, 'import pickle\n'), ((16920, 16937), 'seaborn.clustermap', 'sns.clustermap', (['f'], {}), '(f)\n', (16934, 16937), True, 'import seaborn as sns\n'), ((12843, 12875), 'pickle.dump', 'pickle.dump', (['self.watson[lbl]', 'f'], {}), '(self.watson[lbl], f)\n', (12854, 12875), False, 'import pickle\n'), ((13070, 13095), 'pickle.dumps', 'pickle.dumps', (['self.watson'], {}), '(self.watson)\n', (13082, 13095), False, 'import pickle\n'), ((12622, 12652), 'pickle.dumps', 'pickle.dumps', (['self.watson[lbl]'], {}), '(self.watson[lbl])\n', (12634, 12652), False, 'import pickle\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
This script assembles the training dataset
from the folder relative to different bags
"""
from __future__ import print_function, division
import os
import glob
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, ConcatDataset
from torchvision import transforms
from opts import parser
from img_utils import load_img
FLAGS = parser.parse_args()
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
class SteeringAnglesDataset(Dataset):
    """Steering Angles dataset.

    Pairs grayscale camera frames with the recorded steering angle.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (sequence of string): Paths to csv files with
                annotations; only the first entry is read.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        # Transforms (fix: identity comparison with None instead of ==)
        if transform is None:
            transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.ToTensor()
            ])
        self.transform = transform
        # Read the csv file
        self.steering_df = pd.read_csv(csv_file[0])
        self.root_dir = root_dir

    def __len__(self):
        # One sample per annotation row
        return len(self.steering_df)

    def __getitem__(self, idx):
        """Return (image_tensor, steering_tensor) for sample `idx`."""
        img_path = os.path.join(self.root_dir,
                                self.steering_df.iloc[idx, 0])
        # Resize/crop dimensions come from the command-line FLAGS
        target_size = (FLAGS.img_width, FLAGS.img_height)
        crop_size = (FLAGS.crop_img_width, FLAGS.crop_img_height)
        image = load_img(img_path, grayscale=True, target_size=target_size, crop_size=crop_size)
        steering = self.steering_df.iloc[idx, 1]
        steering = np.array(steering)
        steering = steering.astype('float')
        # Transform image to tensor
        image = self.transform(image)  # apply transforms to images
        image = image / 255.  # rescale between [0,1]
        image = image.type(torch.FloatTensor)
        steering = torch.tensor(steering, dtype=torch.float)
        return (image, steering)
class Concat(Dataset):
    """Concatenation of several datasets under one flat index.

    Stores the member datasets together with their cumulative length
    offsets so a global index can be routed to the owning dataset.
    """

    def __init__(self, datasets):
        self.datasets = datasets
        self.lengths = [len(d) for d in datasets]
        # Cumulative upper bounds of each dataset's index range
        self.offsets = np.cumsum(self.lengths)
        self.length = np.sum(self.lengths)

    def __getitem__(self, index):
        """Return the item at global `index`; IndexError when out of range."""
        start = 0
        for dataset, bound in zip(self.datasets, self.offsets):
            if index < bound:
                return dataset[index - start]
            start = bound
        raise IndexError(f'{index} exceeds {self.length}')

    def __len__(self):
        return self.length
def create_dataset(folder=None, mode=None):
    """Assemble a ConcatDataset from every experiment subfolder of `folder`.

    folder -- path such as "training", "testing" or "validation"; each
              subfolder must contain a sync_steering.csv annotation file.
    mode -- when 'augmentation', each experiment is added twice: once plain
            and once with a random-affine augmentation transform.
    """
    # Path to the data extracted from the Udacity dataset
    assert folder, "You should provide the dataset folder"
    experiments = glob.glob(folder + "/*")
    # Augmentation parameters
    rotation_range = 0.2
    width_shift_range = 0.2
    height_shift_range = 0.2
    translation_range = [width_shift_range, height_shift_range]
    train_tf = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomAffine(degrees=rotation_range, translate=translation_range),
        transforms.ToTensor()
        # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    datasets = []
    for exp_dir in experiments:
        annotation_files = glob.glob(exp_dir + "/sync_steering.csv")
        # Plain dataset for this experiment subfolder
        datasets.append(
            SteeringAnglesDataset(csv_file=annotation_files, root_dir=exp_dir)
        )
        if mode == 'augmentation':
            # Second, augmented view of the same experiment
            datasets.append(
                SteeringAnglesDataset(
                    csv_file=annotation_files, root_dir=exp_dir,
                    transform=train_tf
                )
            )
    # Concatenate all per-experiment datasets into the final dataset
    return ConcatDataset(datasets)
| [
"torch.utils.data.ConcatDataset",
"torchvision.transforms.RandomAffine",
"torchvision.transforms.ToPILImage",
"pandas.read_csv",
"os.path.join",
"opts.parser.parse_args",
"numpy.array",
"torch.tensor",
"numpy.sum",
"numpy.cumsum",
"img_utils.load_img",
"torchvision.transforms.ToTensor",
"war... | [((407, 426), 'opts.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (424, 426), False, 'from opts import parser\n'), ((462, 495), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (485, 495), False, 'import warnings\n'), ((2936, 2960), 'glob.glob', 'glob.glob', (["(folder + '/*')"], {}), "(folder + '/*')\n", (2945, 2960), False, 'import glob\n'), ((4096, 4119), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['datasets'], {}), '(datasets)\n', (4109, 4119), False, 'from torch.utils.data import Dataset, ConcatDataset\n'), ((1188, 1212), 'pandas.read_csv', 'pd.read_csv', (['csv_file[0]'], {}), '(csv_file[0])\n', (1199, 1212), True, 'import pandas as pd\n'), ((1359, 1417), 'os.path.join', 'os.path.join', (['self.root_dir', 'self.steering_df.iloc[idx, 0]'], {}), '(self.root_dir, self.steering_df.iloc[idx, 0])\n', (1371, 1417), False, 'import os\n'), ((1588, 1673), 'img_utils.load_img', 'load_img', (['img_path'], {'grayscale': '(True)', 'target_size': 'target_size', 'crop_size': 'crop_size'}), '(img_path, grayscale=True, target_size=target_size, crop_size=crop_size\n )\n', (1596, 1673), False, 'from img_utils import load_img\n'), ((1746, 1764), 'numpy.array', 'np.array', (['steering'], {}), '(steering)\n', (1754, 1764), True, 'import numpy as np\n'), ((2038, 2079), 'torch.tensor', 'torch.tensor', (['steering'], {'dtype': 'torch.float'}), '(steering, dtype=torch.float)\n', (2050, 2079), False, 'import torch\n'), ((2287, 2310), 'numpy.cumsum', 'np.cumsum', (['self.lengths'], {}), '(self.lengths)\n', (2296, 2310), True, 'import numpy as np\n'), ((2333, 2353), 'numpy.sum', 'np.sum', (['self.lengths'], {}), '(self.lengths)\n', (2339, 2353), True, 'import numpy as np\n'), ((3472, 3509), 'glob.glob', 'glob.glob', (["(exp + '/sync_steering.csv')"], {}), "(exp + '/sync_steering.csv')\n", (3481, 3509), False, 'import glob\n'), ((3160, 3183), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), 
'()\n', (3181, 3183), False, 'from torchvision import transforms\n'), ((3197, 3273), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': 'rotation_range', 'translate': 'translation_range'}), '(degrees=rotation_range, translate=translation_range)\n', (3220, 3273), False, 'from torchvision import transforms\n'), ((3287, 3308), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3306, 3308), False, 'from torchvision import transforms\n'), ((1020, 1043), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1041, 1043), False, 'from torchvision import transforms\n'), ((1061, 1082), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1080, 1082), False, 'from torchvision import transforms\n')] |
"""
Copyright 2018 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hnswlib
import importlib
import itertools
import numpy as np
import operator
import os
import sys
import warnings
from contextlib import contextmanager
from scipy.ndimage.interpolation import zoom
from scipy.stats import norm
from sklearn.neighbors import BallTree
from sklearn.preprocessing import MinMaxScaler
from typing import Callable, List
# Stupid Keras things is a smart way to always print. See:
# https://github.com/keras-team/keras/issues/1406
stderr = sys.stderr
sys.stderr = open(os.devnull, "w")
import keras
from keras.layers import Input
from keras.models import Model
sys.stderr = stderr
# One-level flatten: iterable of iterables -> single chained iterable
flatten = itertools.chain.from_iterable
def compare_lists(
    a: List, b: List, conditionator: Callable = all, comparator: Callable = operator.eq
):
    """Compare two lists element-wise.

    Applies `comparator` pairwise to the elements of `a` and `b` and reduces
    the results with `conditionator` (all/any). Comparison stops at the end
    of the shorter list, as `map` does.

    Fix: the previous implementation ignored `b` entirely and compared `a`
    against itself shifted by one (`itertools.islice(a, 1, None)`); the
    unused parameter made that a clear defect.
    """
    return conditionator(map(comparator, a, b))
def unpredictability(p: np.ndarray) -> float:
    """Unpredictability score.

    Twice the mean absolute deviation of each prediction probability from
    its nearest class (0 or 1): e.g. a probability of 0.6 deviates 0.4 from
    class 1, so a vector of 0.6s scores 0.8. Ranges from 0 (fully
    predictable) to 1 (every probability at 0.5).
    """
    deviation = np.abs(p - np.round(p))
    return 2 * np.mean(deviation)
def prediction_proba_change(p0: np.ndarray, p1: np.ndarray) -> float:
    """Mean absolute change in prediction probability between two runs."""
    delta = np.abs(p0 - p1)
    return np.mean(delta)
def prediction_change(p0: np.ndarray, p1: np.ndarray, border: float = 0.5) -> float:
    """Prediction change score.

    Fraction of samples whose predicted class (side of `border`) differs
    between the two probability vectors.
    """
    side_before = np.sign(p0 - border)
    side_after = np.sign(p1 - border)
    return np.mean(side_before != side_after)
# def uncertainty(model, X_train: np.ndarray, X_test: np.ndarray) -> float:
# """Unpredictability score
#
# Unpredictability is defined as the minimum deviation of the prediction probability
# from `0.5` to `0` or `1`. For example, for a prediction probability of 0.6 the
# unpredictability is 0.4. The highest unpredictability is 1 and the lowest is 0.
# """
# return random_forest_error(model, X_train, X_test).mean()
def convergence(
    x0: np.ndarray, x1: np.ndarray, x2: np.ndarray, decimals: int = 2
) -> float:
    """Convergence score.

    Given three consecutive measurements, the fraction of entries that move
    in the same (nonzero) direction across both steps, after rounding to
    `decimals`. Highest is 1, lowest 0.
    """
    r0 = np.round(x0, decimals=decimals)
    r1 = np.round(x1, decimals=decimals)
    r2 = np.round(x2, decimals=decimals)
    step_a = np.sign(r1 - r0)
    step_b = np.sign(r2 - r1)
    # Two equal nonzero signs sum to +/-2
    return np.mean(np.abs(step_a + step_b) == 2)
def divergence(
    x0: np.ndarray, x1: np.ndarray, x2: np.ndarray, decimals: int = 3
) -> float:
    """Divergence score.

    Given three consecutive measurements, the fraction of entries whose
    direction of change reverses between the two steps (up then down, or
    down then up), after rounding to `decimals`. Highest is 1, lowest 0.
    """
    r0 = np.round(x0, decimals=decimals)
    r1 = np.round(x1, decimals=decimals)
    r2 = np.round(x2, decimals=decimals)
    d0 = np.sign(r1 - r0)
    d1 = np.sign(r2 - r1)
    # Opposite nonzero signs cancel to 0; require an actual first move
    reversed_step = (d0 + d1 == 0) * (np.abs(d0) > 0)
    return np.mean(reversed_step)
def normalize(data, percentile: float = 99.9):
    """Clamp `data` to [min, `percentile`-th percentile], then scale to [0, 1].

    The upper clamp suppresses extreme outliers before min-max scaling.
    """
    low, high = np.percentile(data, (0, percentile))
    clamped = np.copy(data)
    clamped[clamped < low] = low
    clamped[clamped > high] = high
    return MinMaxScaler().fit_transform(clamped)
def normalize_simple(data: np.ndarray):
    """Linearly rescale `data` so its minimum maps to 0 and its maximum to 1.

    Fix: the previous version subtracted the minimum in place
    (`data -= ...`), silently mutating the caller's array; the shift is now
    computed on a new array, leaving the input untouched.
    """
    shifted = data - np.min(data)
    return shifted / np.max(shifted)
def load_model(filepath: str, silent: bool = False, additional_args: list = None):
    """Load a Keras model from `filepath`, falling back to a custom model.

    silent -- suppress Keras warnings while loading.
    additional_args -- positional arguments forwarded to the custom model's
        `load` classmethod (only used on the fallback path).

    Fallback: if Keras fails for any reason, `filepath` is reinterpreted as
    "<module path>/<class name>" and the class's `load` method is called.
    NOTE(review): this assumes os.path.dirname(filepath) is an importable
    module path - confirm against callers.
    """
    try:
        if silent:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                model = keras.models.load_model(filepath)
        else:
            model = keras.models.load_model(filepath)
    except Exception:
        # We assume it's a custom model
        # (local name deliberately shadows the keras Model import here)
        Model = getattr(
            importlib.import_module(os.path.dirname(filepath)),
            os.path.basename(filepath)
        )
        model = Model.load(*additional_args)
    return model
def get_encoder(autoencoder):
    """Return a model mapping the autoencoder input to its 'embed' layer."""
    # Locate the (last) layer named "embed"
    embed_idx = None
    for idx, layer in enumerate(autoencoder.layers):
        if layer.name == "embed":
            embed_idx = idx
    # Re-chain layers 1..embed_idx on top of the original input tensor
    tensor = autoencoder.input
    for idx in range(1, embed_idx + 1):
        tensor = autoencoder.layers[idx](tensor)
    return Model(autoencoder.input, tensor)
def get_decoder(autoencoder):
    """Return a model mapping an embedding vector through the decoder half."""
    # Locate the (last) layer named "embed"
    embed_layer = None
    embed_idx = None
    for idx, layer in enumerate(autoencoder.layers):
        if layer.name == "embed":
            embed_layer = layer
            embed_idx = idx
    # Fresh input with the embedding's dimensionality
    embed_dim = embed_layer.output_shape[1]
    encoded_input = Input(shape=(embed_dim,), name="input")
    tensor = encoded_input
    for idx in range(embed_idx + 1, len(autoencoder.layers)):
        tensor = autoencoder.layers[idx](tensor)
    return Model(encoded_input, tensor)
def get_search_target_windows(
    db, search_id, window_size, abs_offset, no_stack: bool = False
):
    """Window indices overlapping the target region of a stored search.

    Returns the index array alone when `no_stack` is True, otherwise an
    int matrix of [window_idx, 1] rows (label 1 = positive target window).
    """
    # Get search target window
    search = db.get_search(search_id)
    window_ranges = get_target_window_idx(
        search["target_from"],
        search["target_to"],
        window_size,
        search["config"]["step_freq"],
        abs_offset,
    )
    # Use the stricter index range (sufficient overlap with the target)
    win_ids = np.arange(*window_ranges[1])
    if no_stack:
        return win_ids
    labels = np.ones(win_ids.shape[0])
    stacked = np.hstack((win_ids.reshape(-1, 1), labels.reshape(-1, 1)))
    return stacked.astype(int)
def get_search_target_classif(db, search_id, window_size, abs_offset):
    """Positive-class training pairs [window_idx, 1] for a search target.

    This was a verbatim duplicate of `get_search_target_windows` with
    `no_stack=False`; it is now a thin wrapper so the window arithmetic
    lives in one place.
    """
    return get_search_target_windows(db, search_id, window_size, abs_offset)
def get_num_windows(chrom_size, window_size, step_size):
    """Number of sliding windows of `window_size`, stepped by `step_size`,
    needed to cover a sequence of length `chrom_size`."""
    num_steps = np.ceil((chrom_size - window_size) / step_size).astype(int)
    return num_steps + 1
def scaleup_vector(v, out_len, aggregator: Callable = np.mean):
    """Resample a 1D vector to `out_len` entries.

    Blows `v` up to the least common multiple of both lengths via exact
    integer repeats, then aggregates consecutive chunks back down to
    `out_len` values with `aggregator`.

    Fix: repeat counts and chunk sizes are now computed with integer
    division (`//`, exact because lcm is divisible by both lengths); the
    previous float division relied on an unsafe float->int cast inside
    `np.repeat`, which modern NumPy rejects.
    """
    in_len = v.shape[0]
    common_len = np.lcm(in_len, out_len)
    blowup = np.repeat(v, common_len // in_len)
    return aggregator(blowup.reshape(-1, common_len // out_len), axis=1)
def zoom_array(
    in_array,
    final_shape,
    same_sum=False,
    aggregator=np.mean,
    zoomor=zoom,
    **zoomor_kwargs
):
    """Rescale vectors savely.

    Normally, one can use scipy.ndimage.zoom to do array/image rescaling.
    However, scipy.ndimage.zoom does not coarsegrain images well. It basically
    takes nearest neighbor, rather than averaging all the pixels, when
    coarsegraining arrays. This increases noise. Photoshop doesn't do that, and
    performs some smart interpolation-averaging instead.

    If you were to coarsegrain an array by an integer factor, e.g. 100x100 ->
    25x25, you just need to do block-averaging, that's easy, and it reduces
    noise. But what if you want to coarsegrain 100x100 -> 30x30?

    Then my friend you are in trouble. But this function will help you. This
    function will blow up your 100x100 array to a 120x120 array using
    scipy.ndimage zoom Then it will coarsegrain a 120x120 array by
    block-averaging in 4x4 chunks.

    It will do it independently for each dimension, so if you want a 100x100
    array to become a 60x120 array, it will blow up the first and the second
    dimension to 120, and then block-average only the first dimension.

    Parameters
    ----------
    in_array: n-dimensional numpy array (1D also works)
    final_shape: resulting shape of an array
    same_sum: bool, preserve a sum of the array, rather than values.
        by default, values are preserved
    aggregator: by default, np.mean. You can plug your own.
    zoomor: by default, scipy.ndimage.zoom. You can plug your own.
    zoomor_kwargs: a dict of options to pass to zoomor.
    """
    in_array = np.asarray(in_array, dtype=np.double)
    in_shape = in_array.shape
    assert len(in_shape) == len(final_shape), "Number of dimensions need to equal"
    mults = []  # multipliers for the final coarsegraining
    for i in range(len(in_shape)):
        if final_shape[i] < in_shape[i]:
            # Shrinking this axis: blow up to a multiple first
            mults.append(int(np.ceil(in_shape[i] / final_shape[i])))
        else:
            mults.append(1)
    # shape to which to blow up
    temp_shape = tuple([i * j for i, j in zip(final_shape, mults)])
    # stupid zoom doesn't accept the final shape. Carefully crafting the
    # multipliers to make sure that it will work.
    zoom_multipliers = np.array(temp_shape) / np.array(in_shape) + 0.0000001
    assert zoom_multipliers.min() >= 1
    # applying zoom
    rescaled = zoomor(in_array, zoom_multipliers, **zoomor_kwargs)
    # Block-aggregate each axis that was blown past its final size
    for ind, mult in enumerate(mults):
        if mult != 1:
            sh = list(rescaled.shape)
            assert sh[ind] % mult == 0
            newshape = sh[:ind] + [sh[ind] // mult, mult] + sh[ind + 1 :]
            rescaled.shape = newshape
            rescaled = aggregator(rescaled, axis=ind + 1)
    # NOTE: this compares a tuple with final_shape, so pass a tuple
    assert rescaled.shape == final_shape
    if same_sum:
        extra_size = np.prod(final_shape) / np.prod(in_shape)
        rescaled /= extra_size
    return rescaled
def merge_interleaved(v, step_freq, aggregator=np.nanmean):
    """Collapse a vector of interleaved window values into one track.

    Each entry of `v` is shifted by its interleave offset into a
    (len + step_freq - 1, step_freq) matrix padded with NaNs; overlapping
    entries are then combined per row with `aggregator` (NaN-aware mean by
    default).
    """
    n = v.shape[0]
    total_len = n + (step_freq - 1)
    stacked = np.full((total_len, step_freq), np.nan)
    for shift in range(step_freq):
        stop = min(shift + n, total_len)
        stacked[shift:stop, shift] = v[: stop - shift]
    return aggregator(stacked, axis=1)
def get_norm_sym_norm_kernel(size):
    """Symmetric, normalized Gaussian-shaped kernel of length `size`.

    Samples the normal pdf between its 1st and 99th percentile, scales the
    peak to 1, and then rescales the two halves so that mirrored pairs sum
    to one - making the kernel usable for convex combination (e.g., in
    weighted averaging).
    """
    first_half = np.ceil(size / 2).astype(int)
    second_half = np.floor(size / 2).astype(int)
    # Normal distribution sampled from the 1st to the 99th percentile
    k = norm.pdf(np.linspace(norm.ppf(0.01), norm.ppf(0.99), size))
    # Normalize the peak to 1
    k /= np.max(k)
    # Symmetrize for convex combination: pairwise-normalize the first half,
    # then mirror it onto the second half
    k[:first_half] = k[:first_half] / (k[:first_half] + k[:first_half][::-1])
    k[second_half:] = k[:first_half][::-1]
    return k
def merge_interleaved_mat(m: np.ndarray, step_freq: int, kernel: np.ndarray = None):
    """Merge a matrix of interleaved window encodings into one long track.

    m -- (num_windows, window_len) matrix whose rows are windows interleaved
         at `step_freq` offsets.
    kernel -- per-position weights used when combining overlapping windows;
         defaults to uniform weights (i.e., a plain mean).

    Fix: uses the builtin `int` instead of `np.int`, which was deprecated in
    NumPy 1.20 and removed in NumPy 1.24.
    """
    if kernel is None:
        # Take the mean of the interleave vectors by default
        kernel = np.ones(m.shape[1])
    # length of one consecutive encoding
    M = int(m.shape[0] / step_freq) * m.shape[1]
    # Step size of windows
    # I.e., including binning, so 12Kb at 100 bins = 120 bin windows
    SZ = int(m.shape[1] / step_freq)
    # Out matrix: one column per interleave offset, NaN where no coverage
    o = np.zeros((M, step_freq))
    o[:] = np.nan
    # Kernel (weight) matrix, aligned with `o`
    k = np.zeros((M, step_freq))
    k[:] = np.nan
    long_k = np.tile(kernel, M)
    for i in np.arange(step_freq):
        # Linear, consecutive encoding for interleave offset i
        LCE = m[i::step_freq].flatten()
        j = i * SZ
        o[:, i][j:M] = LCE[: M - j]
        k[:, i][j:M] = long_k[: M - j]
    # Normalize kernels so the weights at each output position sum to 1
    k /= np.nansum(k, axis=1).reshape(k.shape[0], -1)
    return np.nansum(o * k, axis=1)
def hashify(l: list, key: str) -> dict:
    """Index a list of dicts by each item's value at `key`.

    Items lacking `key` are filed under "unknown"; later items with the
    same key value overwrite earlier ones.
    """
    return {item.get(key, "unknown"): item for item in l}
def is_int(s: str, is_pos: bool) -> bool:
    """True if `s` parses as an int (and is non-negative when `is_pos`)."""
    if s is None:
        return False
    try:
        value = int(s)
    except ValueError:
        return False
    # Only enforce the sign constraint when requested
    return value >= 0 if is_pos else True
def kNN(data: np.ndarray, id: int, n: int) -> np.ndarray:
    """Indices of the `n` nearest neighbors of row `id` (excluding itself),
    by Euclidean distance, nearest first."""
    sq_dist = np.sum((data - data[id]) ** 2, axis=1)
    order = np.argsort(np.sqrt(sq_dist))
    # Drop position 0 (the point itself, at distance zero)
    return order[1 : n + 1]
def enforce_window_size(start, end, window_size):
    """Return [from, to] of exactly `window_size`, centered on the given
    interval (the input is returned unchanged when it already fits)."""
    if end - start == window_size:
        return np.array([start, end])
    center = start + (end - start) // 2
    half = window_size // 2
    return np.array([center - half, center + half])
def serialize_classif(classif):
    """Serialize an (N, 2) classification matrix to bytes.

    Each row's two columns are multiplied together, the products are ordered
    by the first column, and the resulting int array is dumped as raw bytes.
    """
    order = np.argsort(classif[:, 0])
    products = classif[:, 0] * classif[:, 1]
    return products[order].tobytes()
def unserialize_classif(serialized_classif):
    """Decode bytes produced by `serialize_classif` back into an int array.

    Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `int` (the alias's original target, i.e., the platform default
    integer dtype) is used instead.
    """
    return np.frombuffer(serialized_classif, dtype=int)
def impact(data, impact=1.0):
    """Blend `data` toward 1 by weight `impact` (clamped to [0, 1]).

    impact=1 returns the data unchanged; impact=0 returns all-ones.
    """
    weight = min(1, max(0, impact))
    return weight * data + (1 - weight)
def get_target_window_idx(
    target_from: int,
    target_to: int,
    window_size: int,
    step_freq: int,
    abs_offset: int,
    max_offset: float = 0.66,
) -> tuple:
    """Map a target region to the index ranges of overlapping sliding windows.

    Windows of `window_size` are stepped at window_size / step_freq;
    `abs_offset` shifts the target into window-relative coordinates.

    Returns a 2-tuple of (from_idx, to_idx) ranges:
      [0] - every window with any kind of overlap with the target
      [1] - only windows overlapping the target by at least
            (1 - max_offset), i.e. at least ~33% at the default
    """
    step_size = window_size / step_freq
    # Center the target and force it to exactly window_size
    target_locus = enforce_window_size(target_from, target_to, window_size)
    target_locus[0] -= abs_offset
    target_locus[1] -= abs_offset
    # First window whose start is at or before the target start
    window_from_idx = int(target_locus[0] // step_size)
    window_from_pos = int(window_from_idx * step_size)
    window_to_idx = window_from_idx + step_freq
    # Remove windows that overlap too much with the target search
    offset = (target_locus[0] - window_from_pos) / window_size
    k = step_freq * (offset - max_offset)
    m = np.ceil(k).astype(int)
    n = step_freq * offset
    return (
        # Including any kind of overlaping window
        (window_from_idx + np.floor(k), window_to_idx + np.ceil(n)),
        # Only include windows that overlap at least 33% with the target
        (window_from_idx + m, window_to_idx + m),
    )
def knn_density(
    data: np.ndarray,
    k: int = 5,
    dist_metric: str = "euclidean",
    summary: Callable[[np.ndarray], np.float64] = np.mean,
):
    """Per-point density proxy: `summary` of each point's k-NN distances.

    Uses an approximate HNSW index for large inputs (> 100k points) and an
    exact BallTree otherwise. Returns one value per row of `data`, or an
    all-NaN vector if `summary` fails on the distance matrix.

    NOTE(review): `dist_metric` is currently unused - both branches use
    L2/Euclidean distances.

    Fix: replaces `np.int` (deprecated in NumPy 1.20, removed in 1.24)
    with the builtin `int`.
    """
    n, dim = data.shape
    if (n > 100000):
        # Declaring index
        p = hnswlib.Index(space='l2', dim=dim)
        # Also see https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
        ef = int(np.ceil(20 * np.log2(n)))
        # Initing index - the maximum number of elements should be known beforehand
        p.init_index(max_elements=n, ef_construction=ef, M=16)
        # Element insertion (can be called several times):
        p.add_items(data, np.arange(n))
        # Controlling the recall by setting ef
        p.set_ef(ef)
        _, dist = p.knn_query(data, k = k)
        # Delete the index
        del p
    else:
        leaf_size = int(np.round(10 * np.log(n)))
        bt = BallTree(data, leaf_size=leaf_size)
        dist, _ = bt.query(data, k, dualtree=True, sort_results=False)
    try:
        return summary(dist, axis=1)
    except Exception:
        # summary may not accept an axis argument / the matrix shape
        out = np.zeros(dist.shape[0])
        out[:] = np.nan
        return out
@contextmanager
def suppress_with_default(*exceptions, **kwargs):
    """Like ``contextlib.suppress`` but also yields a default value.

    Arguments:
        *exceptions -- exception types to silence inside the ``with`` block;
            when none are given, every ``Exception`` is suppressed.
        **kwargs -- may contain ``default``, the value yielded to the
            ``as`` target (``None`` when absent).

    Yields:
        any -- the ``default`` value from ``kwargs``
    """
    fallback = kwargs.get("default", None)
    catchable = exceptions if exceptions else Exception
    try:
        yield fallback
    except catchable:
        pass
def get_c(target_c: list, bg_c: list, opacity: float):
    """Recover the foreground color that, drawn over *bg_c* with *opacity*,
    yields *target_c*.

    All colors are 0-255 per-channel sequences; the result is an integer
    numpy array (truncated, not rounded).
    """
    target = np.array(target_c) / 255
    bg = np.array(bg_c) / 255
    # Invert alpha blending: fg = (target - bg * (1 - a)) / a, kept in the
    # original operation order so float truncation stays bit-identical.
    foreground = target * (1 / opacity) - bg * ((1 - opacity) / opacity)
    return (foreground * 255).astype(int)
| [
"numpy.prod",
"numpy.log",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"keras.models.Model.load",
"numpy.repeat",
"numpy.where",
"hnswlib.Index",
"numpy.asarray",
"numpy.max",
"keras.models.Model",
"numpy.min",
"warnings.simplefilter",
"numpy.frombuffer",
"sklearn.preprocessing.Min... | [((3118, 3149), 'numpy.round', 'np.round', (['x0'], {'decimals': 'decimals'}), '(x0, decimals=decimals)\n', (3126, 3149), True, 'import numpy as np\n'), ((3160, 3191), 'numpy.round', 'np.round', (['x1'], {'decimals': 'decimals'}), '(x1, decimals=decimals)\n', (3168, 3191), True, 'import numpy as np\n'), ((3202, 3233), 'numpy.round', 'np.round', (['x2'], {'decimals': 'decimals'}), '(x2, decimals=decimals)\n', (3210, 3233), True, 'import numpy as np\n'), ((3666, 3697), 'numpy.round', 'np.round', (['x0'], {'decimals': 'decimals'}), '(x0, decimals=decimals)\n', (3674, 3697), True, 'import numpy as np\n'), ((3708, 3739), 'numpy.round', 'np.round', (['x1'], {'decimals': 'decimals'}), '(x1, decimals=decimals)\n', (3716, 3739), True, 'import numpy as np\n'), ((3750, 3781), 'numpy.round', 'np.round', (['x2'], {'decimals': 'decimals'}), '(x2, decimals=decimals)\n', (3758, 3781), True, 'import numpy as np\n'), ((3791, 3809), 'numpy.sign', 'np.sign', (['(x1r - x0r)'], {}), '(x1r - x0r)\n', (3798, 3809), True, 'import numpy as np\n'), ((3819, 3837), 'numpy.sign', 'np.sign', (['(x2r - x1r)'], {}), '(x2r - x1r)\n', (3826, 3837), True, 'import numpy as np\n'), ((3954, 3990), 'numpy.percentile', 'np.percentile', (['data', '(0, percentile)'], {}), '(data, (0, percentile))\n', (3967, 3990), True, 'import numpy as np\n'), ((4007, 4020), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (4014, 4020), True, 'import numpy as np\n'), ((4245, 4257), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (4251, 4257), True, 'import numpy as np\n'), ((5277, 5299), 'keras.models.Model', 'Model', (['inputs', 'encoded'], {}), '(inputs, encoded)\n', (5282, 5299), False, 'from keras.models import Model\n'), ((5644, 5683), 'keras.layers.Input', 'Input', ([], {'shape': '(embedding,)', 'name': '"""input"""'}), "(shape=(embedding,), name='input')\n", (5649, 5683), False, 'from keras.layers import Input\n'), ((5861, 5896), 'keras.models.Model', 'Model', 
(['encoded_input', 'decoded_input'], {}), '(encoded_input, decoded_input)\n', (5866, 5896), False, 'from keras.models import Model\n'), ((6321, 6357), 'numpy.arange', 'np.arange', (['*search_target_windows[1]'], {}), '(*search_target_windows[1])\n', (6330, 6357), True, 'import numpy as np\n'), ((6957, 6993), 'numpy.arange', 'np.arange', (['*search_target_windows[1]'], {}), '(*search_target_windows[1])\n', (6966, 6993), True, 'import numpy as np\n'), ((7394, 7417), 'numpy.lcm', 'np.lcm', (['in_len', 'out_len'], {}), '(in_len, out_len)\n', (7400, 7417), True, 'import numpy as np\n'), ((7431, 7457), 'numpy.repeat', 'np.repeat', (['v', '(lcm / in_len)'], {}), '(v, lcm / in_len)\n', (7440, 7457), True, 'import numpy as np\n'), ((9210, 9247), 'numpy.asarray', 'np.asarray', (['in_array'], {'dtype': 'np.double'}), '(in_array, dtype=np.double)\n', (9220, 9247), True, 'import numpy as np\n'), ((10657, 10687), 'numpy.zeros', 'np.zeros', (['(out_len, step_freq)'], {}), '((out_len, step_freq))\n', (10665, 10687), True, 'import numpy as np\n'), ((10725, 10745), 'numpy.arange', 'np.arange', (['step_freq'], {}), '(step_freq)\n', (10734, 10745), True, 'import numpy as np\n'), ((11154, 11163), 'numpy.max', 'np.max', (['k'], {}), '(k)\n', (11160, 11163), True, 'import numpy as np\n'), ((11790, 11820), 'numpy.int', 'np.int', (['(m.shape[1] / step_freq)'], {}), '(m.shape[1] / step_freq)\n', (11796, 11820), True, 'import numpy as np\n'), ((11900, 11924), 'numpy.zeros', 'np.zeros', (['(M, step_freq)'], {}), '((M, step_freq))\n', (11908, 11924), True, 'import numpy as np\n'), ((11971, 11995), 'numpy.zeros', 'np.zeros', (['(M, step_freq)'], {}), '((M, step_freq))\n', (11979, 11995), True, 'import numpy as np\n'), ((12027, 12045), 'numpy.tile', 'np.tile', (['kernel', 'M'], {}), '(kernel, M)\n', (12034, 12045), True, 'import numpy as np\n'), ((12060, 12080), 'numpy.arange', 'np.arange', (['step_freq'], {}), '(step_freq)\n', (12069, 12080), True, 'import numpy as np\n'), ((12348, 12372), 
'numpy.nansum', 'np.nansum', (['(o * k)'], {'axis': '(1)'}), '(o * k, axis=1)\n', (12357, 12372), True, 'import numpy as np\n'), ((13102, 13166), 'numpy.array', 'np.array', (['[center - window_size // 2, center + window_size // 2]'], {}), '([center - window_size // 2, center + window_size // 2])\n', (13110, 13166), True, 'import numpy as np\n'), ((13215, 13240), 'numpy.argsort', 'np.argsort', (['classif[:, 0]'], {}), '(classif[:, 0])\n', (13225, 13240), True, 'import numpy as np\n'), ((13379, 13426), 'numpy.frombuffer', 'np.frombuffer', (['serialized_classif'], {'dtype': 'np.int'}), '(serialized_classif, dtype=np.int)\n', (13392, 13426), True, 'import numpy as np\n'), ((2011, 2026), 'numpy.abs', 'np.abs', (['(p0 - p1)'], {}), '(p0 - p1)\n', (2017, 2026), True, 'import numpy as np\n'), ((4035, 4066), 'numpy.where', 'np.where', (['(data_norm < cutoff[0])'], {}), '(data_norm < cutoff[0])\n', (4043, 4066), True, 'import numpy as np\n'), ((4094, 4125), 'numpy.where', 'np.where', (['(data_norm > cutoff[1])'], {}), '(data_norm > cutoff[1])\n', (4102, 4125), True, 'import numpy as np\n'), ((4276, 4288), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (4282, 4288), True, 'import numpy as np\n'), ((11571, 11590), 'numpy.ones', 'np.ones', (['m.shape[1]'], {}), '(m.shape[1])\n', (11578, 11590), True, 'import numpy as np\n'), ((11641, 11671), 'numpy.int', 'np.int', (['(m.shape[0] / step_freq)'], {}), '(m.shape[0] / step_freq)\n', (11647, 11671), True, 'import numpy as np\n'), ((12830, 12868), 'numpy.sum', 'np.sum', (['((data - data[id]) ** 2)'], {'axis': '(1)'}), '((data - data[id]) ** 2, axis=1)\n', (12836, 12868), True, 'import numpy as np\n'), ((12881, 12897), 'numpy.argsort', 'np.argsort', (['dist'], {}), '(dist)\n', (12891, 12897), True, 'import numpy as np\n'), ((13011, 13033), 'numpy.array', 'np.array', (['[start, end]'], {}), '([start, end])\n', (13019, 13033), True, 'import numpy as np\n'), ((14784, 14818), 'hnswlib.Index', 'hnswlib.Index', ([], {'space': 
'"""l2"""', 'dim': 'dim'}), "(space='l2', dim=dim)\n", (14797, 14818), False, 'import hnswlib\n'), ((15425, 15460), 'sklearn.neighbors.BallTree', 'BallTree', (['data'], {'leaf_size': 'leaf_size'}), '(data, leaf_size=leaf_size)\n', (15433, 15460), False, 'from sklearn.neighbors import BallTree\n'), ((16276, 16294), 'numpy.array', 'np.array', (['target_c'], {}), '(target_c)\n', (16284, 16294), True, 'import numpy as np\n'), ((16310, 16324), 'numpy.array', 'np.array', (['bg_c'], {}), '(bg_c)\n', (16318, 16324), True, 'import numpy as np\n'), ((1404, 1432), 'itertools.islice', 'itertools.islice', (['a', '(1)', 'None'], {}), '(a, 1, None)\n', (1420, 1432), False, 'import itertools\n'), ((2295, 2315), 'numpy.sign', 'np.sign', (['(p0 - border)'], {}), '(p0 - border)\n', (2302, 2315), True, 'import numpy as np\n'), ((2319, 2339), 'numpy.sign', 'np.sign', (['(p1 - border)'], {}), '(p1 - border)\n', (2326, 2339), True, 'import numpy as np\n'), ((4151, 4165), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4163, 4165), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4586, 4619), 'keras.models.load_model', 'keras.models.load_model', (['filepath'], {}), '(filepath)\n', (4609, 4619), False, 'import keras\n'), ((4836, 4864), 'keras.models.Model.load', 'Model.load', (['*additional_args'], {}), '(*additional_args)\n', (4846, 4864), False, 'from keras.models import Model\n'), ((9856, 9876), 'numpy.array', 'np.array', (['temp_shape'], {}), '(temp_shape)\n', (9864, 9876), True, 'import numpy as np\n'), ((9879, 9897), 'numpy.array', 'np.array', (['in_shape'], {}), '(in_shape)\n', (9887, 9897), True, 'import numpy as np\n'), ((10427, 10447), 'numpy.prod', 'np.prod', (['final_shape'], {}), '(final_shape)\n', (10434, 10447), True, 'import numpy as np\n'), ((10450, 10467), 'numpy.prod', 'np.prod', (['in_shape'], {}), '(in_shape)\n', (10457, 10467), True, 'import numpy as np\n'), ((10918, 10935), 'numpy.ceil', 'np.ceil', (['(size / 2)'], {}), '(size / 
2)\n', (10925, 10935), True, 'import numpy as np\n'), ((10961, 10979), 'numpy.floor', 'np.floor', (['(size / 2)'], {}), '(size / 2)\n', (10969, 10979), True, 'import numpy as np\n'), ((11084, 11098), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.01)'], {}), '(0.01)\n', (11092, 11098), False, 'from scipy.stats import norm\n'), ((11100, 11114), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.99)'], {}), '(0.99)\n', (11108, 11114), False, 'from scipy.stats import norm\n'), ((12291, 12311), 'numpy.nansum', 'np.nansum', (['k'], {'axis': '(1)'}), '(k, axis=1)\n', (12300, 12311), True, 'import numpy as np\n'), ((14233, 14243), 'numpy.ceil', 'np.ceil', (['k'], {}), '(k)\n', (14240, 14243), True, 'import numpy as np\n'), ((15180, 15192), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (15189, 15192), True, 'import numpy as np\n'), ((15615, 15638), 'numpy.zeros', 'np.zeros', (['dist.shape[0]'], {}), '(dist.shape[0])\n', (15623, 15638), True, 'import numpy as np\n'), ((3875, 3885), 'numpy.abs', 'np.abs', (['d0'], {}), '(d0)\n', (3881, 3885), True, 'import numpy as np\n'), ((4419, 4444), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4442, 4444), False, 'import warnings\n'), ((4462, 4493), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4483, 4493), False, 'import warnings\n'), ((4518, 4551), 'keras.models.load_model', 'keras.models.load_model', (['filepath'], {}), '(filepath)\n', (4541, 4551), False, 'import keras\n'), ((4783, 4809), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (4799, 4809), False, 'import os\n'), ((7230, 7277), 'numpy.ceil', 'np.ceil', (['((chrom_size - window_size) / step_size)'], {}), '((chrom_size - window_size) / step_size)\n', (7237, 7277), True, 'import numpy as np\n'), ((14374, 14385), 'numpy.floor', 'np.floor', (['k'], {}), '(k)\n', (14382, 14385), True, 'import numpy as np\n'), ((14403, 14413), 'numpy.ceil', 'np.ceil', (['n'], {}), '(n)\n', (14410, 14413), 
True, 'import numpy as np\n'), ((1806, 1817), 'numpy.round', 'np.round', (['p'], {}), '(p)\n', (1814, 1817), True, 'import numpy as np\n'), ((3260, 3278), 'numpy.sign', 'np.sign', (['(x1r - x0r)'], {}), '(x1r - x0r)\n', (3267, 3278), True, 'import numpy as np\n'), ((3281, 3299), 'numpy.sign', 'np.sign', (['(x2r - x1r)'], {}), '(x2r - x1r)\n', (3288, 3299), True, 'import numpy as np\n'), ((4743, 4768), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (4758, 4768), False, 'import os\n'), ((9527, 9564), 'numpy.ceil', 'np.ceil', (['(in_shape[i] / final_shape[i])'], {}), '(in_shape[i] / final_shape[i])\n', (9534, 9564), True, 'import numpy as np\n'), ((14933, 14943), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (14940, 14943), True, 'import numpy as np\n'), ((15400, 15409), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (15406, 15409), True, 'import numpy as np\n'), ((6485, 6507), 'numpy.ones', 'np.ones', (['stwi.shape[0]'], {}), '(stwi.shape[0])\n', (6492, 6507), True, 'import numpy as np\n'), ((7082, 7104), 'numpy.ones', 'np.ones', (['stwi.shape[0]'], {}), '(stwi.shape[0])\n', (7089, 7104), True, 'import numpy as np\n')] |
import random
import scipy
import numpy as np
import h5py
class DataLoader(object):
    """Thin data-access helper around an MNIST-background HDF5 file.

    NOTE(review): ``next_batch('train')``, ``count_num_batch('train'/'valid')``
    and ``randomize`` read attributes (``self.mnist``, ``self.x_train``,
    ``self.x_valid``...) that are not set anywhere in this class — presumably
    assigned by external code; verify against the callers.
    """

    def __init__(self, cfg):
        """Store the config object and the augmentation flag."""
        self.cfg = cfg
        self.augment = cfg.data_augment

    def get_data(self, mode='train'):
        """Load the test split from disk into ``self.x_test``/``self.y_test``."""
        with h5py.File('./classification/DataLoaders/mnist_background.h5', 'r') as h5f:
            self.x_test = np.reshape(h5f['X'][:], [12000, 28, 28, 1])
            self.y_test = h5f['Y'][:]
        # Original code emitted a blank line here; kept for identical output.
        print()

    def next_batch(self, start=None, end=None, mode='train'):
        """Return an (images, labels) pair for the requested mode."""
        if mode == 'train':
            x, y = self.mnist.train.next_batch(self.cfg.batch_size)
            shape = (-1, self.cfg.height, self.cfg.width, self.cfg.channel)
            x = x.reshape(shape)
            if self.augment:
                x = random_rotation_2d(x, self.cfg.max_angle)
        elif mode == 'valid':
            x, y = self.x_valid[start:end], self.y_valid[start:end]
        elif mode == 'test':
            x, y = self.x_test[start:end], self.y_test[start:end]
        return x, y

    def count_num_batch(self, batch_size, mode='train'):
        """Number of whole batches available for the given mode."""
        if mode == 'train':
            labels = self.y_train
        elif mode == 'valid':
            labels = self.y_valid
        elif mode == 'test':
            labels = self.y_test
        return int(labels.shape[0] / batch_size)

    def randomize(self):
        """Shuffle training samples and their labels in unison."""
        order = np.random.permutation(self.y_train.shape[0])
        return self.x_train[order, :, :, :], self.y_train[order]
def random_rotation_2d(batch, max_angle):
    """Randomly rotate each image in a batch by an angle in (-max_angle, max_angle).

    Each image is rotated with probability 1/2 (coin flip via
    ``random.getrandbits``); otherwise it is passed through unchanged.

    Arguments:
        batch: array of images, e.g. shape (N, H, W, 1)
        max_angle: `float`. The maximum rotation angle in degrees.

    Returns:
        batch of (possibly) rotated 2D images, same shape as the input
    """
    size = batch.shape
    batch = np.squeeze(batch)
    batch_rot = np.zeros(batch.shape)
    for i in range(batch.shape[0]):
        if bool(random.getrandbits(1)):
            image = np.squeeze(batch[i])
            angle = random.uniform(-max_angle, max_angle)
            # scipy.ndimage.interpolation was deprecated long ago and has been
            # removed from modern SciPy; scipy.ndimage.rotate is the supported
            # (and identical) entry point.
            batch_rot[i] = scipy.ndimage.rotate(image, angle, mode='nearest', reshape=False)
        else:
            batch_rot[i] = batch[i]
    return batch_rot.reshape(size)
"random.uniform",
"numpy.reshape",
"h5py.File",
"numpy.squeeze",
"numpy.zeros",
"random.getrandbits",
"scipy.ndimage.interpolation.rotate",
"numpy.random.permutation"
] | [((1956, 1973), 'numpy.squeeze', 'np.squeeze', (['batch'], {}), '(batch)\n', (1966, 1973), True, 'import numpy as np\n'), ((1990, 2011), 'numpy.zeros', 'np.zeros', (['batch.shape'], {}), '(batch.shape)\n', (1998, 2011), True, 'import numpy as np\n'), ((231, 297), 'h5py.File', 'h5py.File', (['"""./classification/DataLoaders/mnist_background.h5"""', '"""r"""'], {}), "('./classification/DataLoaders/mnist_background.h5', 'r')\n", (240, 297), False, 'import h5py\n'), ((320, 363), 'numpy.reshape', 'np.reshape', (["h5f['X'][:]", '[12000, 28, 28, 1]'], {}), "(h5f['X'][:], [12000, 28, 28, 1])\n", (330, 363), True, 'import numpy as np\n'), ((1496, 1540), 'numpy.random.permutation', 'np.random.permutation', (['self.y_train.shape[0]'], {}), '(self.y_train.shape[0])\n', (1517, 1540), True, 'import numpy as np\n'), ((2064, 2085), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (2082, 2085), False, 'import random\n'), ((2108, 2128), 'numpy.squeeze', 'np.squeeze', (['batch[i]'], {}), '(batch[i])\n', (2118, 2128), True, 'import numpy as np\n'), ((2149, 2186), 'random.uniform', 'random.uniform', (['(-max_angle)', 'max_angle'], {}), '(-max_angle, max_angle)\n', (2163, 2186), False, 'import random\n'), ((2214, 2293), 'scipy.ndimage.interpolation.rotate', 'scipy.ndimage.interpolation.rotate', (['image', 'angle'], {'mode': '"""nearest"""', 'reshape': '(False)'}), "(image, angle, mode='nearest', reshape=False)\n", (2248, 2293), False, 'import scipy\n')] |
"""
SPDX-FileCopyrightText: 2021 International Photoacoustic Standardisation Consortium (IPASC)
SPDX-FileCopyrightText: 2021 <NAME>
SPDX-FileCopyrightText: 2021 <NAME>
SPDX-License-Identifier: MIT
"""
from scipy.signal import hilbert, hilbert2
import numpy as np
def hilbert_transform_1_d(signal, axis=-1):
    """Return the amplitude envelope of *signal* along *axis*.

    The analytic signal is computed via the Hilbert transform and its
    magnitude returned.

    :param signal: real-valued input array
    :param axis: axis along which the transform is taken
    :return: envelope of *signal* (non-negative array, same shape)
    """
    analytic_signal = hilbert(signal, axis=axis)
    return np.abs(analytic_signal)
def hilbert_transform_2_d(signal):
    """Return the magnitude of the 2-D analytic signal.

    Parameters
    ----------
    signal: np.ndarray a NxM numpy ndarray

    Returns
    -------
    non-negative NxM array: the absolute 2-D Hilbert transform of *signal*
    """
    analytic_2d = hilbert2(signal)
    return np.abs(analytic_2d)
def zero_forcing(signal, threshold=1e-20):
    """Clamp every element of *signal* below *threshold* up to *threshold*.

    Parameters
    ----------
    signal: np.ndarray a NxM numpy ndarray (modified in place)
    threshold: float the cutoff value for the zero forcing (default: 1e-20)

    Returns
    -------
    the same array, with no value smaller than *threshold*
    """
    below = signal < threshold
    signal[below] = threshold
    return signal
def absolute_value(signal):
    """Elementwise magnitude of the input signal.

    Parameters
    ----------
    signal: np.ndarray a NxM numpy ndarray

    Returns
    -------
    array of the absolute values of *signal*
    """
    # np.absolute is the same ufunc np.abs aliases.
    return np.absolute(signal)
def log_compression(signal, axis=-1, dynamic=40):
    """Envelope-detect *signal* and convert it to a clipped dB scale.

    :param signal: input signal
    :param axis: the axis the Hilbert transform should be computed on
    :param dynamic: the dynamic range in dB to be displayed
    :return: 20*log10 of the peak-normalized envelope, floored at -dynamic
    """
    # Amplitude envelope via the 1-D Hilbert transform
    envelope = hilbert_transform_1_d(signal, axis)
    # Decibels relative to the (NaN-safe) peak
    envelope = 20 * np.log10(envelope / np.nanmax(envelope))
    # Floor everything outside the requested dynamic range
    envelope[envelope < -dynamic] = -dynamic
    return envelope
| [
"numpy.abs",
"scipy.signal.hilbert2",
"numpy.where",
"numpy.nanmax",
"scipy.signal.hilbert"
] | [((1259, 1273), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (1265, 1273), True, 'import numpy as np\n'), ((447, 473), 'scipy.signal.hilbert', 'hilbert', (['signal'], {'axis': 'axis'}), '(signal, axis=axis)\n', (454, 473), False, 'from scipy.signal import hilbert, hilbert2\n'), ((688, 704), 'scipy.signal.hilbert2', 'hilbert2', (['signal'], {}), '(signal)\n', (696, 704), False, 'from scipy.signal import hilbert, hilbert2\n'), ((1733, 1757), 'numpy.where', 'np.where', (['(env < -dynamic)'], {}), '(env < -dynamic)\n', (1741, 1757), True, 'import numpy as np\n'), ((1643, 1657), 'numpy.nanmax', 'np.nanmax', (['env'], {}), '(env)\n', (1652, 1657), True, 'import numpy as np\n')] |
"""Run Monte Carlo simulations."""
from joblib import Parallel, delayed
from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint
from datetime import datetime
from copy import deepcopy
from glob import glob
import frbpoppy.paths
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import uuid
POP_SIZE = 5e7
class SimulationOverview:
    """Bookkeeping for Monte Carlo runs: one row per simulated population.

    Given parameter values, rows map to a uuid identifying the saved
    population. Loaded from a CSV on disk when present, otherwise started
    empty.
    """

    def __init__(self, load_csv=True):
        """Load the overview CSV if requested and present, else start empty."""
        p = frbpoppy.paths.populations()
        # NOTE(review): 'simluation' is a typo, but renaming the file would
        # orphan previously saved overviews — kept as-is on purpose.
        self.filename = f'{p}mc/simluation_overview.csv'
        if load_csv and os.path.isfile(self.filename):
            self.load()
        else:
            self.df = pd.DataFrame()

    def load(self):
        """Read the overview from disk, dropping stray unnamed index columns."""
        self.df = pd.read_csv(self.filename, index_col=0)
        self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')]

    def save(self):
        """Write the overview back to its CSV file."""
        self.df.to_csv(self.filename)

    def append(self, df):
        """Append rows to the overview.

        ``DataFrame.append`` was deprecated in pandas 1.4 and removed in
        2.0; ``pd.concat`` is the supported equivalent.
        """
        self.df = pd.concat([self.df, df], ignore_index=True)

    def map_surveys(self, ix, names):
        """Replace numeric survey indices in the 'survey' column by names."""
        mapping = dict(zip(ix, names))
        self.df.replace({"survey": mapping}, inplace=True)
class MonteCarlo:
    """Drive Monte Carlo grid searches over FRB population parameters.

    Each ``gen_par_set_*`` method sweeps a different parameter subset,
    records every (parameters, survey) combination with a uuid in the
    ``SimulationOverview`` CSV, and saves one ``SurveyPopulation`` per
    combination under ``mc/run_<run>/<uuid>``.
    """

    def __init__(self, pop_size=1e2, load_csv=True):
        """Set up surveys, the overview bookkeeping and output directories.

        :param pop_size: number of sources per generated CosmicPopulation
        :param load_csv: whether to load an existing simulation overview CSV
        """
        self.survey_names = ['parkes-htru',
                             'chime-frb',
                             'askap-incoh',
                             'wsrt-apertif']
        self.load_csv = load_csv
        self.pop_size = pop_size
        # Numeric indices used in the meshgrids; mapped back to names later
        self.survey_ix = [i for i in range(len(self.survey_names))]
        self.surveys = self.set_up_surveys()
        self.so = SimulationOverview(load_csv=self.load_csv)
        self.set_up_dirs()

    def set_up_surveys(self):
        """Set up surveys.

        Every survey starts with an airy beam; surveys with a dedicated
        beam model in frbpoppy are switched to it.
        """
        surveys = []
        for name in self.survey_names:
            survey = Survey(name=name)
            survey.set_beam(model='airy', n_sidelobes=1)
            if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
                survey.set_beam(model=name)
            surveys.append(survey)
        return surveys

    def set_up_dirs(self, run=np.nan):
        """Create subdirectory for saving populations.
        Returns True if directory had to be set up."""
        f = f'{frbpoppy.paths.populations()}mc/'
        if not os.path.isdir(f):
            os.mkdir(f)
            return True
        # Per-run subdirectory, only when a run number was given
        if not np.isnan(run):
            f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
            if not os.path.isdir(f):
                os.mkdir(f)
                return True
        return False

    def gen_par_set_1(self,
                      parallel=True,
                      lum_min=np.nan,
                      lum_max=np.nan,
                      w_mean=np.nan,
                      w_std=np.nan,
                      dm_igm_slope=np.nan,
                      dm_host=np.nan,
                      run=0):
        """Sweep alpha, spectral index (si) and luminosity index (li).

        NOTE(review): unlike the other ``gen_par_set_*`` methods, ``run``
        defaults to 0 here rather than np.nan — confirm that is intended.
        """
        alphas = np.linspace(-2.5, -1, 11)
        sis = np.linspace(-2, 2, 11)
        lis = np.linspace(-2, 0, 11)
        # Put all options into a dataframe
        if 'run' in self.so.df:
            self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 4)
        df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
        df['run'] = run
        df['par_set'] = 1
        # One uuid per (parameter, survey) combination, used as the filename
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()
        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)

        def iter_alpha(i):
            # One cosmic population per alpha; si and li are varied on top
            # of it without regenerating the population.
            alpha = alphas[i]
            pop = CosmicPopulation.complex(self.pop_size)
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
            pop.set_lum(model='constant', value=1)
            if not np.isnan(w_mean):
                pop.set_w(model='lognormal', mean=w_mean, std=w_std)
            if not np.isnan(dm_igm_slope):
                pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
            pop.set_dm_host(model='constant', value=dm_host)
            pop.generate()
            for si in sis:
                pop.set_si(model='constant', value=si)
                pop.gen_si()
                for li in lis:
                    # NOTE(review): this call uses ``power=li`` while the
                    # conditional below uses ``index=li`` — verify which
                    # keyword the frbpoppy set_lum API expects.
                    pop.set_lum(model='powerlaw',
                                low=1e40,
                                high=1e45, power=li)
                    if not np.isnan(lum_min):
                        pop.set_lum(model='powerlaw', low=lum_min,
                                    high=lum_max, index=li)
                    pop.gen_lum()
                    for survey in self.surveys:
                        surv_pop = SurveyPopulation(pop, survey)
                        # Get unique identifier
                        mask = (self.so.df.par_set == 1)
                        mask &= (self.so.df.run == run)
                        mask &= (self.so.df.alpha == alpha)
                        mask &= (self.so.df.si == si)
                        mask &= (self.so.df.li == li)
                        mask &= (self.so.df.survey == survey.name)
                        uuid = self.so.df[mask].uuid.iloc[0]
                        surv_pop.name = f'mc/run_{run}/{uuid}'
                        surv_pop.save()
        if parallel:
            n_cpu = min([3, os.cpu_count() - 1])
            pprint(f'{os.cpu_count()} CPUs available')
            r = range(len(alphas))
            Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
        else:
            [iter_alpha(i) for i in tqdm(range(len(alphas)))]

    def gen_par_set_2(self,
                      parallel=True,
                      alpha=-1.5,
                      si=0,
                      w_mean=np.nan,
                      w_std=np.nan,
                      dm_igm_slope=np.nan,
                      dm_host=np.nan,
                      run=np.nan):
        """Sweep luminosity index (li) and the luminosity bounds.

        Combinations with lum_max < lum_min are dropped from the overview
        and skipped during simulation.
        """
        lis = np.linspace(-1.5, 0, 11)
        lum_mins = 10**np.linspace(38, 46, 11)
        lum_maxs = 10**np.linspace(38, 46, 11)
        # Put all options into a dataframe
        self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 4)
        cols = ('li', 'lum_min', 'lum_max', 'survey')
        df = pd.DataFrame(options, columns=cols)
        df['par_set'] = 2
        df['run'] = run
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        # Drop invalid luminosity ranges
        df = df[~(df.lum_max < df.lum_min)]
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()
        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)
        # Base population shared (via deepcopy) by all parameter combinations
        pop = CosmicPopulation.complex(self.pop_size)
        if not np.isnan(alpha):
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
        pop.set_si(model='constant', value=si)
        pop.set_lum(model='constant', value=1)
        if not np.isnan(w_mean):
            pop.set_w(model='lognormal', mean=w_mean, std=w_std)
        if not np.isnan(dm_igm_slope):
            pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
        pop.set_dm_host(model='constant', value=dm_host)
        pop.generate()

        def adapt_pop(e):
            # Regenerate only the luminosities on a copy of the base pop
            li, lum_min, lum_max = e
            if lum_max < lum_min:
                return
            t_pop = deepcopy(pop)
            t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
                          power=li)
            t_pop.gen_lum()
            for survey in self.surveys:
                surv_pop = SurveyPopulation(t_pop, survey)
                # Get unique identifier
                mask = (self.so.df.par_set == 2)
                mask &= (self.so.df.run == run)
                mask &= (self.so.df.li == li)
                mask &= (self.so.df.lum_min == lum_min)
                mask &= (self.so.df.lum_max == lum_max)
                mask &= (self.so.df.survey == survey.name)
                uuid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uuid}'
                surv_pop.save()
        n_cpu = min([3, os.cpu_count() - 1])
        pprint(f'{os.cpu_count()} CPUs available')
        mg = np.meshgrid(lis, lum_mins, lum_maxs)
        loop = np.array(mg).T.reshape(-1, 3)
        if parallel:
            Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
        else:
            [adapt_pop(e) for e in tqdm(loop)]

    def gen_par_set_3(self,
                      parallel=True,
                      alpha=-1.5,
                      si=0,
                      li=-1,
                      lum_min=1e40,
                      lum_max=1e40,
                      dm_igm_slope=np.nan,
                      dm_host=np.nan,
                      run=np.nan):
        """Sweep the (lognormal) pulse-width mean and standard deviation."""
        w_means = 10**np.linspace(-3, 1, 11)
        w_stds = np.linspace(0, 3, 11)
        # Put all options into a dataframe
        self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(w_means, w_stds, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 3)
        cols = ('w_mean', 'w_std', 'survey')
        df = pd.DataFrame(options, columns=cols)
        df['run'] = run
        df['par_set'] = 3
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()
        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)
        # Base population shared (via deepcopy) by all parameter combinations
        pop = CosmicPopulation.complex(self.pop_size)
        if not np.isnan(alpha):
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
        pop.set_si(model='constant', value=si)
        if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, index=li)
        if not np.isnan(dm_igm_slope):
            pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
        pop.set_dm_host(model='constant', value=dm_host)
        pop.generate()

        def adapt_pop(e):
            # Regenerate only the pulse widths on a copy of the base pop
            w_mean, w_std = e
            t_pop = deepcopy(pop)
            t_pop.set_w(model='lognormal', mean=w_mean, std=w_std)
            t_pop.gen_w()
            for survey in self.surveys:
                surv_pop = SurveyPopulation(t_pop, survey)
                # Get unique identifier
                mask = (self.so.df.par_set == 3)
                mask &= (self.so.df.run == run)
                # NOTE(review): duplicated run condition (harmless no-op)
                mask &= (self.so.df.run == run)
                mask &= (self.so.df.w_mean == w_mean)
                mask &= (self.so.df.w_std == w_std)
                mask &= (self.so.df.survey == survey.name)
                uuid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uuid}'
                surv_pop.save()
        n_cpu = min([3, os.cpu_count() - 1])
        pprint(f'{os.cpu_count()} CPUs available')
        mg = np.meshgrid(w_means, w_stds)
        loop = np.array(mg).T.reshape(-1, 2)
        if parallel:
            Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
        else:
            [adapt_pop(e) for e in tqdm(loop)]

    def gen_par_set_4(self,
                      parallel=True,
                      alpha=-1.5,
                      si=0,
                      li=-1,
                      lum_min=1e40,
                      lum_max=1e40,
                      w_mean=np.nan,
                      w_std=np.nan,
                      run=np.nan):
        """Sweep the IGM dispersion-measure slope and the host DM."""
        dm_igm_slopes = np.linspace(800, 1200, 11)
        dm_hosts = np.linspace(0, 500, 11)
        # Put all options into a dataframe
        self.so.df = self.so.df[self.so.df.run != run]
        opt = np.meshgrid(dm_igm_slopes, dm_hosts, self.survey_ix)
        options = np.array(opt).T.reshape(-1, 3)
        cols = ('dm_igm_slope', 'dm_host', 'survey')
        df = pd.DataFrame(options, columns=cols)
        df['run'] = run
        df['par_set'] = 4
        df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
        df['date'] = datetime.today()
        self.so.append(df)
        self.so.map_surveys(self.survey_ix, self.survey_names)
        self.so.save()
        # Remove previous par_set of the same number
        if not self.set_up_dirs(run=run):
            fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
            for f in glob(fs):
                os.remove(f)
        # Base population shared (via deepcopy) by all parameter combinations
        pop = CosmicPopulation.complex(self.pop_size)
        if not np.isnan(alpha):
            pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
        pop.set_si(model='constant', value=si)
        if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, index=li)
        if not np.isnan(w_mean):
            pop.set_w(model='lognormal', mean=w_mean, std=w_std)
        pop.generate()

        def adapt_pop(e):
            # Regenerate the DM components on a copy of the base pop and
            # recombine the total DM from its parts.
            dm_igm_slope, dm_host = e
            t_pop = deepcopy(pop)
            t_pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
            t_pop.gen_dm_igm()
            t_pop.set_dm_host(model='constant', value=dm_host)
            t_pop.gen_dm_host()
            t_pop.frbs.dm = t_pop.frbs.dm_mw + t_pop.frbs.dm_igm
            t_pop.frbs.dm += t_pop.frbs.dm_host
            for survey in self.surveys:
                surv_pop = SurveyPopulation(t_pop, survey)
                # Get unique identifier
                mask = (self.so.df.par_set == 4)
                mask &= (self.so.df.run == run)
                mask &= (self.so.df.dm_igm_slope == dm_igm_slope)
                mask &= (self.so.df.dm_host == dm_host)
                mask &= (self.so.df.survey == survey.name)
                uuid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uuid}'
                surv_pop.save()
        n_cpu = min([4, os.cpu_count() - 1])
        pprint(f'{os.cpu_count()} CPUs available')
        mg = np.meshgrid(dm_igm_slopes, dm_hosts)
        loop = np.array(mg).T.reshape(-1, 2)
        if parallel:
            Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
        else:
            [adapt_pop(e) for e in tqdm(loop)]
| [
"pandas.read_csv",
"numpy.array",
"os.cpu_count",
"copy.deepcopy",
"datetime.datetime.today",
"os.remove",
"numpy.linspace",
"os.path.isdir",
"os.mkdir",
"frbpoppy.SurveyPopulation",
"pandas.DataFrame",
"numpy.meshgrid",
"glob.glob",
"uuid.uuid4",
"os.path.isfile",
"numpy.isnan",
"fr... | [((745, 784), 'pandas.read_csv', 'pd.read_csv', (['self.filename'], {'index_col': '(0)'}), '(self.filename, index_col=0)\n', (756, 784), True, 'import pandas as pd\n'), ((2877, 2902), 'numpy.linspace', 'np.linspace', (['(-2.5)', '(-1)', '(11)'], {}), '(-2.5, -1, 11)\n', (2888, 2902), True, 'import numpy as np\n'), ((2917, 2939), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(11)'], {}), '(-2, 2, 11)\n', (2928, 2939), True, 'import numpy as np\n'), ((2954, 2976), 'numpy.linspace', 'np.linspace', (['(-2)', '(0)', '(11)'], {}), '(-2, 0, 11)\n', (2965, 2976), True, 'import numpy as np\n'), ((3126, 3171), 'numpy.meshgrid', 'np.meshgrid', (['alphas', 'sis', 'lis', 'self.survey_ix'], {}), '(alphas, sis, lis, self.survey_ix)\n', (3137, 3171), True, 'import numpy as np\n'), ((3234, 3296), 'pandas.DataFrame', 'pd.DataFrame', (['options'], {'columns': "('alpha', 'si', 'li', 'survey')"}), "(options, columns=('alpha', 'si', 'li', 'survey'))\n", (3246, 3296), True, 'import pandas as pd\n'), ((3434, 3450), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (3448, 3450), False, 'from datetime import datetime\n'), ((6143, 6167), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(0)', '(11)'], {}), '(-1.5, 0, 11)\n', (6154, 6167), True, 'import numpy as np\n'), ((6375, 6427), 'numpy.meshgrid', 'np.meshgrid', (['lis', 'lum_mins', 'lum_maxs', 'self.survey_ix'], {}), '(lis, lum_mins, lum_maxs, self.survey_ix)\n', (6386, 6427), True, 'import numpy as np\n'), ((6544, 6579), 'pandas.DataFrame', 'pd.DataFrame', (['options'], {'columns': 'cols'}), '(options, columns=cols)\n', (6556, 6579), True, 'import pandas as pd\n'), ((6717, 6733), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (6731, 6733), False, 'from datetime import datetime\n'), ((7127, 7166), 'frbpoppy.CosmicPopulation.complex', 'CosmicPopulation.complex', (['self.pop_size'], {}), '(self.pop_size)\n', (7151, 7166), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, 
pprint\n'), ((8644, 8680), 'numpy.meshgrid', 'np.meshgrid', (['lis', 'lum_mins', 'lum_maxs'], {}), '(lis, lum_mins, lum_maxs)\n', (8655, 8680), True, 'import numpy as np\n'), ((9293, 9314), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(11)'], {}), '(0, 3, 11)\n', (9304, 9314), True, 'import numpy as np\n'), ((9428, 9472), 'numpy.meshgrid', 'np.meshgrid', (['w_means', 'w_stds', 'self.survey_ix'], {}), '(w_means, w_stds, self.survey_ix)\n', (9439, 9472), True, 'import numpy as np\n'), ((9580, 9615), 'pandas.DataFrame', 'pd.DataFrame', (['options'], {'columns': 'cols'}), '(options, columns=cols)\n', (9592, 9615), True, 'import pandas as pd\n'), ((9753, 9769), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (9767, 9769), False, 'from datetime import datetime\n'), ((10119, 10158), 'frbpoppy.CosmicPopulation.complex', 'CosmicPopulation.complex', (['self.pop_size'], {}), '(self.pop_size)\n', (10143, 10158), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((11491, 11519), 'numpy.meshgrid', 'np.meshgrid', (['w_means', 'w_stds'], {}), '(w_means, w_stds)\n', (11502, 11519), True, 'import numpy as np\n'), ((12086, 12112), 'numpy.linspace', 'np.linspace', (['(800)', '(1200)', '(11)'], {}), '(800, 1200, 11)\n', (12097, 12112), True, 'import numpy as np\n'), ((12132, 12155), 'numpy.linspace', 'np.linspace', (['(0)', '(500)', '(11)'], {}), '(0, 500, 11)\n', (12143, 12155), True, 'import numpy as np\n'), ((12269, 12321), 'numpy.meshgrid', 'np.meshgrid', (['dm_igm_slopes', 'dm_hosts', 'self.survey_ix'], {}), '(dm_igm_slopes, dm_hosts, self.survey_ix)\n', (12280, 12321), True, 'import numpy as np\n'), ((12437, 12472), 'pandas.DataFrame', 'pd.DataFrame', (['options'], {'columns': 'cols'}), '(options, columns=cols)\n', (12449, 12472), True, 'import pandas as pd\n'), ((12610, 12626), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (12624, 12626), False, 'from datetime import datetime\n'), ((12976, 13015), 
'frbpoppy.CosmicPopulation.complex', 'CosmicPopulation.complex', (['self.pop_size'], {}), '(self.pop_size)\n', (13000, 13015), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((14469, 14505), 'numpy.meshgrid', 'np.meshgrid', (['dm_igm_slopes', 'dm_hosts'], {}), '(dm_igm_slopes, dm_hosts)\n', (14480, 14505), True, 'import numpy as np\n'), ((600, 629), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (614, 629), False, 'import os\n'), ((691, 705), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (703, 705), True, 'import pandas as pd\n'), ((1798, 1815), 'frbpoppy.Survey', 'Survey', ([], {'name': 'name'}), '(name=name)\n', (1804, 1815), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((2259, 2275), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (2272, 2275), False, 'import os\n'), ((2289, 2300), 'os.mkdir', 'os.mkdir', (['f'], {}), '(f)\n', (2297, 2300), False, 'import os\n'), ((2341, 2354), 'numpy.isnan', 'np.isnan', (['run'], {}), '(run)\n', (2349, 2354), True, 'import numpy as np\n'), ((3369, 3381), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3379, 3381), False, 'import uuid\n'), ((3746, 3754), 'glob.glob', 'glob', (['fs'], {}), '(fs)\n', (3750, 3754), False, 'from glob import glob\n'), ((3861, 3900), 'frbpoppy.CosmicPopulation.complex', 'CosmicPopulation.complex', (['self.pop_size'], {}), '(self.pop_size)\n', (3885, 3900), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((6191, 6214), 'numpy.linspace', 'np.linspace', (['(38)', '(46)', '(11)'], {}), '(38, 46, 11)\n', (6202, 6214), True, 'import numpy as np\n'), ((6238, 6261), 'numpy.linspace', 'np.linspace', (['(38)', '(46)', '(11)'], {}), '(38, 46, 11)\n', (6249, 6261), True, 'import numpy as np\n'), ((6652, 6664), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6662, 6664), False, 'import uuid\n'), ((7073, 7081), 'glob.glob', 'glob', (['fs'], {}), 
'(fs)\n', (7077, 7081), False, 'from glob import glob\n'), ((7182, 7197), 'numpy.isnan', 'np.isnan', (['alpha'], {}), '(alpha)\n', (7190, 7197), True, 'import numpy as np\n'), ((7381, 7397), 'numpy.isnan', 'np.isnan', (['w_mean'], {}), '(w_mean)\n', (7389, 7397), True, 'import numpy as np\n'), ((7479, 7501), 'numpy.isnan', 'np.isnan', (['dm_igm_slope'], {}), '(dm_igm_slope)\n', (7487, 7501), True, 'import numpy as np\n'), ((7790, 7803), 'copy.deepcopy', 'deepcopy', (['pop'], {}), '(pop)\n', (7798, 7803), False, 'from copy import deepcopy\n'), ((9253, 9275), 'numpy.linspace', 'np.linspace', (['(-3)', '(1)', '(11)'], {}), '(-3, 1, 11)\n', (9264, 9275), True, 'import numpy as np\n'), ((9688, 9700), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9698, 9700), False, 'import uuid\n'), ((10065, 10073), 'glob.glob', 'glob', (['fs'], {}), '(fs)\n', (10069, 10073), False, 'from glob import glob\n'), ((10175, 10190), 'numpy.isnan', 'np.isnan', (['alpha'], {}), '(alpha)\n', (10183, 10190), True, 'import numpy as np\n'), ((10323, 10340), 'numpy.isnan', 'np.isnan', (['lum_min'], {}), '(lum_min)\n', (10331, 10340), True, 'import numpy as np\n'), ((10436, 10458), 'numpy.isnan', 'np.isnan', (['dm_igm_slope'], {}), '(dm_igm_slope)\n', (10444, 10458), True, 'import numpy as np\n'), ((10683, 10696), 'copy.deepcopy', 'deepcopy', (['pop'], {}), '(pop)\n', (10691, 10696), False, 'from copy import deepcopy\n'), ((12545, 12557), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12555, 12557), False, 'import uuid\n'), ((12922, 12930), 'glob.glob', 'glob', (['fs'], {}), '(fs)\n', (12926, 12930), False, 'from glob import glob\n'), ((13032, 13047), 'numpy.isnan', 'np.isnan', (['alpha'], {}), '(alpha)\n', (13040, 13047), True, 'import numpy as np\n'), ((13180, 13197), 'numpy.isnan', 'np.isnan', (['lum_min'], {}), '(lum_min)\n', (13188, 13197), True, 'import numpy as np\n'), ((13293, 13309), 'numpy.isnan', 'np.isnan', (['w_mean'], {}), '(w_mean)\n', (13301, 13309), True, 'import numpy as np\n'), 
((13484, 13497), 'copy.deepcopy', 'deepcopy', (['pop'], {}), '(pop)\n', (13492, 13497), False, 'from copy import deepcopy\n'), ((2438, 2454), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (2451, 2454), False, 'import os\n'), ((2472, 2483), 'os.mkdir', 'os.mkdir', (['f'], {}), '(f)\n', (2480, 2483), False, 'import os\n'), ((3772, 3784), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (3781, 3784), False, 'import os\n'), ((4038, 4054), 'numpy.isnan', 'np.isnan', (['w_mean'], {}), '(w_mean)\n', (4046, 4054), True, 'import numpy as np\n'), ((4144, 4166), 'numpy.isnan', 'np.isnan', (['dm_igm_slope'], {}), '(dm_igm_slope)\n', (4152, 4166), True, 'import numpy as np\n'), ((5672, 5694), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_cpu'}), '(n_jobs=n_cpu)\n', (5680, 5694), False, 'from joblib import Parallel, delayed\n'), ((7099, 7111), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (7108, 7111), False, 'import os\n'), ((8007, 8038), 'frbpoppy.SurveyPopulation', 'SurveyPopulation', (['t_pop', 'survey'], {}), '(t_pop, survey)\n', (8023, 8038), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((8759, 8781), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_cpu'}), '(n_jobs=n_cpu)\n', (8767, 8781), False, 'from joblib import Parallel, delayed\n'), ((10091, 10103), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (10100, 10103), False, 'import os\n'), ((10858, 10889), 'frbpoppy.SurveyPopulation', 'SurveyPopulation', (['t_pop', 'survey'], {}), '(t_pop, survey)\n', (10874, 10889), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((11598, 11620), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_cpu'}), '(n_jobs=n_cpu)\n', (11606, 11620), False, 'from joblib import Parallel, delayed\n'), ((12948, 12960), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (12957, 12960), False, 'import os\n'), ((13868, 13899), 'frbpoppy.SurveyPopulation', 'SurveyPopulation', (['t_pop', 'survey'], {}), '(t_pop, 
survey)\n', (13884, 13899), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((14584, 14606), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_cpu'}), '(n_jobs=n_cpu)\n', (14592, 14606), False, 'from joblib import Parallel, delayed\n'), ((3190, 3203), 'numpy.array', 'np.array', (['opt'], {}), '(opt)\n', (3198, 3203), True, 'import numpy as np\n'), ((6446, 6459), 'numpy.array', 'np.array', (['opt'], {}), '(opt)\n', (6454, 6459), True, 'import numpy as np\n'), ((8559, 8573), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (8571, 8573), False, 'import os\n'), ((8598, 8612), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (8610, 8612), False, 'import os\n'), ((8696, 8708), 'numpy.array', 'np.array', (['mg'], {}), '(mg)\n', (8704, 8708), True, 'import numpy as np\n'), ((8874, 8884), 'tqdm.tqdm', 'tqdm', (['loop'], {}), '(loop)\n', (8878, 8884), False, 'from tqdm import tqdm\n'), ((9491, 9504), 'numpy.array', 'np.array', (['opt'], {}), '(opt)\n', (9499, 9504), True, 'import numpy as np\n'), ((11406, 11420), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (11418, 11420), False, 'import os\n'), ((11445, 11459), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (11457, 11459), False, 'import os\n'), ((11535, 11547), 'numpy.array', 'np.array', (['mg'], {}), '(mg)\n', (11543, 11547), True, 'import numpy as np\n'), ((11713, 11723), 'tqdm.tqdm', 'tqdm', (['loop'], {}), '(loop)\n', (11717, 11723), False, 'from tqdm import tqdm\n'), ((12340, 12353), 'numpy.array', 'np.array', (['opt'], {}), '(opt)\n', (12348, 12353), True, 'import numpy as np\n'), ((14384, 14398), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (14396, 14398), False, 'import os\n'), ((14423, 14437), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (14435, 14437), False, 'import os\n'), ((14521, 14533), 'numpy.array', 'np.array', (['mg'], {}), '(mg)\n', (14529, 14533), True, 'import numpy as np\n'), ((14699, 14709), 'tqdm.tqdm', 'tqdm', (['loop'], {}), '(loop)\n', 
(14703, 14709), False, 'from tqdm import tqdm\n'), ((4643, 4660), 'numpy.isnan', 'np.isnan', (['lum_min'], {}), '(lum_min)\n', (4651, 4660), True, 'import numpy as np\n'), ((4908, 4937), 'frbpoppy.SurveyPopulation', 'SurveyPopulation', (['pop', 'survey'], {}), '(pop, survey)\n', (4924, 4937), False, 'from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint\n'), ((5549, 5563), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (5561, 5563), False, 'import os\n'), ((5592, 5606), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (5604, 5606), False, 'import os\n'), ((5695, 5714), 'joblib.delayed', 'delayed', (['iter_alpha'], {}), '(iter_alpha)\n', (5702, 5714), False, 'from joblib import Parallel, delayed\n'), ((5727, 5734), 'tqdm.tqdm', 'tqdm', (['r'], {}), '(r)\n', (5731, 5734), False, 'from tqdm import tqdm\n'), ((8782, 8800), 'joblib.delayed', 'delayed', (['adapt_pop'], {}), '(adapt_pop)\n', (8789, 8800), False, 'from joblib import Parallel, delayed\n'), ((8813, 8823), 'tqdm.tqdm', 'tqdm', (['loop'], {}), '(loop)\n', (8817, 8823), False, 'from tqdm import tqdm\n'), ((11621, 11639), 'joblib.delayed', 'delayed', (['adapt_pop'], {}), '(adapt_pop)\n', (11628, 11639), False, 'from joblib import Parallel, delayed\n'), ((11652, 11662), 'tqdm.tqdm', 'tqdm', (['loop'], {}), '(loop)\n', (11656, 11662), False, 'from tqdm import tqdm\n'), ((14607, 14625), 'joblib.delayed', 'delayed', (['adapt_pop'], {}), '(adapt_pop)\n', (14614, 14625), False, 'from joblib import Parallel, delayed\n'), ((14638, 14648), 'tqdm.tqdm', 'tqdm', (['loop'], {}), '(loop)\n', (14642, 14648), False, 'from tqdm import tqdm\n')] |
import keras
import logging
import numpy as np
import tensorflow as tf
from collections import OrderedDict, deque
from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply
from keras.models import Model, load_model
import os
import sys
sys.path.append('../utils/')
sys.path.append('../metrics/')
from file_operation import write_result_to_trec_format, load_pickle, retain_file
from evaluations import evaluate_trec
from model import BasicModel, NBatchLogger
from nprf_knrm_config import NPRFKNRMConfig
from nprf_knrm_pair_generator import NPRFKNRMPairGenerator
from relevance_info import Relevance
from result import Result
from rank_losses import rank_hinge_loss
class NPRFKNRM(BasicModel):
    """NPRF-KNRM re-ranker: a Neural Pseudo Relevance Feedback model on top
    of K-NRM kernel features.

    A candidate document is scored by comparing it against the top-ranked
    ("supervised") documents of the baseline run through kernel features
    (``dd_input``), optionally gated by those documents' baseline retrieval
    scores (``baseline_doc_score``).
    """

    def __init__(self, config):
        super(NPRFKNRM, self).__init__(config)
        # Near-zero symmetric initialisation for the document-gate weight;
        # the fixed seed keeps training runs reproducible.
        self.initializer_gate = keras.initializers.RandomUniform(minval=-0.01, maxval=0.01, seed=118)

    def build(self):
        """Assemble and return the (uncompiled) Keras scoring model.

        Inputs
        ------
        dd_input : (nb_supervised_doc, kernel_size)
            Kernel features between the candidate and each supervised doc.
        baseline_doc_score : (nb_supervised_doc, 1)
            Baseline retrieval scores, used to gate the supervised docs.

        Returns
        -------
        keras.Model producing one relevance score per candidate document.
        """
        # qd_input = Input((self.config.kernel_size,), name="qd_input")
        dd_input = Input((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input')
        # z = Dense(self.config.hidden_size, activation='tanh', name="qd_hidden")(qd_input)
        # qd_out = Dense(self.config.out_size, name="qd_out")(z)
        # Per-supervised-document hidden representation and initial score.
        z = Dense(self.config.hidden_size, activation='tanh', name="dd_hidden")(dd_input)
        dd_init_out = Dense(self.config.out_size, name='dd_init_out')(z)

        dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
        # Learned scalar gate per supervised document, driven by its baseline score.
        dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
        # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)
        dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
        dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)

        # config.method selects how per-document scores are combined:
        #   1: dense layers on the raw scores (no gating)
        #   2: multiplicative gating, then dense layers
        #   3: gated sum (gate applied inside the dot product below)
        if self.config.method in [1, 3]:  # no doc gating, with dense layer
            z = dd_init_out
        elif self.config.method == 2:
            logging.info("Apply doc gating")
            z = Multiply(name='dd_out')([dd_init_out, dd_w])
        else:
            raise ValueError("Method not initialized, please check config file")

        if self.config.method in [1, 2]:
            logging.info("Dense layer on top")
            z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
            out = Dense(self.config.merge_out, name='score')(z)
        else:
            logging.info("Apply doc gating, No dense layer on top, sum up scores")
            # Dot over the document axis == gate-weighted sum of scores.
            out = Dot(axes=[1, 1], name='score')([z, dd_w])

        model = Model(inputs=[dd_input, dd_gate], outputs=[out])
        print(model.summary())
        return model

    def train_wrapper(self, fold, output_file,):
        """Build the model and train it on one cross-validation fold.

        Returns the evaluation metrics produced by ``BasicModel.train``.
        """
        pair_generator = NPRFKNRMPairGenerator(**self.config.generator_params)
        model = self.build()
        # adagrad
        model.compile(optimizer=self.config.optimizer, loss=rank_hinge_loss)
        eval_met = self.train(model, pair_generator, fold, output_file, use_nprf=True)
        return eval_met

    def eval_by_qid_list_helper(self, qid_list, pair_generator):
        """Prepare batched re-ranking features for the given query ids.

        Queries with fewer supervised documents than
        ``config.nb_supervised_doc`` cannot form the doc-to-doc feature
        tensor; their baseline ranking is kept and they are returned
        pre-filled in ``res_dict``.

        Returns
        -------
        ([dd_d, score_gate], len_indicator, res_dict, qualified_qid_list)
            ``len_indicator[i]`` is the number of candidate scores that
            belong to ``qualified_qid_list[i]`` in the flattened batch.
        """
        relevance_dict = load_pickle(self.config.relevance_dict_path)
        qid_list = sorted(qid_list)
        qualified_qid_list = []
        res_dict = OrderedDict()
        for qid in qid_list:
            relevance = relevance_dict.get(qid)
            supervised_docid_list = relevance.get_supervised_docid_list()
            if len(supervised_docid_list) < self.config.nb_supervised_doc:
                # cannot construct d2d feature, thus not need to be update
                score_list = relevance.get_supervised_score_list()
                res = Result(qid, supervised_docid_list, score_list, self.config.runid)
                res_dict.update({qid: res})
                logging.warn("query {0} not to be rerank".format(qid))
            else:
                qualified_qid_list.append(qid)
        # generate re rank features
        dd_d, score_gate, len_indicator = \
            pair_generator.generate_list_batch(qualified_qid_list, self.config.rerank_topk)

        return [dd_d, score_gate], len_indicator, res_dict, qualified_qid_list

    def eval_by_qid_list(self, X, len_indicator, res_dict, qualified_qid_list, model,
                         relevance_dict, rerank_topk, nb_supervised_doc, doc_topk_term, qrels_file,
                         docnolist_file, runid, output_file, ):
        """Score the prepared batch, merge with the baseline rankings,
        write a TREC-format run file, and return its trec_eval metrics.
        """
        # qd_d, dd_d, score_gate = X
        # dd_q, dd_d = list(map(lambda x: x[:, :nb_supervised_doc, : doc_topk_term, :], [dd_q, dd_d]))
        topk_score_all = model.predict_on_batch(X)
        topk_score_all = topk_score_all.flatten()

        for i, qid in enumerate(qualified_qid_list):
            relevance = relevance_dict.get(qid)
            supervised_docid_list = relevance.get_supervised_docid_list()
            # Slice this query's scores out of the flat prediction vector.
            topk_score = topk_score_all[sum(len_indicator[:i]): sum(len_indicator[:i]) + len_indicator[i]]
            if len(supervised_docid_list) <= rerank_topk:
                score_list = topk_score
            else:
                # Documents below the re-rank cutoff keep their baseline order:
                # they get scores strictly below the re-ranked minimum, strictly
                # decreasing down the original ranking.
                behind_score = np.min(topk_score) - 0.001 - np.sort(np.random.random((len(supervised_docid_list) - rerank_topk,)))
                score_list = np.concatenate((topk_score, behind_score))
            res = Result(qid, supervised_docid_list, score_list, runid)
            res.update_ranking()
            res_dict.update({qid: res})
        # print "generate score {0}".format(time.time()-t)
        write_result_to_trec_format(res_dict, output_file, docnolist_file)
        met = evaluate_trec(qrels_file, output_file)

        return met
if __name__ == '__main__':
    # Entry point: optionally select a cross-validation fold and output
    # file via ``--fold <k> <output>``; otherwise train fold 1 to 'temp'.
    conf = NPRFKNRMConfig()
    ddmknrm = NPRFKNRM(conf)
    argv = sys.argv
    # BUG FIX: the original read ``argv[1]`` unconditionally, so running the
    # script with no arguments raised IndexError instead of falling through
    # to the documented defaults. Guard the lookup and check all three
    # arguments of the --fold form are present.
    if len(argv) > 3 and argv[1] == '--fold':
        fold = int(argv[2])
        temp = argv[3]
    else:
        fold = 1
        temp = 'temp'
    ddmknrm.train_wrapper(fold, temp)
| [
"keras.initializers.RandomUniform",
"nprf_knrm_pair_generator.NPRFKNRMPairGenerator",
"collections.OrderedDict",
"evaluations.evaluate_trec",
"logging.info",
"keras.layers.Dot",
"nprf_knrm_config.NPRFKNRMConfig",
"file_operation.load_pickle",
"file_operation.write_result_to_trec_format",
"keras.la... | [((295, 323), 'sys.path.append', 'sys.path.append', (['"""../utils/"""'], {}), "('../utils/')\n", (310, 323), False, 'import sys\n'), ((324, 354), 'sys.path.append', 'sys.path.append', (['"""../metrics/"""'], {}), "('../metrics/')\n", (339, 354), False, 'import sys\n'), ((5364, 5380), 'nprf_knrm_config.NPRFKNRMConfig', 'NPRFKNRMConfig', ([], {}), '()\n', (5378, 5380), False, 'from nprf_knrm_config import NPRFKNRMConfig\n'), ((856, 925), 'keras.initializers.RandomUniform', 'keras.initializers.RandomUniform', ([], {'minval': '(-0.01)', 'maxval': '(0.01)', 'seed': '(118)'}), '(minval=-0.01, maxval=0.01, seed=118)\n', (888, 925), False, 'import keras\n'), ((1029, 1114), 'keras.layers.Input', 'Input', (['(self.config.nb_supervised_doc, self.config.kernel_size)'], {'name': '"""dd_input"""'}), "((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input'\n )\n", (1034, 1114), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((1430, 1498), 'keras.layers.Input', 'Input', (['(self.config.nb_supervised_doc, 1)'], {'name': '"""baseline_doc_score"""'}), "((self.config.nb_supervised_doc, 1), name='baseline_doc_score')\n", (1435, 1498), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((2540, 2588), 'keras.models.Model', 'Model', ([], {'inputs': '[dd_input, dd_gate]', 'outputs': '[out]'}), '(inputs=[dd_input, dd_gate], outputs=[out])\n', (2545, 2588), False, 'from keras.models import Model, load_model\n'), ((2704, 2757), 'nprf_knrm_pair_generator.NPRFKNRMPairGenerator', 'NPRFKNRMPairGenerator', ([], {}), '(**self.config.generator_params)\n', (2725, 2757), False, 'from nprf_knrm_pair_generator import NPRFKNRMPairGenerator\n'), ((3061, 3105), 'file_operation.load_pickle', 'load_pickle', (['self.config.relevance_dict_path'], {}), '(self.config.relevance_dict_path)\n', (3072, 3105), False, 'from 
file_operation import write_result_to_trec_format, load_pickle, retain_file\n'), ((3181, 3194), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3192, 3194), False, 'from collections import OrderedDict, deque\n'), ((5194, 5260), 'file_operation.write_result_to_trec_format', 'write_result_to_trec_format', (['res_dict', 'output_file', 'docnolist_file'], {}), '(res_dict, output_file, docnolist_file)\n', (5221, 5260), False, 'from file_operation import write_result_to_trec_format, load_pickle, retain_file\n'), ((5271, 5309), 'evaluations.evaluate_trec', 'evaluate_trec', (['qrels_file', 'output_file'], {}), '(qrels_file, output_file)\n', (5284, 5309), False, 'from evaluations import evaluate_trec\n'), ((1268, 1335), 'keras.layers.Dense', 'Dense', (['self.config.hidden_size'], {'activation': '"""tanh"""', 'name': '"""dd_hidden"""'}), "(self.config.hidden_size, activation='tanh', name='dd_hidden')\n", (1273, 1335), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((1364, 1411), 'keras.layers.Dense', 'Dense', (['self.config.out_size'], {'name': '"""dd_init_out"""'}), "(self.config.out_size, name='dd_init_out')\n", (1369, 1411), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((1510, 1597), 'keras.layers.Dense', 'Dense', (['(1)'], {'kernel_initializer': 'self.initializer_gate', 'use_bias': '(False)', 'name': '"""dd_gate"""'}), "(1, kernel_initializer=self.initializer_gate, use_bias=False, name=\n 'dd_gate')\n", (1515, 1597), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((1736, 1777), 'keras.layers.Reshape', 'Reshape', (['(self.config.nb_supervised_doc,)'], {}), '((self.config.nb_supervised_doc,))\n', (1743, 1777), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, 
Multiply\n'), ((1802, 1843), 'keras.layers.Reshape', 'Reshape', (['(self.config.nb_supervised_doc,)'], {}), '((self.config.nb_supervised_doc,))\n', (1809, 1843), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((2208, 2242), 'logging.info', 'logging.info', (['"""Dense layer on top"""'], {}), "('Dense layer on top')\n", (2220, 2242), False, 'import logging\n'), ((2402, 2472), 'logging.info', 'logging.info', (['"""Apply doc gating, No dense layer on top, sum up scores"""'], {}), "('Apply doc gating, No dense layer on top, sum up scores')\n", (2414, 2472), False, 'import logging\n'), ((5020, 5073), 'result.Result', 'Result', (['qid', 'supervised_docid_list', 'score_list', 'runid'], {}), '(qid, supervised_docid_list, score_list, runid)\n', (5026, 5073), False, 'from result import Result\n'), ((1991, 2023), 'logging.info', 'logging.info', (['"""Apply doc gating"""'], {}), "('Apply doc gating')\n", (2003, 2023), False, 'import logging\n'), ((2253, 2324), 'keras.layers.Dense', 'Dense', (['self.config.merge_hidden'], {'activation': '"""tanh"""', 'name': '"""merge_hidden"""'}), "(self.config.merge_hidden, activation='tanh', name='merge_hidden')\n", (2258, 2324), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((2340, 2382), 'keras.layers.Dense', 'Dense', (['self.config.merge_out'], {'name': '"""score"""'}), "(self.config.merge_out, name='score')\n", (2345, 2382), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((2485, 2515), 'keras.layers.Dot', 'Dot', ([], {'axes': '[1, 1]', 'name': '"""score"""'}), "(axes=[1, 1], name='score')\n", (2488, 2515), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((3539, 3604), 'result.Result', 'Result', (['qid', 'supervised_docid_list', 
'score_list', 'self.config.runid'], {}), '(qid, supervised_docid_list, score_list, self.config.runid)\n', (3545, 3604), False, 'from result import Result\n'), ((4964, 5006), 'numpy.concatenate', 'np.concatenate', (['(topk_score, behind_score)'], {}), '((topk_score, behind_score))\n', (4978, 5006), True, 'import numpy as np\n'), ((2034, 2057), 'keras.layers.Multiply', 'Multiply', ([], {'name': '"""dd_out"""'}), "(name='dd_out')\n", (2042, 2057), False, 'from keras.layers import Input, Dense, Lambda, Dropout, Dot, Permute, Reshape, Embedding, Concatenate, Multiply\n'), ((4843, 4861), 'numpy.min', 'np.min', (['topk_score'], {}), '(topk_score)\n', (4849, 4861), True, 'import numpy as np\n')] |
import numpy as np
import numba
# Problem setup: interpolate f(x) = exp(sin(x)) at the n Chebyshev-Gauss
# nodes and recover the Chebyshev coefficients of the interpolant.
n = 12
x, _ = np.polynomial.chebyshev.chebgauss(n)  # nodes (quadrature weights unused)
V = np.polynomial.chebyshev.chebvander(x, n-1)  # Chebyshev pseudo-Vandermonde matrix
a = np.exp(np.sin(x))  # function values at the nodes
b = np.cos(x)*a  # exact derivative: d/dx exp(sin(x)) = cos(x) * exp(sin(x))
c = np.linalg.solve(V, a)  # Chebyshev coefficients of the degree n-1 interpolant
def chebeval1(x, c):
    """Evaluate the Chebyshev series with coefficients ``c`` at ``x``
    using Clenshaw's backward recurrence.
    """
    x2 = 2*x
    # Seed the recurrence with the two highest-order coefficients, then
    # fold in the remaining coefficients from c[-3] down to c[0].
    b0, b1 = c[-2], c[-1]
    for coef in c[-3::-1]:
        b0, b1 = coef - b1, b0 + b1*x2
    return b0 + b1*x
# Derivative, method 1: differentiate the coefficient series with chebder,
# then evaluate the differentiated series by Clenshaw summation at each node.
cp = np.polynomial.chebyshev.chebder(c)
d = np.array([chebeval1(xi, cp) for xi in x])
print('Standard way error: {:0.2e}'.format(np.abs(b-d).max()))
def chebeval_d1(x, c):
    """Evaluate the derivative of the Chebyshev series ``c`` at ``x``.

    Runs the Clenshaw recurrence for the series and for its derivative
    side by side; the constant term c[0] never enters because it
    differentiates away.
    """
    x2 = 2*x
    b0 = c[-2] + x2*c[-1]
    b1 = c[-1]
    g0, g1 = 2*c[-1], 0.0
    # Fold in coefficients c[-3] down to c[1] (c[0] is skipped on purpose).
    for coef in c[-3:0:-1]:
        # derivative recurrence first -- it needs the current b0 ...
        g0, g1 = 2*b0 + x2*g0 - g1, g0
        # ... then the series recurrence
        b0, b1 = coef + x2*b0 - b1, b0
    return b0 + x*g0 - g1
# Derivative, method 2: direct summation via the coupled Clenshaw
# recurrence on the original coefficients (no chebder step needed).
d = np.array([chebeval_d1(xi, c) for xi in x])
print('New way error grad: {:0.2e}'.format(np.abs(b-d).max()))
| [
"numpy.abs",
"numpy.linalg.solve",
"numpy.sin",
"numpy.polynomial.chebyshev.chebder",
"numpy.zeros",
"numpy.cos",
"numpy.polynomial.chebyshev.chebvander",
"numpy.polynomial.chebyshev.chebgauss"
] | [((48, 84), 'numpy.polynomial.chebyshev.chebgauss', 'np.polynomial.chebyshev.chebgauss', (['n'], {}), '(n)\n', (81, 84), True, 'import numpy as np\n'), ((89, 133), 'numpy.polynomial.chebyshev.chebvander', 'np.polynomial.chebyshev.chebvander', (['x', '(n - 1)'], {}), '(x, n - 1)\n', (123, 133), True, 'import numpy as np\n'), ((174, 195), 'numpy.linalg.solve', 'np.linalg.solve', (['V', 'a'], {}), '(V, a)\n', (189, 195), True, 'import numpy as np\n'), ((451, 485), 'numpy.polynomial.chebyshev.chebder', 'np.polynomial.chebyshev.chebder', (['c'], {}), '(c)\n', (482, 485), True, 'import numpy as np\n'), ((490, 501), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (498, 501), True, 'import numpy as np\n'), ((1008, 1019), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1016, 1019), True, 'import numpy as np\n'), ((143, 152), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (149, 152), True, 'import numpy as np\n'), ((158, 167), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (164, 167), True, 'import numpy as np\n'), ((596, 609), 'numpy.abs', 'np.abs', (['(b - d)'], {}), '(b - d)\n', (602, 609), True, 'import numpy as np\n'), ((1115, 1128), 'numpy.abs', 'np.abs', (['(b - d)'], {}), '(b - d)\n', (1121, 1128), True, 'import numpy as np\n')] |
from sporco.util import tikhonov_filter
import pywt
import numpy as np
from imageio import imread
def Fusion_DWT_db2(image1, image2):
    """Fuse two images in the wavelet domain (Daubechies db2, 2-level DWT).

    The approximation band and the coarsest (level-2) detail sub-bands are
    averaged between the two inputs; the finest detail sub-bands are taken
    from ``image1`` unchanged. The fused image is rebuilt with the inverse
    transform.
    """
    coeffs_a = pywt.wavedec2(image1, 'db2', level=2)
    coeffs_b = pywt.wavedec2(image2, 'db2', level=2)
    # Start from image1's decomposition and overwrite the fused bands.
    fused = list(coeffs_a)
    # Average the approximation coefficients.
    fused[0] = (coeffs_a[0] + coeffs_b[0]) * 0.5
    # Average each of the three coarsest detail sub-bands (H, V, D).
    fused[1] = tuple((d1 + d2) * 0.5 for d1, d2 in zip(coeffs_a[1], coeffs_b[1]))
    # Reconstruct the fused image from the merged decomposition.
    return pywt.waverec2(fused, 'db2')
def lowpass(s, lda, npad):  # Low-pass / high-pass split via sporco's Tikhonov filter.
    """Decompose image ``s`` into low- and high-frequency components.

    Parameters
    ----------
    s : ndarray
        Input image (float, callers below scale to [0, 1]).
    lda : float
        Tikhonov regularisation parameter controlling the split.
    npad : int
        Border padding applied before filtering.

    Returns
    -------
    tuple
        ``(low, high)`` component pair -- callers unpack both values.
    """
    return tikhonov_filter(s, lda, npad)
def signaltonoise(a, axis, ddof):
    """Signal-to-noise ratio: mean divided by standard deviation along
    ``axis``, with ``ddof`` delta degrees of freedom for the std.

    Positions where the standard deviation is zero yield 0 instead of a
    division blow-up (mirrors the retired scipy.stats.signaltonoise).
    """
    data = np.asanyarray(a)
    mu = data.mean(axis)
    sigma = data.std(axis=axis, ddof=ddof)
    return np.where(sigma == 0, 0, mu / sigma)
# idx selects the image pair; the data set ships 21 infrared and 21 visible
# images named IV_images/IR<k>.png and IV_images/VIS<k>.png (k = 1..21).
for idx in range(1, 22, 1):
    vis = imread('IV_images/VIS%d.png' % idx)
    ir = imread('IV_images/IR%d.png' % idx)
    npad = 16  # border padding for the Tikhonov low-pass filter
    lda = 5    # Tikhonov regularisation parameter
    vis_low, vis_high = lowpass(vis.astype(np.float32) / 255, lda, npad)
    ir_low, ir_high = lowpass(ir.astype(np.float32) / 255, lda, npad)
    # Fuse the full visible image with only the high-frequency IR detail.
    # NOTE(review): vis_low, vis_high and ir_low are computed but unused.
    img = Fusion_DWT_db2(vis.astype(np.float32) / 255, ir_high)
    print("\nsignal to noise ratio for image_%2d: %5.2f" % (idx, np.max(signaltonoise(img, axis=0, ddof=0))))
| [
"pywt.wavedec2",
"sporco.util.tikhonov_filter",
"numpy.where",
"numpy.asanyarray",
"imageio.imread",
"pywt.waverec2"
] | [((253, 290), 'pywt.wavedec2', 'pywt.wavedec2', (['image1', '"""db2"""'], {'level': '(2)'}), "(image1, 'db2', level=2)\n", (266, 290), False, 'import pywt\n'), ((312, 349), 'pywt.wavedec2', 'pywt.wavedec2', (['image2', '"""db2"""'], {'level': '(2)'}), "(image2, 'db2', level=2)\n", (325, 349), False, 'import pywt\n'), ((971, 1007), 'pywt.waverec2', 'pywt.waverec2', (['coefficients_h', '"""db2"""'], {}), "(coefficients_h, 'db2')\n", (984, 1007), False, 'import pywt\n'), ((1140, 1169), 'sporco.util.tikhonov_filter', 'tikhonov_filter', (['s', 'lda', 'npad'], {}), '(s, lda, npad)\n', (1155, 1169), False, 'from sporco.util import tikhonov_filter\n'), ((1214, 1230), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (1227, 1230), True, 'import numpy as np\n'), ((1300, 1328), 'numpy.where', 'np.where', (['(sd == 0)', '(0)', '(m / sd)'], {}), '(sd == 0, 0, m / sd)\n', (1308, 1328), True, 'import numpy as np\n'), ((1485, 1520), 'imageio.imread', 'imread', (["('IV_images/VIS%d.png' % idx)"], {}), "('IV_images/VIS%d.png' % idx)\n", (1491, 1520), False, 'from imageio import imread\n'), ((1530, 1564), 'imageio.imread', 'imread', (["('IV_images/IR%d.png' % idx)"], {}), "('IV_images/IR%d.png' % idx)\n", (1536, 1564), False, 'from imageio import imread\n')] |
#!/usr/local/bin
import pickle
import itertools
import heapq
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy.stats import binom
import matplotlib.pyplot as plt
def generate_mi(rsIDs):
    """Build an (ID, BASE) pandas MultiIndex, one row per rsID/base pair.

    Useful for reindexing count data for a new subset of rsIDs: each rsID
    contributes four consecutive rows, one per nucleotide A, C, G, T.
    """
    BASES = ['A', 'C', 'G', 'T']
    pairs = [(rsid, base) for rsid in rsIDs for base in BASES]
    return pd.MultiIndex.from_tuples(pairs, names=['ID', 'BASE'])
class AEIData(object):
    """ Multi-index implementation of CountMatrix

    Holds per-SNP, per-base allele counts (rows indexed by an (rsID, base)
    MultiIndex, samples as columns). Genotypes and a dbSNP VCF annotation
    are attached afterwards so heterozygous sites can be tested for
    allelic expression imbalance (AEI).
    """

    def __init__(self, filename):
        data = self._load(filename)
        rsIDs = data['rsIDs']
        # Might implement this in the future as a multi_index
        self.df = pd.DataFrame(data['counts'], index=generate_mi(rsIDs))
        # Division by 1.0 forces a float dtype so later ratio arithmetic
        # never hits integer division.
        self.df = self.df/float(1)
        self.rsIDs = rsIDs

    def _load(self, filename):
        """ Loads a count matrix that is outputed from allele_counter.py
        """
        # NOTE(review): the file handle is never closed -- consider a
        # with-block here.
        pkl_file = open(filename, 'rb')
        return(pickle.load(pkl_file))

    def set_genotypes(self, genotypes):
        """ Genotypes is a pandas Dataframe containing genotype information
        from the samples
        """
        #:TODO try some column (sample) automatching. Not always applicable however.
        self.genotypes = genotypes.reindex(index = self.rsIDs)

    def set_annotations(self, annotationfile):
        """ Loads a VCF SNP annotation file from dbSNP and automatically aligns
        data from the new set with the old set.

        After this call ``self.df``, ``self.genotypes`` and ``self.rsIDs``
        are all reindexed to the SNPs found in the annotation.
        """
        # Check if it is already a DataFrame
        # VCFs and BEDFiles
        # VCF files usually have really long comment sections
        # Read in file first and find the header line
        # :TODO Need to parse VCF header
        annot = pd.read_csv(annotationfile, sep="\t")
        # Collapse duplicate IDs: keep one representative row per rsID.
        grouped = annot.groupby("ID")
        index = [gp_keys[0] for gp_keys in grouped.groups.values()]
        temp = annot.reindex(index)
        temp.index = temp["ID"]
        # Drop these extraneous columns
        temp = temp.drop(["QUAL", "FILTER", "INFO", "ID"], axis=1)
        # Reindex according to the count data
        self.annot = temp.reindex(self.rsIDs)
        # Deal with mismatches in the annotation file used to generate
        # Need to use CHROM since it's the only float value, and np.NaN is
        # used for missing values.
        self.annot = self.annot[np.logical_not(np.isnan(self.annot["CHROM"]))]
        self.df = self.df.reindex(generate_mi(self.annot.index))
        self.genotypes = self.genotypes.reindex(self.annot.index)
        self.rsIDs = list(self.annot.index)

    def mask(self, count_threshold = 20, impute_threshold = 0.5):
        """ Mask locations that aren't heterozygotes and the locations that
        don't meet a read count threshold.

        Populates ``self.binom_p`` (one-sided binomial p-values of REF vs
        ALT counts) and ``self.ratio`` (REF fraction of REF+ALT reads).
        NOTE(review): ``impute_threshold`` is currently unused.
        """
        try:
            # Reducing the dataframe based on annotations
            ref_tup = zip(self.annot.index, self.annot["REF"])
            alt_tup = zip(self.annot.index, self.annot["ALT"])
            ref = self.df.ix[ref_tup]
            alt = self.df.ix[alt_tup]
            # Need to collapse the multi index
            # :TODO find a more rigorous way to do this
            ref.index = self.genotypes.index
            alt.index = self.genotypes.index
            # Genotypes appear to be coded 0/1/2; values outside (0.5, 1.5)
            # mark homozygotes to be masked -- TODO confirm coding.
            hets = np.logical_or(self.genotypes < 0.5, self.genotypes > 1.5)
            sums = (ref + alt) < count_threshold
            ref[np.logical_or(hets, sums)] = np.NaN
            alt[np.logical_or(hets, sums)] = np.NaN
            # sf(alt - 1, n, 0.5) == P(X >= alt) under a fair-coin binomial.
            self.binom_p = pd.DataFrame(binom.sf(alt - 1, ref + alt, 0.5),
                    columns=self.df.columns,index=ref.index)
            self.ratio = ref/((ref+alt)/float(1))
        except AttributeError:
            print("Need to run set_annotations and set_genotyeps first")

    def binom_test(self):
        """Placeholder -- binomial testing is done in :meth:`mask`."""
        pass

    def to_R_dataframe(self):
        """ Converts the data to an R-compatible data frame. (Not implemented.)
        """
        pass

    def to_UCSC_track(self):
        """ Creates a bedfile with the combined p-values at each of the SNPs
        """
        pass

    def to_SQL(self):
        """ Write the data frame to SQL.
        """
        pass
class CountMatrix(object):
    """ Count Matrix holds counts from sequencing data

    Counts are stored as a pandas Panel: items are rsIDs, the minor axis
    holds the four bases, and the major axis holds the samples.
    """

    def __init__(self, filename):
        data = self._load(filename)
        # Might implement this in the future as a multi_index
        self.df = pd.Panel(data['counts'], items=data['rsIDs'],
                      minor_axis=['A', 'C', 'G', 'T'])
        self.index = data['rsIDs']

    def _load(self, filename):
        """ Loads a count matrix that is outputed from allele_counter.py
        """
        # Context manager guarantees the file handle is closed.
        with open(filename, 'rb') as pkl_file:
            return pickle.load(pkl_file)

    def ratio_df(self, threshold = 20):
        """ Converts the allele count pd.Panel to a dataframe of the ratios

        Populates ``self.major``, ``self.minor``, ``self.sums`` and
        ``self.ratio``; positions with fewer than ``threshold`` total
        reads are masked with NaN in the ratio.
        """
        # This could be easily optimized
        def _second_largest(x):
            # Second-highest base count == putative minor allele count.
            x = heapq.nlargest(2, x)
            return float(x[1])
        major = self.df.apply(max, axis=2)
        minor = self.df.apply(_second_largest, axis=2)
        # Make sure that the indexes match up. Remove the ones that don't
        # Check the other way as well
        total = major + minor
        ratio = major/total
        sum_matrix = self.df.apply(sum, axis=2)
        self.sums = sum_matrix.transpose()
        self.major = major.transpose()
        self.minor = minor.transpose()
        self.ratio = ratio.transpose()
        # Masking low-coverage positions also hides any 0/0 divisions above.
        self.ratio[self.sums < threshold] = np.NaN

    def binom_test(self):
        """Keep the two largest allele counts per position (biallelic model)."""
        # Probably slower, but certainly more flexible
        self.biallelic = self.df.apply(heapq.nlargest, n=2)

    def set_genotypes(self, genotypes):
        """ Genotypes is a pandas Dataframe containing genotype information
        from the samples
        """
        self.genotypes = genotypes.reindex(index = self.df.items)
        # Automatically check if the genotypes are the same

    def set_annotations(self, annotationfile):
        """ Sets what the major allele frequencies and the minor allele
        frequencies are using a VCF annotation file.
        """
        # Check if it is already a DataFrame
        # VCFs and BEDFiles
        # VCF files usually have really long comment sections
        # Read in file first and find the header line
        annot = pd.read_csv(annotationfile, sep="\t")
        # BUG FIX: this previously read ``anot.groupby`` (NameError).
        # Collapse duplicate IDs: keep one representative row per rsID.
        grouped = annot.groupby("ID")
        index = [gp_keys[0] for gp_keys in grouped.groups.values()]
        temp = annot.reindex(index)
        temp.index = temp["ID"]
        # Drop these extraneous columns
        # BUG FIX: drop columns (axis=1), not rows, and keep the result --
        # mirrors AEIData.set_annotations.
        temp = temp.drop(["QUAL", "FILTER", "INFO", "ID"], axis=1)
        # Reindex according to the count data
        self.annot = temp.reindex(self.df.items)
        # Deal with mismatches in the annotation file used to generate the
        # counts. BUG FIX: test the numeric CHROM column -- isnan on the
        # string-valued REF column would raise a TypeError.
        self.annot = self.annot[np.logical_not(np.isnan(self.annot["CHROM"]))]
def collide_snps(snp_annotation, gff, genes_of_interest = None):
""" Grabs all SNPs that reside within a gene.
Arguments
---------
Returns
-------
A dictionary of genes with keys being the gene names and a list of SNPs
being the values.
"""
out_dict = defaultdict(list)
gene_ids = gff[8].apply(lambda x: (x.split("; ")[-1]
.lstrip('gene_id "')
.rstrip('"')))
if genes_of_interest:
gene_ids = gene_ids.apply(lambda x: x in genes_of_interest)
m_matrix = gff.ix[np.logical_and(gene_ids, gff.ix[:, 2] == 'exonic_part'),
[2,3,4,8]]
#gene_ids = gene_ids[np.logical_and(
| [
"itertools.chain",
"itertools.cycle",
"pandas.read_csv",
"numpy.logical_and",
"pickle.load",
"numpy.logical_or",
"pandas.Panel",
"scipy.stats.binom.sf",
"heapq.nlargest",
"collections.defaultdict",
"numpy.isnan",
"pandas.MultiIndex.from_tuples"
] | [((341, 385), 'itertools.chain', 'itertools.chain', (['*[([i] * 4) for i in rsIDs]'], {}), '(*[([i] * 4) for i in rsIDs])\n', (356, 385), False, 'import itertools\n'), ((446, 501), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['tuples'], {'names': "['ID', 'BASE']"}), "(tuples, names=['ID', 'BASE'])\n", (471, 501), True, 'import pandas as pd\n'), ((7228, 7245), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7239, 7245), False, 'from collections import defaultdict\n'), ((411, 433), 'itertools.cycle', 'itertools.cycle', (['BASES'], {}), '(BASES)\n', (426, 433), False, 'import itertools\n'), ((1055, 1076), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (1066, 1076), False, 'import pickle\n'), ((1817, 1854), 'pandas.read_csv', 'pd.read_csv', (['annotationfile'], {'sep': '"""\t"""'}), "(annotationfile, sep='\\t')\n", (1828, 1854), True, 'import pandas as pd\n'), ((4432, 4510), 'pandas.Panel', 'pd.Panel', (["data['counts']"], {'items': "data['rsIDs']", 'minor_axis': "['A', 'C', 'G', 'T']"}), "(data['counts'], items=data['rsIDs'], minor_axis=['A', 'C', 'G', 'T'])\n", (4440, 4510), True, 'import pandas as pd\n'), ((4743, 4764), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (4754, 4764), False, 'import pickle\n'), ((6380, 6417), 'pandas.read_csv', 'pd.read_csv', (['annotationfile'], {'sep': '"""\t"""'}), "(annotationfile, sep='\\t')\n", (6391, 6417), True, 'import pandas as pd\n'), ((3348, 3405), 'numpy.logical_or', 'np.logical_or', (['(self.genotypes < 0.5)', '(self.genotypes > 1.5)'], {}), '(self.genotypes < 0.5, self.genotypes > 1.5)\n', (3361, 3405), True, 'import numpy as np\n'), ((4984, 5004), 'heapq.nlargest', 'heapq.nlargest', (['(2)', 'x'], {}), '(2, x)\n', (4998, 5004), False, 'import heapq\n'), ((7533, 7588), 'numpy.logical_and', 'np.logical_and', (['gene_ids', "(gff.ix[:, 2] == 'exonic_part')"], {}), "(gene_ids, gff.ix[:, 2] == 'exonic_part')\n", (7547, 7588), True, 'import numpy 
as np\n'), ((2459, 2488), 'numpy.isnan', 'np.isnan', (["self.annot['CHROM']"], {}), "(self.annot['CHROM'])\n", (2467, 2488), True, 'import numpy as np\n'), ((3471, 3496), 'numpy.logical_or', 'np.logical_or', (['hets', 'sums'], {}), '(hets, sums)\n', (3484, 3496), True, 'import numpy as np\n'), ((3523, 3548), 'numpy.logical_or', 'np.logical_or', (['hets', 'sums'], {}), '(hets, sums)\n', (3536, 3548), True, 'import numpy as np\n'), ((3599, 3632), 'scipy.stats.binom.sf', 'binom.sf', (['(alt - 1)', '(ref + alt)', '(0.5)'], {}), '(alt - 1, ref + alt, 0.5)\n', (3607, 3632), False, 'from scipy.stats import binom\n'), ((6881, 6908), 'numpy.isnan', 'np.isnan', (["self.annot['REF']"], {}), "(self.annot['REF'])\n", (6889, 6908), True, 'import numpy as np\n')] |
import numpy as np
import json
dist_npy = './result/submit/dist_65.npy' #生成的dist矩阵,距离越小越相似
dist = np.load(dist_npy,allow_pickle=True)
rank = np.argsort(dist)
rank0 = rank[:,0]
print(len(rank0))
print(len(set(rank0)))
query_name_npy = './result/submit/query_name.npy'
gallery_name_npy = './result/submit/gallery_name.npy'
query_name = np.load(query_name_npy,allow_pickle=True) # query 图片名字的list(与计算dist时 保持一致)
gallery_name = np.load(gallery_name_npy,allow_pickle=True) # gallery 图片名字list(与计算dist时 保持一致)
dist_cp = dist.copy()
dist_cp.sort(1)
dist_r1 = dist_cp[:,0]
rank1 = np.argsort(dist_r1)
dist_r1.sort()
print(dist_r1)
print(dist[rank1[1]][rank0[rank1[1]]])
flags = np.zeros(len(gallery_name))
result = {}
target_json = './11241.json' #提交结果文件
thr = dist_r1[int(len(rank1)*0.85)]
for i in range(len(dist)):
if i%50 == 0:
print(i)
print(sum(flags))
query_index = rank1[i]
gallery_list = np.argsort(dist)[query_index]
dist_i = dist[query_index]
result[query_name[query_index]]=[]
num = 0
first=True
for g in gallery_list:
if flags[g] == 1:
first=False
continue
if first:
# if i < int(len(query_name)*0.85):
flags[g] = 1
first = False
if dist_i[g] < thr:
flags[g] = 1
result[query_name[query_index]].append(gallery_name[g])
num += 1
if num == 200:
break
with open(target_json,"w") as f:
json.dump(result,f)
| [
"numpy.argsort",
"numpy.load",
"json.dump"
] | [((98, 134), 'numpy.load', 'np.load', (['dist_npy'], {'allow_pickle': '(True)'}), '(dist_npy, allow_pickle=True)\n', (105, 134), True, 'import numpy as np\n'), ((142, 158), 'numpy.argsort', 'np.argsort', (['dist'], {}), '(dist)\n', (152, 158), True, 'import numpy as np\n'), ((337, 379), 'numpy.load', 'np.load', (['query_name_npy'], {'allow_pickle': '(True)'}), '(query_name_npy, allow_pickle=True)\n', (344, 379), True, 'import numpy as np\n'), ((427, 471), 'numpy.load', 'np.load', (['gallery_name_npy'], {'allow_pickle': '(True)'}), '(gallery_name_npy, allow_pickle=True)\n', (434, 471), True, 'import numpy as np\n'), ((575, 594), 'numpy.argsort', 'np.argsort', (['dist_r1'], {}), '(dist_r1)\n', (585, 594), True, 'import numpy as np\n'), ((1485, 1505), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (1494, 1505), False, 'import json\n'), ((921, 937), 'numpy.argsort', 'np.argsort', (['dist'], {}), '(dist)\n', (931, 937), True, 'import numpy as np\n')] |
# Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import pytest
import numpy as np
import nnabla as nn
from nnabla_ext.cuda.experimental import dali_iterator
@pytest.mark.skipif("not dali_iterator.enabled")
def test_dali_iterator():
class Iterator1(object):
def __init__(self, shape1, shape2):
self.i = 0
self.shape1 = shape1
self.shape2 = shape2
def next(self):
df = np.full(self.shape1, self.i, dtype=np.float32)
di = np.full(self.shape2, self.i, dtype=np.int32)
self.i += 1
return df, di
shape1 = (2, 3)
shape2 = (2, 2)
it = Iterator1(shape1, shape2)
for i in range(5):
df, di = it.next()
assert df.shape == shape1
assert di.shape == shape2
it = Iterator1(shape1, shape2)
# Testing non_blocking=True because we know it's safe in the following loop.
# --> TODO: Noticed that non_blocking option is not suppported as of 2019/10/11.
dali_it = dali_iterator.create_dali_iterator_from_data_iterator(
it, non_blocking=False)
for i in range(5):
ddf, ddi = dali_it.next()
assert isinstance(ddf, nn.NdArray)
assert isinstance(ddi, nn.NdArray)
assert ddf.dtype == np.float32
assert ddi.dtype == np.int32
# The following synchronizes null stream to host.
# So, it's safe in non_blocking mode.
np.testing.assert_allclose(ddf.get_data('r'), i)
np.testing.assert_equal(ddi.get_data('r'), i)
| [
"numpy.full",
"nnabla_ext.cuda.experimental.dali_iterator.create_dali_iterator_from_data_iterator",
"pytest.mark.skipif"
] | [((758, 805), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not dali_iterator.enabled"""'], {}), "('not dali_iterator.enabled')\n", (776, 805), False, 'import pytest\n'), ((1606, 1683), 'nnabla_ext.cuda.experimental.dali_iterator.create_dali_iterator_from_data_iterator', 'dali_iterator.create_dali_iterator_from_data_iterator', (['it'], {'non_blocking': '(False)'}), '(it, non_blocking=False)\n', (1659, 1683), False, 'from nnabla_ext.cuda.experimental import dali_iterator\n'), ((1037, 1083), 'numpy.full', 'np.full', (['self.shape1', 'self.i'], {'dtype': 'np.float32'}), '(self.shape1, self.i, dtype=np.float32)\n', (1044, 1083), True, 'import numpy as np\n'), ((1101, 1145), 'numpy.full', 'np.full', (['self.shape2', 'self.i'], {'dtype': 'np.int32'}), '(self.shape2, self.i, dtype=np.int32)\n', (1108, 1145), True, 'import numpy as np\n')] |
# coding: utf8
import unittest
import numpy as np
import sys
sys.path.append('..')
from dppy.beta_ensemble_polynomial_potential_core import TracyWidom
class TestTracyWidom(unittest.TestCase):
""" Based on the work of Bornemann 2010 `https://arxiv.org/pdf/0804.2543.pdf <https://arxiv.org/pdf/0804.2543.pdf>`_
"""
TW = TracyWidom()
def test_kernel_example_bornemann_fredholm_determinant_should_equal_sin1(self):
""" Equation 5.8 Bornemann
"""
def K_Green(x, y):
Y, X = np.meshgrid(x, y)
return np.where(X <= Y, X * (1 - Y), Y * (1 - X))
quad_order = 50
x_quad, w_quad = self.TW.compute_quadrature(quad_order)
fred_det_K_approx = self.TW.fredholm_determinant(K_Green,
x_quad,
w_quad)
fred_det_K_theo = np.sin(1)
self.assertAlmostEqual(fred_det_K_approx, fred_det_K_theo,
msg=(fred_det_K_approx, fred_det_K_theo),
delta=1e-5)
def test_change_of_variables_from_0_1_to_s_oo_should_be_increasing(self):
"""
.. todo::
Add refer to increasing choice
"""
points = np.linspace(0, 1, 10)
s = -1
phi, d_phi = self.TW.change_of_variable(s)
for x, y in zip(points[:-1], points[1:]):
with self.subTest(x=x, y=y):
self.assertLessEqual(phi(x), phi(y))
def test_change_of_variables_from_0_1_to_s_oo_derivative_is_correct(self):
points = np.linspace(0, 1, 10, endpoint=False)
s = -1
phi, d_phi = self.TW.change_of_variable(s)
eps = 1e-7
for x in points:
with self.subTest(x=x):
d_phi_x_approx = (phi(x + eps) - phi(x)) / eps
d_phi_x = d_phi(x)
self.assertAlmostEqual(d_phi_x_approx, d_phi_x,
msg=(x, d_phi_x_approx, d_phi_x),
delta=1e-2)
def test_evaluation_Tracy_Widom_cdf(self):
""" evalution points obtained from Table 5. in *LARGEST EIGENVALUES AND SAMPLE COVARIANCE MATRICES*, <NAME>. BEJAN
https://pdfs.semanticscholar.org/ca19/3484415f374d8fb02e7fbdad72b99727b41f.pdf?_ga=2.251544262.1964171041.1570206947-237360766.1567514713
"""
points = np.array([[-3.0, 0.080361],
[-2.5, 0.212392],
[-2.0, 0.413256],
[-1.5, 0.631401],
[-1.0, 0.807225],
[-0.5, 0.916070],
[0.0, 0.969375],
[0.5, 0.990545],
[1.0, 0.997506],
[1.5, 0.999432],
[2.0, 0.999888]])
quad_order = 50
tol = 1e-4
cdf_s_approx = self.TW.cdf(points[:, 0], quad_order)
self.assertTrue(np.allclose(cdf_s_approx, points[:, 1], atol=tol))
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"numpy.allclose",
"numpy.where",
"numpy.sin",
"dppy.beta_ensemble_polynomial_potential_core.TracyWidom",
"numpy.array",
"numpy.linspace",
"unittest.main",
"numpy.meshgrid",
"sys.path.append"
] | [((64, 85), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (79, 85), False, 'import sys\n'), ((336, 348), 'dppy.beta_ensemble_polynomial_potential_core.TracyWidom', 'TracyWidom', ([], {}), '()\n', (346, 348), False, 'from dppy.beta_ensemble_polynomial_potential_core import TracyWidom\n'), ((3120, 3135), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3133, 3135), False, 'import unittest\n'), ((919, 928), 'numpy.sin', 'np.sin', (['(1)'], {}), '(1)\n', (925, 928), True, 'import numpy as np\n'), ((1295, 1316), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1306, 1316), True, 'import numpy as np\n'), ((1626, 1663), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {'endpoint': '(False)'}), '(0, 1, 10, endpoint=False)\n', (1637, 1663), True, 'import numpy as np\n'), ((2445, 2657), 'numpy.array', 'np.array', (['[[-3.0, 0.080361], [-2.5, 0.212392], [-2.0, 0.413256], [-1.5, 0.631401], [-\n 1.0, 0.807225], [-0.5, 0.91607], [0.0, 0.969375], [0.5, 0.990545], [1.0,\n 0.997506], [1.5, 0.999432], [2.0, 0.999888]]'], {}), '([[-3.0, 0.080361], [-2.5, 0.212392], [-2.0, 0.413256], [-1.5, \n 0.631401], [-1.0, 0.807225], [-0.5, 0.91607], [0.0, 0.969375], [0.5, \n 0.990545], [1.0, 0.997506], [1.5, 0.999432], [2.0, 0.999888]])\n', (2453, 2657), True, 'import numpy as np\n'), ((528, 545), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (539, 545), True, 'import numpy as np\n'), ((565, 607), 'numpy.where', 'np.where', (['(X <= Y)', '(X * (1 - Y))', '(Y * (1 - X))'], {}), '(X <= Y, X * (1 - Y), Y * (1 - X))\n', (573, 607), True, 'import numpy as np\n'), ((3050, 3099), 'numpy.allclose', 'np.allclose', (['cdf_s_approx', 'points[:, 1]'], {'atol': 'tol'}), '(cdf_s_approx, points[:, 1], atol=tol)\n', (3061, 3099), True, 'import numpy as np\n')] |
"""Assorted plotting functions.
AUTHOR: <NAME> <britta.wstnr[at]gmail.com>
"""
import numpy as np
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img
def plot_score_std(x_ax, scores, title=None, colors=None, legend=None):
if colors is None:
colors = ['mediumseagreen', 'crimson', 'steelblue']
if len(scores) > 3:
raise ValueError("Please specify colors for plotting.")
for ii, score in enumerate(scores):
plt.plot(x_ax, score.mean(0), color=colors[ii])
ax = plt.gca()
ax.fill_between(x_ax,
score.mean(0) - np.std(score),
score.mean(0) + np.std(score),
alpha=.4, color=colors[ii])
plt.axvline(x=0., color='black')
plt.ylabel('AUC')
plt.xlim(x_ax[0], x_ax[-1])
plt.xlabel('time')
plt.title(title)
if legend is not None:
plt.legend(legend)
def plot_source_act(stc, fwd, mri=None, threshold=None, thresh_ref=None,
title=None, timepoint=None, save_fig=False,
fig_fname=None, cmap=None, vmax=None, display_mode='ortho',
coords=None, add_coords=False):
"""Plot source activity on volume.
Plots source activity on subject's MRI.
Parameters:
-----------
stc : dict
MNE Python beamformer output
fwd : forward operator
MNE forward model
mri : string | None
Can be path to a specific subject's brain or None for not having
any background image.
threshold : float | 'auto' | None
Threshold for plotting, if 'auto', nilearn's automatic threshold is
used, if None, no thresholding is done.
thresh_ref : string
Reference for thresholding. Can be 'all' to use maximum across time and
space or 'max_time' to use maximum time point or 'timepoint' to refer
to the time point given in timepoint.
title : string | None
Title for the figure.
timepoint : float | string
Time point that should be plotted. Can be given as index (int) or can
be 'max' to select the time point with maximal activity.
save_fig : bool
whether the figure should be saved
fig_fname : string
where to save the figure to
cmap : None | string
Matplotlib color map for plotting, passed to nilearn's plot_stat_map.
Popular choices might be "viridis" or "RdBu". From the nilearn doc:
The colormap must be symmetric. If None, the default color map will be
used."
vmax : None | float
Upper (and -lower) limit of the color bar.
display_mode : string
Display mode. See nilearn for details. Defaults to 'ortho'.
coords : None | list of tuples
Coordinates to cut and/or plot a marker at (see add_coords).
add_coords : bool
If True, a marker will be displayed at the coordinates provided in
coords.
Returns
-------
nilearn figure.
"""
img = stc.as_volume(fwd['src'], mri_resolution=False)
if timepoint is 'max':
vox, timepoint = np.unravel_index(stc.data.argmax(), stc.data.shape)
if thresh_ref is 'all':
threshold = np.max(stc.data) * threshold
elif thresh_ref is 'max_time':
if timepoint is not 'max':
# in that case, maximum time point needs to be calculated now:
_, m_tp = np.unravel_index(stc.data.argmax(), stc.data.shape)
threshold = np.max(stc.data[:, m_tp]) * threshold
elif thresh_ref is 'timepoint':
threshold = np.max(stc.data[:, timepoint] * threshold)
if save_fig is True:
if fig_fname is None:
raise ValueError("Please specify a file name to save figure to.")
if add_coords is True:
raise NotImplementedError("Cannot plot markers and save yet, "
"sorry.")
else:
fig_fname = None
if type(coords) is not list:
coords = [coords]
if display_mode is 'z':
# only take the z coordinate
cut_coords = tuple([x[2] for x in coords])
elif display_mode is 'ortho':
# only one cut coordinate supported
cut_coords = coords[0]
else:
raise NotImplementedError("Requested display mode is not "
"supported yet.")
display = plot_stat_map(index_img(img, timepoint), bg_img=mri,
threshold=threshold, title=title, cmap=cmap,
symmetric_cbar=True, vmax=vmax,
output_file=fig_fname, cut_coords=cut_coords,
display_mode=display_mode)
if add_coords is True:
if coords is None:
raise ValueError("Please provide coords for adding a marker.")
# add a marker
colors = ['w', 'y', 'g', 'k', 'b']
if len(coords) > len(colors):
raise ValueError("Can maximally plot 5 coordinates.")
else:
colors = colors[:len(coords)]
for coord, color in zip(coords, colors):
display.add_markers([coord], marker_color=color, marker_size=50)
# plt.show()
def plot_source_ts(stc, n_ts, abs=True, xlims=None, ylims=None, title=None,
save_fig=False, fig_fname=None):
"""Plot source time series.
Plots the n maximal time series in source space data.
Parameters:
-----------
stc : dict
MNE-Python source estimate.
n_ts : int
Number of time series to plot.
abs : bool
Whether the n time series should be picked on max() or max(abs()).
xlims : tuple | None
x axis limits for figure.
ylims : tuple | None
y axis limits for figure.
title : string | None
Title for the figure.
save_fig : bool
Whether figure should be saved to disk. Note that the figure will not
be shown in this case (nilearn properties).
fig_fname : str
Path for saving figure if save_fig=True.
Returns
-------
matplotlib figure
"""
plt.figure()
if abs:
plt.plot(stc.times,
stc.data[np.argsort(np.max(np.abs(stc.data), axis=1))
[-n_ts:]].T)
else:
plt.plot(stc.times,
stc.data[np.argsort(np.max(stc.data, axis=1))[-n_ts:]].T)
# figure axes and title
plt.xlabel('Time [s]')
plt.ylabel('LCMV value [a.u.]')
if xlims is not None:
plt.xlim(xlims)
else:
plt.xlim(stc.times.min(), stc.times.max())
if ylims is not None:
plt.ylim(ylims)
plt.title(title)
plt.show()
if save_fig is True:
if fig_fname is None:
raise ValueError("Please give a figure name to save to.")
plt.savefig(fig_fname, bbox_inches='tight')
def plot_covariance(cov, title=None, colorbar=True, show_fig=True,
save_fig=False, fig_fname=None):
"""Plot covariance matrix.
Plots covariance matrices.
Parameters:
-----------
cov : covariance matrix
MNE-Python covaraince matrix instance.
title : str
Title for plot.
colorbar : bool
Should color bar be added? Defaults to True.
show_fig : bool
Whether figure should be displayed.
save_fig : bool
Whether figure should be saved to disk. Note that the figure will not
be shown in this case (nilearn properties).
fig_fname : str
Path for saving figure if save_fig=True.
"""
# center the x limits wrt the smaller extreme (minimum or maximum)
v_abs = min(abs(cov['data'].min()), abs(cov['data'].max()))
# plotting
plt.figure()
plt.imshow(cov.data, vmin=-v_abs, vmax=v_abs, cmap='RdBu')
plt.title(title)
if colorbar:
plt.colorbar()
# show figure if applicable
if show_fig is True:
plt.show()
# saving
if save_fig:
if fig_fname is None:
raise ValueError("Please give a figure name to save to.")
plt.savefig(fig_fname, bbox_inches='tight')
| [
"matplotlib.pyplot.imshow",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"nilearn.image.index_img",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.title",
"ma... | [((781, 814), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.0)', 'color': '"""black"""'}), "(x=0.0, color='black')\n", (792, 814), True, 'import matplotlib.pyplot as plt\n'), ((818, 835), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AUC"""'], {}), "('AUC')\n", (828, 835), True, 'import matplotlib.pyplot as plt\n'), ((840, 867), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_ax[0]', 'x_ax[-1]'], {}), '(x_ax[0], x_ax[-1])\n', (848, 867), True, 'import matplotlib.pyplot as plt\n'), ((872, 890), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (882, 890), True, 'import matplotlib.pyplot as plt\n'), ((895, 911), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (904, 911), True, 'import matplotlib.pyplot as plt\n'), ((6136, 6148), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6146, 6148), True, 'import matplotlib.pyplot as plt\n'), ((6445, 6467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (6455, 6467), True, 'import matplotlib.pyplot as plt\n'), ((6472, 6503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""LCMV value [a.u.]"""'], {}), "('LCMV value [a.u.]')\n", (6482, 6503), True, 'import matplotlib.pyplot as plt\n'), ((6669, 6685), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6678, 6685), True, 'import matplotlib.pyplot as plt\n'), ((6690, 6700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6698, 6700), True, 'import matplotlib.pyplot as plt\n'), ((7732, 7744), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7742, 7744), True, 'import matplotlib.pyplot as plt\n'), ((7749, 7807), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cov.data'], {'vmin': '(-v_abs)', 'vmax': 'v_abs', 'cmap': '"""RdBu"""'}), "(cov.data, vmin=-v_abs, vmax=v_abs, cmap='RdBu')\n", (7759, 7807), True, 'import matplotlib.pyplot as plt\n'), ((7812, 7828), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', 
(7821, 7828), True, 'import matplotlib.pyplot as plt\n'), ((574, 583), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (581, 583), True, 'import matplotlib.pyplot as plt\n'), ((947, 965), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {}), '(legend)\n', (957, 965), True, 'import matplotlib.pyplot as plt\n'), ((4425, 4450), 'nilearn.image.index_img', 'index_img', (['img', 'timepoint'], {}), '(img, timepoint)\n', (4434, 4450), False, 'from nilearn.image import index_img\n'), ((6538, 6553), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (6546, 6553), True, 'import matplotlib.pyplot as plt\n'), ((6649, 6664), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylims'], {}), '(ylims)\n', (6657, 6664), True, 'import matplotlib.pyplot as plt\n'), ((6835, 6878), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_fname'], {'bbox_inches': '"""tight"""'}), "(fig_fname, bbox_inches='tight')\n", (6846, 6878), True, 'import matplotlib.pyplot as plt\n'), ((7854, 7868), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7866, 7868), True, 'import matplotlib.pyplot as plt\n'), ((7935, 7945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7943, 7945), True, 'import matplotlib.pyplot as plt\n'), ((8085, 8128), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_fname'], {'bbox_inches': '"""tight"""'}), "(fig_fname, bbox_inches='tight')\n", (8096, 8128), True, 'import matplotlib.pyplot as plt\n'), ((3253, 3269), 'numpy.max', 'np.max', (['stc.data'], {}), '(stc.data)\n', (3259, 3269), True, 'import numpy as np\n'), ((654, 667), 'numpy.std', 'np.std', (['score'], {}), '(score)\n', (660, 667), True, 'import numpy as np\n'), ((709, 722), 'numpy.std', 'np.std', (['score'], {}), '(score)\n', (715, 722), True, 'import numpy as np\n'), ((3521, 3546), 'numpy.max', 'np.max', (['stc.data[:, m_tp]'], {}), '(stc.data[:, m_tp])\n', (3527, 3546), True, 'import numpy as np\n'), ((3615, 3657), 'numpy.max', 'np.max', (['(stc.data[:, timepoint] * 
threshold)'], {}), '(stc.data[:, timepoint] * threshold)\n', (3621, 3657), True, 'import numpy as np\n'), ((6374, 6398), 'numpy.max', 'np.max', (['stc.data'], {'axis': '(1)'}), '(stc.data, axis=1)\n', (6380, 6398), True, 'import numpy as np\n'), ((6233, 6249), 'numpy.abs', 'np.abs', (['stc.data'], {}), '(stc.data)\n', (6239, 6249), True, 'import numpy as np\n')] |
#! /usr/bin/python
###################################################################################################
#
# Python Simulator for Sapphire Lattice Crypto-Processor
#
# Author: <NAME>
# Last Modified: 25-Nov-2019
#
# Inputs: Parameters (n,q), Operating Conditions, Program, Simulation Options
# Outputs: Instruction Count, Cycle Count, Total Time, Average Power, Total Energy
#
###################################################################################################
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import math, sys, os, re, random
from sha3 import *
from core import *
from encoding import *
# Read / Write Cycle Counts
READ_CYCLES = 2 # read data from the crypto core
WRITE_CYCLES = 2 # write data to the crypto core
# Supported Parameters
valid_n = [64, 128, 256, 512, 1024, 2048]
valid_q = [3329, 7681, 12289, 40961, 65537, 120833, 133121, 184321, 4205569, 4206593, 8058881, 8380417, 8404993]
# Power Consumption Table (Current in uA at 1.1 V and 72 MHz)
idd_dict = {
"ctrl" : 1815,
"reg_alu" : 3271,
"reg_poly" : 2795,
"sha3" : 6115,
"poly_read_write" : 6145,
"poly_init" : 6120,
"poly_bitrev" : 6212,
"poly_copy" : 6183,
"poly_eq_check" : 5523,
"poly_norm_check" : 3019,
"poly_shift" : 6201,
"poly_hash" : 7503,
"poly_sum_elems" : 3630,
"poly_max_elems" : 3184,
"poly_mult_psi" : { 3329: 7546, 7681: 7335, 12289: 8067, 40961: 9032, 65537: 7455, 120833: 8890, 133121: 8055, 184321: 8740, 4205569: 10418, 4206593: 9352, 8058881: 11726, 8380417: 8441, 8404993: 9156 },
"poly_ntt" : { 3329: 8591, 7681: 8483, 12289: 9589, 40961: 10783, 65537: 8619, 120833: 10764, 133121: 9958, 184321: 10585, 4205569: 13455, 4206593: 12657, 8058881: 14365, 8380417: 10366, 8404993: 10922 },
"poly_poly_addsub" : { 3329: 5022, 7681: 5290, 12289: 5523, 40961: 5717, 65537: 5464, 120833: 5950, 133121: 5688, 184321: 6125, 4205569: 6422, 4206593: 6498, 8058881: 6862, 8380417: 5921, 8404993: 6071 },
"poly_poly_mul" : { 3329: 7557, 7681: 7347, 12289: 8075, 40961: 9046, 65537: 7464, 120833: 8900, 133121: 8066, 184321: 8753, 4205569: 10433, 4206593: 9367, 8058881: 11734, 8380417: 8454, 8404993: 9173 },
"poly_const_addsub" : { 3329: 3558, 7681: 3581, 12289: 3640, 40961: 3640, 65537: 3630, 120833: 3630, 133121: 3611, 184321: 3644, 4205569: 3653, 4206593: 3655, 8058881: 3620, 8380417: 3611, 8404993: 3628 },
"poly_const_mul" : { 3329: 5946, 7681: 5736, 12289: 6134, 40961: 6940, 65537: 5794, 120833: 7144, 133121: 6396, 184321: 7142, 4205569: 8822, 4206593: 7756, 8058881: 9939, 8380417: 7046, 8404993: 7562 },
"poly_const_and" : 3504,
"poly_const_or" : 3552,
"poly_const_xor" : 3514,
"poly_const_shift" : 3484,
"sample_rej" : 6755,
"sample_bin" : 7545,
"sample_cdt" : 2764,
"sample_uni" : 7573,
"sample_tri_1" : 3645,
"sample_tri_2" : 3627,
"sample_tri_3" : 6791,
}
# Instruction decode and execute
def instr_exec(instr, iter_count):
global keccak_buf
global proc_regs
global poly_mem
global poly_tmp
global param_n
global param_q
global ticks
global pc
global power
instr_t = instr.replace(" ", "")
# INSTRUCTION - Parameter Configuration
matchObj = re.match(r'config\(n=(\d+),q=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
param_n = int(matchObj.group(1))
param_q = int(matchObj.group(2))
#print("config: n = %d, q = %d" % (param_n, param_q))
if param_n not in valid_n:
print("\n[Line %4d] %s\nERROR: Unsupported parameter \"n = %d\" (Valid \"n\": %s)\n" % (lines[pc], instr, param_n, valid_n))
exit()
if param_q not in valid_q:
print("\n[Line %4d] %s\nERROR: Unsupported parameter \"q = %d\" (Valid prime \"q\": %s)\n" % (lines[pc], instr, param_q, valid_q))
exit()
# Initialize polynomial memory
poly_mem = [[0 for i in range(param_n)] for j in range(int(8192/param_n))]
poly_tmp = [0 for i in range(param_n)]
#poly_mem = np.zeros((int(8192/param_n), param_n))
#poly_tmp = np.zeros((param_n))
#poly_mem = np.array(poly_mem, dtype=np.int64).tolist()
#poly_tmp = np.array(poly_mem, dtype=np.int64).tolist()
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 0
# INSTRUCTION - Register Write Operation
matchObj = re.match(r'c(\d)=(\d+)', instr_t, re.M|re.I)
if matchObj:
reg = int(matchObj.group(1))
val = int(matchObj.group(2))
if reg > 1:
print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg))
exit()
if val >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %s too big for 16-bit register \"c%d\"\n" % (lines[pc], instr, val, reg))
exit()
# Update register value
proc_regs["c%s" % reg] = val
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 1
matchObj = re.match(r'c(\d)=c(\d)([\+\-])(\d+)', instr_t, re.M|re.I)
if matchObj:
reg_dst = int(matchObj.group(1))
reg_src = int(matchObj.group(2))
val = int(matchObj.group(4))
if reg_dst > 1:
print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg_dst))
exit()
if reg_src > 1:
print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg_src))
exit()
if reg_dst != reg_src:
print("\n[Line %4d] %s\nERROR: Must use \"c0 = c0 +/- <val>\" or \"c1 = c1 +/- <val>\"\n" % (lines[pc], instr))
exit()
if val >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c%d\"\n" % (lines[pc], instr, val, reg_dst))
exit()
# Update register value
if matchObj.group(3) == "+":
proc_regs["c%d" % reg_dst] = (proc_regs["c%d" % reg_dst] + val) % 2**16
if matchObj.group(3) == "-":
proc_regs["c%d" % reg_dst] = (proc_regs["c%d" % reg_dst] - val) % 2**16
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["reg_alu"]]*2)
return 1
matchObj = re.match(r'reg=(\d+)', instr_t, re.M|re.I)
if matchObj:
val = int(matchObj.group(1))
if val >= 2**24:
print("\n[Line %4d] %s\nERROR: Value %d too big for 24-bit register \"reg\"\n" % (lines[pc], instr, val))
exit()
# Update register value
proc_regs["reg"] = val
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 1
matchObj = re.match(r'tmp=(\d+)', instr_t, re.M|re.I)
if matchObj:
val = int(matchObj.group(1))
if val >= 2**24:
print("\n[Line %4d] %s\nERROR: Value %d too big for 24-bit register \"tmp\"\n" % (lines[pc], instr, val))
exit()
# Update register value
proc_regs["tmp"] = val
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 1
matchObj = re.match(r'reg=tmp', instr_t, re.M|re.I)
if matchObj:
# Update register value
proc_regs["reg"] = proc_regs["tmp"]
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 1
# INSTRUCTION - Register ALU Operation
matchObj = re.match(r'tmp=tmp([\+\-\*&\|\^><][><]*)reg', instr_t, re.M|re.I)
if matchObj:
op = matchObj.group(1)
#print("op: %s" % op)
if op == "+":
# Update register value
proc_regs["tmp"] = (proc_regs["tmp"] + proc_regs["reg"]) % param_q
elif op == "-":
# Update register value
proc_regs["tmp"] = (proc_regs["tmp"] - proc_regs["reg"]) % param_q
elif op == "*":
# Update register value
proc_regs["tmp"] = (proc_regs["tmp"] * proc_regs["reg"]) % param_q
elif op == "&":
# Update register value
proc_regs["tmp"] = proc_regs["tmp"] & proc_regs["reg"]
elif op == "|":
# Update register value
proc_regs["tmp"] = proc_regs["tmp"] | proc_regs["reg"]
elif op == "^":
# Update register value
proc_regs["tmp"] = proc_regs["tmp"] ^ proc_regs["reg"]
elif op == ">>":
# Update register value
if proc_regs["reg"] < 24:
proc_regs["tmp"] = (proc_regs["tmp"] >> proc_regs["reg"]) % 2**24
else:
proc_regs["tmp"] = 0
elif op == "<<":
# Update register value
if proc_regs["reg"] < 24:
proc_regs["tmp"] = (proc_regs["tmp"] << proc_regs["reg"]) % 2**24
else:
proc_regs["tmp"] = 0
else:
print("\n[Line %4d] %s\nERROR: Unsupported operation \"%s\", allowed operators are {+, -, *, &, |, ^, >>, <<}\n" % (lines[pc], instr, op))
exit()
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["reg_alu"]]*2)
return 1
# INSTRUCTION - Register Polynomial Operation
matchObj = re.match(r'reg=\(poly=(\d+)\)\[(\d+)\]', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
index = int(matchObj.group(2))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if index >= param_n:
print("\n[Line %4d] %s\nERROR: Index \"%d\" out of range, allowed indices for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, param_n))
exit()
# Read polynomial coefficient and update register value
proc_regs["reg"] = poly_mem[poly][index]
cycles = 2 + 1 + 2
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["reg_poly"]]*cycles)
return 2
matchObj = re.match(r'reg=\(poly=(\d+)\)\[c(\d)\]', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
reg = int(matchObj.group(2))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if int(matchObj.group(1)) > 1:
print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg))
exit()
# Read polynomial coefficient and update register value
proc_regs["reg"] = poly_mem[poly][proc_regs["c%d" % reg] % param_n]
cycles = 2 + 1 + 2
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["reg_poly"]]*cycles)
return 2
matchObj = re.match(r'\(poly=(\d+)\)\[(\d+)\]=reg', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
index = int(matchObj.group(2))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if index >= param_n:
print("\n[Line %4d] %s\nERROR: Index \"%d\" out of range, allowed indices for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, param_n))
exit()
# Read register value and update polynomial coefficient
poly_mem[poly][index] = proc_regs["reg"]
cycles = 2 + 1 + 1
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["reg_poly"]]*cycles)
return 2
matchObj = re.match(r'\(poly=(\d+)\)\[c(\d)\]=reg', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
reg = int(matchObj.group(2))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if reg > 1:
print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg))
exit()
# Read register value and update polynomial coefficient
poly_mem[poly][proc_regs["c%d" % reg] % param_n] = proc_regs["reg"]
cycles = 2 + 1 + 1
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["reg_poly"]]*cycles)
return 2
# INSTRUCTION - Polynomial Absolute Maximum in range [-q/2, + q/2]
matchObj = re.match(r'reg=max\(poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Compute maximum of coefficients and update register value
proc_regs["reg"] = 0
for i in range(param_n):
if poly_mem[poly][i] < int(param_q/2) and poly_mem[poly][i] > proc_regs["reg"]:
proc_regs["reg"] = poly_mem[poly][i]
if poly_mem[poly][i] >= int(param_q/2) and (param_q - poly_mem[poly][i]) > proc_regs["reg"]:
proc_regs["reg"] = (param_q - poly_mem[poly][i])
cycles = 2 + 1 + 1 + param_n
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_max_elems"]]*cycles)
return 2
# INSTRUCTION - Polynomial Sum of Coefficients in range [-q/2, + q/2]
matchObj = re.match(r'reg=sum\(poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Compute sum of coefficients and update register value
proc_regs["reg"] = 0
for i in range(param_n):
if poly_mem[poly][i] < int(param_q/2):
proc_regs["reg"] = proc_regs["reg"] + poly_mem[poly][i]
if poly_mem[poly][i] >= int(param_q/2):
proc_regs["reg"] = proc_regs["reg"] + (poly_mem[poly][i] - param_q)
proc_regs["reg"] = abs(proc_regs["reg"])
#print("sum = %d" % proc_regs["reg"])
cycles = 2 + 1 + 1 + param_n
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_sum_elems"]]*cycles)
return 2
# INSTRUCTION - Polynomial Number Theoretic Transform
matchObj = re.match(r'transform\(mode=(DI[FT]_I{0,1}NTT),poly_dst=(\d+),poly_src=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = matchObj.group(1)
poly_dst = int(matchObj.group(2))
poly_src = int(matchObj.group(3))
if poly_dst >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_dst = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_dst, param_n, int(8192/param_n)))
exit()
if poly_src >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_src = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_src, param_n, int(8192/param_n)))
exit()
if not ((poly_src < int(4096/param_n) and poly_dst >= int(4096/param_n)) or (poly_dst < int(4096/param_n) and poly_src >= int(4096/param_n))):
print("\n[Line %4d] %s\nERROR: Polynomial pair \"poly_dst = %d, poly_src = %d\" is not allowed for n = %d, ensure \"poly_dst < %d, poly_src >= %d\" or \"poly_src < %d, poly_dst >= %d\"\n" % (lines[pc], instr, poly_dst, poly_src, param_n, int(4096/param_n), int(4096/param_n), int(4096/param_n), int(4096/param_n)))
exit()
# Compute transform and update polynomial coefficients
if mode == "DIF_NTT":
# assume standard input, bit-reversed output
cycles = dif_ntt(param_n, param_q, poly_mem[poly_src], lines[pc], instr)
poly_mem[poly_dst] = poly_mem[poly_src].copy()
poly_mem[poly_src] = [(random.getrandbits(24) % param_q) for i in range(param_n)] # Source polynomial gets clobbered
if mode == "DIT_NTT":
# assume bit-reversed input, standard output
cycles = dit_ntt(param_n, param_q, poly_mem[poly_src], lines[pc], instr)
poly_mem[poly_dst] = poly_mem[poly_src].copy()
poly_mem[poly_src] = [(random.getrandbits(24) % param_q) for i in range(param_n)] # Source polynomial gets clobbered
if mode == "DIF_INTT":
# assume standard input, bit-reversed output
cycles = dif_intt(param_n, param_q, poly_mem[poly_src], lines[pc], instr)
poly_mem[poly_dst] = poly_mem[poly_src].copy()
poly_mem[poly_src] = [(random.getrandbits(24) % param_q) for i in range(param_n)] # Source polynomial gets clobbered
if mode == "DIT_INTT":
# assume bit-reversed input, standard output
cycles = dit_intt(param_n, param_q, poly_mem[poly_src], lines[pc], instr)
poly_mem[poly_dst] = poly_mem[poly_src].copy()
poly_mem[poly_src] = [(random.getrandbits(24) % param_q) for i in range(param_n)] # Source polynomial gets clobbered
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_ntt"][param_q]]*cycles)
# Need to copy polynomial when n is an even power of 2
if int(math.log(param_n,2)) % 2 == 0:
cycles = 2 + 1 + 1 + int(param_n/4)
ticks = ticks + cycles
power = power + ([idd_dict["poly_copy"]]*cycles)
return 3
# INSTRUCTION - Pre- and Post- Processing for Negative-Wrapped Convolution
matchObj = re.match(r'mult_psi\(poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Pre-process polynomial coefficients
cycles = mult_psi(param_n, param_q, poly_mem[poly], lines[pc], instr)
proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_mult_psi"][param_q]]*cycles)
return 3
matchObj = re.match(r'mult_psi_inv\(poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Pre-process polynomial coefficients
cycles = mult_psi_inv(param_n, param_q, poly_mem[poly], lines[pc], instr)
proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_mult_psi"][param_q]]*cycles)
return 3
# PSEUDO-INSTRUCTION - Rejection Sampling
matchObj = re.match(r'rej_sample\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
poly = int(matchObj.group(5))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
cycles = 2 + 2
# Sample polynomial coefficients
cycles = cycles + rejection_sample(param_n, param_q, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_rej"]]*cycles)
return 4
# PSEUDO-INSTRUCTION - Binomial Sampling
matchObj = re.match(r'bin_sample\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),k=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
param_k = int(matchObj.group(5))
poly = int(matchObj.group(6))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if param_k < 1 or param_k > 32:
print("\n[Line %4d] %s\nERROR: Value of \"k\" must be in the range 1 to 32\n" % (lines[pc], instr))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
cycles = 2 + 2
# Sample polynomial coefficients
cycles = cycles + binomial_sample(param_n, param_q, param_k, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_bin"]]*cycles)
return 4
# PSEUDO-INSTRUCTION - Cumulative Distribution Table Sampling
matchObj = re.match(r'cdt_sample\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),r=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
param_r = int(matchObj.group(5))
poly = int(matchObj.group(6))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if param_r < 1 or param_r > 32:
print("\n[Line %4d] %s\nERROR: Value of \"r\" must be in the range 1 to 32\n" % (lines[pc], instr))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if "--cdt" not in sys.argv:
print("\n[Line %4d] %s\nERROR: CDT not provided, please provide a valid CDT file to use CDT-based sampling\n" % (lines[pc], instr))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
cycles = 2 + 2
# Sample polynomial coefficients
cycles = cycles + cdt_sample(param_n, param_q, param_r, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), cdt_mem, poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_cdt"]]*cycles)
return 4
# PSEUDO-INSTRUCTION - Uniform Sampling
matchObj = re.match(r'uni_sample\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),eta=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
param_eta = int(matchObj.group(5))
poly = int(matchObj.group(6))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if param_eta >= param_q:
print("\n[Line %4d] %s\nERROR: Value of \"eta\" too large, must be less than %d\n" % (lines[pc], instr, param_q))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
proc_regs["reg"] = param_eta
cycles = 2 + 2 + 2
# Sample polynomial coefficients
cycles = cycles + uniform_sample(param_n, param_q, param_eta, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_uni"]]*cycles)
return 4
# PSEUDO-INSTRUCTION - Trinary Sampling #1
matchObj = re.match(r'tri_sample_1\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),m=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
param_m = int(matchObj.group(5))
poly = int(matchObj.group(6))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if param_m >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
cycles = 2 + 2
# Sample polynomial coefficients
cycles = cycles + trinary_sample_1(param_n, param_q, param_m, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_tri_1"]]*cycles)
return 4
# PSEUDO-INSTRUCTION - Trinary Sampling #2
matchObj = re.match(r'tri_sample_2\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),m0=(\d+),m1=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
param_m0 = int(matchObj.group(5))
param_m1 = int(matchObj.group(6))
poly = int(matchObj.group(7))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if param_m0 >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m0\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if param_m1 >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m1\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if (param_m0 + param_m1) >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m0 + m1\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
proc_regs["reg"] = param_m0 + (param_m1 * 2**12)
cycles = 2 + 2 + 2
# Sample polynomial coefficients
cycles = cycles + trinary_sample_2(param_n, param_q, param_m0, param_m1, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_tri_2"]]*cycles)
return 4
# PSEUDO-INSTRUCTION - Trinary Sampling #3
matchObj = re.match(r'tri_sample_3\(prng=SHAKE-(\d+),seed=r(\d),c0=(\d+),c1=(\d+),rho=1/(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
val_c0 = int(matchObj.group(3))
val_c1 = int(matchObj.group(4))
param_rho = int(matchObj.group(5))
poly = int(matchObj.group(6))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if val_c0 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c0\"\n" % (lines[pc], instr, val_c0))
exit()
if val_c1 >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %d too big for 16-bit register \"c1\"\n" % (lines[pc], instr, val_c1))
exit()
if param_rho != 2 and param_rho != 4 and param_rho != 8 and param_rho != 16 and param_rho != 32 and param_rho != 64 and param_rho != 128:
print("\n[Line %4d] %s\nERROR: Unsupported parameter \"rho = 1/%d\" (Valid \"rho\": [1/2, 1/4, 1/8, 1/16, 1/32, 1/64, 1/128])\n" % (lines[pc], instr, param_rho))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["c0"] = val_c0
proc_regs["c1"] = val_c1
cycles = 2 + 2
# Sample polynomial coefficients
cycles = cycles + trinary_sample_3(param_n, param_q, param_rho, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_tri_3"]]*cycles)
return 4
# INSTRUCTION - Rejection Sampling
matchObj = re.match(r'rej_sample\(prng=SHAKE-(\d+),seed=r(\d),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
poly = int(matchObj.group(3))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Sample polynomial coefficients
cycles = rejection_sample(param_n, param_q, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_rej"]]*cycles)
return 4
# INSTRUCTION - Binomial Sampling
matchObj = re.match(r'bin_sample\(prng=SHAKE-(\d+),seed=r(\d),k=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
param_k = int(matchObj.group(3))
poly = int(matchObj.group(4))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if param_k < 1 or param_k > 32:
print("\n[Line %4d] %s\nERROR: Value of \"k\" must be in the range 1 to 32\n" % (lines[pc], instr))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Sample polynomial coefficients
cycles = binomial_sample(param_n, param_q, param_k, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_bin"]]*cycles)
return 4
# INSTRUCTION - Cumulative Distribution Table Sampling
matchObj = re.match(r'cdt_sample\(prng=SHAKE-(\d+),seed=r(\d),r=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
param_r = int(matchObj.group(3))
poly = int(matchObj.group(4))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if param_r < 1 or param_r > 32:
print("\n[Line %4d] %s\nERROR: Value of \"r\" must be in the range 1 to 32\n" % (lines[pc], instr))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if "--cdt" not in sys.argv:
print("\n[Line %4d] %s\nERROR: CDT not provided, please provide a valid CDT file to use CDT-based sampling\n" % (lines[pc], instr))
exit()
# Sample polynomial coefficients
cycles = cdt_sample(param_n, param_q, param_r, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), cdt_mem, poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_cdt"]]*cycles)
return 4
# INSTRUCTION - Uniform Sampling
matchObj = re.match(r'uni_sample\(prng=SHAKE-(\d+),seed=r(\d),eta=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
param_eta = int(matchObj.group(3))
poly = int(matchObj.group(4))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if param_eta >= param_q:
print("\n[Line %4d] %s\nERROR: Value of \"eta\" too large, must be less than %d\n" % (lines[pc], instr, param_q))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["reg"] = param_eta
cycles = 2
# Sample polynomial coefficients
cycles = cycles + uniform_sample(param_n, param_q, param_eta, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_uni"]]*cycles)
return 4
# INSTRUCTION - Trinary Sampling #1
matchObj = re.match(r'tri_sample_1\(prng=SHAKE-(\d+),seed=r(\d),m=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
param_m = int(matchObj.group(3))
poly = int(matchObj.group(4))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if param_m >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Sample polynomial coefficients
cycles = trinary_sample_1(param_n, param_q, param_m, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_tri_1"]]*cycles)
return 4
# INSTRUCTION - Trinary Sampling #2
matchObj = re.match(r'tri_sample_2\(prng=SHAKE-(\d+),seed=r(\d),m0=(\d+),m1=(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
param_m0 = int(matchObj.group(3))
param_m1 = int(matchObj.group(4))
poly = int(matchObj.group(5))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if param_m0 >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m0\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if param_m1 >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m1\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if (param_m0 + param_m1) >= param_n:
print("\n[Line %4d] %s\nERROR: Value of \"m0 + m1\" too large, must be less than %d\n" % (lines[pc], instr, param_n))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Update register values
proc_regs["reg"] = param_m0 + (param_m1 * 2**12)
cycles = 2
# Sample polynomial coefficients
cycles = cycles + trinary_sample_2(param_n, param_q, param_m0, param_m1, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_tri_2"]]*cycles)
return 4
# INSTRUCTION - Trinary Sampling #3
matchObj = re.match(r'tri_sample_3\(prng=SHAKE-(\d+),seed=r(\d),rho=1/(\d+),poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
param_rho = int(matchObj.group(3))
poly = int(matchObj.group(4))
if mode != 128 and mode != 256:
print("\n[Line %4d] %s\nERROR: Only SHAKE-128 and SHAKE-256 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
if param_rho != 2 and param_rho != 4 and param_rho != 8 and param_rho != 16 and param_rho != 32 and param_rho != 64 and param_rho != 128:
print("\n[Line %4d] %s\nERROR: Unsupported parameter \"rho = 1/%d\" (Valid \"rho\": [1/2, 1/4, 1/8, 1/16, 1/32, 1/64, 1/128])\n" % (lines[pc], instr, param_rho))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Sample polynomial coefficients
cycles = trinary_sample_3(param_n, param_q, param_rho, mode, hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0') + hex(proc_regs["c0"])[2:].rstrip("L").rjust(4,'0') + hex(proc_regs["c1"])[2:].rstrip("L").rjust(4,'0'), poly_mem[poly])
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sample_tri_3"]]*cycles)
return 4
# INSTRUCTION - Polynomial Initialization
matchObj = re.match(r'init\(poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Set all polynomial coefficients to zero
poly_mem[poly] = [0 for i in range(param_n)]
cycles = 2 + 1 + 1 + int(param_n/4)
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_init"]]*cycles)
return 5
# INSTRUCTION - Polynomial Copy
matchObj = re.match(r'poly_copy\(poly_dst=(\d+),poly_src=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
poly_dst = int(matchObj.group(1))
poly_src = int(matchObj.group(2))
if poly_dst >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_dst = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_dst, param_n, int(8192/param_n)))
exit()
if poly_src >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_src = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_src, param_n, int(8192/param_n)))
exit()
# Copy polynomial coefficients (handle both fast and slow cases in cycle count)
poly_mem[poly_dst] = poly_mem[poly_src].copy()
if ((poly_src < int(4096/param_n) and poly_dst >= int(4096/param_n)) or (poly_dst < int(4096/param_n) and poly_src >= int(4096/param_n))):
cycles = 2 + 1 + 1 + int(param_n/4)
else:
cycles = 2 + 1 + 1 + (3*param_n)
proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_copy"]]*cycles)
return 5
    # Operations accepted by the polynomial ALU ("poly_op" instruction).
    supported_poly_ops = ["ADD", "SUB", "MUL", "BITREV", "CONST_ADD", "CONST_SUB", "CONST_MUL", "CONST_AND", "CONST_OR", "CONST_XOR", "CONST_RSHIFT", "CONST_LSHIFT"]
    # INSTRUCTION - Polynomial ALU Operations
    # Coefficient-wise operation combining poly_src with poly_dst (poly-poly ops)
    # or poly_src with the scalar "reg" register (CONST_* ops); result -> poly_dst.
    matchObj = re.match(r'poly_op\(op=([\w_]+),poly_dst=(\d+),poly_src=(\d+)\)', instr_t, re.M|re.I)
    if matchObj:
        op = matchObj.group(1)
        poly_dst = int(matchObj.group(2))
        poly_src = int(matchObj.group(3))
        # Polynomial memory holds 8192 coefficients -> 8192/n polynomials total.
        if poly_dst >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_dst = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_dst, param_n, int(8192/param_n)))
            exit()
        if poly_src >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_src = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_src, param_n, int(8192/param_n)))
            exit()
        # Source and destination must live in opposite halves of the polynomial
        # memory (two banks of 4096 coefficients each).
        if not ((poly_src < int(4096/param_n) and poly_dst >= int(4096/param_n)) or (poly_dst < int(4096/param_n) and poly_src >= int(4096/param_n))):
            print("\n[Line %4d] %s\nERROR: Polynomial pair \"poly_dst = %d, poly_src = %d\" is not allowed for n = %d, ensure \"poly_dst < %d, poly_src >= %d\" or \"poly_src < %d, poly_dst >= %d\"\n" % (lines[pc], instr, poly_dst, poly_src, param_n, int(4096/param_n), int(4096/param_n), int(4096/param_n), int(4096/param_n)))
            exit()
        #print("op: %s" % op)
        if op == "ADD":
            # Update polynomial coefficients (mod q)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (int(poly_mem[poly_src][i]) + int(poly_mem[poly_dst][i])) % param_q
            proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_poly_addsub"][param_q]]*cycles)
        elif op == "SUB":
            # Update polynomial coefficients (src - dst, kept non-negative mod q)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (int(poly_mem[poly_src][i]) - int(poly_mem[poly_dst][i]) + param_q) % param_q
            proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_poly_addsub"][param_q]]*cycles)
        elif op == "MUL":
            # Update polynomial coefficients (coefficient-wise product mod q)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (int(poly_mem[poly_src][i]) * int(poly_mem[poly_dst][i])) % param_q
            proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_poly_mul"][param_q]]*cycles)
        elif op == "BITREV":
            # Permute coefficients into bit-reversed index order (NTT helper)
            for i in range(param_n):
                i_rev = int(('{:0{w}b}'.format(i, w=int(math.log(param_n,2))))[::-1], 2)
                poly_mem[poly_dst][i_rev] = poly_mem[poly_src][i]
            cycles = 2 + 1 + (1+int(param_n/4))
            power = power + ([idd_dict["poly_bitrev"]]*cycles)
        elif op == "CONST_ADD":
            # Add scalar "reg" to every coefficient (mod q)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (int(poly_mem[poly_src][i]) + proc_regs["reg"]) % param_q
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_addsub"][param_q]]*cycles)
        elif op == "CONST_SUB":
            # Subtract scalar "reg" from every coefficient (mod q)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (int(poly_mem[poly_src][i]) - proc_regs["reg"] + param_q) % param_q
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_addsub"][param_q]]*cycles)
        elif op == "CONST_MUL":
            # Multiply every coefficient by scalar "reg" (mod q)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (int(poly_mem[poly_src][i]) * proc_regs["reg"]) % param_q
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_mul"][param_q]]*cycles)
        elif op == "CONST_AND":
            # Bitwise AND of every coefficient with scalar "reg" (no mod reduction)
            for i in range(param_n):
                poly_mem[poly_dst][i] = (poly_mem[poly_src][i] & proc_regs["reg"])
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_and"]]*cycles)
        elif op == "CONST_OR":
            # Bitwise OR of every coefficient with scalar "reg"
            for i in range(param_n):
                poly_mem[poly_dst][i] = (poly_mem[poly_src][i] | proc_regs["reg"])
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_or"]]*cycles)
        elif op == "CONST_XOR":
            # Bitwise XOR of every coefficient with scalar "reg"
            for i in range(param_n):
                poly_mem[poly_dst][i] = (poly_mem[poly_src][i] ^ proc_regs["reg"])
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_xor"]]*cycles)
        elif op == "CONST_RSHIFT":
            # Right-shift every coefficient by "reg"; shifts >= 24 clear the word
            # (coefficients are 24-bit, hence the % 2**24 truncation)
            for i in range(param_n):
                if proc_regs["reg"] < 24:
                    poly_mem[poly_dst][i] = (poly_mem[poly_src][i] >> proc_regs["reg"]) % 2**24
                else:
                    poly_mem[poly_dst][i] = 0
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_shift"]]*cycles)
        elif op == "CONST_LSHIFT":
            # Left-shift every coefficient by "reg", truncated to 24 bits
            for i in range(param_n):
                if proc_regs["reg"] < 24:
                    poly_mem[poly_dst][i] = (poly_mem[poly_src][i] << proc_regs["reg"]) % 2**24
                else:
                    poly_mem[poly_dst][i] = 0
            cycles = 2 + 1 + 1 + param_n
            power = power + ([idd_dict["poly_const_shift"]]*cycles)
        else:
            print("\n[Line %4d] %s\nERROR: Unsupported operation \"%s\", allowed operations are %s\n" % (lines[pc], instr, op, supported_poly_ops))
            exit()
        pc = pc + 1
        ticks = ticks + cycles
        return 5
# INSTRUCTION - Polynomial Circular Left Shift (Multiplication by x modulo x^N+1 and x^N-1)
matchObj = re.match(r'shift_poly\(ring=x\^N([\+\-])1,poly_dst=(\d+),poly_src=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
ring = matchObj.group(1)
poly_dst = int(matchObj.group(2))
poly_src = int(matchObj.group(3))
if poly_dst >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_dst = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_dst, param_n, int(8192/param_n)))
exit()
if poly_src >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly_src = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly_src, param_n, int(8192/param_n)))
exit()
if not ((poly_src < int(4096/param_n) and poly_dst >= int(4096/param_n)) or (poly_dst < int(4096/param_n) and poly_src >= int(4096/param_n))):
print("\n[Line %4d] %s\nERROR: Polynomial pair \"poly_dst = %d, poly_src = %d\" is not allowed for n = %d, ensure \"poly_dst < %d, poly_src >= %d\" or \"poly_src < %d, poly_dst >= %d\"\n" % (lines[pc], instr, poly_dst, poly_src, param_n, int(4096/param_n), int(4096/param_n), int(4096/param_n), int(4096/param_n)))
exit()
# Update polynomial coefficients
for i in range(1, param_n):
poly_mem[poly_dst][i] = poly_mem[poly_src][i-1]
if ring == "+":
poly_mem[poly_dst][0] = param_q - poly_mem[poly_scr][param_n-1]
if ring == "-":
poly_mem[poly_dst][0] = poly_mem[poly_scr][param_n-1]
cycles = 2 + 1 + 1 + int(param_n/4)
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_shift"]]*cycles)
return 5
    # INSTRUCTION - Polynomial Equality Check
    # Sets flag = 1 if the two polynomials are coefficient-wise identical.
    matchObj = re.match(r'flag=eq_check\(poly0=(\d+),poly1=(\d+)\)', instr_t, re.M|re.I)
    if matchObj:
        poly0 = int(matchObj.group(1))
        poly1 = int(matchObj.group(2))
        if poly0 >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly0 = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly0, param_n, int(8192/param_n)))
            exit()
        if poly1 >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly1 = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly1, param_n, int(8192/param_n)))
            exit()
        # Operands must be in opposite halves of polynomial memory.
        if not ((poly1 < int(4096/param_n) and poly0 >= int(4096/param_n)) or (poly0 < int(4096/param_n) and poly1 >= int(4096/param_n))):
            print("\n[Line %4d] %s\nERROR: Polynomial pair \"poly0 = %d, poly1 = %d\" is not allowed for n = %d, ensure \"poly0 < %d, poly1 >= %d\" or \"poly1 < %d, poly0 >= %d\"\n" % (lines[pc], instr, poly0, poly1, param_n, int(4096/param_n), int(4096/param_n), int(4096/param_n), int(4096/param_n)))
            exit()
        # Compare polynomial coefficients and update flag
        if poly_mem[poly0] == poly_mem[poly1]:
            proc_regs["flag"] = 1
        else:
            proc_regs["flag"] = 0
        proc_regs["tmp"] = random.getrandbits(24) # "tmp" register gets clobbered
        cycles = 2 + 1 + 2 + param_n
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["poly_eq_check"]]*cycles)
        return 6
    # INSTRUCTION - Polynomial Infinity Norm Check
    # Sets flag = 1 if every coefficient, interpreted as a signed value mod q,
    # has magnitude <= bound (i.e. lies in [0, bound] or [q-bound, q-1]).
    matchObj = re.match(r'flag=inf_norm_check\(poly=(\d+),bound=(\d+)\)', instr_t, re.M|re.I)
    if matchObj:
        poly = int(matchObj.group(1))
        bound = int(matchObj.group(2))
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        if bound >= 2**24:
            print("\n[Line %4d] %s\nERROR: Parameter \"bound = %d\" too large, must be less than 2**24\n" % (lines[pc], instr, bound))
            exit()
        # Update register value (the bound is loaded into "reg" first)
        proc_regs["reg"] = bound
        cycles = 2
        # Count coefficients that violate the bound; flag = 1 iff none do
        count = 0
        for i in range(param_n):
            if poly_mem[poly][i] > bound and poly_mem[poly][i] < (param_q - bound):
                count = count + 1
        if count == 0:
            proc_regs["flag"] = 1
        else:
            proc_regs["flag"] = 0
        cycles = cycles + 2 + 1 + 1 + param_n
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["poly_inf_norm_check"]]*cycles)
        return 6
    # INSTRUCTION - Register Comparison
    # Three-way compare of 16-bit counter register c0/c1 against an immediate:
    # flag = -1 (less), +1 (greater) or 0 (equal).
    matchObj = re.match(r'flag=compare\(c(\d),(\d+)\)', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        val = int(matchObj.group(2))
        if reg > 1:
            print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg))
            exit()
        if val >= 2**16:
            print("\n[Line %4d] %s\nERROR: Value %s too big for 16-bit register \"c%d\"\n" % (lines[pc], instr, val, reg))
            exit()
        # Compare register value and update flag
        if proc_regs["c%s" % reg] < val:
            proc_regs["flag"] = -1
        elif proc_regs["c%s" % reg] > val:
            proc_regs["flag"] = 1
        else:
            proc_regs["flag"] = 0
        pc = pc + 1
        ticks = ticks + 2
        power = power + ([idd_dict["ctrl"]]*2)
        return 6
matchObj = re.match(r'flag=compare\(reg,(\d+)\)', instr_t, re.M|re.I)
if matchObj:
val = int(matchObj.group(1))
if val >= 2**24:
print("\n[Line %4d] %s\nERROR: Value %s too big for 24-bit register \"reg\"\n" % (lines[pc], instr, val))
exit()
# Compare register value and update flag
if proc_regs["reg"] < val:
proc_regs["flag"] == -1
elif proc_regs["reg"] > val:
proc_regs["flag"] == 1
else:
proc_regs["flag"] = 0
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 6
matchObj = re.match(r'flag=compare\(tmp,(\d+)\)', instr_t, re.M|re.I)
if matchObj:
val = int(matchObj.group(1))
if val >= 2**24:
print("\n[Line %4d] %s\nERROR: Value %s too big for 24-bit register \"tmp\"\n" % (lines[pc], instr, val))
exit()
# Compare register value and update flag
if proc_regs["tmp"] < val:
proc_regs["flag"] == -1
elif proc_regs["tmp"] > val:
proc_regs["flag"] == 1
else:
proc_regs["flag"] = 0
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 6
    # INSTRUCTION - Check Flag and Jump
    # Conditional branch on the flag register: "if(flag==0)goto L",
    # "if(flag!=+1)goto L", "if(flag==-1)goto L", etc.  The optional sign only
    # applies when the compared value is 1 (flag is three-valued: -1/0/+1).
    matchObj = re.match(r'if\(flag([!=]=)([\-\+]{0,1})([01])\)goto([\w\d_]+)', instr_t, re.M|re.I)
    if matchObj:
        op = matchObj.group(1)
        sign = matchObj.group(2)
        val = int(matchObj.group(3))
        label = matchObj.group(4)
        if label not in labels:
            print("\n[Line %4d] %s\nERROR: Label \"%s\" not found\n" % (lines[pc], instr, label))
            exit()
        # Check flag value and jump (taken branch loads pc from the label table,
        # otherwise fall through to the next instruction)
        if op == "==":
            if val == 0:
                if proc_regs["flag"] == 0:
                    pc = labels[label]
                else:
                    pc = pc + 1
            if val == 1:
                if sign == "+" or sign == "":
                    if proc_regs["flag"] == 1:
                        pc = labels[label]
                    else:
                        pc = pc + 1
                if sign == "-":
                    if proc_regs["flag"] == -1:
                        pc = labels[label]
                    else:
                        pc = pc + 1
        if op == "!=":
            if val == 0:
                if proc_regs["flag"] != 0:
                    pc = labels[label]
                else:
                    pc = pc + 1
            if val == 1:
                if sign == "+" or sign == "":
                    if proc_regs["flag"] != 1:
                        pc = labels[label]
                    else:
                        pc = pc + 1
                if sign == "-":
                    if proc_regs["flag"] != -1:
                        pc = labels[label]
                    else:
                        pc = pc + 1
        ticks = ticks + 2
        power = power + ([idd_dict["ctrl"]]*2)
        return 6
    # INSTRUCTION - SHA3 Operations
    # The Keccak unit is modeled with a software buffer (keccak_buf) of hex
    # characters; absorb instructions append to it, digest instructions hash it.
    matchObj = re.match(r'sha3_init', instr_t, re.M|re.I)
    if matchObj:
        # Reset the Keccak absorb buffer
        keccak_buf = ""
        cycles = 2 + 1 + 25
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["sha3"]]*cycles)
        return 7
    # Absorb a full polynomial (each coefficient zero-padded to 8 hex chars)
    matchObj = re.match(r'sha3_(\d+)_absorb\(poly=(\d+)\)', instr_t, re.M|re.I)
    if matchObj:
        mode = int(matchObj.group(1))
        poly = int(matchObj.group(2))
        if mode != 256 and mode != 512:
            print("\n[Line %4d] %s\nERROR: Only SHA3-256 and SHA3-512 are supported\n" % (lines[pc], instr))
            exit()
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        # Push zero-padded polynomial coefficients into Keccak buffer
        for i in range(param_n):
            keccak_buf = keccak_buf + hex(poly_mem[poly][i])[2:].rstrip("L").rjust(8,'0')
        # Cycle counts model the rate of each mode: 34 coefficients per block
        # for SHA3-256, 18 for SHA3-512, with 25 rounds per permutation
        if mode == 256:
            cycles = 2 + 1 + 1 + param_n + math.ceil(param_n/34)*(17+25)
        if mode == 512:
            cycles = 2 + 1 + 1 + param_n + math.ceil(param_n/18)*(9+25)
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["poly_hash"]]*cycles)
        return 7
    # Absorb a 256-bit seed register (zero-padded to 64 hex chars)
    matchObj = re.match(r'sha3_(\d+)_absorb\(r(\d)\)', instr_t, re.M|re.I)
    if matchObj:
        mode = int(matchObj.group(1))
        reg = int(matchObj.group(2))
        if mode != 256 and mode != 512:
            print("\n[Line %4d] %s\nERROR: Only SHA3-256 and SHA3-512 are supported\n" % (lines[pc], instr))
            exit()
        if reg != 0 and reg != 1:
            print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
            exit()
        # Push seed register contents into Keccak buffer
        keccak_buf = keccak_buf + hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0')
        if mode == 256:
            cycles = 2 + 1 + (17+25)
        if mode == 512:
            cycles = 2 + 1 + (9+25)
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["sha3"]]*cycles)
        return 7
    # Squeeze a SHA3-256 digest into r0 or r1 (sha3_256 is a helper defined
    # elsewhere in this file; it returns a hex string)
    matchObj = re.match(r'r(\d)=sha3_256_digest', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        if reg != 0 and reg != 1:
            print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
            exit()
        # Generate SHA3-256 digest
        digest = sha3_256(keccak_buf)
        proc_regs["r%d" % reg] = int(digest, 16)
        keccak_buf = ""
        cycles = 2 + 1 + (25+25+2)
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["sha3"]]*cycles)
        return 7
    # Squeeze a SHA3-512 digest: high 256 bits into r0, low 256 bits into r1
    matchObj = re.match(r'r0\|\|r1=sha3_512_digest', instr_t, re.M|re.I)
    if matchObj:
        # Generate SHA3-512 digest
        digest = sha3_512(keccak_buf)
        proc_regs["r0"] = int(digest, 16) >> 256
        proc_regs["r1"] = int(digest, 16) % 2**256
        keccak_buf = ""
        cycles = 2 + 1 + (25+25+3)
        pc = pc + 1
        ticks = ticks + cycles
        power = power + ([idd_dict["sha3"]]*cycles)
        return 7
    # INSTRUCTION - End of Program
    matchObj = re.match(r'end', instr_t, re.M|re.I)
    if matchObj:
        #print("end-of-program")
        ticks = ticks + 2
        power = power + ([idd_dict["ctrl"]]*2)
        return 99
    # INSTRUCTION - NOP
    matchObj = re.match(r'nop', instr_t, re.M|re.I)
    if matchObj:
        #print("no-operation")
        ticks = ticks + 2
        power = power + ([idd_dict["ctrl"]]*2)
        return -98
    # DEBUG-INSTRUCTION - Compare Encoded Polynomials (Debug Only)
    # Append "iter_<iter_count>_" to all filenames in case of multiple iterations
    if num_iters > 1:
        f_prefix = "iter_%d_" % iter_count
    else:
        f_prefix = ""
    # Load two .npy polynomial files, byte-encode both with the given encoding
    # and report MATCH / NO MATCH.  Debug instructions cost no simulated cycles.
    matchObj = re.match(r'encode_compare\("(.*)","(.*)",encoding=([\w_]+)\)', instr_t, re.M|re.I)
    if matchObj:
        f1 = matchObj.group(1)
        f2 = matchObj.group(2)
        if not f1.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f1))
            f1 = f1 + ".npy"
        if not f2.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f2))
            f2 = f2 + ".npy"
        # Insert the per-iteration prefix in front of the base filename only
        f1 = f1.replace(os.path.basename(f1), f_prefix + os.path.basename(f1))
        f2 = f2.replace(os.path.basename(f2), f_prefix + os.path.basename(f2))
        encoding = matchObj.group(3)
        if not os.path.exists(f1):
            print("\n[Line %4d] %s\nERROR: Input file %s for \"encode_compare\" does not exist" % (lines[pc], instr, f1))
            exit()
        if not os.path.exists(f2):
            print("\n[Line %4d] %s\nERROR: Input file %s for \"encode_compare\" does not exist" % (lines[pc], instr, f2))
            exit()
        # encode_to_bytearray is a helper defined elsewhere in this file
        b1 = encode_to_bytearray(param_n, param_q, list(np.load(f1, allow_pickle = True)), encoding, lines[pc], instr)
        b2 = encode_to_bytearray(param_n, param_q, list(np.load(f2, allow_pickle = True)), encoding, lines[pc], instr)
        print("poly_1 = %s" % list(np.load(f1, allow_pickle = True)))
        print("poly_2 = %s" % list(np.load(f2, allow_pickle = True)))
        print("byte_array_1 = %s" % b1)
        print("byte_array_2 = %s" % b2)
        if b1 == b2:
            print("\n--- MATCH ---\n")
        else:
            print("\n--- NO MATCH ---\n")
        pc = pc + 1
        return -98
    # DEBUG-INSTRUCTION - Print Encoded Polynomial (Debug Only)
    matchObj = re.match(r'encode_print\(poly=(\d+),encoding=([\w_]+)\)', instr_t, re.M|re.I)
    if matchObj:
        poly = int(matchObj.group(1))
        encoding = matchObj.group(2)
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        if "--verbose" in sys.argv:
            b = encode_to_bytearray(param_n, param_q, poly_mem[poly], encoding, lines[pc], instr)
            print("byte_array = %s" % b)
        pc = pc + 1
        return -98
    # DEBUG-INSTRUCTION - Register / Polynomial Random-Init / Load / Store
    # These instructions are not really available in the crypto core, but act as
    # substitutes (in the simulator) for the actual 32-bit load / store interface
    # Append "iter_<iter_count>_" to all filenames in case of multiple iterations
    if num_iters > 1:
        f_prefix = "iter_%d_" % iter_count
    else:
        f_prefix = ""
    # Fill a 256-bit seed register with random bits
    matchObj = re.match(r'random\(r(\d)\)', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        if reg > 1:
            print("\n[Line %4d] %s\nERROR: No such register \"r%d\", please use \"r0\" or \"r1\"\n" % (lines[pc], instr, reg))
            exit()
        proc_regs["r%d" % reg] = random.getrandbits(256)
        cycles = WRITE_CYCLES*8
        pc = pc + 1
        # --free_rw makes host-interface transfers free (no cycles / power)
        if "--free_rw" not in sys.argv:
            ticks = ticks + cycles
            power = power + ([idd_dict["ctrl"]]*cycles)
        return -98
    # Fill a polynomial with random coefficients per the given encoding and
    # also dump it to a .npy file for later comparison / reload
    matchObj = re.match(r'random\(poly=(\d+),encoding=([\w\d_]+),"(.*)"\)', instr_t, re.M|re.I)
    if matchObj:
        poly = int(matchObj.group(1))
        encoding = matchObj.group(2)
        f = matchObj.group(3)
        if not f.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f))
            f = f + ".npy"
        f = f.replace(os.path.basename(f), f_prefix + os.path.basename(f))
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        if os.path.exists(f):
            print("\n[Line %4d] %s\nWARNING: Output file %s for \"random\" already exists" % (lines[pc], instr, f))
        # random_poly_encode is a helper defined elsewhere in this file;
        # it fills poly_mem[poly] in place
        random_poly_encode(param_n, param_q, poly_mem[poly], encoding, lines[pc], instr)
        np.save(f, np.asarray(poly_mem[poly]))
        cycles = WRITE_CYCLES*param_n
        pc = pc + 1
        if "--free_rw" not in sys.argv:
            ticks = ticks + cycles
            power = power + ([idd_dict["poly_read_write"]]*cycles)
        return -98
    # DEBUG-INSTRUCTION - Load a 256-bit seed register from a .npy file
    matchObj = re.match(r'load\(r(\d),"(.*)"\)', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        f = matchObj.group(2)
        if not f.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f))
            f = f + ".npy"
        f = f.replace(os.path.basename(f), f_prefix + os.path.basename(f))
        if reg > 1:
            print("\n[Line %4d] %s\nERROR: No such register \"r%d\", please use \"r0\" or \"r1\"\n" % (lines[pc], instr, reg))
            exit()
        if not os.path.exists(f):
            print("\n[Line %4d] %s\nERROR: Input file %s for \"load\" does not exist" % (lines[pc], instr, f))
            exit()
        # File is expected to contain a single integer value
        proc_regs["r%d" % reg] = list(np.load(f, allow_pickle = True))[0]
        # NOTE(review): loads are costed with WRITE_CYCLES and saves with
        # READ_CYCLES throughout this section — presumably named from the
        # crypto core's perspective (host write = core load); confirm.
        cycles = WRITE_CYCLES*8
        pc = pc + 1
        if "--free_rw" not in sys.argv:
            ticks = ticks + cycles
            power = power + ([idd_dict["ctrl"]]*cycles)
        return -98
    # DEBUG-INSTRUCTION - Save a 256-bit seed register to a .npy file
    matchObj = re.match(r'save\(r(\d),"(.*)"\)', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        f = matchObj.group(2)
        if not f.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f))
            f = f + ".npy"
        f = f.replace(os.path.basename(f), f_prefix + os.path.basename(f))
        if reg > 1:
            print("\n[Line %4d] %s\nERROR: No such register \"r%d\", please use \"r0\" or \"r1\"\n" % (lines[pc], instr, reg))
            exit()
        if os.path.exists(f):
            print("\n[Line %4d] %s\nWARNING: Output file %s for \"save\" already exists" % (lines[pc], instr, f))
        np.save(f, np.asarray([proc_regs["r%d" % reg]]))
        cycles = READ_CYCLES*8
        pc = pc + 1
        if "--free_rw" not in sys.argv:
            ticks = ticks + cycles
            power = power + ([idd_dict["ctrl"]]*cycles)
        return -98
    # DEBUG-INSTRUCTION - Load a polynomial from a .npy file
    matchObj = re.match(r'load\(poly=(\d+),"(.*)"\)', instr_t, re.M|re.I)
    if matchObj:
        poly = int(matchObj.group(1))
        f = matchObj.group(2)
        if not f.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f))
            f = f + ".npy"
        f = f.replace(os.path.basename(f), f_prefix + os.path.basename(f))
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        if not os.path.exists(f):
            print("\n[Line %4d] %s\nERROR: Input file %s for \"load\" does not exist" % (lines[pc], instr, f))
            exit()
        poly_mem[poly] = list(np.load(f, allow_pickle = True)).copy()
        cycles = WRITE_CYCLES*param_n
        pc = pc + 1
        if "--free_rw" not in sys.argv:
            ticks = ticks + cycles
            power = power + ([idd_dict["poly_read_write"]]*cycles)
        return -98
    # DEBUG-INSTRUCTION - Save a polynomial to a .npy file
    matchObj = re.match(r'save\(poly=(\d+),"(.*)"\)', instr_t, re.M|re.I)
    if matchObj:
        poly = int(matchObj.group(1))
        f = matchObj.group(2)
        if not f.endswith(".npy"):
            print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f))
            f = f + ".npy"
        f = f.replace(os.path.basename(f), f_prefix + os.path.basename(f))
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        if os.path.exists(f):
            print("\n[Line %4d] %s\nWARNING: Output file %s for \"save\" already exists" % (lines[pc], instr, f))
        np.save(f, np.asarray(poly_mem[poly]))
        cycles = READ_CYCLES*param_n
        pc = pc + 1
        if "--free_rw" not in sys.argv:
            ticks = ticks + cycles
            power = power + ([idd_dict["poly_read_write"]]*cycles)
        return -98
    # DEBUG-INSTRUCTION - Print (Debug Only)
    # Print instructions only produce output under --verbose and cost no cycles.
    matchObj = re.match(r'print\(r(\d)\)', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        if reg > 1:
            print("\n[Line %4d] %s\nERROR: No such register \"r%d\", please use \"r0\" or \"r1\"\n" % (lines[pc], instr, reg))
            exit()
        if "--verbose" in sys.argv:
            # 256-bit register printed as a zero-padded 64-digit hex string
            print("\nr%d = 0x%s\n" % (reg, hex(proc_regs["r%d" % reg])[2:].upper().rstrip("L").rjust(64,'0')))
        pc = pc + 1
        return -99
    matchObj = re.match(r'print\(reg\)', instr_t, re.M|re.I)
    if matchObj:
        if "--verbose" in sys.argv:
            print("\nreg = %d\n" % proc_regs["reg"])
        pc = pc + 1
        return -99
    matchObj = re.match(r'print\(tmp\)', instr_t, re.M|re.I)
    if matchObj:
        if "--verbose" in sys.argv:
            print("\ntmp = %d\n" % proc_regs["tmp"])
        pc = pc + 1
        return -99
    matchObj = re.match(r'print\(flag\)', instr_t, re.M|re.I)
    if matchObj:
        if "--verbose" in sys.argv:
            print("\nflag = %d\n" % proc_regs["flag"])
        pc = pc + 1
        return -99
    matchObj = re.match(r'print\(c(\d)\)', instr_t, re.M|re.I)
    if matchObj:
        reg = int(matchObj.group(1))
        if reg > 1:
            print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg))
            exit()
        if "--verbose" in sys.argv:
            print("\nc%d = %d\n" % (reg, proc_regs["c%d" % reg]))
        pc = pc + 1
        return -99
    matchObj = re.match(r'print\(poly=(\d+)\)', instr_t, re.M|re.I)
    if matchObj:
        poly = int(matchObj.group(1))
        if poly >= int(8192/param_n):
            print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
            exit()
        if "--verbose" in sys.argv:
            print("\npoly[%d] = %s\n" % (poly, poly_mem[poly]))
        pc = pc + 1
        return -99
    # INVALID INSTRUCTION
    # No pattern matched: signal the caller to abort with an error.
    return -1
#====================================
# SAPPHIRE-SIM
#====================================
# Top-level simulator driver: parse command-line options, pre-process the
# assembly program (`define/`ifdef handling, comment stripping, label
# extraction), execute it for the requested number of iterations via
# instr_exec(), and report cycle / power / energy statistics.
# Check arguments
if len(sys.argv) < 7 or ("--prog" not in sys.argv) or ("--vdd" not in sys.argv) or ("--fmhz" not in sys.argv):
    print("\nERROR: Incorrect arguments provided for simulator script")
    print("Usage: python sim.py --prog <program_file_path>")
    print(" --vdd <voltage>")
    print(" --fmhz <frequency_mhz>")
    print(" [ --verbose ]")
    print(" [ --free_rw ]")
    print(" [ --plot_power ]")
    print(" [ --cdt <cdt_file_path> ]")
    print(" [ --iter <num_iterations> ]")
    exit()
# Check that program file exists
if not os.path.exists(sys.argv[sys.argv.index("--prog") + 1]):
    # NOTE(review): the error message uses sys.argv[2], which is only the
    # program path when --prog is the first option — confirm intent
    print("\nERROR: Program file %s does not exist" % sys.argv[2])
    exit()
# Check supply voltage
vdd = float(sys.argv[sys.argv.index("--vdd") + 1])
if vdd < 0.68 or vdd > 1.21:
    print("\nERROR: Supply voltage outside acceptable range of 0.68-1.21 V\n")
    exit()
# Check operating frequency
# fmax = 12 MHz at 0.68 V and 72 MHz at 1.1 V
# Model fmax as a linear function of vdd (not exactly accurate but good enough for our simulator)
fmhz = int(sys.argv[sys.argv.index("--fmhz") + 1])
fmax = int(12 + (72-12)*(vdd - 0.68)/(1.1-0.68))
if fmhz > fmax:
    print("\nERROR: Operating frequency above maximum %d MHz at %0.2f V\n" % (fmax, vdd))
    exit()
# Conditional-assembly state: declared flags, open `ifdef stack, label table
defines = ["main"]
ifdefs = []
active_ifdef = "main"
labels = {}
# Read program file
imem_f = open(sys.argv[sys.argv.index("--prog") + 1])
imem = []
# Process ifdefs (lines in undeclared `ifdef blocks are blanked out so that
# source line numbers stay aligned for error reporting)
for (i, instr) in enumerate(imem_f):
    # Identify `define flags
    matchObj = re.match(r'`define\s*(.+)', instr.strip(), re.M|re.I)
    if matchObj:
        defines.append(matchObj.group(1))
        imem.append("")
        continue
    # Identify `ifdef flags
    matchObj = re.match(r'`ifdef\s*(.+)', instr.strip(), re.M|re.I)
    if matchObj:
        ifdefs.append(active_ifdef)
        active_ifdef = matchObj.group(1)
        imem.append("")
        continue
    # Identify `endif flags
    matchObj = re.match(r'`endif', instr.strip(), re.M|re.I)
    if matchObj:
        active_ifdef = ifdefs[-1]
        ifdefs = ifdefs[:-1]
        imem.append("")
        continue
    # Ignore instructions inside undeclared `ifdef blocks
    if active_ifdef not in defines:
        imem.append("")
        continue
    imem.append(instr)
imem_f.close()
# Remove comments
imem = [re.sub(r'#.*$', "", instr) for instr in imem]
# Remove empty lines and leading / trailing spaces
# (keep a parallel list of original 1-based line numbers for error messages)
lines = [i+1 for i in range(len(imem)) if imem[i].strip()]
imem = [instr.strip() for instr in imem if instr.strip()]
# Parse labels (labels must be followed by an instruction in the same line)
for (i, instr) in enumerate(imem):
    matchObj = re.match(r'([\w\d_]+)\s*:\s*(.+)', instr.strip(), re.M|re.I)
    if matchObj:
        label = matchObj.group(1)
        labels[label] = i
        imem[i] = matchObj.group(2)
# Check if first instruction is "config"
if not re.match(r'config.*', imem[0], re.M|re.I):
    print("\nERROR: First instruction of program must be \"config\"\n")
    exit()
# Check if last instruction is "end"
if not re.match(r'end', imem[len(imem)-1], re.M|re.I):
    print("\nWARNING: Last instruction of program must be \"end\", appending \"end\" at the end of program\n")
    imem.append("end")
# Architectural state: Keccak absorb buffer and processor registers
# (r0/r1: 256-bit seeds, reg/tmp: 24-bit scalars, c0/c1: 16-bit counters)
keccak_buf = ""
proc_regs = {
    "r0" : 0,
    "r1" : 0,
    "reg" : 0,
    "tmp" : 0,
    "c0" : 0,
    "c1" : 0,
    "flag" : 0,
}
poly_mem = []
poly_tmp = []
param_n = 0
param_q = 0
ticks = 0
pc = 0
power = []
# Read CDT file, if provided (cumulative distribution table for sampling)
if "--cdt" in sys.argv:
    if not os.path.exists(sys.argv[sys.argv.index("--cdt") + 1]):
        print("\nERROR: CDT file %s does not exist" % sys.argv[sys.argv.index("--cdt") + 1])
        exit()
    cdt_mem = open(sys.argv[sys.argv.index("--cdt") + 1])
    cdt_mem = [cdval.strip() for cdval in cdt_mem if cdval.strip()]
    cdt_mem = [int(cdval) for cdval in cdt_mem]
    if len(cdt_mem) > 64:
        print("\nERROR: CDT is longer than 64 entries")
        exit()
num_iters = 1
# Read number of iterations, if provided
if "--iter" in sys.argv:
    num_iters = int(sys.argv[sys.argv.index("--iter") + 1])
ticks_arr = []
power_arr = []
energy_arr = []
for i in range(num_iters):
    # Reset architectural state at the start of each iteration
    keccak_buf = ""
    proc_regs["r0"] = 0
    proc_regs["r1"] = 0
    proc_regs["reg"] = 0
    proc_regs["tmp"] = 0
    proc_regs["c0"] = 0
    proc_regs["c1"] = 0
    proc_regs["flag"] = 0
    ticks = 0
    pc = 0
    power = []
    # The lattice-crypto core is not pipelined
    # Requires 1 cycle to fetch and >= 1 cycles to decode and execute instruction
    instr_count = 0
    while (1):
        if "--verbose" in sys.argv:
            # Echo the instruction being executed, with its label if it has one
            if pc in labels.values():
                for (label, label_pc) in labels.items():
                    if label_pc == pc:
                        break
                print("[%3d] %s : %s" %(pc, label, imem[pc]))
            else:
                print("[%3d] %s" %(pc, imem[pc]))
        ret = instr_exec(imem[pc], i)
        # Invalid instruction
        if ret == -1:
            print("\n[Line %4d] %s\nERROR: Instruction not supported\n" % (lines[pc], imem[pc]))
            exit()
        # Negative return codes are debug pseudo-instructions (not counted)
        if ret >= 0:
            instr_count = instr_count + 1
        # End of program
        if ret == 99:
            break
    # Convert current to power at specified operating condition
    # Take into account the fact that leakage power and dynamic power scale differently
    # Leakage current is assumed independent of processor state and operating frequency
    # i_leak = 102.6 uA at 0.70 V
    # i_leak = 121.0 uA at 0.75 V
    # i_leak = 139.5 uA at 0.80 V
    # i_leak = 159.7 uA at 0.85 V
    # i_leak = 188.8 uA at 0.90 V
    # i_leak = 220.0 uA at 0.95 V
    # i_leak = 257.4 uA at 1.00 V
    # i_leak = 303.8 uA at 1.05 V
    # i_leak = 355.7 uA at 1.10 V
    # Model leakage current as an exponential function of vdd (pretty accurate, curve-fitted from measurements)
    # Model active current as proportional to vdd and fmhz (again, not exactly accurate but good enough for our simulator)
    i_leak = 11.728*math.exp(3.0933*vdd)
    power = [(i_leak + ((idd - 355.7)*(fmhz/72)*(vdd/1.1))) for idd in power]
    # Add some tiny random noise (+/-1%) to current values
    # NOTE(review): random.randrange(-int(idd/100),int(idd/100)) raises
    # ValueError when int(idd/100) == 0 (i.e. idd < 100 uA) — confirm idd
    # values are always large enough for this never to trigger
    power = [idd + random.randrange(-int(idd/100),int(idd/100)) for idd in power]
    # Finally, convert current to power
    power = [idd*vdd for idd in power]
    if num_iters > 1:
        print("\n[iter = %d]" % (i+1))
    else:
        print("\n")
    print("------------------------------------------------------")
    print("Program Execution Summary (at %0.2f V and %d MHz)" % (vdd, fmhz))
    print("------------------------------------------------------")
    print("* Instructions: %d" % instr_count)
    print("* Total Cycles: %s" % format(ticks, ',d'))
    ticks_arr.append(ticks)
    # Pick a human-friendly unit for time / power / energy
    time_us = ticks/fmhz
    if time_us < 1e3:
        print("* Total Time: %0.2f us" % (time_us))
    elif time_us < 1e6:
        print("* Total Time: %0.2f ms" % (time_us/1e3))
    elif time_us < 1e9:
        print("* Total Time: %0.2f s" % (time_us/1e6))
    avg_power_uw = sum(power)/ticks
    if avg_power_uw < 1e3:
        print("* Average Power: %0.2f uW" % (avg_power_uw))
    elif avg_power_uw < 1e6:
        print("* Average Power: %0.2f mW" % (avg_power_uw/1e3))
    power_arr.append(avg_power_uw)
    energy_pj = sum(power)/fmhz
    if energy_pj < 1e3:
        print("* Total Energy: %0.2f pJ" % (energy_pj))
    elif energy_pj < 1e6:
        print("* Total Energy: %0.2f nJ" % (energy_pj/1e3))
    elif energy_pj < 1e9:
        print("* Total Energy: %0.2f uJ" % (energy_pj/1e6))
    energy_arr.append(energy_pj)
    print("------------------------------------------------------")
    print("\n")
# Print average cycles and energy, only in case of multiple iterations
if num_iters > 1:
    print("Over %d Iterations:" % (num_iters))
    avg_ticks = math.ceil(sum(ticks_arr)/len(ticks_arr))
    print(" Average Cycles: %s" % (format(avg_ticks, ',d')))
    avg_avg_power_uw = sum(power_arr)/len(power_arr)
    if avg_avg_power_uw < 1e3:
        print(" Average Power: %0.2f uW" % (avg_avg_power_uw))
    elif avg_avg_power_uw < 1e6:
        print(" Average Power: %0.2f mW" % (avg_avg_power_uw/1e3))
    avg_energy_pj = sum(energy_arr)/len(energy_arr)
    if avg_energy_pj < 1e3:
        print(" Average Energy: %0.2f pJ" % (avg_energy_pj))
    elif avg_energy_pj < 1e6:
        print(" Average Energy: %0.2f nJ" % (avg_energy_pj/1e3))
    elif avg_energy_pj < 1e9:
        print(" Average Energy: %0.2f uJ" % (avg_energy_pj/1e6))
# Plot power profile, only in case of single iteration
if "--plot_power" in sys.argv and num_iters == 1:
    # Prepend the leakage-only sample so the trace starts from idle
    power = [i_leak] + power
    mpl.rcParams['xtick.major.pad'] = 5
    mpl.rcParams['ytick.major.pad'] = 5
    plt.figure(figsize=(15,5))
    plt.plot(power, linewidth=1.5)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xlabel("Cycles", fontsize=16, fontweight='bold')
    plt.ylabel("Power (uW)", fontsize=16, fontweight='bold')
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"math.log",
"random.getrandbits",
"math.exp",
"os.path.exists",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"re.match",
"re.sub",
"sys.argv.index",
"matplotlib.pyplot.show",
"ma... | [((3369, 3432), 're.match', 're.match', (['"""config\\\\(n=(\\\\d+),q=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('config\\\\(n=(\\\\d+),q=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (3377, 3432), False, 'import math, sys, os, re, random\n'), ((4544, 4591), 're.match', 're.match', (['"""c(\\\\d)=(\\\\d+)"""', 'instr_t', '(re.M | re.I)'], {}), "('c(\\\\d)=(\\\\d+)', instr_t, re.M | re.I)\n", (4552, 4591), False, 'import math, sys, os, re, random\n'), ((5207, 5270), 're.match', 're.match', (['"""c(\\\\d)=c(\\\\d)([\\\\+\\\\-])(\\\\d+)"""', 'instr_t', '(re.M | re.I)'], {}), "('c(\\\\d)=c(\\\\d)([\\\\+\\\\-])(\\\\d+)', instr_t, re.M | re.I)\n", (5215, 5270), False, 'import math, sys, os, re, random\n'), ((6496, 6540), 're.match', 're.match', (['"""reg=(\\\\d+)"""', 'instr_t', '(re.M | re.I)'], {}), "('reg=(\\\\d+)', instr_t, re.M | re.I)\n", (6504, 6540), False, 'import math, sys, os, re, random\n'), ((6943, 6987), 're.match', 're.match', (['"""tmp=(\\\\d+)"""', 'instr_t', '(re.M | re.I)'], {}), "('tmp=(\\\\d+)', instr_t, re.M | re.I)\n", (6951, 6987), False, 'import math, sys, os, re, random\n'), ((7390, 7431), 're.match', 're.match', (['"""reg=tmp"""', 'instr_t', '(re.M | re.I)'], {}), "('reg=tmp', instr_t, re.M | re.I)\n", (7398, 7431), False, 'import math, sys, os, re, random\n'), ((7693, 7764), 're.match', 're.match', (['"""tmp=tmp([\\\\+\\\\-\\\\*&\\\\|\\\\^><][><]*)reg"""', 'instr_t', '(re.M | re.I)'], {}), "('tmp=tmp([\\\\+\\\\-\\\\*&\\\\|\\\\^><][><]*)reg', instr_t, re.M | re.I)\n", (7701, 7764), False, 'import math, sys, os, re, random\n'), ((9469, 9536), 're.match', 're.match', (['"""reg=\\\\(poly=(\\\\d+)\\\\)\\\\[(\\\\d+)\\\\]"""', 'instr_t', '(re.M | re.I)'], {}), "('reg=\\\\(poly=(\\\\d+)\\\\)\\\\[(\\\\d+)\\\\]', instr_t, re.M | re.I)\n", (9477, 9536), False, 'import math, sys, os, re, random\n'), ((10345, 10412), 're.match', 're.match', (['"""reg=\\\\(poly=(\\\\d+)\\\\)\\\\[c(\\\\d)\\\\]"""', 'instr_t', '(re.M | re.I)'], {}), 
"('reg=\\\\(poly=(\\\\d+)\\\\)\\\\[c(\\\\d)\\\\]', instr_t, re.M | re.I)\n", (10353, 10412), False, 'import math, sys, os, re, random\n'), ((11225, 11292), 're.match', 're.match', (['"""\\\\(poly=(\\\\d+)\\\\)\\\\[(\\\\d+)\\\\]=reg"""', 'instr_t', '(re.M | re.I)'], {}), "('\\\\(poly=(\\\\d+)\\\\)\\\\[(\\\\d+)\\\\]=reg', instr_t, re.M | re.I)\n", (11233, 11292), False, 'import math, sys, os, re, random\n'), ((12101, 12168), 're.match', 're.match', (['"""\\\\(poly=(\\\\d+)\\\\)\\\\[c(\\\\d)\\\\]=reg"""', 'instr_t', '(re.M | re.I)'], {}), "('\\\\(poly=(\\\\d+)\\\\)\\\\[c(\\\\d)\\\\]=reg', instr_t, re.M | re.I)\n", (12109, 12168), False, 'import math, sys, os, re, random\n'), ((13034, 13092), 're.match', 're.match', (['"""reg=max\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('reg=max\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (13042, 13092), False, 'import math, sys, os, re, random\n'), ((14082, 14140), 're.match', 're.match', (['"""reg=sum\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('reg=sum\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (14090, 14140), False, 'import math, sys, os, re, random\n'), ((15149, 15262), 're.match', 're.match', (['"""transform\\\\(mode=(DI[FT]_I{0,1}NTT),poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'transform\\\\(mode=(DI[FT]_I{0,1}NTT),poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (15157, 15262), False, 'import math, sys, os, re, random\n'), ((18364, 18423), 're.match', 're.match', (['"""mult_psi\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('mult_psi\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (18372, 18423), False, 'import math, sys, os, re, random\n'), ((19070, 19133), 're.match', 're.match', (['"""mult_psi_inv\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('mult_psi_inv\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (19078, 19133), False, 'import math, sys, os, re, random\n'), 
((19831, 19952), 're.match', 're.match', (['"""rej_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'rej_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (19839, 19952), False, 'import math, sys, os, re, random\n'), ((21665, 21795), 're.match', 're.match', (['"""bin_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),k=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'bin_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),k=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (21673, 21795), False, 'import math, sys, os, re, random\n'), ((23748, 23878), 're.match', 're.match', (['"""cdt_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),r=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'cdt_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),r=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (23756, 23878), False, 'import math, sys, os, re, random\n'), ((26012, 26144), 're.match', 're.match', (['"""uni_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),eta=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'uni_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),eta=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (26020, 26144), False, 'import math, sys, os, re, random\n'), ((28129, 28261), 're.match', 're.match', (['"""tri_sample_1\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),m=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'tri_sample_1\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),m=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (28137, 28261), False, 'import math, sys, os, re, random\n'), ((30201, 30344), 're.match', 're.match', 
(['"""tri_sample_2\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),m0=(\\\\d+),m1=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'tri_sample_2\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),m0=(\\\\d+),m1=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (30209, 30344), False, 'import math, sys, os, re, random\n'), ((32770, 32906), 're.match', 're.match', (['"""tri_sample_3\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),rho=1/(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'tri_sample_3\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),c0=(\\\\d+),c1=(\\\\d+),rho=1/(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (32778, 32906), False, 'import math, sys, os, re, random\n'), ((35007, 35102), 're.match', 're.match', (['"""rej_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('rej_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),poly=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (35015, 35102), False, 'import math, sys, os, re, random\n'), ((36271, 36375), 're.match', 're.match', (['"""bin_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),k=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('bin_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),k=(\\\\d+),poly=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (36279, 36375), False, 'import math, sys, os, re, random\n'), ((37784, 37888), 're.match', 're.match', (['"""cdt_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),r=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('cdt_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),r=(\\\\d+),poly=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (37792, 37888), False, 'import math, sys, os, re, random\n'), ((39478, 39585), 're.match', 're.match', (['"""uni_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),eta=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), 
"('uni_sample\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),eta=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (39486, 39585), False, 'import math, sys, os, re, random\n'), ((41082, 41189), 're.match', 're.match', (['"""tri_sample_1\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),m=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('tri_sample_1\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),m=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (41090, 41189), False, 'import math, sys, os, re, random\n'), ((42584, 42707), 're.match', 're.match', (['"""tri_sample_2\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),m0=(\\\\d+),m1=(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'tri_sample_2\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),m0=(\\\\d+),m1=(\\\\d+),poly=(\\\\d+)\\\\)'\n , instr_t, re.M | re.I)\n", (42592, 42707), False, 'import math, sys, os, re, random\n'), ((44640, 44755), 're.match', 're.match', (['"""tri_sample_3\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),rho=1/(\\\\d+),poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'tri_sample_3\\\\(prng=SHAKE-(\\\\d+),seed=r(\\\\d),rho=1/(\\\\d+),poly=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (44648, 44755), False, 'import math, sys, os, re, random\n'), ((46325, 46380), 're.match', 're.match', (['"""init\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('init\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (46333, 46380), False, 'import math, sys, os, re, random\n'), ((46992, 47077), 're.match', 're.match', (['"""poly_copy\\\\(poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('poly_copy\\\\(poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)', instr_t, re.M | re.I\n )\n", (47000, 47077), False, 'import math, sys, os, re, random\n'), ((48498, 48593), 're.match', 're.match', (['"""poly_op\\\\(op=([\\\\w_]+),poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), 
"('poly_op\\\\(op=([\\\\w_]+),poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (48506, 48593), False, 'import math, sys, os, re, random\n'), ((54516, 54628), 're.match', 're.match', (['"""shift_poly\\\\(ring=x\\\\^N([\\\\+\\\\-])1,poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "(\n 'shift_poly\\\\(ring=x\\\\^N([\\\\+\\\\-])1,poly_dst=(\\\\d+),poly_src=(\\\\d+)\\\\)',\n instr_t, re.M | re.I)\n", (54524, 54628), False, 'import math, sys, os, re, random\n'), ((56298, 56376), 're.match', 're.match', (['"""flag=eq_check\\\\(poly0=(\\\\d+),poly1=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('flag=eq_check\\\\(poly0=(\\\\d+),poly1=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (56306, 56376), False, 'import math, sys, os, re, random\n'), ((57908, 57995), 're.match', 're.match', (['"""flag=inf_norm_check\\\\(poly=(\\\\d+),bound=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('flag=inf_norm_check\\\\(poly=(\\\\d+),bound=(\\\\d+)\\\\)', instr_t, re.M |\n re.I)\n", (57916, 57995), False, 'import math, sys, os, re, random\n'), ((59176, 59241), 're.match', 're.match', (['"""flag=compare\\\\(c(\\\\d),(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('flag=compare\\\\(c(\\\\d),(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (59184, 59241), False, 'import math, sys, os, re, random\n'), ((60036, 60098), 're.match', 're.match', (['"""flag=compare\\\\(reg,(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('flag=compare\\\\(reg,(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (60044, 60098), False, 'import math, sys, os, re, random\n'), ((60676, 60738), 're.match', 're.match', (['"""flag=compare\\\\(tmp,(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('flag=compare\\\\(tmp,(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (60684, 60738), False, 'import math, sys, os, re, random\n'), ((61361, 61455), 're.match', 're.match', (['"""if\\\\(flag([!=]=)([\\\\-\\\\+]{0,1})([01])\\\\)goto([\\\\w\\\\d_]+)"""', 'instr_t', '(re.M | 
re.I)'], {}), "('if\\\\(flag([!=]=)([\\\\-\\\\+]{0,1})([01])\\\\)goto([\\\\w\\\\d_]+)',\n instr_t, re.M | re.I)\n", (61369, 61455), False, 'import math, sys, os, re, random\n'), ((63108, 63151), 're.match', 're.match', (['"""sha3_init"""', 'instr_t', '(re.M | re.I)'], {}), "('sha3_init', instr_t, re.M | re.I)\n", (63116, 63151), False, 'import math, sys, os, re, random\n'), ((63355, 63424), 're.match', 're.match', (['"""sha3_(\\\\d+)_absorb\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('sha3_(\\\\d+)_absorb\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (63363, 63424), False, 'import math, sys, os, re, random\n'), ((64443, 64507), 're.match', 're.match', (['"""sha3_(\\\\d+)_absorb\\\\(r(\\\\d)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('sha3_(\\\\d+)_absorb\\\\(r(\\\\d)\\\\)', instr_t, re.M | re.I)\n", (64451, 64507), False, 'import math, sys, os, re, random\n'), ((65352, 65408), 're.match', 're.match', (['"""r(\\\\d)=sha3_256_digest"""', 'instr_t', '(re.M | re.I)'], {}), "('r(\\\\d)=sha3_256_digest', instr_t, re.M | re.I)\n", (65360, 65408), False, 'import math, sys, os, re, random\n'), ((65961, 66021), 're.match', 're.match', (['"""r0\\\\|\\\\|r1=sha3_512_digest"""', 'instr_t', '(re.M | re.I)'], {}), "('r0\\\\|\\\\|r1=sha3_512_digest', instr_t, re.M | re.I)\n", (65969, 66021), False, 'import math, sys, os, re, random\n'), ((66439, 66476), 're.match', 're.match', (['"""end"""', 'instr_t', '(re.M | re.I)'], {}), "('end', instr_t, re.M | re.I)\n", (66447, 66476), False, 'import math, sys, os, re, random\n'), ((66657, 66694), 're.match', 're.match', (['"""nop"""', 'instr_t', '(re.M | re.I)'], {}), "('nop', instr_t, re.M | re.I)\n", (66665, 66694), False, 'import math, sys, os, re, random\n'), ((67096, 67187), 're.match', 're.match', (['"""encode_compare\\\\("(.*)","(.*)",encoding=([\\\\w_]+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), '(\'encode_compare\\\\("(.*)","(.*)",encoding=([\\\\w_]+)\\\\)\', instr_t, \n re.M | re.I)\n', (67104, 
67187), False, 'import math, sys, os, re, random\n'), ((68858, 68944), 're.match', 're.match', (['"""encode_print\\\\(poly=(\\\\d+),encoding=([\\\\w_]+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('encode_print\\\\(poly=(\\\\d+),encoding=([\\\\w_]+)\\\\)', instr_t, re.M |\n re.I)\n", (68866, 68944), False, 'import math, sys, os, re, random\n'), ((69911, 69963), 're.match', 're.match', (['"""random\\\\(r(\\\\d)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('random\\\\(r(\\\\d)\\\\)', instr_t, re.M | re.I)\n", (69919, 69963), False, 'import math, sys, os, re, random\n'), ((70454, 70545), 're.match', 're.match', (['"""random\\\\(poly=(\\\\d+),encoding=([\\\\w\\\\d_]+),"(.*)"\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), '(\'random\\\\(poly=(\\\\d+),encoding=([\\\\w\\\\d_]+),"(.*)"\\\\)\', instr_t, \n re.M | re.I)\n', (70462, 70545), False, 'import math, sys, os, re, random\n'), ((71660, 71717), 're.match', 're.match', (['"""load\\\\(r(\\\\d),"(.*)"\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), '(\'load\\\\(r(\\\\d),"(.*)"\\\\)\', instr_t, re.M | re.I)\n', (71668, 71717), False, 'import math, sys, os, re, random\n'), ((72670, 72727), 're.match', 're.match', (['"""save\\\\(r(\\\\d),"(.*)"\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), '(\'save\\\\(r(\\\\d),"(.*)"\\\\)\', instr_t, re.M | re.I)\n', (72678, 72727), False, 'import math, sys, os, re, random\n'), ((73642, 73704), 're.match', 're.match', (['"""load\\\\(poly=(\\\\d+),"(.*)"\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), '(\'load\\\\(poly=(\\\\d+),"(.*)"\\\\)\', instr_t, re.M | re.I)\n', (73650, 73704), False, 'import math, sys, os, re, random\n'), ((74741, 74803), 're.match', 're.match', (['"""save\\\\(poly=(\\\\d+),"(.*)"\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), '(\'save\\\\(poly=(\\\\d+),"(.*)"\\\\)\', instr_t, re.M | re.I)\n', (74749, 74803), False, 'import math, sys, os, re, random\n'), ((75842, 75893), 're.match', 're.match', (['"""print\\\\(r(\\\\d)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), 
"('print\\\\(r(\\\\d)\\\\)', instr_t, re.M | re.I)\n", (75850, 75893), False, 'import math, sys, os, re, random\n'), ((76311, 76359), 're.match', 're.match', (['"""print\\\\(reg\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('print\\\\(reg\\\\)', instr_t, re.M | re.I)\n", (76319, 76359), False, 'import math, sys, os, re, random\n'), ((76517, 76565), 're.match', 're.match', (['"""print\\\\(tmp\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('print\\\\(tmp\\\\)', instr_t, re.M | re.I)\n", (76525, 76565), False, 'import math, sys, os, re, random\n'), ((76723, 76772), 're.match', 're.match', (['"""print\\\\(flag\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('print\\\\(flag\\\\)', instr_t, re.M | re.I)\n", (76731, 76772), False, 'import math, sys, os, re, random\n'), ((76932, 76983), 're.match', 're.match', (['"""print\\\\(c(\\\\d)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('print\\\\(c(\\\\d)\\\\)', instr_t, re.M | re.I)\n", (76940, 76983), False, 'import math, sys, os, re, random\n'), ((77356, 77412), 're.match', 're.match', (['"""print\\\\(poly=(\\\\d+)\\\\)"""', 'instr_t', '(re.M | re.I)'], {}), "('print\\\\(poly=(\\\\d+)\\\\)', instr_t, re.M | re.I)\n", (77364, 77412), False, 'import math, sys, os, re, random\n'), ((80427, 80452), 're.sub', 're.sub', (['"""#.*$"""', '""""""', 'instr'], {}), "('#.*$', '', instr)\n", (80433, 80452), False, 'import math, sys, os, re, random\n'), ((80992, 81034), 're.match', 're.match', (['"""config.*"""', 'imem[0]', '(re.M | re.I)'], {}), "('config.*', imem[0], re.M | re.I)\n", (81000, 81034), False, 'import math, sys, os, re, random\n'), ((86888, 86915), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (86898, 86915), True, 'import matplotlib.pyplot as plt\n'), ((86919, 86949), 'matplotlib.pyplot.plot', 'plt.plot', (['power'], {'linewidth': '(1.5)'}), '(power, linewidth=1.5)\n', (86927, 86949), True, 'import matplotlib.pyplot as plt\n'), ((86954, 86977), 'matplotlib.pyplot.xticks', 
'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (86964, 86977), True, 'import matplotlib.pyplot as plt\n'), ((86982, 87005), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (86992, 87005), True, 'import matplotlib.pyplot as plt\n'), ((87010, 87062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cycles"""'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "('Cycles', fontsize=16, fontweight='bold')\n", (87020, 87062), True, 'import matplotlib.pyplot as plt\n'), ((87067, 87123), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power (uW)"""'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "('Power (uW)', fontsize=16, fontweight='bold')\n", (87077, 87123), True, 'import matplotlib.pyplot as plt\n'), ((87128, 87146), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (87144, 87146), True, 'import matplotlib.pyplot as plt\n'), ((87151, 87161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (87159, 87161), True, 'import matplotlib.pyplot as plt\n'), ((18862, 18884), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (18880, 18884), False, 'import math, sys, os, re, random\n'), ((19576, 19598), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (19594, 19598), False, 'import math, sys, os, re, random\n'), ((48089, 48111), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (48107, 48111), False, 'import math, sys, os, re, random\n'), ((57620, 57642), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (57638, 57642), False, 'import math, sys, os, re, random\n'), ((70213, 70236), 'random.getrandbits', 'random.getrandbits', (['(256)'], {}), '(256)\n', (70231, 70236), False, 'import math, sys, os, re, random\n'), ((71155, 71172), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (71169, 71172), False, 'import math, sys, os, re, random\n'), ((73236, 73253), 'os.path.exists', 'os.path.exists', (['f'], 
{}), '(f)\n', (73250, 73253), False, 'import math, sys, os, re, random\n'), ((75383, 75400), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (75397, 75400), False, 'import math, sys, os, re, random\n'), ((84132, 84154), 'math.exp', 'math.exp', (['(3.0933 * vdd)'], {}), '(3.0933 * vdd)\n', (84140, 84154), False, 'import math, sys, os, re, random\n'), ((49984, 50006), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (50002, 50006), False, 'import math, sys, os, re, random\n'), ((67642, 67662), 'os.path.basename', 'os.path.basename', (['f1'], {}), '(f1)\n', (67658, 67662), False, 'import math, sys, os, re, random\n'), ((67721, 67741), 'os.path.basename', 'os.path.basename', (['f2'], {}), '(f2)\n', (67737, 67741), False, 'import math, sys, os, re, random\n'), ((67828, 67846), 'os.path.exists', 'os.path.exists', (['f1'], {}), '(f1)\n', (67842, 67846), False, 'import math, sys, os, re, random\n'), ((68004, 68022), 'os.path.exists', 'os.path.exists', (['f2'], {}), '(f2)\n', (68018, 68022), False, 'import math, sys, os, re, random\n'), ((70855, 70874), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (70871, 70874), False, 'import math, sys, os, re, random\n'), ((71398, 71424), 'numpy.asarray', 'np.asarray', (['poly_mem[poly]'], {}), '(poly_mem[poly])\n', (71408, 71424), True, 'import numpy as np\n'), ((71996, 72015), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (72012, 72015), False, 'import math, sys, os, re, random\n'), ((72230, 72247), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (72244, 72247), False, 'import math, sys, os, re, random\n'), ((73006, 73025), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (73022, 73025), False, 'import math, sys, os, re, random\n'), ((73388, 73424), 'numpy.asarray', 'np.asarray', (["[proc_regs['r%d' % reg]]"], {}), "([proc_regs['r%d' % reg]])\n", (73398, 73424), True, 'import numpy as np\n'), ((73984, 74003), 'os.path.basename', 'os.path.basename', 
(['f'], {}), '(f)\n', (74000, 74003), False, 'import math, sys, os, re, random\n'), ((74288, 74305), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (74302, 74305), False, 'import math, sys, os, re, random\n'), ((75083, 75102), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (75099, 75102), False, 'import math, sys, os, re, random\n'), ((75535, 75561), 'numpy.asarray', 'np.asarray', (['poly_mem[poly]'], {}), '(poly_mem[poly])\n', (75545, 75561), True, 'import numpy as np\n'), ((78842, 78865), 'sys.argv.index', 'sys.argv.index', (['"""--vdd"""'], {}), "('--vdd')\n", (78856, 78865), False, 'import math, sys, os, re, random\n'), ((79184, 79208), 'sys.argv.index', 'sys.argv.index', (['"""--fmhz"""'], {}), "('--fmhz')\n", (79198, 79208), False, 'import math, sys, os, re, random\n'), ((79491, 79515), 'sys.argv.index', 'sys.argv.index', (['"""--prog"""'], {}), "('--prog')\n", (79505, 79515), False, 'import math, sys, os, re, random\n'), ((50414, 50436), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (50432, 50436), False, 'import math, sys, os, re, random\n'), ((67675, 67695), 'os.path.basename', 'os.path.basename', (['f1'], {}), '(f1)\n', (67691, 67695), False, 'import math, sys, os, re, random\n'), ((67754, 67774), 'os.path.basename', 'os.path.basename', (['f2'], {}), '(f2)\n', (67770, 67774), False, 'import math, sys, os, re, random\n'), ((68221, 68251), 'numpy.load', 'np.load', (['f1'], {'allow_pickle': '(True)'}), '(f1, allow_pickle=True)\n', (68228, 68251), True, 'import numpy as np\n'), ((68340, 68370), 'numpy.load', 'np.load', (['f2'], {'allow_pickle': '(True)'}), '(f2, allow_pickle=True)\n', (68347, 68370), True, 'import numpy as np\n'), ((70887, 70906), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (70903, 70906), False, 'import math, sys, os, re, random\n'), ((72028, 72047), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (72044, 72047), False, 'import math, sys, os, re, random\n'), 
((72417, 72446), 'numpy.load', 'np.load', (['f'], {'allow_pickle': '(True)'}), '(f, allow_pickle=True)\n', (72424, 72446), True, 'import numpy as np\n'), ((73038, 73057), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (73054, 73057), False, 'import math, sys, os, re, random\n'), ((74016, 74035), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (74032, 74035), False, 'import math, sys, os, re, random\n'), ((75115, 75134), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (75131, 75134), False, 'import math, sys, os, re, random\n'), ((78687, 78711), 'sys.argv.index', 'sys.argv.index', (['"""--prog"""'], {}), "('--prog')\n", (78701, 78711), False, 'import math, sys, os, re, random\n'), ((81806, 81829), 'sys.argv.index', 'sys.argv.index', (['"""--cdt"""'], {}), "('--cdt')\n", (81820, 81829), False, 'import math, sys, os, re, random\n'), ((82160, 82184), 'sys.argv.index', 'sys.argv.index', (['"""--iter"""'], {}), "('--iter')\n", (82174, 82184), False, 'import math, sys, os, re, random\n'), ((16705, 16727), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (16723, 16727), False, 'import math, sys, os, re, random\n'), ((17065, 17087), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (17083, 17087), False, 'import math, sys, os, re, random\n'), ((17427, 17449), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (17445, 17449), False, 'import math, sys, os, re, random\n'), ((17789, 17811), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (17807, 17811), False, 'import math, sys, os, re, random\n'), ((18077, 18097), 'math.log', 'math.log', (['param_n', '(2)'], {}), '(param_n, 2)\n', (18085, 18097), False, 'import math, sys, os, re, random\n'), ((50834, 50856), 'random.getrandbits', 'random.getrandbits', (['(24)'], {}), '(24)\n', (50852, 50856), False, 'import math, sys, os, re, random\n'), ((64177, 64200), 'math.ceil', 'math.ceil', (['(param_n / 34)'], 
{}), '(param_n / 34)\n', (64186, 64200), False, 'import math, sys, os, re, random\n'), ((64274, 64297), 'math.ceil', 'math.ceil', (['(param_n / 18)'], {}), '(param_n / 18)\n', (64283, 64297), False, 'import math, sys, os, re, random\n'), ((68438, 68468), 'numpy.load', 'np.load', (['f1'], {'allow_pickle': '(True)'}), '(f1, allow_pickle=True)\n', (68445, 68468), True, 'import numpy as np\n'), ((68508, 68538), 'numpy.load', 'np.load', (['f2'], {'allow_pickle': '(True)'}), '(f2, allow_pickle=True)\n', (68515, 68538), True, 'import numpy as np\n'), ((74467, 74496), 'numpy.load', 'np.load', (['f'], {'allow_pickle': '(True)'}), '(f, allow_pickle=True)\n', (74474, 74496), True, 'import numpy as np\n'), ((81639, 81662), 'sys.argv.index', 'sys.argv.index', (['"""--cdt"""'], {}), "('--cdt')\n", (81653, 81662), False, 'import math, sys, os, re, random\n'), ((81733, 81756), 'sys.argv.index', 'sys.argv.index', (['"""--cdt"""'], {}), "('--cdt')\n", (81747, 81756), False, 'import math, sys, os, re, random\n'), ((51171, 51191), 'math.log', 'math.log', (['param_n', '(2)'], {}), '(param_n, 2)\n', (51179, 51191), False, 'import math, sys, os, re, random\n')] |
"""
Copyright 2020 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the 'Software'), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
from geometry_msgs.msg import Quaternion, TransformStamped, Twist
from nav_msgs.msg import Odometry
import numpy as np
from rcl_interfaces.msg import ParameterType
import rclpy
from rclpy.node import Node, ParameterDescriptor
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from tf2_ros.transform_broadcaster import TransformBroadcaster
from .neato_driver import NeatoRobot
class NeatoNode(Node):
    """ROS 2 node wrapping a :class:`NeatoRobot` serial driver.

    Publishes the lidar as ``sensor_msgs/LaserScan`` on ``scan``, wheel
    odometry as ``nav_msgs/Odometry`` on ``odom`` plus the matching
    ``odom`` -> ``base_footprint`` transform, and listens on ``cmd_vel``
    for velocity commands.
    """

    def __init__(self, robot: NeatoRobot):
        """Create publishers/subscriber, TF broadcaster and message templates.

        :param robot: an already-constructed NeatoRobot serial driver.
        """
        super(NeatoNode, self).__init__('neato')
        self._robot = robot
        self._scan_pub = self.create_publisher(LaserScan, 'scan', 1)
        self._odom_pub = self.create_publisher(Odometry, 'odom', 1)
        self._tf_broadcaster = TransformBroadcaster(self)
        self._cmd_vel_sub = self.create_subscription(
            Twist, 'cmd_vel', self._process_cmd_vel, 1)
        # Most recent wheel command, written by the cmd_vel callback and
        # forwarded to the robot once per tick(). Values are integers in
        # millimeter units (see the * 1000 conversions below).
        self.motor_commands = {'left_dist': 0,
                               'right_dist': 0,
                               'speed': 0}
        self.declare_parameter(
            'frame_id',
            'laser_link',
            ParameterDescriptor(
                type=ParameterType.PARAMETER_STRING,
                description='Frame ID for the laser'))
        # Parameter was declared just above with a default, so it is
        # guaranteed to resolve here.
        scan_link = self.get_parameter_or('frame_id').value
        # Template LaserScan: 360 one-degree beams covering [0, 2*pi);
        # only ranges and the stamp change per tick.
        self._scan = LaserScan(header=Header(frame_id=scan_link))
        self._scan.angle_min = 0.0
        self._scan.angle_max = np.pi * 2
        self._scan.angle_increment = (
            self._scan.angle_max - self._scan.angle_min) / 360.0
        self._scan.range_min = 0.020
        self._scan.range_max = 5.0
        # Integrated 2D pose (meters / radians) and the previous raw wheel
        # encoder readings (millimeters) used to compute per-tick travel.
        self.x, self.y, self.th = 0.0, 0.0, 0.0
        self._encoders = [0, 0]
        # Reusable odometry message and base_footprint transform; static
        # fields are filled once here, dynamic fields updated in tick().
        self._odom = Odometry(header=Header(frame_id='odom'),
                              child_frame_id='base_footprint')
        self._bl_tf = TransformStamped(header=Header(frame_id='odom'),
                                       child_frame_id='base_footprint')
        self._bl_tf.transform.translation.x = 0.0
        self._bl_tf.transform.translation.y = 0.0
        self._bl_tf.transform.translation.z = 0.0
        self._bl_tf.transform.rotation.w = 1.0
        self._bl_tf.transform.rotation.x = 0.0
        self._bl_tf.transform.rotation.y = 0.0
        self._bl_tf.transform.rotation.z = 0.0

    def _process_cmd_vel(self, twist: Twist):
        """Convert an incoming Twist into per-wheel distance/speed commands.

        Differential-drive mixing: each wheel receives the linear component
        plus/minus the rotational contribution at half the wheel base. The
        result is stored in ``self.motor_commands`` for the next tick().
        """
        self.get_logger().debug('twist: {}'.format(twist))
        x = twist.linear.x
        th = twist.angular.z * (self._robot.base_width / 2)
        # k is the magnitude of the fastest wheel; used to rescale.
        k = max(abs(x - th), abs(x + th))
        self.get_logger().debug('x: {}, th: {}, k: {}'.format(x, th, k))
        # sending commands higher than max speed will fail
        if k > self._robot.max_speed:
            factor = self._robot.max_speed / k
            x *= factor
            th *= factor
            self.get_logger().debug(
                'Scaling velocities down by {}: x: {}, th: {}'.format(
                    factor, x, th))
        left, right = x - th, x + th
        speed = max(abs(left),
                    abs(right))
        self.get_logger().debug(
            'Motor commands: left: {}: right: {}, speed: {}'.format(
                left, right, speed))
        # Convert meters -> integer millimeters for the serial driver.
        self.motor_commands = {'left_dist': int(left * 1000),
                               'right_dist': int(right * 1000),
                               'speed': int(speed * 1000)}

    def tick(self, previous_time):
        """One control-loop iteration: robot I/O, odometry and publishing.

        Reads motor state and the laser from the robot, forwards the
        pending wheel command, integrates differential-drive odometry and
        publishes scan, odom and the base_footprint TF.

        :param previous_time: rclpy Time of the previous tick; only used
            for the velocity terms of the odometry message.
        """
        now = self.get_clock().now()
        dt = now - previous_time
        dt_secs = dt.nanoseconds / 1000000000
        self.get_logger().debug('tick')
        motor_state = self._robot.get_motors()
        self.get_logger().debug('tack')
        self._robot.set_motors(**self.motor_commands)
        self.get_logger().debug('teck')
        # laser_rpm is read but not published anywhere here.
        laser_ranges, laser_rpm = self._robot.get_laser_scan()
        self.get_logger().debug('tuck')
        # Driver reports ranges in millimeters; LaserScan wants meters.
        self._scan.ranges = list(np.array(laser_ranges) / 1000)
        self._scan.header.stamp = now.to_msg()
        self._scan_pub.publish(self._scan)
        # Wheel travel (meters) since the previous tick, from encoders.
        d_left = (motor_state['LeftWheel_PositionInMM'] -
                  self._encoders[0]) / 1000.0
        d_right = (
            motor_state['RightWheel_PositionInMM'] - self._encoders[1]) / 1000.0
        self._encoders = [motor_state['LeftWheel_PositionInMM'],
                          motor_state['RightWheel_PositionInMM']]
        # Dead reckoning: mean travel is the arc length, the wheel travel
        # difference over the base width is the heading change.
        dx = (d_left + d_right) / 2
        dth = (d_right - d_left) / self._robot.base_width
        x = np.cos(dth) * dx
        y = -np.sin(dth) * dx
        # Rotate the local displacement into the odom frame and integrate.
        self.x += np.cos(self.th) * x - np.sin(self.th) * y
        self.y += np.sin(self.th) * x + np.cos(self.th) * y
        self.th += dth
        # prepare tf from base_link to odom
        # (yaw-only rotation expressed as a quaternion)
        quaternion = Quaternion()
        quaternion.z = np.sin(self.th / 2.0)
        quaternion.w = np.cos(self.th / 2.0)
        # Fill in odometry
        self._odom.header.stamp = now.to_msg()
        self._odom.pose.pose.position.x = self.x
        self._odom.pose.pose.position.y = self.y
        self._odom.pose.pose.position.z = 0.0
        self._odom.pose.pose.orientation = quaternion
        self._odom.twist.twist.linear.x = dx / dt_secs
        self._odom.twist.twist.angular.z = dth / dt_secs
        self._odom_pub.publish(self._odom)
        self._bl_tf.header.stamp = now.to_msg()
        self._bl_tf.transform.translation.x = self.x
        self._bl_tf.transform.translation.y = self.y
        self._bl_tf.transform.rotation = quaternion
        self._tf_broadcaster.sendTransform(self._bl_tf)
        self.get_logger().debug('tock')
def main(args=None):
    """Bring up the serial driver and run the node's control loop.

    :param args: optional command-line arguments forwarded to rclpy.
    """
    rclpy.init(args=args)
    robot = NeatoRobot(port='/dev/ttyACM0')
    with robot.operational():
        node = NeatoNode(robot)
        # Give the hardware a moment to settle before the first command.
        time.sleep(1)
        node.get_logger().info('Robot operational, starting loop')
        last_tick = node.get_clock().now()
        keep_running = True
        while keep_running and rclpy.ok():
            try:
                rclpy.spin_once(node, timeout_sec=0.1)
                node.tick(last_tick)
                last_tick = node.get_clock().now()
            except KeyboardInterrupt:
                keep_running = False
        node.destroy_node()
    rclpy.shutdown()
# Allow running this module directly as a script (outside a ROS launch file).
if __name__ == '__main__':
    main()
| [
"tf2_ros.transform_broadcaster.TransformBroadcaster",
"rclpy.ok",
"time.sleep",
"numpy.array",
"std_msgs.msg.Header",
"geometry_msgs.msg.Quaternion",
"rclpy.spin_once",
"numpy.cos",
"numpy.sin",
"rclpy.init",
"rclpy.shutdown",
"rclpy.node.ParameterDescriptor"
] | [((6625, 6646), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (6635, 6646), False, 'import rclpy\n'), ((7176, 7192), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (7190, 7192), False, 'import rclpy\n'), ((1782, 1808), 'tf2_ros.transform_broadcaster.TransformBroadcaster', 'TransformBroadcaster', (['self'], {}), '(self)\n', (1802, 1808), False, 'from tf2_ros.transform_broadcaster import TransformBroadcaster\n'), ((5761, 5773), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (5771, 5773), False, 'from geometry_msgs.msg import Quaternion, TransformStamped, Twist\n'), ((5797, 5818), 'numpy.sin', 'np.sin', (['(self.th / 2.0)'], {}), '(self.th / 2.0)\n', (5803, 5818), True, 'import numpy as np\n'), ((5842, 5863), 'numpy.cos', 'np.cos', (['(self.th / 2.0)'], {}), '(self.th / 2.0)\n', (5848, 5863), True, 'import numpy as np\n'), ((6764, 6777), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6774, 6777), False, 'import time\n'), ((6925, 6935), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (6933, 6935), False, 'import rclpy\n'), ((2154, 2253), 'rclpy.node.ParameterDescriptor', 'ParameterDescriptor', ([], {'type': 'ParameterType.PARAMETER_STRING', 'description': '"""Frame ID for the laser"""'}), "(type=ParameterType.PARAMETER_STRING, description=\n 'Frame ID for the laser')\n", (2173, 2253), False, 'from rclpy.node import Node, ParameterDescriptor\n'), ((5505, 5516), 'numpy.cos', 'np.cos', (['dth'], {}), '(dth)\n', (5511, 5516), True, 'import numpy as np\n'), ((2381, 2407), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': 'scan_link'}), '(frame_id=scan_link)\n', (2387, 2407), False, 'from std_msgs.msg import Header\n'), ((2779, 2802), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""odom"""'}), "(frame_id='odom')\n", (2785, 2802), False, 'from std_msgs.msg import Header\n'), ((2914, 2937), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""odom"""'}), "(frame_id='odom')\n", (2920, 2937), False, 'from std_msgs.msg 
import Header\n'), ((4938, 4960), 'numpy.array', 'np.array', (['laser_ranges'], {}), '(laser_ranges)\n', (4946, 4960), True, 'import numpy as np\n'), ((5535, 5546), 'numpy.sin', 'np.sin', (['dth'], {}), '(dth)\n', (5541, 5546), True, 'import numpy as np\n'), ((5570, 5585), 'numpy.cos', 'np.cos', (['self.th'], {}), '(self.th)\n', (5576, 5585), True, 'import numpy as np\n'), ((5592, 5607), 'numpy.sin', 'np.sin', (['self.th'], {}), '(self.th)\n', (5598, 5607), True, 'import numpy as np\n'), ((5630, 5645), 'numpy.sin', 'np.sin', (['self.th'], {}), '(self.th)\n', (5636, 5645), True, 'import numpy as np\n'), ((5652, 5667), 'numpy.cos', 'np.cos', (['self.th'], {}), '(self.th)\n', (5658, 5667), True, 'import numpy as np\n'), ((6970, 7008), 'rclpy.spin_once', 'rclpy.spin_once', (['node'], {'timeout_sec': '(0.1)'}), '(node, timeout_sec=0.1)\n', (6985, 7008), False, 'import rclpy\n')] |
# ==============================================================================
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
"""Openvino Tensorflow BiasAdd operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # tests build TF1-style graphs with placeholders/sessions
from common import NgraphTest
np.random.seed(8)  # suite convention; the tests below use hard-coded inputs
class TestBiasAddOperations(NgraphTest):
    """Compare tf.nn.bias_add results between nGraph and stock TensorFlow."""

    def _compare_bias_add(self, input_data, input_shape, bias_data, data_format):
        """Run bias_add on and off nGraph and assert the outputs match.

        Args:
            input_data: flat sequence of values, reshaped to ``input_shape``.
            input_shape: 4-D shape fed to the input placeholder.
            bias_data: bias vector; its length is the channel count
                (dim 1 for 'NCHW', last dim for 'NHWC').
            data_format: 'NHWC' or 'NCHW'.
        """
        input_data = np.reshape(input_data, input_shape)
        input_var = tf.compat.v1.placeholder(tf.float32, shape=input_shape)
        # rank-1 bias placeholder sized to match the channel dimension
        bias_var = tf.compat.v1.placeholder(tf.float32, shape=(len(bias_data),))
        out = tf.nn.bias_add(input_var, bias_var, data_format)

        def run_test(sess):
            return sess.run(
                out, feed_dict={
                    input_var: input_data,
                    bias_var: bias_data
                })

        assert (
            self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()

    def test_BiasAdd1(self):
        input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4)
        self._compare_bias_add(input_data, (2, 2, 2, 2), (100., -100.), 'NHWC')

    def test_BiasAdd2(self):
        input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4)
        self._compare_bias_add(input_data, (2, 2, 2, 2), (100., -100.), 'NCHW')

    def test_BiasAdd3(self):
        input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4, 3, 5, 1,
                      2, 0, 4, 0, 1)
        # channels = 3
        self._compare_bias_add(input_data, (2, 3, 2, 2), (100., -100., 50), 'NCHW')

    def test_BiasAdd4(self):
        input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4, 3, 5, 1,
                      2, 0, 4, 0, 1)
        # channels = 3
        self._compare_bias_add(input_data, (2, 2, 2, 3), (100., -100., 50), 'NHWC')
| [
"tensorflow.compat.v1.placeholder",
"numpy.reshape",
"tensorflow.compat.v1.disable_eager_execution",
"numpy.random.seed",
"tensorflow.nn.bias_add"
] | [((459, 497), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (495, 497), True, 'import tensorflow as tf\n'), ((530, 547), 'numpy.random.seed', 'np.random.seed', (['(8)'], {}), '(8)\n', (544, 547), True, 'import numpy as np\n'), ((712, 748), 'numpy.reshape', 'np.reshape', (['input_data', '(2, 2, 2, 2)'], {}), '(input_data, (2, 2, 2, 2))\n', (722, 748), True, 'import numpy as np\n'), ((769, 825), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(2, 2, 2, 2)'}), '(tf.float32, shape=(2, 2, 2, 2))\n', (793, 825), True, 'import tensorflow as tf\n'), ((880, 925), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(2)'}), '(tf.float32, shape=2)\n', (904, 925), True, 'import tensorflow as tf\n'), ((943, 986), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['input_var', 'bias_var', '"""NHWC"""'], {}), "(input_var, bias_var, 'NHWC')\n", (957, 986), True, 'import tensorflow as tf\n'), ((1398, 1434), 'numpy.reshape', 'np.reshape', (['input_data', '(2, 2, 2, 2)'], {}), '(input_data, (2, 2, 2, 2))\n', (1408, 1434), True, 'import numpy as np\n'), ((1455, 1511), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(2, 2, 2, 2)'}), '(tf.float32, shape=(2, 2, 2, 2))\n', (1479, 1511), True, 'import tensorflow as tf\n'), ((1566, 1611), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(2)'}), '(tf.float32, shape=2)\n', (1590, 1611), True, 'import tensorflow as tf\n'), ((1629, 1672), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['input_var', 'bias_var', '"""NCHW"""'], {}), "(input_var, bias_var, 'NCHW')\n", (1643, 1672), True, 'import tensorflow as tf\n'), ((2130, 2166), 'numpy.reshape', 'np.reshape', (['input_data', '(2, 3, 2, 2)'], {}), '(input_data, (2, 3, 2, 2))\n', (2140, 2166), True, 'import numpy as np\n'), ((2187, 2243), 'tensorflow.compat.v1.placeholder', 
'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(2, 3, 2, 2)'}), '(tf.float32, shape=(2, 3, 2, 2))\n', (2211, 2243), True, 'import tensorflow as tf\n'), ((2318, 2363), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(3)'}), '(tf.float32, shape=3)\n', (2342, 2363), True, 'import tensorflow as tf\n'), ((2381, 2424), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['input_var', 'bias_var', '"""NCHW"""'], {}), "(input_var, bias_var, 'NCHW')\n", (2395, 2424), True, 'import tensorflow as tf\n'), ((2882, 2918), 'numpy.reshape', 'np.reshape', (['input_data', '(2, 2, 2, 3)'], {}), '(input_data, (2, 2, 2, 3))\n', (2892, 2918), True, 'import numpy as np\n'), ((2939, 2995), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(2, 2, 2, 3)'}), '(tf.float32, shape=(2, 2, 2, 3))\n', (2963, 2995), True, 'import tensorflow as tf\n'), ((3070, 3115), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(3)'}), '(tf.float32, shape=3)\n', (3094, 3115), True, 'import tensorflow as tf\n'), ((3133, 3176), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['input_var', 'bias_var', '"""NHWC"""'], {}), "(input_var, bias_var, 'NHWC')\n", (3147, 3176), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 19:53:57 2019
@author: george
gaussian experiment for HMMS
"""
import sys
sys.path.append('../')
import numpy as np
from _experiments import gauss_seq1d
from _misc import make_supervised, compute_forw, make_supervised2
from HMMs import MHMM
import matplotlib.pyplot as plt
np.random.seed( seed = 0)  # reproducible data generation and training
#GENERATE DATA
# 2-state transition matrix: row i holds P(next state | current state i)
a0 = [0.9, 0.1]
a1 = [0.4, 0.6]
# Gaussian emission parameters (mean, std) for state 0 and state 1
m_0 = 0
m_1 = 4
std_0 = 1
std_1 = 1
A = np.array([a0, a1])
T = 15  # length of each observation sequence
N = 1000  # number of sequences
# Sample N length-T observation sequences (and their hidden state paths)
# from the 2-state Gaussian HMM defined above.
data, states = gauss_seq1d(T = T, N = N, A = A, m_0 = m_0, m_1 = m_1,
                           std_0 = std_0, std_1 = std_1)
dates =np.zeros( shape = [N, 2])
# per-sequence random indices: column 0 in [0, 8), column 1 in [8, 15).
# NOTE(review): fit() below is called with dates = None, so these are unused.
dates[:,0] = np.random.choice( np.arange(8), size = N)
dates[:,1] = np.random.choice( np.arange(8, 15), size = N)
#TRAIN HMM
n_HMMS = 1   # a mixture containing a single HMM
n_Comp = 1   # one Gaussian component per state
EM_iter = 2  # number of EM iterations
#states1 = make_supervised(states.copy(), value = 0)
# label matrix derived from the true states (make_supervised2 presumably
# drops/keeps labels for semi-supervised fitting -- confirm in _misc)
states1 = make_supervised2(states.copy(), drop = 0)
#statesinf = np.full( shape = [states1.shape[0], states1.shape[1]], fill_value = -np.inf )
#statesinf[0, 10] = 1
mhmm = MHMM(n_HMMS = n_HMMS, n_states = 2, n_Comp = n_Comp, EM_iter = EM_iter, tol = 10**(-5))
mhmm = mhmm.fit( data = data, states = states1, dates = None, save_name = 'mymhmm.npy')
#get the hmm
hmm = mhmm.HMMS[0]
params = hmm.get_params()  # NOTE(review): fetched but never used below
zers, ones = compute_forw(hmm, data)  # presumably per-state forward probabilities -- see _misc.compute_forw
# histograms: state-0 values, state-1 values, and both combined
fig, ax = plt.subplots(1,3, figsize = (10,4))
ax[0].hist(zers, bins = 30)
ax[1].hist(ones, bins = 30)
ax[2].hist(np.concatenate((ones, zers), axis = 0), bins = 30)
ax[2].set_title('All states1')
| [
"_misc.compute_forw",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.concatenate",
"sys.path.append",
"_experiments.gauss_seq1d",
"matplotlib.pyplot.subplots",
"numpy.arange",
"HMMs.MHMM"
] | [((151, 173), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (166, 173), False, 'import sys\n'), ((352, 374), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(0)'}), '(seed=0)\n', (366, 374), True, 'import numpy as np\n'), ((468, 486), 'numpy.array', 'np.array', (['[a0, a1]'], {}), '([a0, a1])\n', (476, 486), True, 'import numpy as np\n'), ((519, 589), '_experiments.gauss_seq1d', 'gauss_seq1d', ([], {'T': 'T', 'N': 'N', 'A': 'A', 'm_0': 'm_0', 'm_1': 'm_1', 'std_0': 'std_0', 'std_1': 'std_1'}), '(T=T, N=N, A=A, m_0=m_0, m_1=m_1, std_0=std_0, std_1=std_1)\n', (530, 589), False, 'from _experiments import gauss_seq1d\n'), ((640, 662), 'numpy.zeros', 'np.zeros', ([], {'shape': '[N, 2]'}), '(shape=[N, 2])\n', (648, 662), True, 'import numpy as np\n'), ((1053, 1130), 'HMMs.MHMM', 'MHMM', ([], {'n_HMMS': 'n_HMMS', 'n_states': '(2)', 'n_Comp': 'n_Comp', 'EM_iter': 'EM_iter', 'tol': '(10 ** -5)'}), '(n_HMMS=n_HMMS, n_states=2, n_Comp=n_Comp, EM_iter=EM_iter, tol=10 ** -5)\n', (1057, 1130), False, 'from HMMs import MHMM\n'), ((1303, 1326), '_misc.compute_forw', 'compute_forw', (['hmm', 'data'], {}), '(hmm, data)\n', (1315, 1326), False, 'from _misc import make_supervised, compute_forw, make_supervised2\n'), ((1338, 1373), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 4)'}), '(1, 3, figsize=(10, 4))\n', (1350, 1373), True, 'import matplotlib.pyplot as plt\n'), ((697, 709), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (706, 709), True, 'import numpy as np\n'), ((752, 768), 'numpy.arange', 'np.arange', (['(8)', '(15)'], {}), '(8, 15)\n', (761, 768), True, 'import numpy as np\n'), ((1441, 1477), 'numpy.concatenate', 'np.concatenate', (['(ones, zers)'], {'axis': '(0)'}), '((ones, zers), axis=0)\n', (1455, 1477), True, 'import numpy as np\n')] |
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from AirSimClient import *
import sys
import time
import msvcrt

# Print numpy arrays without truncation. The original passed threshold=np.nan,
# which modern numpy rejects (threshold must be numeric and non-NaN); the
# documented way to disable truncation is sys.maxsize.
np.set_printoptions(threshold=sys.maxsize)

# if true, use Q-learning. Else, use SARSA
qlearning = True
readWeights = True  # read in saved weights to resume progress

# drone boundaries
goal_limit = 0.5   # radius (m) of the goal sphere around the target point
max_radius = 3     # x/y radius (m) beyond which the episode is aborted

# Set learning parameters
y = 0.1  # discount rate
e = 0.2  # epsilon (annealed during training)
target_z = -2  # target height in NED coordinate system
num_episodes = 50000
episode_length = 100  # number of actions per episode

# ANN parameters
step_size = 2.5  # action space in increments of degrees
num_increments = 5
translate_scale = -5
num_outputs = num_increments**2  # one output per (roll, pitch) grid cell
num_inputs = 6                   # state: x, y, z, roll, pitch, yaw
learning_rate = 0.001
num_hidden = 10
def reward(state):
    """Immediate reward: +10 inside the goal sphere, -50 everywhere else.

    state: 6-vector [x, y, z, roll, pitch, yaw]; only the position is used.
    """
    in_goal = is_in_bounds_3d(state[:3], goal_limit)
    return 10 if in_goal else -50
def did_reach_goal(state):
    """True when the drone's position lies within goal_limit of the target."""
    position = state[:3]
    return is_in_bounds_3d(position, goal_limit)
def get_action(index):
    """Map a flat action index (e.g. the max-Q index) to (roll, pitch).

    The action space is a num_increments x num_increments grid; the row
    selects roll and the column selects pitch, each spaced step_size degrees
    apart, offset by translate_scale, then normalized via normalize_deg.
    """
    row, col = divmod(index, num_increments)
    roll = normalize_deg(row * step_size + translate_scale)
    pitch = normalize_deg(col * step_size + translate_scale)
    return (roll, pitch)
def normalize_deg(x):
    """Scale an angle given in degrees down by a factor of 90."""
    full_scale = 90
    return x / full_scale
def scale_pos(s):
    """Scale the position part of a state vector for the network input.

    The x/y/z components are divided by 5; the three orientation components
    pass through unchanged. Returned as a 1x6 nested list (one input row).
    """
    pos_scaler = 5
    scaled = [component / pos_scaler for component in s[:3]]
    scaled.extend(s[3:6])
    return [scaled]
def distance(x, y):
    """Planar distance from the origin (the start/goal x-y) to (x, y)."""
    ref_x = 0
    ref_y = 0
    dx = ref_x - x
    dy = ref_y - y
    return np.sqrt(dx**2 + dy**2)
def distance_3d(pos):
    """Euclidean distance from pos (x, y, z) to the goal point (0, 0, target_z)."""
    goal = (0, 0, target_z)
    return np.sqrt((goal[0] - pos[0])**2 + (goal[1] - pos[1])**2 + (goal[2] - pos[2])**2)
def is_in_bounds(x, y):
    """True while (x, y) lies strictly inside the max_radius circle."""
    radial = distance(x, y)
    return radial < max_radius
def is_in_bounds_3d(pos, limit):
    """True when pos (x, y, z) is strictly within `limit` of the goal point.

    Reuses distance_3d so the goal location (0, 0, target_z) is defined in
    exactly one place instead of duplicating the formula.
    """
    return distance_3d(pos) < limit
def loadweights(type):
    """Load saved ANN weights from disk.

    type: 1 loads the hidden->output weights ('weights_output.txt'); any
    other value loads the input->hidden weights ('weights_hidden.txt').
    Returns a 2-D float32 numpy array with one row per line of the file.

    Note: the parameter shadows the builtin `type`; the name is kept for
    call compatibility.
    """
    filename = 'weights_output.txt' if type == 1 else 'weights_hidden.txt'
    # Context manager guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        return np.array([list(map(np.float32, line.split())) for line in f])
def draw_rewards(reward_list, qlearning, block):
    """Plot per-episode average reward and a 100-episode binned average.

    reward_list: sequence with one average-reward entry per episode.
    qlearning: True labels the plots Q-Learning, False labels them SARSA.
    block: forwarded to plt.show(); False keeps training running.
    """
    plt.close()

    plt.subplot(2, 1, 1)  # top panel: raw per-episode reward
    if qlearning:
        plt.title("Average Reward per Episode (Q-Learning)")
    else:
        plt.title("Average Reward per Episode (SARSA)")
    plt.xlabel("Episode number")
    plt.ylabel("Reward")
    plt.plot(reward_list, label="Reward")
    plt.legend()

    plt.subplot(2, 1, 2)  # bottom panel: reward averaged over 100-episode bins
    if qlearning:
        plt.title("Average Reward per 100 Episodes (Q-Learning)")
    else:
        plt.title("Average Reward per 100 Episodes (SARSA)")
    plt.xlabel("Episode number (100's)")
    plt.ylabel("Reward")
    # Bin episodes into groups of 100; the last bin may be partial.
    n_full, remainder = divmod(len(reward_list), 100)
    num_bins = n_full + (1 if remainder else 0)
    avg = np.zeros(num_bins)
    for index, val in enumerate(reward_list):
        avg[index // 100] += val
    for i in range(num_bins):
        # Divide by the actual bin population. The original always divided
        # the final slot by len(reward_list) % 100, which is a zero division
        # (NaN bin) whenever the length is an exact multiple of 100.
        avg[i] /= min(100, len(reward_list) - i * 100)
    plt.plot(avg, label="Reward")
    plt.legend()
    plt.tight_layout()
    plt.show(block=block)
# init drone
# Connect to AirSim, take API control, and arm the multirotor.
client = MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
# hidden layer
# State input: a single 1 x num_inputs row (scaled position + orientation).
inputs1 = tf.placeholder(shape=[1,num_inputs], dtype=tf.float32)
if readWeights:
    # resume training from the weights saved in weights_hidden.txt
    weights_hidden = tf.Variable(loadweights(0))
else:
    weights_hidden = tf.Variable(tf.random_normal([num_inputs, num_hidden]))
bias_hidden = tf.Variable(tf.random_normal([num_hidden]))
# NOTE(review): bias_hidden is created but unused -- the bias add below is
# commented out, so the hidden layer is matmul + tanh only.
# preactivations_hidden = tf.add(tf.matmul(inputs1, weights_hidden), bias_hidden)
preactivations_hidden = tf.matmul(inputs1, weights_hidden)
# activations_hidden = tf.nn.sigmoid(preactivations_hidden)
activations_hidden = tf.tanh(preactivations_hidden)
# output layer
if readWeights:
    weights_output = tf.Variable(loadweights(1))
else:
    weights_output = tf.Variable(tf.random_normal([num_hidden, num_outputs]))
bias_output = tf.Variable(tf.random_normal([num_outputs]))
# Qout holds the Q-value estimate for each of the num_outputs actions;
# bias_output is likewise unused because the bias add is commented out.
# Qout = tf.add(tf.matmul(activations_hidden, weights_output), bias_output)
Qout = tf.matmul(activations_hidden, weights_output)
# predict is the index of the greedy (max-Q) action.
predict = tf.argmax(Qout,1)
# training
# Squared-error loss between the target Q-values and the network output,
# minimised with plain gradient descent.
nextQ = tf.placeholder(shape=[1,num_outputs], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(nextQ - Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
updateModel = trainer.minimize(loss)
init = tf.global_variables_initializer()
#create lists to contain total rewards and steps per episode
total_reward_list = np.zeros(num_episodes)
steps_to_success = np.zeros(num_episodes)
percent_success_actions = np.zeros(num_episodes)
num_to_graph = 0
with tf.Session() as sess:
    sess.run(init)
    # episode loop
    for i in range(num_episodes):
        if msvcrt.kbhit():
            # script must be run from cmd.exe in order to register keypresses
            print("You pressed ", msvcrt.getch(), " so now i will quit.")
            break
        print("\n\n\nEPISODE " + str(i) + "\n\n\n")
        #Reset drone and get state
        # Teleport the drone back to the start pose at the target height.
        init_orient = (0, 0, 0)
        print("===== Initial Orientation " + str(init_orient))
        client.simSetPose(Pose(Vector3r(0,0,target_z),
                          AirSimClientBase.toQuaternion(init_orient[0], init_orient[1], init_orient[2])), True)
        success_counter = 0
        num_success_actions = 0
        num_actions_taken = 0
        # action loop
        for j in range(episode_length):
            # get current state
            print("===== Action " + str(j))
            curr_pos = client.getPosition()
            curr_orient = client.getRollPitchYaw()
            curr_s = [curr_pos.x_val, curr_pos.y_val, curr_pos.z_val,
                curr_orient[0], curr_orient[1], curr_orient[2]]
            scaled_curr_s = scale_pos(curr_s)
            print(" STATE " + str(curr_s))
            print(" ====== scaled s " + str(scaled_curr_s))
            if not is_in_bounds(curr_s[0], curr_s[1]):
                # drone has gone too far -- reset
                print("===== OUT OF BOUNDS")
                break
            # a_index index of max action, allQ all Q-vals for current state
            a_index,allQ = sess.run([predict,Qout],feed_dict={inputs1:scaled_curr_s})
            if j == 0:
                # seed the SARSA action with the greedy choice on step 0
                sarsa_index = a_index
            if(qlearning):
                # decide next action (angle change relative to previous roll and pitch)
                if np.random.rand(1) < e:
                    # epsilon-greedy - random option
                    print(" !!!!!!!! EPSILON")
                    next_action = get_action(np.random.randint(0, num_outputs, dtype="int64"))
                else:
                    next_action = get_action(a_index[0])
            else:
                # SARSA
                # sarsa_index was selected while evaluating the previous step
                next_action = get_action(sarsa_index[0])
            # calculate action input to AirSim
            roll_diff = np.asscalar(next_action[0])
            pitch_diff = np.asscalar(next_action[1])
            print(" ====== next action " + str(next_action))
            rpy = client.getRollPitchYaw()
            roll = rpy[0] + roll_diff
            pitch = rpy[1] + pitch_diff
            # NOTE(review): duration and sleep_time are set but unused -- the
            # moveByAngle call below passes the literal 0.1 instead.
            yaw = 0; duration = 0.5; sleep_time = 0.1
            print(" ====== moving to (" + str(roll*90) + " " + str(pitch*90) + ")")
            # take action
            client.moveByAngle(pitch, roll, target_z, yaw, 0.1)
            # time.sleep(sleep_time) # wait for action to occur
            # get next state and reward as result of action
            s1Position = client.getPosition()
            s1Orientation = client.getRollPitchYaw()
            s1 = [s1Position.x_val, s1Position.y_val, s1Position.z_val, s1Orientation[0], s1Orientation[1], s1Orientation[2]]
            scaled_s1 = scale_pos(s1)
            r = reward(s1)
            total_reward_list[i] += r
            print(" ==== Reward " + str(r))
            # evaluate goal criteria
            if did_reach_goal(s1):
                print(" ******* reached goal " )
                num_success_actions += 1
                success_counter += 1
                # 30 consecutive in-goal steps count as a solved episode
                if success_counter >= 30:
                    print("\n\n SUCCESS " + str(i) + "\n\n")
                    # record number of steps to success
                    steps_to_success[i] = j
                    # break
            else:
                # make sure successful actions are consecutive
                success_counter = 0
            # Obtain the Q' values by feeding the new state through our network
            Q1 = sess.run(Qout,feed_dict={inputs1:scaled_s1})
            if qlearning:
                # Obtain maxQ' and set our target value for chosen action.
                maxQ1 = np.max(Q1) # from neural net
                print(" ===== MAX Q1 " + str(maxQ1))
                # NOTE(review): targetQ = allQ binds the same array (no copy);
                # the in-place update below also mutates allQ.
                targetQ = allQ
                targetQ[0,a_index[0]] = r + y*maxQ1
                print(" ===== TARGET " + str(r + y*maxQ1))
            else:
                # SARSA
                if np.random.rand(1) < e:
                    sarsa_index[0] = np.random.randint(0, num_outputs)
                    # epsilon-greedy - random option
                    print(" !!!!!!!! EPSILON IN SARSA")
                else:
                    sarsa_index[0] = np.asscalar(np.argmax(Q1))
                actual_q = Q1[0][sarsa_index[0]]
                targetQ = allQ
                targetQ[0,sarsa_index[0]] = r + y*actual_q
                print(" ===== TARGET " + str(r + y*actual_q))
            # train ANN using target Q
            _,W1,W0 = sess.run([updateModel,weights_output, weights_hidden ], feed_dict={inputs1:scaled_curr_s,nextQ:targetQ})
            # Persist the latest weights so training can be resumed later
            # (these files are what loadweights() reads back in).
            with open("weights_output.txt", "w") as weights_file:
                weights_file.write(str(W1))
            with open("weights_hidden.txt", "w") as weights_file:
                weights_file.write(str(W0))
            num_actions_taken += 1
        # episode done
        print("\n\n\nTotal Reward")
        print(total_reward_list[i])
        print(num_actions_taken)
        print("\n\n\n")
        # NOTE(review): num_actions_taken is 0 if the drone starts out of
        # bounds (the action loop breaks before incrementing), which would
        # divide by zero here.
        total_reward_list[i] = total_reward_list[i]/num_actions_taken
        percent_success_actions[i] = num_success_actions/num_actions_taken
        # anneal epsilon towards zero as episodes accumulate
        e = 2./((i/1000) + 10)
        num_to_graph += 1
        if i % 50 == 0:
            # refresh the live reward plot without blocking training
            draw_rewards(total_reward_list[:num_to_graph], qlearning, False)
        print("Epsilon " + str(e))
        # print("WEIGHTS\n" + str(W1))
# ---- post-training summary plots ----
plt.close()
# Plot 1: action index at which the 30-in-a-row goal hold was reached
# (entries stay 0 for episodes that never reached it).
plt.title("Number of Actions Taken to Reach Goal")
plt.xlabel("Episode number")
plt.ylabel("Actions")
plt.plot(steps_to_success[:num_to_graph], label="Actions")
plt.legend()
plt.show()
# Plot 2: fraction of in-goal actions per episode, shown as a percentage.
plt.title("Percentage of Successful Actions Per Episode")
plt.xlabel("Episode number")
plt.ylabel("Percentage")
plt.plot(np.multiply(percent_success_actions[:num_to_graph],100.0), label="Percent")
plt.legend()
plt.show()
# Final reward plot; block=True keeps the window open until dismissed.
draw_rewards(total_reward_list[:num_to_graph], qlearning, True)
| [
"msvcrt.kbhit",
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"tensorflow.tanh",
"numpy.multiply",
"tensorflow.random_normal",
"tensorflow.placeholder",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"numpy.max",
"matplotlib.pyplot.close",
"ten... | [((168, 205), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (187, 205), True, 'import numpy as np\n'), ((3295, 3350), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, num_inputs]', 'dtype': 'tf.float32'}), '(shape=[1, num_inputs], dtype=tf.float32)\n', (3309, 3350), True, 'import tensorflow as tf\n'), ((3662, 3696), 'tensorflow.matmul', 'tf.matmul', (['inputs1', 'weights_hidden'], {}), '(inputs1, weights_hidden)\n', (3671, 3696), True, 'import tensorflow as tf\n'), ((3778, 3808), 'tensorflow.tanh', 'tf.tanh', (['preactivations_hidden'], {}), '(preactivations_hidden)\n', (3785, 3808), True, 'import tensorflow as tf\n'), ((4116, 4161), 'tensorflow.matmul', 'tf.matmul', (['activations_hidden', 'weights_output'], {}), '(activations_hidden, weights_output)\n', (4125, 4161), True, 'import tensorflow as tf\n'), ((4173, 4191), 'tensorflow.argmax', 'tf.argmax', (['Qout', '(1)'], {}), '(Qout, 1)\n', (4182, 4191), True, 'import tensorflow as tf\n'), ((4211, 4267), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, num_outputs]', 'dtype': 'tf.float32'}), '(shape=[1, num_outputs], dtype=tf.float32)\n', (4225, 4267), True, 'import tensorflow as tf\n'), ((4323, 4385), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4356, 4385), True, 'import tensorflow as tf\n'), ((4431, 4464), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4462, 4464), True, 'import tensorflow as tf\n'), ((4547, 4569), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (4555, 4569), True, 'import numpy as np\n'), ((4589, 4611), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (4597, 4611), True, 'import numpy as np\n'), ((4638, 4660), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (4646, 4660), 
True, 'import numpy as np\n'), ((10506, 10517), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10515, 10517), True, 'import matplotlib.pyplot as plt\n'), ((10518, 10568), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of Actions Taken to Reach Goal"""'], {}), "('Number of Actions Taken to Reach Goal')\n", (10527, 10568), True, 'import matplotlib.pyplot as plt\n'), ((10569, 10597), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode number"""'], {}), "('Episode number')\n", (10579, 10597), True, 'import matplotlib.pyplot as plt\n'), ((10598, 10619), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actions"""'], {}), "('Actions')\n", (10608, 10619), True, 'import matplotlib.pyplot as plt\n'), ((10620, 10678), 'matplotlib.pyplot.plot', 'plt.plot', (['steps_to_success[:num_to_graph]'], {'label': '"""Actions"""'}), "(steps_to_success[:num_to_graph], label='Actions')\n", (10628, 10678), True, 'import matplotlib.pyplot as plt\n'), ((10679, 10691), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10689, 10691), True, 'import matplotlib.pyplot as plt\n'), ((10692, 10702), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10700, 10702), True, 'import matplotlib.pyplot as plt\n'), ((10704, 10761), 'matplotlib.pyplot.title', 'plt.title', (['"""Percentage of Successful Actions Per Episode"""'], {}), "('Percentage of Successful Actions Per Episode')\n", (10713, 10761), True, 'import matplotlib.pyplot as plt\n'), ((10762, 10790), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode number"""'], {}), "('Episode number')\n", (10772, 10790), True, 'import matplotlib.pyplot as plt\n'), ((10791, 10815), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage"""'], {}), "('Percentage')\n", (10801, 10815), True, 'import matplotlib.pyplot as plt\n'), ((10901, 10913), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10911, 10913), True, 'import matplotlib.pyplot as plt\n'), ((10914, 10924), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (10922, 10924), True, 'import matplotlib.pyplot as plt\n'), ((1500, 1544), 'numpy.sqrt', 'np.sqrt', (['((ref_x - x) ** 2 + (ref_y - y) ** 2)'], {}), '((ref_x - x) ** 2 + (ref_y - y) ** 2)\n', (1507, 1544), True, 'import numpy as np\n'), ((1608, 1677), 'numpy.sqrt', 'np.sqrt', (['((x1 - pos[0]) ** 2 + (y1 - pos[1]) ** 2 + (z1 - pos[2]) ** 2)'], {}), '((x1 - pos[0]) ** 2 + (y1 - pos[1]) ** 2 + (z1 - pos[2]) ** 2)\n', (1615, 1677), True, 'import numpy as np\n'), ((2224, 2235), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2233, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2251, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2437, 2465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode number"""'], {}), "('Episode number')\n", (2447, 2465), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2490), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (2480, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2532), 'matplotlib.pyplot.plot', 'plt.plot', (['reward_list'], {'label': '"""Reward"""'}), "(reward_list, label='Reward')\n", (2503, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2549), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2547, 2549), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2575), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2566, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2762, 2798), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode number (100\'s)"""'], {}), '("Episode number (100\'s)")\n', (2772, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2803, 2823), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (2813, 2823), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3080), 'matplotlib.pyplot.plot', 'plt.plot', (['avg'], 
{'label': '"""Reward"""'}), "(avg, label='Reward')\n", (3059, 3080), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3097), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3095, 3097), True, 'import matplotlib.pyplot as plt\n'), ((3102, 3120), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3118, 3120), True, 'import matplotlib.pyplot as plt\n'), ((3125, 3146), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': 'block'}), '(block=block)\n', (3133, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3524, 3554), 'tensorflow.random_normal', 'tf.random_normal', (['[num_hidden]'], {}), '([num_hidden])\n', (3540, 3554), True, 'import tensorflow as tf\n'), ((4000, 4031), 'tensorflow.random_normal', 'tf.random_normal', (['[num_outputs]'], {}), '([num_outputs])\n', (4016, 4031), True, 'import tensorflow as tf\n'), ((4288, 4311), 'tensorflow.square', 'tf.square', (['(nextQ - Qout)'], {}), '(nextQ - Qout)\n', (4297, 4311), True, 'import tensorflow as tf\n'), ((4684, 4696), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4694, 4696), True, 'import tensorflow as tf\n'), ((10825, 10883), 'numpy.multiply', 'np.multiply', (['percent_success_actions[:num_to_graph]', '(100.0)'], {}), '(percent_success_actions[:num_to_graph], 100.0)\n', (10836, 10883), True, 'import numpy as np\n'), ((1807, 1876), 'numpy.sqrt', 'np.sqrt', (['((x1 - pos[0]) ** 2 + (y1 - pos[1]) ** 2 + (z1 - pos[2]) ** 2)'], {}), '((x1 - pos[0]) ** 2 + (y1 - pos[1]) ** 2 + (z1 - pos[2]) ** 2)\n', (1814, 1876), True, 'import numpy as np\n'), ((2314, 2366), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Reward per Episode (Q-Learning)"""'], {}), "('Average Reward per Episode (Q-Learning)')\n", (2323, 2366), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2432), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Reward per Episode (SARSA)"""'], {}), "('Average Reward per Episode (SARSA)')\n", (2394, 2432), True, 'import matplotlib.pyplot as plt\n'), 
((2629, 2686), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Reward per 100 Episodes (Q-Learning)"""'], {}), "('Average Reward per 100 Episodes (Q-Learning)')\n", (2638, 2686), True, 'import matplotlib.pyplot as plt\n'), ((2705, 2757), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Reward per 100 Episodes (SARSA)"""'], {}), "('Average Reward per 100 Episodes (SARSA)')\n", (2714, 2757), True, 'import matplotlib.pyplot as plt\n'), ((3454, 3496), 'tensorflow.random_normal', 'tf.random_normal', (['[num_inputs, num_hidden]'], {}), '([num_inputs, num_hidden])\n', (3470, 3496), True, 'import tensorflow as tf\n'), ((3929, 3972), 'tensorflow.random_normal', 'tf.random_normal', (['[num_hidden, num_outputs]'], {}), '([num_hidden, num_outputs])\n', (3945, 3972), True, 'import tensorflow as tf\n'), ((4790, 4804), 'msvcrt.kbhit', 'msvcrt.kbhit', ([], {}), '()\n', (4802, 4804), False, 'import msvcrt\n'), ((6946, 6973), 'numpy.asscalar', 'np.asscalar', (['next_action[0]'], {}), '(next_action[0])\n', (6957, 6973), True, 'import numpy as np\n'), ((6999, 7026), 'numpy.asscalar', 'np.asscalar', (['next_action[1]'], {}), '(next_action[1])\n', (7010, 7026), True, 'import numpy as np\n'), ((4918, 4932), 'msvcrt.getch', 'msvcrt.getch', ([], {}), '()\n', (4930, 4932), False, 'import msvcrt\n'), ((8772, 8782), 'numpy.max', 'np.max', (['Q1'], {}), '(Q1)\n', (8778, 8782), True, 'import numpy as np\n'), ((6474, 6491), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (6488, 6491), True, 'import numpy as np\n'), ((9059, 9076), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (9073, 9076), True, 'import numpy as np\n'), ((9119, 9152), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_outputs'], {}), '(0, num_outputs)\n', (9136, 9152), True, 'import numpy as np\n'), ((6643, 6691), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_outputs'], {'dtype': '"""int64"""'}), "(0, num_outputs, dtype='int64')\n", (6660, 6691), True, 'import numpy 
as np\n'), ((9334, 9347), 'numpy.argmax', 'np.argmax', (['Q1'], {}), '(Q1)\n', (9343, 9347), True, 'import numpy as np\n')] |
import numpy as np
from RLL17code import RLL17code
from PolarCode import PolarCode
class Scheme():
    """Recording-channel coding chain: polar code -> interleaver -> RLL(1,7) -> NRZI."""

    def __init__(self, m, n, k, nc, nCodewords):
        self.n = n
        self.m = m
        self.nCodewords = nCodewords
        self.rateRLL = m / n  # rate of the RLL(1,7) modulation code
        self.rll = RLL17code()
        self.polar = PolarCode(nc, k, nCodewords)

    # ========================= Encoder ========================= #
    def encode(self, x):
        """Encode message rows x through the full chain; returns NRZI levels."""
        # --- Step 1: Polar Code (one codeword per row of x)
        polar_words = np.zeros((self.nCodewords, self.polar.n))
        for row in range(self.nCodewords):
            polar_words[row, :], _ = self.polar.encoder(x[row, :], 0, -1)
        # --- Step 2: Interleaver (column-major flatten mixes the codewords)
        interleaved = np.ndarray.flatten(polar_words.T)
        # --- Step 3: RLL(1,7) modulation
        rll_word = self.rll.encode(interleaved)
        # --- Step 4: NRZI precoding
        return self.encodeNRZI(rll_word)

    def encodeNRZI(self, rllCodeword):
        """NRZI precoder: a 1 toggles the channel level, a 0 holds it."""
        self.nrziCodeword = np.zeros(len(rllCodeword))
        level = 1
        for idx, bit in enumerate(rllCodeword):
            if bit:
                level = -level
            self.nrziCodeword[idx] = level
        return self.nrziCodeword

    # ========================= Decoder ========================= #
    def decode(self, y):
        """Invert the chain: NRZI -> RLL(1,7) -> de-interleave -> polar SC decode."""
        # --- Step 1: undo NRZI precoding
        rll_word = self.decodeNRZI(y)
        # --- Step 2: RLL(1,7) demodulation
        interleaved = self.rll.decode(rll_word)
        # --- Step 3: de-interleave back into one row per codeword
        polar_words = np.reshape(interleaved, (self.polar.n, 2)).T
        # --- Step 4: successive-cancellation polar decoding
        decoded = np.zeros((self.nCodewords, self.polar.k))
        for row in range(self.nCodewords):
            frozen = np.zeros(self.polar.n - self.polar.k)
            decoded[row, :], _ = self.polar.decodeSC(1 - 2 * polar_words[row, :], frozen)
        return decoded

    def decodeNRZI(self, y):
        """Recover the RLL bits: a level change decodes to 1, a hold to 0."""
        bits = np.zeros(len(y))
        level = 1
        for idx, sample in enumerate(y):
            if sample != level:
                bits[idx] = 1
                level = sample
            # a matching level leaves the pre-zeroed bit at 0
        return bits
| [
"RLL17code.RLL17code",
"numpy.reshape",
"PolarCode.PolarCode",
"numpy.ndarray.flatten",
"numpy.zeros"
] | [((282, 293), 'RLL17code.RLL17code', 'RLL17code', ([], {}), '()\n', (291, 293), False, 'from RLL17code import RLL17code\n'), ((316, 344), 'PolarCode.PolarCode', 'PolarCode', (['nc', 'k', 'nCodewords'], {}), '(nc, k, nCodewords)\n', (325, 344), False, 'from PolarCode import PolarCode\n'), ((503, 544), 'numpy.zeros', 'np.zeros', (['(self.nCodewords, self.polar.n)'], {}), '((self.nCodewords, self.polar.n))\n', (511, 544), True, 'import numpy as np\n'), ((717, 750), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['outputPolar.T'], {}), '(outputPolar.T)\n', (735, 750), True, 'import numpy as np\n'), ((1035, 1051), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (1043, 1051), True, 'import numpy as np\n'), ((1789, 1830), 'numpy.zeros', 'np.zeros', (['(self.nCodewords, self.polar.k)'], {}), '((self.nCodewords, self.polar.k))\n', (1797, 1830), True, 'import numpy as np\n'), ((2085, 2101), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (2093, 2101), True, 'import numpy as np\n'), ((1703, 1743), 'numpy.reshape', 'np.reshape', (['outputRLL', '(self.polar.n, 2)'], {}), '(outputRLL, (self.polar.n, 2))\n', (1713, 1743), True, 'import numpy as np\n'), ((1945, 1982), 'numpy.zeros', 'np.zeros', (['(self.polar.n - self.polar.k)'], {}), '(self.polar.n - self.polar.k)\n', (1953, 1982), True, 'import numpy as np\n')] |
import numpy
from numpy.testing import assert_allclose
from cogent3.draw.drawable import Drawable
from cogent3.maths.geometry import SimplexTransform
from cogent3.util.union_dict import UnionDict
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2019.9.13a"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Alpha"
class Simplex(Drawable):
def __init__(self, vertex_labels=None, **kwargs):
super(Simplex, self).__init__(**kwargs)
self.vertex_labels = vertex_labels
self._transformer = None
self._vertices = None
self._groups = set()
self._used_groups = set()
# points
self._points = []
self._point_hovertext = []
self._point_legendgroups = []
# segments
self._segments = []
self._segment_hovertext = []
self._segment_legendgroups = []
@property
def transformer(self):
if self._transformer is None:
self._transformer = SimplexTransform()
return self._transformer
@property
def vertices(self):
# todo when we get autoscaled, we need to combine both points and
# segments
if self._vertices is None:
self._vertices = self.transformer.q
return self._vertices
def add_point(self, probs, hovertext=None, legendgroup=None):
"""add single point in the simplex
Parameters
----------
probs : dict or DictArray
a probability vector to be displayed as a point in the simplex
hovertext
a corresponding annotation to be associated as hovertext
legendgroup
group to which the point belongs
"""
if self.vertex_labels is None:
labels = list(sorted(probs.keys()))
assert len(labels) == 4, "4-state systems only"
self.vertex_labels = labels
probs = [probs[k] for k in self.vertex_labels]
assert_allclose(sum(probs), 1.0)
self._points.append(probs)
self._point_hovertext.append(hovertext)
self._point_legendgroups.append(legendgroup)
self._groups.add(legendgroup)
def add_points(self, probs, hovertext=None, legendgroup=None):
"""adds series of points via successive calls to add_points
Parameters
----------
probs : a series of dict or DictArray objects
each element is a probability vector for display
hovertext : series or None
an equal length series to probs
legendgroup : series or None
an equal length series to probs
"""
for i in range(len(probs)):
p = probs[i]
h = None if hovertext is None else hovertext[i]
l = None if legendgroup is None else legendgroup[i]
self.add_point(p, hovertext=h, legendgroup=l)
def add_segment(self, segment, hovertext=None, legendgroup=None):
"""add series of points connected by a line
Parameters
----------
segment : series of dict or DictArray
each element of segment is a probability vector that can be
displayed as a point in the simplex
hovertext
a corresponding annotation to be associated as hovertext
legendgroup
group to which the segment belongs
"""
assert segment[0], "must provide valid data"
if self.vertex_labels is None:
labels = list(sorted(segment[0].keys()))
assert len(labels) == 4, "4-state systems only"
self.vertex_labels = labels
probs = []
for point in segment:
point = [point[k] for k in self.vertex_labels]
assert_allclose(sum(point), 1.0)
probs.append(point)
self._segments.append(probs)
self._segment_hovertext.append(hovertext)
self._segment_legendgroups.append(legendgroup)
self._groups.add(legendgroup)
def _get_frame_trace(self):
from itertools import combinations
combos = numpy.array(list(combinations(self.vertices, 2)))
trace = UnionDict(
type="scatter3d",
# Draw the edges of the Tetrahedron
name="frame",
x=combos[:, :, 0].ravel(),
y=combos[:, :, 1].ravel(),
z=combos[:, :, 2].ravel(),
marker=UnionDict(size=4, color="#1f77b4", colorscale="Viridis"),
line=UnionDict(color="#1f77b4", width=5),
mode="lines",
hoverinfo="skip",
showlegend=False,
)
return trace
def _get_vertex_label_trace(self):
trace = UnionDict(
type="scatter3d",
# Draw the vertex labels
x=self.vertices[:, 0],
y=self.vertices[:, 1],
z=self.vertices[:, 2],
marker=UnionDict(size=4, color="#1f77b4", colorscale="Viridis"),
text=self.vertex_labels,
textfont=UnionDict(size=16, family="sans serif"),
mode="markers+text",
hoverinfo="skip",
showlegend=False,
name="labels",
)
return trace
def _get_3d_scatter(
self,
data,
mode,
name=None,
legendgroup=None,
text=None,
showlegend=None,
line=None,
):
data = numpy.array(data, dtype=float)
points = numpy.array([v @ self.transformer for v in data])
scatter = UnionDict(
type="scatter3d",
x=points[:, 0],
y=points[:, 1],
z=points[:, 2],
marker=UnionDict(size=4, colorscale="Viridis"),
showlegend=showlegend,
mode=mode,
name=name,
text=text,
opacity=0.75,
legendgroup=legendgroup,
line=line,
)
return scatter
def _get_point_traces(self):
"""returns scatter 3D for points"""
data = numpy.array(self._points, dtype=float)
if any(self._point_hovertext):
hovertext = numpy.array(self._point_hovertext, dtype="O")
else:
hovertext = None
groups = set(self._point_legendgroups)
multigroup = len(groups) > 1
legendgroups = numpy.array(self._point_legendgroups, dtype="O")
traces = []
for group in groups:
name = None
if multigroup and group is None:
name = "Other"
showlegend = True
elif not multigroup:
name = None
showlegend = False
self._used_groups.add(group)
indices = legendgroups == group
group_data = data[indices, :]
if hovertext is not None:
group_text = hovertext[indices]
else:
group_text = None
trace = self._get_3d_scatter(
group_data,
mode="markers",
name=name,
legendgroup=group,
text=group_text,
showlegend=showlegend,
)
traces.append(trace)
return traces
def _get_segment_traces(self):
"""returns scatter 3D for segments"""
multigroup = len(self._groups) > 1
traces = []
for i, segment in enumerate(self._segments):
group = self._segment_legendgroups[i]
name = None
if multigroup and group is None:
name = "Other"
showlegend = True
elif not multigroup:
name = None
showlegend = False
self._used_groups.add(group)
data = numpy.array(segment, dtype=float)
traces.append(
self._get_3d_scatter(
data,
"lines+markers",
name=name,
legendgroup=group,
showlegend=showlegend,
line=UnionDict(width=3),
)
)
return traces
def _build_fig(self, **kwargs):
self.traces.extend([self._get_frame_trace(), self._get_vertex_label_trace()])
if self._points:
self.traces.extend(self._get_point_traces())
if self._segments:
self.traces.extend(self._get_segment_traces())
# Layout attributes for plotly
axis_range = [self.vertices.min(), self.vertices.max()]
layout = UnionDict(
scene=UnionDict(
xaxis=UnionDict(title="x", visible=False, range=axis_range),
yaxis=UnionDict(title="y", visible=False, range=axis_range),
zaxis=UnionDict(title="z", visible=False, range=axis_range),
),
autosize=False,
)
self.layout |= layout
| [
"itertools.combinations",
"numpy.array",
"cogent3.util.union_dict.UnionDict",
"cogent3.maths.geometry.SimplexTransform"
] | [((5495, 5525), 'numpy.array', 'numpy.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (5506, 5525), False, 'import numpy\n'), ((5543, 5594), 'numpy.array', 'numpy.array', (['[(v @ self.transformer) for v in data]'], {}), '([(v @ self.transformer) for v in data])\n', (5554, 5594), False, 'import numpy\n'), ((6112, 6150), 'numpy.array', 'numpy.array', (['self._points'], {'dtype': 'float'}), '(self._points, dtype=float)\n', (6123, 6150), False, 'import numpy\n'), ((6410, 6458), 'numpy.array', 'numpy.array', (['self._point_legendgroups'], {'dtype': '"""O"""'}), "(self._point_legendgroups, dtype='O')\n", (6421, 6458), False, 'import numpy\n'), ((1106, 1124), 'cogent3.maths.geometry.SimplexTransform', 'SimplexTransform', ([], {}), '()\n', (1122, 1124), False, 'from cogent3.maths.geometry import SimplexTransform\n'), ((6214, 6259), 'numpy.array', 'numpy.array', (['self._point_hovertext'], {'dtype': '"""O"""'}), "(self._point_hovertext, dtype='O')\n", (6225, 6259), False, 'import numpy\n'), ((7849, 7882), 'numpy.array', 'numpy.array', (['segment'], {'dtype': 'float'}), '(segment, dtype=float)\n', (7860, 7882), False, 'import numpy\n'), ((4202, 4232), 'itertools.combinations', 'combinations', (['self.vertices', '(2)'], {}), '(self.vertices, 2)\n', (4214, 4232), False, 'from itertools import combinations\n'), ((4502, 4558), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'size': '(4)', 'color': '"""#1f77b4"""', 'colorscale': '"""Viridis"""'}), "(size=4, color='#1f77b4', colorscale='Viridis')\n", (4511, 4558), False, 'from cogent3.util.union_dict import UnionDict\n'), ((4577, 4612), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'color': '"""#1f77b4"""', 'width': '(5)'}), "(color='#1f77b4', width=5)\n", (4586, 4612), False, 'from cogent3.util.union_dict import UnionDict\n'), ((4989, 5045), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'size': '(4)', 'color': '"""#1f77b4"""', 'colorscale': '"""Viridis"""'}), "(size=4, 
color='#1f77b4', colorscale='Viridis')\n", (4998, 5045), False, 'from cogent3.util.union_dict import UnionDict\n'), ((5105, 5144), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'size': '(16)', 'family': '"""sans serif"""'}), "(size=16, family='sans serif')\n", (5114, 5144), False, 'from cogent3.util.union_dict import UnionDict\n'), ((5755, 5794), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'size': '(4)', 'colorscale': '"""Viridis"""'}), "(size=4, colorscale='Viridis')\n", (5764, 5794), False, 'from cogent3.util.union_dict import UnionDict\n'), ((8149, 8167), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'width': '(3)'}), '(width=3)\n', (8158, 8167), False, 'from cogent3.util.union_dict import UnionDict\n'), ((8698, 8751), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'title': '"""x"""', 'visible': '(False)', 'range': 'axis_range'}), "(title='x', visible=False, range=axis_range)\n", (8707, 8751), False, 'from cogent3.util.union_dict import UnionDict\n'), ((8775, 8828), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'title': '"""y"""', 'visible': '(False)', 'range': 'axis_range'}), "(title='y', visible=False, range=axis_range)\n", (8784, 8828), False, 'from cogent3.util.union_dict import UnionDict\n'), ((8852, 8905), 'cogent3.util.union_dict.UnionDict', 'UnionDict', ([], {'title': '"""z"""', 'visible': '(False)', 'range': 'axis_range'}), "(title='z', visible=False, range=axis_range)\n", (8861, 8905), False, 'from cogent3.util.union_dict import UnionDict\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""tests for type_utils.py"""
import numpy as np
from qiskit.providers.aer.pulse.de.type_utils import (convert_state,
type_spec_from_instance,
StateTypeConverter)
from ...common import QiskitAerTestCase
class TestTypeUtils(QiskitAerTestCase):
def test_convert_state(self):
"""Test convert_state"""
type_spec = {'type': 'array', 'shape': (4,)}
y = np.array([[1, 2],[3, 4]])
expected = np.array([1, 2, 3, 4])
self.assertAlmostEqual(convert_state(y, type_spec), expected)
type_spec = {'type': 'array'}
y = [[1, 2], [3, 4]]
expected = np.array([[1, 2],[3, 4]])
self.assertAlmostEqual(convert_state(y, type_spec), expected)
def test_type_spec_from_instance(self):
"""Test type_spec_from_instance"""
y = np.array([1, 2, 3, 4])
type_spec = type_spec_from_instance(y)
self.assertEqual(type_spec, {'type': 'array', 'shape': (4,)})
y = np.array([[1, 2], [3, 4], [5, 6]])
type_spec = type_spec_from_instance(y)
self.assertEqual(type_spec, {'type': 'array', 'shape': (3, 2)})
def test_converter_inner_outer(self):
"""Test standard constructor of StateTypeConverter along with basic state conversion
functions"""
inner_spec = {'type': 'array', 'shape': (4,)}
outer_spec = {'type': 'array', 'shape': (2,2)}
converter = StateTypeConverter(inner_spec, outer_spec)
y_in = np.array([1,2,3,4])
y_out = np.array([[1, 2], [3, 4]])
convert_out = converter.inner_to_outer(y_in)
convert_in = converter.outer_to_inner(y_out)
self.assertAlmostEqual(convert_out, y_out)
self.assertAlmostEqual(convert_in, y_in)
def test_from_instances(self):
"""Test from_instances constructor"""
inner_y = np.array([1, 2, 3, 4])
outer_y = np.array([[1, 2], [3, 4]])
converter = StateTypeConverter.from_instances(inner_y, outer_y)
self.assertEqual(converter.inner_type_spec, {'type': 'array', 'shape': (4,)})
self.assertEqual(converter.outer_type_spec, {'type': 'array', 'shape': (2,2)})
converter = StateTypeConverter.from_instances(inner_y)
self.assertEqual(converter.inner_type_spec, {'type': 'array', 'shape': (4,)})
self.assertEqual(converter.outer_type_spec, {'type': 'array', 'shape': (4,)})
def test_from_outer_instance_inner_type_spec(self):
"""Test from_outer_instance_inner_type_spec constructor"""
# test case for inner type spec with 1d array
inner_type_spec = {'type': 'array', 'ndim': 1}
outer_y = np.array([[1, 2], [3, 4]])
converter = StateTypeConverter.from_outer_instance_inner_type_spec(outer_y, inner_type_spec)
self.assertEqual(converter.inner_type_spec, {'type': 'array', 'shape': (4,)})
self.assertEqual(converter.outer_type_spec, {'type': 'array', 'shape': (2,2)})
# inner type spec is a generic array
inner_type_spec = {'type': 'array'}
outer_y = np.array([[1, 2], [3, 4]])
converter = StateTypeConverter.from_outer_instance_inner_type_spec(outer_y, inner_type_spec)
self.assertEqual(converter.inner_type_spec, {'type': 'array', 'shape': (2,2)})
self.assertEqual(converter.outer_type_spec, {'type': 'array', 'shape': (2,2)})
def test_transform_rhs_funcs(self):
"""Test rhs function conversion"""
inner_spec = {'type': 'array', 'shape': (4,)}
outer_spec = {'type': 'array', 'shape': (2,2)}
converter = StateTypeConverter(inner_spec, outer_spec)
# do matrix multiplication (a truly '2d' operation)
def rhs(t, y):
return t * (y @ y)
rhs_funcs = {'rhs': rhs}
new_rhs_funcs = converter.transform_rhs_funcs(rhs_funcs)
test_t = np.pi
y_2d = np.array([[1, 2], [3, 4]])
y_1d = y_2d.flatten()
expected_output = rhs(test_t, y_2d).flatten()
output = new_rhs_funcs['rhs'](test_t, y_1d)
self.assertAlmostEqual(output, expected_output)
def assertAlmostEqual(self, A, B, tol=10**-15):
self.assertTrue(np.abs(A - B).max() < tol)
| [
"qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter",
"qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter.from_outer_instance_inner_type_spec",
"numpy.abs",
"qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter.from_instances",
"qiskit.providers.aer.pulse.de.type_utils.type_spec_fro... | [((974, 1000), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (982, 1000), True, 'import numpy as np\n'), ((1019, 1041), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1027, 1041), True, 'import numpy as np\n'), ((1200, 1226), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1208, 1226), True, 'import numpy as np\n'), ((1398, 1420), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1406, 1420), True, 'import numpy as np\n'), ((1441, 1467), 'qiskit.providers.aer.pulse.de.type_utils.type_spec_from_instance', 'type_spec_from_instance', (['y'], {}), '(y)\n', (1464, 1467), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((1552, 1586), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (1560, 1586), True, 'import numpy as np\n'), ((1607, 1633), 'qiskit.providers.aer.pulse.de.type_utils.type_spec_from_instance', 'type_spec_from_instance', (['y'], {}), '(y)\n', (1630, 1633), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((1994, 2036), 'qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter', 'StateTypeConverter', (['inner_spec', 'outer_spec'], {}), '(inner_spec, outer_spec)\n', (2012, 2036), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((2053, 2075), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2061, 2075), True, 'import numpy as np\n'), ((2089, 2115), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2097, 2115), True, 'import numpy as np\n'), ((2425, 2447), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2433, 2447), True, 'import numpy as 
np\n'), ((2466, 2492), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2474, 2492), True, 'import numpy as np\n'), ((2514, 2565), 'qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter.from_instances', 'StateTypeConverter.from_instances', (['inner_y', 'outer_y'], {}), '(inner_y, outer_y)\n', (2547, 2565), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((2761, 2803), 'qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter.from_instances', 'StateTypeConverter.from_instances', (['inner_y'], {}), '(inner_y)\n', (2794, 2803), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((3229, 3255), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3237, 3255), True, 'import numpy as np\n'), ((3277, 3362), 'qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter.from_outer_instance_inner_type_spec', 'StateTypeConverter.from_outer_instance_inner_type_spec', (['outer_y', 'inner_type_spec'], {}), '(outer_y, inner_type_spec\n )\n', (3331, 3362), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((3640, 3666), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3648, 3666), True, 'import numpy as np\n'), ((3688, 3773), 'qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter.from_outer_instance_inner_type_spec', 'StateTypeConverter.from_outer_instance_inner_type_spec', (['outer_y', 'inner_type_spec'], {}), '(outer_y, inner_type_spec\n )\n', (3742, 3773), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((4158, 4200), 'qiskit.providers.aer.pulse.de.type_utils.StateTypeConverter', 'StateTypeConverter', (['inner_spec', 'outer_spec'], {}), '(inner_spec, outer_spec)\n', (4176, 4200), 
False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((4454, 4480), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (4462, 4480), True, 'import numpy as np\n'), ((1074, 1101), 'qiskit.providers.aer.pulse.de.type_utils.convert_state', 'convert_state', (['y', 'type_spec'], {}), '(y, type_spec)\n', (1087, 1101), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((1258, 1285), 'qiskit.providers.aer.pulse.de.type_utils.convert_state', 'convert_state', (['y', 'type_spec'], {}), '(y, type_spec)\n', (1271, 1285), False, 'from qiskit.providers.aer.pulse.de.type_utils import convert_state, type_spec_from_instance, StateTypeConverter\n'), ((4752, 4765), 'numpy.abs', 'np.abs', (['(A - B)'], {}), '(A - B)\n', (4758, 4765), True, 'import numpy as np\n')] |
# -*- coding: UTF8 -*-
"""
container class for results
author: <NAME>
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import logging
import numpy as np
from evo import EvoException
logger = logging.getLogger(__name__)
class ResultException(EvoException):
pass
class Result(object):
def __init__(self):
self.info = {}
self.stats = {}
self.np_arrays = {}
self.trajectories = {}
def __str__(self):
return self.pretty_str(stats=True)
def __eq__(self, other):
if not isinstance(other, Result):
return False
equal = (self.info == other.info)
equal &= (self.stats == other.stats)
equal &= (self.trajectories == other.trajectories)
for k in self.np_arrays:
if k not in other.np_arrays:
equal &= False
break
if not equal:
break
equal &= all(
[np.array_equal(self.np_arrays[k], other.np_arrays[k])])
return equal
def __ne__(self, other):
return not self == other
def pretty_str(self, title=True, stats=True, info=False):
p_str = ""
p_str += "{}\n\n".format(self.info["title"]) if title else ""
if stats:
for name, val in sorted(self.stats.items()):
p_str += "{:>10}\t{:.6f}\n".format(name, val)
if info:
for name, val in sorted(self.info.items()):
p_str += "{:>10}\t{}\n".format(name, val)
return p_str
def add_np_array(self, name, array):
self.np_arrays[name] = array
def add_info(self, info_dict):
self.info.update(info_dict)
def add_stats(self, stats_dict):
self.stats.update(stats_dict)
def add_trajectory(self, name, traj):
self.trajectories[name] = traj
def merge_results(results):
if not results or not all(isinstance(r, Result) for r in results):
raise ValueError("no results to merge")
if len(results) == 1:
return results[0]
# Check if all results share keys for "stats" and "np_arrays" dicts.
dict_lists = [[r.np_arrays for r in results], [r.stats for r in results]]
for dicts in dict_lists:
if not all(a.keys() == b.keys() for a, b in zip(dicts, dicts[1:])):
raise ResultException("can't merge results with non-matching keys")
# Determine merge strategy:
strategy = "average"
length_lists = [[a.size for a in r.np_arrays.values()] for r in results]
if not all(a == b for a, b in zip(length_lists, length_lists[1:])):
logger.warning("Appending raw value arrays due to different lengths.")
strategy = "append"
else:
logger.info("Averaging raw values of input results in merged result.")
merged_result = copy.deepcopy(results[0])
logger.warning("Using info dict of first result.")
for result in results[1:]:
merged_result.stats = {
key: ((merged_result.stats[key] + result.stats[key]) / 2)
for key in merged_result.stats
}
for key, array in merged_result.np_arrays.items():
if strategy == "average":
merged_result.np_arrays[key] = np.mean(
(array, result.np_arrays[key]), axis=0)
elif strategy == "append":
merged_result.np_arrays[key] = np.append(
array, result.np_arrays[key])
return merged_result
| [
"logging.getLogger",
"numpy.mean",
"numpy.append",
"numpy.array_equal",
"copy.deepcopy"
] | [((805, 832), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (822, 832), False, 'import logging\n'), ((3415, 3440), 'copy.deepcopy', 'copy.deepcopy', (['results[0]'], {}), '(results[0])\n', (3428, 3440), False, 'import copy\n'), ((3826, 3873), 'numpy.mean', 'np.mean', (['(array, result.np_arrays[key])'], {'axis': '(0)'}), '((array, result.np_arrays[key]), axis=0)\n', (3833, 3873), True, 'import numpy as np\n'), ((1563, 1616), 'numpy.array_equal', 'np.array_equal', (['self.np_arrays[k]', 'other.np_arrays[k]'], {}), '(self.np_arrays[k], other.np_arrays[k])\n', (1577, 1616), True, 'import numpy as np\n'), ((3981, 4020), 'numpy.append', 'np.append', (['array', 'result.np_arrays[key]'], {}), '(array, result.np_arrays[key])\n', (3990, 4020), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
import os
import math
import foolbox
import scipy
import matplotlib.pyplot as plt
from PIL import Image
#Utilizes the FoolBox Python library (https://github.com/bethgelab/foolbox) to implement a variety
#of adversarial attacks against deep-learning models implemented in TensorFlow's Core API
class parent_attack:
    """Base class for evaluating a TensorFlow model's resistance to a Foolbox attack.

    Sub-classes customise the attack by overriding the class attributes
    ``attack_method``, ``foolbox_distance_metric`` and ``attack_type_dir``;
    the shared machinery (weight restoration, batching, running the attack,
    recording distances and optionally saving adversarial images) lives here.
    All configuration is passed in through a single ``attack_dic`` dictionary.
    """
    def __init__(self, attack_dic,
                criterion=foolbox.criteria.Misclassification()):
        #NOTE(review): this default criterion instance is created once at class-definition
        #time and shared across all instances that rely on the default; Misclassification
        #appears to be stateless in foolbox, so this should be harmless -- confirm on upgrade
        self.model_prediction_function = attack_dic['model_prediction_function']
        self.model_weights = attack_dic['model_weights']
        self.var_list = attack_dic['var_list']
        self.weights_dic = attack_dic['weights_dic']
        self.biases_dic = attack_dic['biases_dic']
        self.input_data = attack_dic['input_data']
        self.input_labels = attack_dic['input_labels']
        self.input_placeholder = attack_dic['input_placeholder']
        self.dropout_rate_placeholder = attack_dic['dropout_rate_placeholder']
        self.output_directory = attack_dic['output_directory']
        self.num_attack_examples = attack_dic['num_attack_examples']
        self.dynamic_dic = attack_dic['dynamic_dic'] #Determines if e.g. a network section is ablated, or noise is added to the logits
        self.batch_size = attack_dic['batch_size']
        self.save_images = attack_dic['save_images']
        self.estimate_gradients = attack_dic['estimate_gradients']
        self.adver_model = attack_dic['adver_model'] #If not None, an adversarially-trained model object exposing `pre_softmax`
        self.adver_checkpoint = attack_dic['adver_checkpoint'] #Checkpoint path used when adver_model is provided
        self.criterion = criterion #note by default this is simply foolbox's Misclassification criterion
    #Define the class attribute, attack_method, to be the Blended Uniform Noise attack by default
    attack_method = foolbox.attacks.BlendedUniformNoiseAttack
    foolbox_distance_metric = foolbox.distances.MeanSquaredDistance
    attack_type_dir = 'Parent_*_not_advised_*_'
    def evaluate_resistance(self):
        """Run the configured attack on ``num_attack_examples`` inputs.

        Restores the model weights into a fresh TF1 session, wraps the model
        for Foolbox, runs the attack batch-by-batch, and returns a 4-tuple:
        (adversary_found, adversary_distance, adversaries_array, adversary_labels).
        Unsuccessful attacks are recorded with distance ``np.inf``.
        """
        if self.adver_model == None:
            #Standard model: build the graph from the supplied prediction function
            logits, _ = self.model_prediction_function(self.input_placeholder, self.dropout_rate_placeholder, self.weights_dic, self.biases_dic, self.dynamic_dic)
            saver = tf.train.Saver(self.var_list) #Define saver object for use later when loading the model weights
        else:
            #Adversarially-trained model: the graph already exists, so save/restore all variables
            saver = tf.train.Saver()
        self.mk_dir()
        #The `with` block makes this session current; foolbox's TensorFlowModel relies on it
        with tf.Session() as session:
            #Define the foolbox model
            if self.adver_model == None:
                print("\nEvaluating a non-adversarially trained model")
                saver.restore(session, self.model_weights) #Note when restoring weights its important not to run init on the same
                #variables, as this will over-write the learned weights with randomly initialized ones
                fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, logits, (0,1))
            else:
                print("\nEvaluating an adversarially trained model")
                saver.restore(session, self.adver_checkpoint)
                fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, self.adver_model.pre_softmax, (0,1))
            #Wrap the model to enable estimated gradients if desired
            if self.estimate_gradients == True:
                print("\nUsing a model with *estimated* gradients.")
                estimator = foolbox.gradient_estimators.CoordinateWiseGradientEstimator(epsilon=0.01)
                fmodel = foolbox.models.ModelWithEstimatedGradients(fmodel, gradient_estimator=estimator)
                #The default CoordinateWiseGradientEstimator estimator is the same used in the Schott et al, 2019 ABS paper from ICLR
            print("\nPerforming " + self.attack_type_dir + " attack")
            print("Evaluating " + str(self.num_attack_examples) + " adversarial example(s)")
            #Arrays for storing results of the evaluation
            adversary_found = np.zeros([self.num_attack_examples]) #array of booleans that indicates if an adversary was found for a particular image
            adversary_distance = np.zeros([self.num_attack_examples])
            #One slot per example, shaped like the input images (num_examples, *image_shape)
            adversaries_array = np.zeros(np.concatenate(([self.num_attack_examples], self.input_data.shape[1:])))
            adversary_labels = []
            self.attack_specification(fmodel)
            #Iterate over ceil(num_examples/batch_size) batches; the final batch may be short
            for batch_iter in range(math.ceil(self.num_attack_examples/self.batch_size)):
                execution_batch_data = self.input_data[batch_iter*self.batch_size:min((batch_iter+1)*self.batch_size, self.num_attack_examples)]
                execution_batch_labels = np.argmax(self.input_labels[batch_iter*self.batch_size:min((batch_iter+1)*self.batch_size, self.num_attack_examples)], axis=1)
                #Carry out the attack
                adversarial_images, batch_adversary_labels = self.create_adversarial(execution_batch_data, execution_batch_labels)
                adversary_labels.extend(batch_adversary_labels)
                #Process results of the batched attack
                for example_iter in range(execution_batch_data.shape[0]):
                    #foolbox returns None (or NaNs) when no adversarial was found for an image
                    if np.any(adversarial_images[example_iter] == None) or np.all(np.isnan(adversarial_images[example_iter])):
                        print("\nNo adversarial image found - attack returned None or array of NaNs\n")
                        #As in Schott, 2019 et al, the distance of an unsuccessful attack is recorded as infinity
                        adversary_distance[batch_iter*self.batch_size + example_iter] = np.inf
                    else:
                        adversary_found, adversary_distance, adversaries_array = self.store_data(adversary_found, adversary_distance, adversaries_array,
                            execution_batch_data[example_iter], execution_batch_labels[example_iter], adversarial_images[example_iter], batch_iter*self.batch_size + example_iter, fmodel)
            adversary_labels = np.asarray(adversary_labels)
            return adversary_found, adversary_distance, adversaries_array, adversary_labels
    def attack_specification(self, fmodel):
        #Instantiate the concrete foolbox attack object for the wrapped model
        self.attack_fmodel = self.attack_method(model=fmodel, criterion=self.criterion, distance=self.foolbox_distance_metric)
    #Make the attack directory for storing results
    def mk_dir(self):
        """Create 'adversarial_images/<output_dir>/<attack_dir>/' if it does not exist.

        Each mkdir is wrapped in try/except OSError so that concurrent runs or
        pre-existing directories do not abort the evaluation.
        """
        if os.path.exists('adversarial_images/' + self.output_directory + '/' + self.attack_type_dir + '/') == 0:
            try:
                os.mkdir('adversarial_images/' + self.output_directory + '/')
            except OSError:
                pass
            try:
                os.mkdir('adversarial_images/' + self.output_directory + '/' + self.attack_type_dir + '/')
            except OSError:
                pass
    def create_adversarial(self, execution_data, execution_label):
        """Run the attack on one batch; returns (adversarial_images, adversary_labels).

        With unpack=False foolbox returns Adversarial objects, from which the
        perturbed images and the classes they were misclassified as are extracted.
        """
        adversarials = self.attack_fmodel(execution_data, execution_label, unpack=False)
        adversary_labels = np.asarray([a.adversarial_class for a in adversarials])
        adversarial_images = np.asarray([a.perturbed for a in adversarials])
        return adversarial_images, adversary_labels
    def store_data(self, adversary_found, adversary_distance, adversaries_array, execution_data, execution_label, adversarial_image, results_iter, fmodel):
        """Record one successful adversarial example and (optionally) save it as a PNG.

        Mutates and returns the three accumulator arrays; ``results_iter`` is the
        example's global index across all batches.
        """
        adversaries_array[results_iter] = adversarial_image
        #Note only 10 adversarial images are saved for a given attack to reduce memory issues
        if self.save_images == True and (results_iter<10):
            #NOTE(review): if the channel dimension is neither 3 nor 1, image_to_png is
            #never bound and the imsave below would raise NameError -- confirm inputs
            #are always RGB or greyscale
            if adversarial_image.shape[2] == 3:
                image_to_png = adversarial_image
            elif adversarial_image.shape[2] == 1:
                image_to_png = np.squeeze(adversarial_image, axis=2) #Remove last dimension if saving to greyscale
            plt.imsave('adversarial_images/' + self.output_directory + '/' +
                self.attack_type_dir + '/AttackNum' + str(results_iter) + '_Predicted' + str(np.argmax(fmodel.forward(adversarial_image[None, :, :, :]))) +
                '_GroundTruth' + str(execution_label) + '.png', image_to_png)
        print("The classification label following attack is " + str(np.argmax(fmodel.forward(adversarial_image[None]))) + " from an original classification of " + str(execution_label))
        distance, distance_name = self.distance_metric(execution_data.flatten(), adversarial_image.flatten())
        print("The " + distance_name + " distance of the adversary is " + str(distance))
        adversary_found[results_iter] = 1
        adversary_distance[results_iter] = distance
        return adversary_found, adversary_distance, adversaries_array
    def distance_metric(self, vector1, vector2):
        #Euclidean (L-2) distance between the flattened clean and adversarial images
        distance = scipy.spatial.distance.euclidean(vector1, vector2)
        distance_name = 'Euclidean (L-2)'
        return distance, distance_name
class check_stochasticity(parent_attack):
    #Performs checks to ensure there are no unintended stochastic elements (e.g. due to numerical issues) in a model's predictions in foolbox
    def perform_check(self):
        """Assert that repeated forward passes on identical inputs give identical logits.

        For each of ``num_attack_examples`` images, runs the model 10 times and
        asserts every stored logit row equals the most recent prediction; raises
        AssertionError if any run differs.
        """
        logits, _ = self.model_prediction_function(self.input_placeholder, self.dropout_rate_placeholder, self.weights_dic, self.biases_dic, self.dynamic_dic)
        saver = tf.train.Saver(self.var_list)
        with tf.Session() as session:
            saver.restore(session, self.model_weights)
            fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, logits, (0,1))
            print('Checking the models performance on multiple runs of the same images')
            for example_iter in range(self.num_attack_examples):
                execution_data = self.input_data[example_iter, :, :, :]
                logits_list = []
                labels_list = []  #NOTE(review): collected nowhere below -- appears unused
                #Check the same image with multiple runs
                for ii in range(10):
                    #Return the logits and label of the model
                    predicted_logits = fmodel.forward(execution_data[None,:,:,:])
                    #forward() returns shape (1, num_classes); extend appends the single row,
                    #so logits_list holds one (num_classes,) row per completed run
                    logits_list.extend(predicted_logits)
                    #Check every element is equivalent to the most recent prediction
                    #(the (k, C) list broadcasts against the (1, C) latest prediction)
                    assert np.all(logits_list == np.asarray(predicted_logits)), "***Some of the logits are changing stochastically***"
            print("No stochastic elements identified")
class transfer_attack_L2(parent_attack):
    #Overwrite parent constructor for two additional attributes : starting_adversaries, epsilon_step_size, and max_iterations
    def __init__(self, attack_dic,
                 starting_adversaries,
                 epsilon_step_size=0.01,
                 max_iterations=1000):
        parent_attack.__init__(self, attack_dic)
        self.starting_adversaries = starting_adversaries  # adversaries generated on a surrogate model; shape indexed [base_method, example]
        self.epsilon_step_size = epsilon_step_size
        self.max_iterations = max_iterations

    attack_type_dir = 'Transfer_L2'

    #Overwrite evaluate_resistance method with one that finds minimal transfer-attack images
    def evaluate_resistance(self):
        """For every example, try all four surrogate-generated starting
        adversaries and return the minimal L-2 perturbation distance found."""
        if self.adver_model == None:
            logits, _ = self.model_prediction_function(self.input_placeholder, self.dropout_rate_placeholder, self.weights_dic, self.biases_dic, self.dynamic_dic)
            saver = tf.train.Saver(self.var_list) #Define saver object for use later when loading the model weights
        else:
            saver = tf.train.Saver()

        self.mk_dir()

        with tf.Session() as session:
            #Define the foolbox model
            if self.adver_model == None:
                print("\nEvaluating a non-adversarially trained model")
                saver.restore(session, self.model_weights) #Note when restoring weights its important not to run init on the same
                #variables, as this will over-write the learned weights with randomly initialized ones
                fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, logits, (0,1))
            else:
                print("\nEvaluating an adversarially trained model")
                saver.restore(session, self.adver_checkpoint)
                fmodel = foolbox.models.TensorFlowModel(self.input_placeholder, self.adver_model.pre_softmax, (0,1))

            print("\nPerforming a Transfer attack")
            print("Evaluating " + str(self.num_attack_examples) + " adversarial example(s)")

            #Arrays for storing results of the evaluation
            adversary_distance = np.zeros([4, self.num_attack_examples])

            for example_iter in range(self.num_attack_examples):
                print("Transfer attack number " + str(example_iter))
                #Iterate through the four different starting points for generating adversaries (two different gradient
                # based attacks for each of the two main architecture types --> binding or not binding); the minimally-perturbed attack will be returned
                for base_method_iter in range(4):
                    adversary_distance = self.iterative_perturbation(fmodel, adversary_distance, example_iter, base_method_iter, unperturbed_image=self.input_data[example_iter],
                        ground_truth_label=self.input_labels[example_iter], starting_adversary=self.starting_adversaries[base_method_iter, example_iter])
                    print("Method " + str(base_method_iter) + " distance is " + str(adversary_distance[base_method_iter, example_iter]))

        #Of all images generated from the base attack types, select the minimally perturbed image for each example
        adversary_distance = adversary_distance.min(axis=0)

        return adversary_distance

    def iterative_perturbation(self, fmodel, adversary_distance, example_iter, base_method_iter, unperturbed_image, ground_truth_label, starting_adversary):
        """Binary-search along the line from the clean image to the surrogate
        adversary for the smallest perturbation that still fools the target
        model; store the resulting distance in adversary_distance."""
        # NOTE(review): epsilon and current_iteration are never used below —
        # apparently left over from an earlier step-wise search implementation.
        epsilon = 0.0
        current_iteration = 1

        #First check if the base attack method failed on the surrogate model
        #If so, see if the target model correctly classifies it, in which case it is a failed attack, or otherwise it is a successful attack with distance 0
        if np.any(starting_adversary == None) or np.all(np.isnan(starting_adversary)):
            if (np.argmax(fmodel.forward(unperturbed_image[None])) == np.argmax(ground_truth_label)):
                print("Base attack failed, and target model correctly classified image.")
                adversary_distance[base_method_iter, example_iter] = np.inf
            else:
                print("Base attack failed, but target model misclassified image.")
                adversary_distance[base_method_iter, example_iter] = 0
        else:
            #Begin with an *unperturbed* image, as this may already be enough to fool the target model
            transfer_perturbed = unperturbed_image
            print("Original classification is " + str(np.argmax(fmodel.forward(transfer_perturbed[None]))))
            print("Ground truth label is " + str(np.argmax(ground_truth_label)))

            # Binary search for transfer attack as used in Schott et al; based on code provided by <NAME> (Bethge Lab)
            direction = starting_adversary - unperturbed_image  # line from clean image towards surrogate adversary
            bad = 0        # largest epsilon known NOT to fool the target model
            good = None    # smallest epsilon known to fool the target model
            epsilon_binary = 1
            k = 10         # number of search iterations for each phase

            #Rapidly identify starting point for binary search
            # (exponential growth of epsilon until the target model is fooled)
            for _ in range(k):
                transfer_perturbed = unperturbed_image + epsilon_binary * direction
                transfer_perturbed = np.clip(transfer_perturbed, 0, 1)
                print("Epsilon is " + str(epsilon_binary))
                if (np.argmax(fmodel.forward(transfer_perturbed[None])) != np.argmax(ground_truth_label)):
                    good = epsilon_binary
                    break
                else:
                    bad = epsilon_binary
                    epsilon_binary *= 2

            print("After exponential binary search, the classification is " + str(np.argmax(fmodel.forward(transfer_perturbed[None]))))

            if np.argmax(fmodel.forward(transfer_perturbed[None])) == np.argmax(ground_truth_label):
                # No epsilon up to 2**k fooled the target model; record failure
                print("Exponential search failed")
                adversary_distance[base_method_iter, example_iter] = np.inf
                print("The distance is " + str(adversary_distance[base_method_iter, example_iter]))
            else:
                # Standard bisection between the last failing and first
                # succeeding epsilon to minimise the perturbation
                for _ in range(k):
                    epsilon_binary = (good + bad) / 2.
                    transfer_perturbed = unperturbed_image + epsilon_binary * direction
                    transfer_perturbed = np.clip(transfer_perturbed, 0, 1)
                    if (np.argmax(fmodel.forward(transfer_perturbed[None])) != np.argmax(ground_truth_label)):
                        good = epsilon_binary
                    else:
                        bad = epsilon_binary
                adversary_distance[base_method_iter, example_iter], _ = self.distance_metric(unperturbed_image.flatten(), transfer_perturbed.flatten())
                print("After standard binary search, the classification is " + str(np.argmax(fmodel.forward(transfer_perturbed[None]))))
                print("The distance is " + str(adversary_distance[base_method_iter, example_iter]))

        return adversary_distance
#*** L-0 Distance Attacks ***
class pointwise_attack_L0(parent_attack):
    """Pointwise attack evaluated under the L-0 (Hamming) distance."""

    attack_method = foolbox.attacks.PointwiseAttack
    #This is the distance metric used during optimization by FoolBox attacks
    foolbox_distance_metric = foolbox.distances.L0
    attack_type_dir = 'Pointwise_L0'

    #This is the distance metric used to evalute the final distances of the returned images from the original
    def distance_metric(self, vector1, vector2):
        """Return the number of differing elements and the metric's name."""
        # scipy reports the *fraction* of positions that differ, so scale by
        # the vector length to obtain a count of changed pixels.
        differing_fraction = scipy.spatial.distance.hamming(vector1, vector2)
        return differing_fraction * len(vector1), 'Hamming (L-0)'
class salt_pepper_attack(pointwise_attack_L0):
    #Inherit the attributes of the pointwise_attack_L0 class (including the
    #Hamming/L-0 distance metric), then overwrite the attack method
    attack_method = foolbox.attacks.SaltAndPepperNoiseAttack
    attack_type_dir = 'Salt_and_Pepper'
#*** L-2 Distance Attacks ***
class blended_noise_attack(parent_attack):
    #As the parent attack already uses the blended uniform noise attack by
    #default, only the output sub-directory needs to be set here
    attack_type_dir = 'Blended_noise'
class gaussian_noise_attack(parent_attack):
    #Additive Gaussian noise attack; inherits the evaluation logic and the
    #Euclidean (L-2) distance metric from parent_attack
    attack_method = foolbox.attacks.AdditiveGaussianNoiseAttack
    attack_type_dir = 'Gaussian_noise'
class pointwise_attack_L2(parent_attack):
    #Note this version of the point-wise attack inherits the L2 distance metric from the parent class
    attack_method = foolbox.attacks.PointwiseAttack
    attack_type_dir = 'Pointwise_L2'
class FGM_attack(parent_attack):
    # NOTE(review): despite the FGM (fast *gradient* method) name, this class
    # uses foolbox's GradientSignAttack (i.e. FGSM, an L-inf attack). The
    # commented-out GradientAttack below suggests the plain-gradient (L-2)
    # variant may have been intended -- confirm before relying on the naming.
    attack_method = foolbox.attacks.GradientSignAttack
    attack_type_dir = 'FGM'

    # attack_method = foolbox.attacks.GradientAttack
    # attack_type_dir = 'FGM'
class BIM_L2_attack(parent_attack):
    #Basic Iterative Method optimised under the L-2 norm; inherits the
    #Euclidean distance metric from parent_attack
    attack_method = foolbox.attacks.L2BasicIterativeAttack
    attack_type_dir = 'BIM_L2'
class DeepFool_L2_attack(parent_attack):
    #DeepFool attack optimised under the L-2 norm; inherits the Euclidean
    #distance metric from parent_attack
    attack_method = foolbox.attacks.DeepFoolL2Attack
    attack_type_dir = 'DeepFool_L2'
class boundary_attack(parent_attack):
    """Decision-based boundary attack, run for an explicit iteration budget.

    Unlike gradient-based attacks it only needs the model's decisions, so the
    number of iterations and the logging interval are exposed as parameters.
    """

    attack_method = foolbox.attacks.BoundaryAttack
    attack_type_dir = 'Boundary'

    #Overwrite parent constructor for two additional attributes : num_iterations and log_every_n_steps
    def __init__(self, attack_dic,
                 criterion=None,
                 num_iterations=50,
                 log_every_n_steps=50):
        # Fix: the original default `criterion=foolbox.criteria.Misclassification()`
        # was evaluated once at class-definition time and shared across every
        # instance; the None sentinel creates a fresh criterion per instance
        # while keeping the call signature backward compatible.
        if criterion is None:
            criterion = foolbox.criteria.Misclassification()
        parent_attack.__init__(self, attack_dic,
                               criterion)
        self.num_iterations = num_iterations
        self.log_every_n_steps = log_every_n_steps

    #Overwrite create adversarial method, as the boundary attack takes a specified number of iterations
    def create_adversarial(self, execution_data, execution_label):
        """Run the batched boundary attack and unpack per-example results.

        Returns (adversarial_images, adversary_labels) as numpy arrays;
        entries may be None when foolbox fails to find an adversary, which
        callers check for (see the None/NaN handling elsewhere in this file).
        """
        adversarials = self.attack_fmodel(execution_data, execution_label, iterations=self.num_iterations,
                                          log_every_n_steps=self.log_every_n_steps, verbose=False, unpack=False)
        adversary_labels = np.asarray([a.adversarial_class for a in adversarials])
        adversarial_images = np.asarray([a.perturbed for a in adversarials])
        return adversarial_images, adversary_labels
#*** L-Inf Distance Attacks ***
class transfer_attack_LInf(transfer_attack_L2):
    """Transfer attack evaluated under the L-infinity (Chebyshev) distance.

    Reuses all of the transfer / binary-search machinery of transfer_attack_L2
    and only swaps the reported distance metric and the output sub-directory.
    """

    attack_type_dir = 'Transfer_LInf'

    def distance_metric(self, vector1, vector2):
        """Return the Chebyshev (max absolute difference) distance and its name."""
        chebyshev_distance = scipy.spatial.distance.chebyshev(vector1, vector2)
        return chebyshev_distance, 'Chebyshev (L-Inf)'
class FGSM_attack(parent_attack):
    """Fast Gradient Sign Method, optimised and evaluated under L-infinity."""

    attack_method = foolbox.attacks.GradientSignAttack
    attack_type_dir = 'FGSM'
    # Distance that foolbox minimises internally while searching for adversaries.
    foolbox_distance_metric = foolbox.distances.Linfinity

    def distance_metric(self, vector1, vector2):
        """Return the Chebyshev (L-inf) distance between two flattened images."""
        chebyshev_distance = scipy.spatial.distance.chebyshev(vector1, vector2)
        return chebyshev_distance, 'Chebyshev (L-Inf)'
class BIM_Linfinity_attack(FGSM_attack):
    #Basic Iterative Method under L-inf; inherits the Chebyshev distance
    #metric and Linfinity optimisation distance from FGSM_attack
    attack_method = foolbox.attacks.LinfinityBasicIterativeAttack
    attack_type_dir = 'BIM_LInf'
class DeepFool_LInf_attack(FGSM_attack):
    #DeepFool under L-inf; inherits the Chebyshev distance metric and
    #Linfinity optimisation distance from FGSM_attack
    attack_method = foolbox.attacks.DeepFoolLinfinityAttack
    attack_type_dir = 'DeepFool_LInf'
class MIM_attack(FGSM_attack):
    #Momentum Iterative Method; inherits the Chebyshev distance metric and
    #Linfinity optimisation distance from FGSM_attack
    attack_method = foolbox.attacks.MomentumIterativeAttack
    attack_type_dir = 'MIM'
| [
"numpy.clip",
"foolbox.criteria.Misclassification",
"scipy.spatial.distance.hamming",
"foolbox.models.ModelWithEstimatedGradients",
"os.path.exists",
"tensorflow.Session",
"numpy.asarray",
"os.mkdir",
"numpy.concatenate",
"numpy.argmax",
"numpy.any",
"numpy.squeeze",
"numpy.isnan",
"scipy.... | [((450, 486), 'foolbox.criteria.Misclassification', 'foolbox.criteria.Misclassification', ([], {}), '()\n', (484, 486), False, 'import foolbox\n'), ((7022, 7077), 'numpy.asarray', 'np.asarray', (['[a.adversarial_class for a in adversarials]'], {}), '([a.adversarial_class for a in adversarials])\n', (7032, 7077), True, 'import numpy as np\n'), ((7107, 7154), 'numpy.asarray', 'np.asarray', (['[a.perturbed for a in adversarials]'], {}), '([a.perturbed for a in adversarials])\n', (7117, 7154), True, 'import numpy as np\n'), ((8783, 8833), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (8815, 8833), False, 'import scipy\n'), ((9305, 9334), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.var_list'], {}), '(self.var_list)\n', (9319, 9334), True, 'import tensorflow as tf\n'), ((19538, 19574), 'foolbox.criteria.Misclassification', 'foolbox.criteria.Misclassification', ([], {}), '()\n', (19572, 19574), False, 'import foolbox\n'), ((20328, 20383), 'numpy.asarray', 'np.asarray', (['[a.adversarial_class for a in adversarials]'], {}), '([a.adversarial_class for a in adversarials])\n', (20338, 20383), True, 'import numpy as np\n'), ((20413, 20460), 'numpy.asarray', 'np.asarray', (['[a.perturbed for a in adversarials]'], {}), '([a.perturbed for a in adversarials])\n', (20423, 20460), True, 'import numpy as np\n'), ((20704, 20754), 'scipy.spatial.distance.chebyshev', 'scipy.spatial.distance.chebyshev', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (20736, 20754), False, 'import scipy\n'), ((21084, 21134), 'scipy.spatial.distance.chebyshev', 'scipy.spatial.distance.chebyshev', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (21116, 21134), False, 'import scipy\n'), ((2294, 2323), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.var_list'], {}), '(self.var_list)\n', (2308, 2323), True, 'import tensorflow as tf\n'), ((2428, 2444), 'tensorflow.train.Saver', 'tf.train.Saver', 
([], {}), '()\n', (2442, 2444), True, 'import tensorflow as tf\n'), ((2482, 2494), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2492, 2494), True, 'import tensorflow as tf\n'), ((4042, 4078), 'numpy.zeros', 'np.zeros', (['[self.num_attack_examples]'], {}), '([self.num_attack_examples])\n', (4050, 4078), True, 'import numpy as np\n'), ((4195, 4231), 'numpy.zeros', 'np.zeros', (['[self.num_attack_examples]'], {}), '([self.num_attack_examples])\n', (4203, 4231), True, 'import numpy as np\n'), ((6037, 6065), 'numpy.asarray', 'np.asarray', (['adversary_labels'], {}), '(adversary_labels)\n', (6047, 6065), True, 'import numpy as np\n'), ((6416, 6517), 'os.path.exists', 'os.path.exists', (["('adversarial_images/' + self.output_directory + '/' + self.attack_type_dir +\n '/')"], {}), "('adversarial_images/' + self.output_directory + '/' + self.\n attack_type_dir + '/')\n", (6430, 6517), False, 'import os\n'), ((9349, 9361), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9359, 9361), True, 'import tensorflow as tf\n'), ((9451, 9521), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['self.input_placeholder', 'logits', '(0, 1)'], {}), '(self.input_placeholder, logits, (0, 1))\n', (9481, 9521), False, 'import foolbox\n'), ((11365, 11394), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.var_list'], {}), '(self.var_list)\n', (11379, 11394), True, 'import tensorflow as tf\n'), ((11499, 11515), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11513, 11515), True, 'import tensorflow as tf\n'), ((11553, 11565), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11563, 11565), True, 'import tensorflow as tf\n'), ((12569, 12608), 'numpy.zeros', 'np.zeros', (['[4, self.num_attack_examples]'], {}), '([4, self.num_attack_examples])\n', (12577, 12608), True, 'import numpy as np\n'), ((14253, 14287), 'numpy.any', 'np.any', (['(starting_adversary == None)'], {}), '(starting_adversary == None)\n', (14259, 14287), True, 'import numpy 
as np\n'), ((17879, 17927), 'scipy.spatial.distance.hamming', 'scipy.spatial.distance.hamming', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (17909, 17927), False, 'import scipy\n'), ((2922, 2992), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['self.input_placeholder', 'logits', '(0, 1)'], {}), '(self.input_placeholder, logits, (0, 1))\n', (2952, 2992), False, 'import foolbox\n'), ((3168, 3265), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['self.input_placeholder', 'self.adver_model.pre_softmax', '(0, 1)'], {}), '(self.input_placeholder, self.adver_model.\n pre_softmax, (0, 1))\n', (3198, 3265), False, 'import foolbox\n'), ((3475, 3548), 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', ([], {'epsilon': '(0.01)'}), '(epsilon=0.01)\n', (3534, 3548), False, 'import foolbox\n'), ((3574, 3659), 'foolbox.models.ModelWithEstimatedGradients', 'foolbox.models.ModelWithEstimatedGradients', (['fmodel'], {'gradient_estimator': 'estimator'}), '(fmodel, gradient_estimator=estimator\n )\n', (3616, 3659), False, 'import foolbox\n'), ((4274, 4345), 'numpy.concatenate', 'np.concatenate', (['([self.num_attack_examples], self.input_data.shape[1:])'], {}), '(([self.num_attack_examples], self.input_data.shape[1:]))\n', (4288, 4345), True, 'import numpy as np\n'), ((4465, 4518), 'math.ceil', 'math.ceil', (['(self.num_attack_examples / self.batch_size)'], {}), '(self.num_attack_examples / self.batch_size)\n', (4474, 4518), False, 'import math\n'), ((6552, 6613), 'os.mkdir', 'os.mkdir', (["('adversarial_images/' + self.output_directory + '/')"], {}), "('adversarial_images/' + self.output_directory + '/')\n", (6560, 6613), False, 'import os\n'), ((6696, 6791), 'os.mkdir', 'os.mkdir', (["('adversarial_images/' + self.output_directory + '/' + self.attack_type_dir +\n '/')"], {}), "('adversarial_images/' + self.output_directory + '/' + self.\n attack_type_dir + 
'/')\n", (6704, 6791), False, 'import os\n'), ((11993, 12063), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['self.input_placeholder', 'logits', '(0, 1)'], {}), '(self.input_placeholder, logits, (0, 1))\n', (12023, 12063), False, 'import foolbox\n'), ((12239, 12336), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['self.input_placeholder', 'self.adver_model.pre_softmax', '(0, 1)'], {}), '(self.input_placeholder, self.adver_model.\n pre_softmax, (0, 1))\n', (12269, 12336), False, 'import foolbox\n'), ((14298, 14326), 'numpy.isnan', 'np.isnan', (['starting_adversary'], {}), '(starting_adversary)\n', (14306, 14326), True, 'import numpy as np\n'), ((14399, 14428), 'numpy.argmax', 'np.argmax', (['ground_truth_label'], {}), '(ground_truth_label)\n', (14408, 14428), True, 'import numpy as np\n'), ((15634, 15667), 'numpy.clip', 'np.clip', (['transfer_perturbed', '(0)', '(1)'], {}), '(transfer_perturbed, 0, 1)\n', (15641, 15667), True, 'import numpy as np\n'), ((16228, 16257), 'numpy.argmax', 'np.argmax', (['ground_truth_label'], {}), '(ground_truth_label)\n', (16237, 16257), True, 'import numpy as np\n'), ((7766, 7803), 'numpy.squeeze', 'np.squeeze', (['adversarial_image'], {'axis': '(2)'}), '(adversarial_image, axis=2)\n', (7776, 7803), True, 'import numpy as np\n'), ((15819, 15848), 'numpy.argmax', 'np.argmax', (['ground_truth_label'], {}), '(ground_truth_label)\n', (15828, 15848), True, 'import numpy as np\n'), ((16724, 16757), 'numpy.clip', 'np.clip', (['transfer_perturbed', '(0)', '(1)'], {}), '(transfer_perturbed, 0, 1)\n', (16731, 16757), True, 'import numpy as np\n'), ((5221, 5269), 'numpy.any', 'np.any', (['(adversarial_images[example_iter] == None)'], {}), '(adversarial_images[example_iter] == None)\n', (5227, 5269), True, 'import numpy as np\n'), ((10261, 10289), 'numpy.asarray', 'np.asarray', (['predicted_logits'], {}), '(predicted_logits)\n', (10271, 10289), True, 'import numpy as np\n'), ((15108, 15137), 
'numpy.argmax', 'np.argmax', (['ground_truth_label'], {}), '(ground_truth_label)\n', (15117, 15137), True, 'import numpy as np\n'), ((16838, 16867), 'numpy.argmax', 'np.argmax', (['ground_truth_label'], {}), '(ground_truth_label)\n', (16847, 16867), True, 'import numpy as np\n'), ((5280, 5322), 'numpy.isnan', 'np.isnan', (['adversarial_images[example_iter]'], {}), '(adversarial_images[example_iter])\n', (5288, 5322), True, 'import numpy as np\n')] |
import glob
import os
import sys
from argparse import ArgumentParser
from datetime import datetime
import numpy as np
import torch
import torch.utils.data as Data
from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, \
generate_grid_unit
from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, \
Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, \
SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, \
neg_Jdet_loss, NCC, multi_resolution_NCC
# ---- Command-line configuration for the three-stage coarse-to-fine training ----
parser = ArgumentParser()
parser.add_argument("--lr", type=float,
                    dest="lr", default=1e-4, help="learning rate")
parser.add_argument("--iteration_lvl1", type=int,
                    dest="iteration_lvl1", default=30001,
                    help="number of lvl1 iterations")
parser.add_argument("--iteration_lvl2", type=int,
                    dest="iteration_lvl2", default=30001,
                    help="number of lvl2 iterations")
parser.add_argument("--iteration_lvl3", type=int,
                    dest="iteration_lvl3", default=60001,
                    help="number of lvl3 iterations")
parser.add_argument("--antifold", type=float,
                    dest="antifold", default=0.,
                    help="Anti-fold loss: suggested range 1 to 10000")
parser.add_argument("--checkpoint", type=int,
                    dest="checkpoint", default=5000,
                    help="frequency of saving models")
parser.add_argument("--start_channel", type=int,
                    dest="start_channel", default=7,  # default:8, 7 for stage
                    help="number of start channels")
parser.add_argument("--datapath", type=str,
                    dest="datapath",
                    default='../Dataset/Brain_dataset/OASIS/crop_min_max/norm',
                    help="data path for training images")
parser.add_argument("--freeze_step", type=int,
                    dest="freeze_step", default=3000,
                    help="Number of step to freeze the previous level")
opt = parser.parse_args()

# Unpack parsed options into module-level globals read by the train_lvl* functions.
lr = opt.lr
start_channel = opt.start_channel
antifold = opt.antifold
n_checkpoint = opt.checkpoint
datapath = opt.datapath
freeze_step = opt.freeze_step

iteration_lvl1 = opt.iteration_lvl1
iteration_lvl2 = opt.iteration_lvl2
iteration_lvl3 = opt.iteration_lvl3

# Prefix used for all checkpoint (.pth) and loss-history (.npy) filenames.
model_name = "LDR_OASIS_NCC_unit_disp_add_fea7_reg01_10_testing_"
def train_lvl1():
    """Train the level-1 (quarter-resolution) conditional LapIRN model.

    Reads configuration from module-level globals (lr, iteration_lvl1, ...),
    trains on the OASIS volumes under `datapath`, and writes checkpoints and a
    loss history under ../Model/Stage.
    """
    print("Training lvl1...")
    model = Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1(2, 3, start_channel, is_train=True,
                                                             imgshape=imgshape_4,
                                                             range_flow=range_flow).cuda()
    loss_similarity = NCC(win=3)
    loss_smooth = smoothloss
    loss_Jdet = neg_Jdet_loss

    # NOTE(review): `transform` is created and frozen but never used in this
    # level's training loop -- kept for parity with the other levels.
    transform = SpatialTransform_unit().cuda()

    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True

    # OASIS training volumes
    names = sorted(glob.glob(datapath + '/*.nii'))

    # Identity sampling grid at quarter resolution, used by the Jacobian loss.
    grid_4 = generate_grid(imgshape_4)
    grid_4 = torch.from_numpy(np.reshape(grid_4, (1,) + grid_4.shape)).cuda().float()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model_dir = '../Model/Stage'

    # Fix: os.mkdir fails when the parent '../Model' does not yet exist, and
    # the isdir/mkdir pair is race-prone; makedirs with exist_ok handles both.
    os.makedirs(model_dir, exist_ok=True)

    # Rows: total loss, NCC similarity, Jacobian penalty, smoothness penalty.
    lossall = np.zeros((4, iteration_lvl1 + 1))

    training_generator = Data.DataLoader(Dataset_epoch(names, norm=False), batch_size=1,
                                         shuffle=True, num_workers=2)
    step = 0
    # Optional resume from a fixed checkpoint (disabled by default).
    load_model = False
    if load_model is True:
        model_path = "../Model/LDR_LPBA_NCC_lap_share_preact_1_05_3000.pth"
        print("Loading weight: ", model_path)
        step = 3000
        model.load_state_dict(torch.load(model_path))
        temp_lossall = np.load("../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy")
        lossall[:, 0:3000] = temp_lossall[:, 0:3000]

    while step <= iteration_lvl1:
        for X, Y in training_generator:

            X = X.cuda().float()
            Y = Y.cuda().float()
            # Random regularisation weight in [0, 1), conditioning the network.
            reg_code = torch.rand(1, dtype=X.dtype, device=X.device).unsqueeze(dim=0)

            F_X_Y, X_Y, Y_4x, F_xy, _ = model(X, Y, reg_code)

            loss_multiNCC = loss_similarity(X_Y, Y_4x)

            F_X_Y_norm = transform_unit_flow_to_flow_cuda(F_X_Y.permute(0, 2, 3, 4, 1).clone())

            loss_Jacobian = loss_Jdet(F_X_Y_norm, grid_4)

            # Rescale the unit-space flow to voxel units before the smoothness loss.
            _, _, x, y, z = F_X_Y.shape
            norm_vector = torch.zeros((1, 3, 1, 1, 1), dtype=F_X_Y.dtype, device=F_X_Y.device)
            norm_vector[0, 0, 0, 0, 0] = (z - 1)
            norm_vector[0, 1, 0, 0, 0] = (y - 1)
            norm_vector[0, 2, 0, 0, 0] = (x - 1)
            loss_regulation = loss_smooth(F_X_Y * norm_vector)

            # The sampled reg_code also scales the smoothness weight.
            smo_weight = reg_code * max_smooth
            loss = loss_multiNCC + antifold * loss_Jacobian + smo_weight * loss_regulation

            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply gradients

            lossall[:, step] = np.array(
                [loss.item(), loss_multiNCC.item(), loss_Jacobian.item(), loss_regulation.item()])
            sys.stdout.write(
                "\r" + 'step "{0}" -> training loss "{1:.4f}" - sim_NCC "{2:4f}" - Jdet "{3:.10f}" -smo "{4:.4f} -reg_c "{5:.4f}"'.format(
                    step, loss.item(), loss_multiNCC.item(), loss_Jacobian.item(), loss_regulation.item(),
                    reg_code[0].item()))
            sys.stdout.flush()

            # Periodic checkpoint of weights and loss history.
            if step % n_checkpoint == 0:
                modelname = model_dir + '/' + model_name + "stagelvl1_" + str(step) + '.pth'
                torch.save(model.state_dict(), modelname)
                np.save(model_dir + '/loss' + model_name + "stagelvl1_" + str(step) + '.npy', lossall)
            step += 1

            if step > iteration_lvl1:
                break
        print("one epoch pass")
    np.save(model_dir + '/loss' + model_name + 'stagelvl1.npy', lossall)
def train_lvl2():
    """Train the level-2 (half-resolution) model on top of a frozen level-1.

    Loads the latest level-1 checkpoint, freezes it, and trains the level-2
    refinement; the frozen level is unfrozen again after `freeze_step` steps.
    """
    print("Training lvl2...")
    model_lvl1 = Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1(2, 3, start_channel, is_train=True,
                                                                  imgshape=imgshape_4,
                                                                  range_flow=range_flow).cuda()
    # Pick the most recent (highest-step) level-1 checkpoint.
    model_path = sorted(glob.glob("../Model/Stage/" + model_name + "stagelvl1_?????.pth"))[-1]
    model_lvl1.load_state_dict(torch.load(model_path))
    print("Loading weight for model_lvl1...", model_path)

    # Freeze model_lvl1 weight
    for param in model_lvl1.parameters():
        param.requires_grad = False

    model = Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2(2, 3, start_channel, is_train=True,
                                                                  imgshape=imgshape_2,
                                                                  range_flow=range_flow, model_lvl1=model_lvl1).cuda()

    loss_similarity = multi_resolution_NCC(win=5, scale=2)
    loss_smooth = smoothloss
    loss_Jdet = neg_Jdet_loss

    # NOTE(review): `transform` is created and frozen but never used in this
    # level's training loop -- kept for parity with the other levels.
    transform = SpatialTransform_unit().cuda()

    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True

    # OASIS training volumes
    names = sorted(glob.glob(datapath + '/*.nii'))

    # Identity sampling grid at half resolution, used by the Jacobian loss.
    grid_2 = generate_grid(imgshape_2)
    grid_2 = torch.from_numpy(np.reshape(grid_2, (1,) + grid_2.shape)).cuda().float()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model_dir = '../Model/Stage'

    # Fix: os.mkdir fails when the parent '../Model' does not yet exist, and
    # the isdir/mkdir pair is race-prone; makedirs with exist_ok handles both.
    os.makedirs(model_dir, exist_ok=True)

    # Rows: total loss, NCC similarity, Jacobian penalty, smoothness penalty.
    lossall = np.zeros((4, iteration_lvl2 + 1))

    training_generator = Data.DataLoader(Dataset_epoch(names, norm=False), batch_size=1,
                                         shuffle=True, num_workers=2)
    step = 0
    # Optional resume from a fixed checkpoint (disabled by default).
    load_model = False
    if load_model is True:
        model_path = "../Model/LDR_LPBA_NCC_lap_share_preact_1_05_3000.pth"
        print("Loading weight: ", model_path)
        step = 3000
        model.load_state_dict(torch.load(model_path))
        temp_lossall = np.load("../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy")
        lossall[:, 0:3000] = temp_lossall[:, 0:3000]

    while step <= iteration_lvl2:
        for X, Y in training_generator:

            X = X.cuda().float()
            Y = Y.cuda().float()
            # Random regularisation weight in [0, 1), conditioning the network.
            reg_code = torch.rand(1, dtype=X.dtype, device=X.device).unsqueeze(dim=0)

            F_X_Y, X_Y, Y_4x, F_xy, F_xy_lvl1, _ = model(X, Y, reg_code)

            loss_multiNCC = loss_similarity(X_Y, Y_4x)

            F_X_Y_norm = transform_unit_flow_to_flow_cuda(F_X_Y.permute(0, 2, 3, 4, 1).clone())

            loss_Jacobian = loss_Jdet(F_X_Y_norm, grid_2)

            # Rescale the unit-space flow to voxel units before the smoothness loss.
            _, _, x, y, z = F_X_Y.shape
            norm_vector = torch.zeros((1, 3, 1, 1, 1), dtype=F_X_Y.dtype, device=F_X_Y.device)
            norm_vector[0, 0, 0, 0, 0] = (z - 1)
            norm_vector[0, 1, 0, 0, 0] = (y - 1)
            norm_vector[0, 2, 0, 0, 0] = (x - 1)
            loss_regulation = loss_smooth(F_X_Y * norm_vector)

            # The sampled reg_code also scales the smoothness weight.
            smo_weight = reg_code * max_smooth
            loss = loss_multiNCC + antifold * loss_Jacobian + smo_weight * loss_regulation

            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply gradients

            lossall[:, step] = np.array(
                [loss.item(), loss_multiNCC.item(), loss_Jacobian.item(), loss_regulation.item()])
            sys.stdout.write(
                "\r" + 'step "{0}" -> training loss "{1:.4f}" - sim_NCC "{2:4f}" - Jdet "{3:.10f}" -smo "{4:.4f} -reg_c "{5:.4f}"'.format(
                    step, loss.item(), loss_multiNCC.item(), loss_Jacobian.item(), loss_regulation.item(),
                    reg_code[0].item()))
            sys.stdout.flush()

            # Periodic checkpoint of weights and loss history.
            if (step % n_checkpoint == 0):
                modelname = model_dir + '/' + model_name + "stagelvl2_" + str(step) + '.pth'
                torch.save(model.state_dict(), modelname)
                np.save(model_dir + '/loss' + model_name + "stagelvl2_" + str(step) + '.npy', lossall)

            # After the warm-up period, allow the level-1 sub-network to train too.
            if step == freeze_step:
                model.unfreeze_modellvl1()

            step += 1

            if step > iteration_lvl2:
                break
        print("one epoch pass")
    np.save(model_dir + '/loss' + model_name + 'stagelvl2.npy', lossall)
def train_lvl3():
    """Train the level-3 (full-resolution) model on top of frozen levels 1+2.

    Loads the latest level-2 checkpoint (which embeds level-1), freezes it,
    and trains the final refinement; the frozen levels are unfrozen again
    after `freeze_step` steps. Checkpoints go under ../Model.
    """
    print("Training lvl3...")
    model_lvl1 = Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1(2, 3, start_channel, is_train=True,
                                                                  imgshape=imgshape_4,
                                                                  range_flow=range_flow).cuda()
    model_lvl2 = Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2(2, 3, start_channel, is_train=True,
                                                                  imgshape=imgshape_2,
                                                                  range_flow=range_flow,
                                                                  model_lvl1=model_lvl1).cuda()
    # Pick the most recent (highest-step) level-2 checkpoint.
    model_path = sorted(glob.glob("../Model/Stage/" + model_name + "stagelvl2_?????.pth"))[-1]
    model_lvl2.load_state_dict(torch.load(model_path))
    print("Loading weight for model_lvl2...", model_path)

    # Freeze model_lvl2 weights (which include the embedded model_lvl1).
    for param in model_lvl2.parameters():
        param.requires_grad = False

    model = Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3(2, 3, start_channel, is_train=True,
                                                                  imgshape=imgshape,
                                                                  range_flow=range_flow, model_lvl2=model_lvl2).cuda()

    loss_similarity = multi_resolution_NCC(win=7, scale=3)
    loss_smooth = smoothloss
    loss_Jdet = neg_Jdet_loss

    # NOTE(review): `transform`/`transform_nearest`/`grid_unit` are unused in
    # the loop below; presumably kept for the validation placeholder further down.
    transform = SpatialTransform_unit().cuda()
    transform_nearest = SpatialTransformNearest_unit().cuda()

    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True

    # OASIS training volumes
    names = sorted(glob.glob(datapath + '/*.nii'))

    # Identity sampling grids at full resolution (voxel and unit space).
    grid = generate_grid(imgshape)
    grid = torch.from_numpy(np.reshape(grid, (1,) + grid.shape)).cuda().float()

    grid_unit = generate_grid_unit(imgshape)
    grid_unit = torch.from_numpy(np.reshape(grid_unit, (1,) + grid_unit.shape)).cuda().float()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model_dir = '../Model'

    # Fix: makedirs with exist_ok avoids the race-prone isdir/mkdir pair and
    # creates any missing parent directories.
    os.makedirs(model_dir, exist_ok=True)

    # Rows: total loss, NCC similarity, Jacobian penalty, smoothness penalty.
    lossall = np.zeros((4, iteration_lvl3 + 1))

    training_generator = Data.DataLoader(Dataset_epoch(names, norm=False), batch_size=1,
                                         shuffle=True, num_workers=2)
    step = 0
    # Optional resume from a fixed checkpoint (disabled by default).
    load_model = False
    if load_model is True:
        model_path = "../Model/LDR_LPBA_NCC_lap_share_preact_1_05_3000.pth"
        print("Loading weight: ", model_path)
        step = 3000
        model.load_state_dict(torch.load(model_path))
        temp_lossall = np.load("../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy")
        lossall[:, 0:3000] = temp_lossall[:, 0:3000]

    while step <= iteration_lvl3:
        for X, Y in training_generator:

            X = X.cuda().float()
            Y = Y.cuda().float()
            # Random regularisation weight in [0, 1), conditioning the network.
            reg_code = torch.rand(1, dtype=X.dtype, device=X.device).unsqueeze(dim=0)

            F_X_Y, X_Y, Y_4x, F_xy, F_xy_lvl1, F_xy_lvl2, _ = model(X, Y, reg_code)

            loss_multiNCC = loss_similarity(X_Y, Y_4x)

            F_X_Y_norm = transform_unit_flow_to_flow_cuda(F_X_Y.permute(0, 2, 3, 4, 1).clone())

            loss_Jacobian = loss_Jdet(F_X_Y_norm, grid)

            # Rescale the unit-space flow to voxel units before the smoothness loss.
            _, _, x, y, z = F_X_Y.shape
            norm_vector = torch.zeros((1, 3, 1, 1, 1), dtype=F_X_Y.dtype, device=F_X_Y.device)
            norm_vector[0, 0, 0, 0, 0] = (z - 1)
            norm_vector[0, 1, 0, 0, 0] = (y - 1)
            norm_vector[0, 2, 0, 0, 0] = (x - 1)
            loss_regulation = loss_smooth(F_X_Y * norm_vector)

            # The sampled reg_code also scales the smoothness weight.
            smo_weight = reg_code * max_smooth
            loss = loss_multiNCC + antifold * loss_Jacobian + smo_weight * loss_regulation

            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply gradients

            lossall[:, step] = np.array(
                [loss.item(), loss_multiNCC.item(), loss_Jacobian.item(), loss_regulation.item()])
            sys.stdout.write(
                "\r" + 'step "{0}" -> training loss "{1:.4f}" - sim_NCC "{2:4f}" - Jdet "{3:.10f}" -smo "{4:.4f} -reg_c "{5:.4f}"'.format(
                    step, loss.item(), loss_multiNCC.item(), loss_Jacobian.item(), loss_regulation.item(),
                    reg_code[0].item()))
            sys.stdout.flush()

            # Periodic checkpoint of weights and loss history.
            if step % n_checkpoint == 0:
                modelname = model_dir + '/' + model_name + "stagelvl3_" + str(step) + '.pth'
                torch.save(model.state_dict(), modelname)
                np.save(model_dir + '/loss' + model_name + "stagelvl3_" + str(step) + '.npy', lossall)

                # Put your validation code here
                # ---------------------------------------

            # After the warm-up period, allow the lower levels to train too.
            if step == freeze_step:
                model.unfreeze_modellvl2()

            step += 1

            if step > iteration_lvl3:
                break
        print("one epoch pass")
    np.save(model_dir + '/loss' + model_name + 'stagelvl3.npy', lossall)
# Full-resolution input volume shape and the half-/quarter-resolution shapes
# used by the lvl2 and lvl1 pyramid levels.
# Fix: use integer (floor) division -- the original `160 / 4` etc. produced
# floats under Python 3, which is unsafe for array/grid shape arguments.
imgshape = (160, 192, 144)
imgshape_4 = (160 // 4, 192 // 4, 144 // 4)
imgshape_2 = (160 // 2, 192 // 2, 144 // 2)

range_flow = 0.4   # scaling applied to the predicted displacement field
max_smooth = 10.   # upper bound of the conditional smoothness weight
# Train the three pyramid levels sequentially: each level loads the frozen
# checkpoint of the previous one, so the order lvl1 -> lvl2 -> lvl3 matters.
start_t = datetime.now()
train_lvl1()
train_lvl2()
train_lvl3()
# Report total wall-clock training time.
end_t = datetime.now()
total_t = end_t - start_t
print("Time: ", total_t.total_seconds())
| [
"numpy.save",
"miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1",
"numpy.reshape",
"argparse.ArgumentParser",
"os.path.isdir",
"os.mkdir",
"miccai2021_model.NCC",
"sys.stdout.flush",
"glob.glob",
"miccai2021_model.SpatialTransformNearest_unit",
"Functions.Dataset_epoch",
... | [((627, 643), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (641, 643), False, 'from argparse import ArgumentParser\n'), ((16340, 16354), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16352, 16354), False, 'from datetime import datetime\n'), ((16409, 16423), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16421, 16423), False, 'from datetime import datetime\n'), ((2851, 2861), 'miccai2021_model.NCC', 'NCC', ([], {'win': '(3)'}), '(win=3)\n', (2854, 2861), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((3155, 3180), 'Functions.generate_grid', 'generate_grid', (['imgshape_4'], {}), '(imgshape_4)\n', (3168, 3180), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((3517, 3550), 'numpy.zeros', 'np.zeros', (['(4, iteration_lvl1 + 1)'], {}), '((4, iteration_lvl1 + 1))\n', (3525, 3550), True, 'import numpy as np\n'), ((6238, 6306), 'numpy.save', 'np.save', (["(model_dir + '/loss' + model_name + 'stagelvl1.npy')", 'lossall'], {}), "(model_dir + '/loss' + model_name + 'stagelvl1.npy', lossall)\n", (6245, 6306), True, 'import numpy as np\n'), ((7320, 7356), 'miccai2021_model.multi_resolution_NCC', 'multi_resolution_NCC', ([], {'win': '(5)', 'scale': '(2)'}), '(win=5, scale=2)\n', (7340, 7356), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((7650, 7675), 'Functions.generate_grid', 'generate_grid', 
(['imgshape_2'], {}), '(imgshape_2)\n', (7663, 7675), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((8012, 8045), 'numpy.zeros', 'np.zeros', (['(4, iteration_lvl2 + 1)'], {}), '((4, iteration_lvl2 + 1))\n', (8020, 8045), True, 'import numpy as np\n'), ((10826, 10894), 'numpy.save', 'np.save', (["(model_dir + '/loss' + model_name + 'stagelvl2.npy')", 'lossall'], {}), "(model_dir + '/loss' + model_name + 'stagelvl2.npy', lossall)\n", (10833, 10894), True, 'import numpy as np\n'), ((12308, 12344), 'miccai2021_model.multi_resolution_NCC', 'multi_resolution_NCC', ([], {'win': '(7)', 'scale': '(3)'}), '(win=7, scale=3)\n', (12328, 12344), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((12698, 12721), 'Functions.generate_grid', 'generate_grid', (['imgshape'], {}), '(imgshape)\n', (12711, 12721), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((12819, 12847), 'Functions.generate_grid_unit', 'generate_grid_unit', (['imgshape'], {}), '(imgshape)\n', (12837, 12847), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((13187, 13220), 'numpy.zeros', 'np.zeros', (['(4, iteration_lvl3 + 1)'], {}), '((4, iteration_lvl3 + 1))\n', (13195, 13220), True, 'import numpy as np\n'), ((16115, 16183), 'numpy.save', 'np.save', (["(model_dir + '/loss' + model_name + 'stagelvl3.npy')", 'lossall'], {}), "(model_dir + '/loss' + model_name + 'stagelvl3.npy', lossall)\n", (16122, 16183), True, 'import numpy as np\n'), ((3109, 3139), 'glob.glob', 'glob.glob', (["(datapath + 
'/*.nii')"], {}), "(datapath + '/*.nii')\n", (3118, 3139), False, 'import glob\n'), ((3448, 3472), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (3461, 3472), False, 'import os\n'), ((3482, 3501), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), '(model_dir)\n', (3490, 3501), False, 'import os\n'), ((3593, 3625), 'Functions.Dataset_epoch', 'Dataset_epoch', (['names'], {'norm': '(False)'}), '(names, norm=False)\n', (3606, 3625), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((3993, 4061), 'numpy.load', 'np.load', (['"""../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy"""'], {}), "('../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy')\n", (4000, 4061), True, 'import numpy as np\n'), ((6790, 6812), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (6800, 6812), False, 'import torch\n'), ((7604, 7634), 'glob.glob', 'glob.glob', (["(datapath + '/*.nii')"], {}), "(datapath + '/*.nii')\n", (7613, 7634), False, 'import glob\n'), ((7943, 7967), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (7956, 7967), False, 'import os\n'), ((7977, 7996), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), '(model_dir)\n', (7985, 7996), False, 'import os\n'), ((8088, 8120), 'Functions.Dataset_epoch', 'Dataset_epoch', (['names'], {'norm': '(False)'}), '(names, norm=False)\n', (8101, 8120), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((8488, 8556), 'numpy.load', 'np.load', (['"""../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy"""'], {}), "('../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy')\n", (8495, 8556), True, 'import numpy as np\n'), ((11780, 11802), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (11790, 11802), False, 'import torch\n'), ((12654, 12684), 'glob.glob', 'glob.glob', (["(datapath + 
'/*.nii')"], {}), "(datapath + '/*.nii')\n", (12663, 12684), False, 'import glob\n'), ((13118, 13142), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (13131, 13142), False, 'import os\n'), ((13152, 13171), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), '(model_dir)\n', (13160, 13171), False, 'import os\n'), ((13263, 13295), 'Functions.Dataset_epoch', 'Dataset_epoch', (['names'], {'norm': '(False)'}), '(names, norm=False)\n', (13276, 13295), False, 'from Functions import generate_grid, Dataset_epoch, Predict_dataset, transform_unit_flow_to_flow_cuda, generate_grid_unit\n'), ((13663, 13731), 'numpy.load', 'np.load', (['"""../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy"""'], {}), "('../Model/loss_LDR_LPBA_NCC_lap_share_preact_1_05_3000.npy')\n", (13670, 13731), True, 'import numpy as np\n'), ((2549, 2688), 'miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1', 'Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1', (['(2)', '(3)', 'start_channel'], {'is_train': '(True)', 'imgshape': 'imgshape_4', 'range_flow': 'range_flow'}), '(2, 3, start_channel,\n is_train=True, imgshape=imgshape_4, range_flow=range_flow)\n', (2604, 2688), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((2938, 2961), 'miccai2021_model.SpatialTransform_unit', 'SpatialTransform_unit', ([], {}), '()\n', (2959, 2961), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((3946, 3968), 'torch.load', 'torch.load', 
(['model_path'], {}), '(model_path)\n', (3956, 3968), False, 'import torch\n'), ((4685, 4753), 'torch.zeros', 'torch.zeros', (['(1, 3, 1, 1, 1)'], {'dtype': 'F_X_Y.dtype', 'device': 'F_X_Y.device'}), '((1, 3, 1, 1, 1), dtype=F_X_Y.dtype, device=F_X_Y.device)\n', (4696, 4753), False, 'import torch\n'), ((5764, 5782), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5780, 5782), False, 'import sys\n'), ((6374, 6513), 'miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1', 'Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1', (['(2)', '(3)', 'start_channel'], {'is_train': '(True)', 'imgshape': 'imgshape_4', 'range_flow': 'range_flow'}), '(2, 3, start_channel,\n is_train=True, imgshape=imgshape_4, range_flow=range_flow)\n', (6429, 6513), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((6688, 6753), 'glob.glob', 'glob.glob', (["('../Model/Stage/' + model_name + 'stagelvl1_?????.pth')"], {}), "('../Model/Stage/' + model_name + 'stagelvl1_?????.pth')\n", (6697, 6753), False, 'import glob\n'), ((6995, 7162), 'miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2', 'Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2', (['(2)', '(3)', 'start_channel'], {'is_train': '(True)', 'imgshape': 'imgshape_2', 'range_flow': 'range_flow', 'model_lvl1': 'model_lvl1'}), '(2, 3, start_channel,\n is_train=True, imgshape=imgshape_2, range_flow=range_flow, model_lvl1=\n model_lvl1)\n', (7050, 7162), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, 
neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((7433, 7456), 'miccai2021_model.SpatialTransform_unit', 'SpatialTransform_unit', ([], {}), '()\n', (7454, 7456), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((8441, 8463), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (8451, 8463), False, 'import torch\n'), ((9191, 9259), 'torch.zeros', 'torch.zeros', (['(1, 3, 1, 1, 1)'], {'dtype': 'F_X_Y.dtype', 'device': 'F_X_Y.device'}), '((1, 3, 1, 1, 1), dtype=F_X_Y.dtype, device=F_X_Y.device)\n', (9202, 9259), False, 'import torch\n'), ((10270, 10288), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10286, 10288), False, 'import sys\n'), ((10962, 11101), 'miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1', 'Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1', (['(2)', '(3)', 'start_channel'], {'is_train': '(True)', 'imgshape': 'imgshape_4', 'range_flow': 'range_flow'}), '(2, 3, start_channel,\n is_train=True, imgshape=imgshape_4, range_flow=range_flow)\n', (11017, 11101), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((11268, 11435), 'miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2', 'Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2', (['(2)', '(3)', 'start_channel'], {'is_train': '(True)', 'imgshape': 'imgshape_2', 'range_flow': 'range_flow', 'model_lvl1': 'model_lvl1'}), '(2, 3, start_channel,\n is_train=True, imgshape=imgshape_2, 
range_flow=range_flow, model_lvl1=\n model_lvl1)\n', (11323, 11435), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((11678, 11743), 'glob.glob', 'glob.glob', (["('../Model/Stage/' + model_name + 'stagelvl2_?????.pth')"], {}), "('../Model/Stage/' + model_name + 'stagelvl2_?????.pth')\n", (11687, 11743), False, 'import glob\n'), ((11985, 12150), 'miccai2021_model.Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3', 'Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3', (['(2)', '(3)', 'start_channel'], {'is_train': '(True)', 'imgshape': 'imgshape', 'range_flow': 'range_flow', 'model_lvl2': 'model_lvl2'}), '(2, 3, start_channel,\n is_train=True, imgshape=imgshape, range_flow=range_flow, model_lvl2=\n model_lvl2)\n', (12040, 12150), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((12421, 12444), 'miccai2021_model.SpatialTransform_unit', 'SpatialTransform_unit', ([], {}), '()\n', (12442, 12444), False, 'from miccai2021_model import Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((12476, 12506), 'miccai2021_model.SpatialTransformNearest_unit', 'SpatialTransformNearest_unit', ([], {}), '()\n', (12504, 12506), False, 'from miccai2021_model import 
Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl1, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl2, Miccai2021_LDR_conditional_laplacian_unit_disp_add_lvl3, SpatialTransform_unit, SpatialTransformNearest_unit, smoothloss, neg_Jdet_loss, NCC, multi_resolution_NCC\n'), ((13616, 13638), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (13626, 13638), False, 'import torch\n'), ((14375, 14443), 'torch.zeros', 'torch.zeros', (['(1, 3, 1, 1, 1)'], {'dtype': 'F_X_Y.dtype', 'device': 'F_X_Y.device'}), '((1, 3, 1, 1, 1), dtype=F_X_Y.dtype, device=F_X_Y.device)\n', (14386, 14443), False, 'import torch\n'), ((15454, 15472), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15470, 15472), False, 'import sys\n'), ((4280, 4325), 'torch.rand', 'torch.rand', (['(1)'], {'dtype': 'X.dtype', 'device': 'X.device'}), '(1, dtype=X.dtype, device=X.device)\n', (4290, 4325), False, 'import torch\n'), ((8775, 8820), 'torch.rand', 'torch.rand', (['(1)'], {'dtype': 'X.dtype', 'device': 'X.device'}), '(1, dtype=X.dtype, device=X.device)\n', (8785, 8820), False, 'import torch\n'), ((13950, 13995), 'torch.rand', 'torch.rand', (['(1)'], {'dtype': 'X.dtype', 'device': 'X.device'}), '(1, dtype=X.dtype, device=X.device)\n', (13960, 13995), False, 'import torch\n'), ((3211, 3250), 'numpy.reshape', 'np.reshape', (['grid_4', '((1,) + grid_4.shape)'], {}), '(grid_4, (1,) + grid_4.shape)\n', (3221, 3250), True, 'import numpy as np\n'), ((7706, 7745), 'numpy.reshape', 'np.reshape', (['grid_2', '((1,) + grid_2.shape)'], {}), '(grid_2, (1,) + grid_2.shape)\n', (7716, 7745), True, 'import numpy as np\n'), ((12750, 12785), 'numpy.reshape', 'np.reshape', (['grid', '((1,) + grid.shape)'], {}), '(grid, (1,) + grid.shape)\n', (12760, 12785), True, 'import numpy as np\n'), ((12881, 12926), 'numpy.reshape', 'np.reshape', (['grid_unit', '((1,) + grid_unit.shape)'], {}), '(grid_unit, (1,) + grid_unit.shape)\n', (12891, 12926), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 9 13:55:54 2020
@author: lenovouser
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x, y):
    """Height function sampled by the contour plot below.

    Computes (1 - x/2 + x^5 + y^3) * exp(-(x^2 + y^2)); works elementwise
    on numpy arrays as well as on plain scalars.
    """
    polynomial = 1 - x / 2 + x ** 5 + y ** 3
    envelope = np.exp(-x ** 2 - y ** 2)
    return polynomial * envelope
# Sample the height function on a 256x256 grid covering [-3, 3] x [-3, 3].
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)

# Filled contours (10 levels, warm colormap) with red contour lines on top.
filled = plt.contourf(X, Y, Z, 10, alpha=.75, cmap=plt.cm.hot)
contour_lines = plt.contour(X, Y, Z, 20, colors='r')
plt.colorbar(filled)

# Label the contour lines and hide both axes' tick marks.
plt.clabel(contour_lines, inline=True, fontsize=10)
plt.xticks(())
plt.yticks(())
plt.show()
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.colorbar",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.clabel",
"numpy.meshgrid",
"matplotlib.pyplot.show"
] | [((252, 273), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'n'], {}), '(-3, 3, n)\n', (263, 273), True, 'import numpy as np\n'), ((278, 299), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'n'], {}), '(-3, 3, n)\n', (289, 299), True, 'import numpy as np\n'), ((306, 323), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (317, 323), True, 'import numpy as np\n'), ((546, 561), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['F'], {}), '(F)\n', (558, 561), True, 'import matplotlib.pyplot as plt\n'), ((577, 616), 'matplotlib.pyplot.clabel', 'plt.clabel', (['C'], {'inline': '(True)', 'fontsize': '(10)'}), '(C, inline=True, fontsize=10)\n', (587, 616), True, 'import matplotlib.pyplot as plt\n'), ((618, 632), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (628, 632), True, 'import matplotlib.pyplot as plt\n'), ((633, 647), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (643, 647), True, 'import matplotlib.pyplot as plt\n'), ((648, 658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (656, 658), True, 'import matplotlib.pyplot as plt\n'), ((219, 243), 'numpy.exp', 'np.exp', (['(-x ** 2 - y ** 2)'], {}), '(-x ** 2 - y ** 2)\n', (225, 243), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from keras import backend as K
from keras.models import Sequential, model_from_json
from keras.layers import Lambda
from tensorflow.python.framework import ops
from scipy.ndimage.interpolation import zoom
import keras
import tempfile
import os
def loss_calculation(x, category_index, nb_classes):
    """Mask `x` so only the `category_index` class channel survives.

    Multiplies `x` elementwise with a one-hot vector of length
    `nb_classes`, zeroing every class except `category_index`.
    """
    one_hot_mask = K.one_hot(category_index, nb_classes)
    return tf.multiply(x, one_hot_mask)
def loss_calculation_shape(input_shape):
    """Output-shape helper for the masking Lambda layer.

    The one-hot mask multiplication is elementwise, so the output shape
    is exactly the input shape.
    """
    return input_shape
def normalize(x):
    """Scale `x` by its RMS value (a small epsilon keeps the division safe)."""
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + 1e-5)
def prepareGradCAM(input_model, conv_layer_index, nb_classes):
    """Build the GradCAM gradient function for `input_model`.

    Appends a Lambda layer that masks the model output down to the
    explanation category, then returns a K.function mapping
    [input batch, learning phase] to the activations of the last fully
    convolutional layer and the (normalized) gradients of the masked
    output w.r.t. those activations.

    NOTE(review): `conv_layer_index` is currently unused -- the conv layer
    is hard-coded as `model.layers[-6]`; confirm against callers before
    wiring it through.

    Mutates `input_model` in place by adding the masking Lambda layer.
    """
    model = input_model
    # Category to explain; 1 == the "non-manufacturability" class.
    explanation_category = 1
    # Mask the output so only the explanation category contributes to the loss
    # (the original hard-coded the literal 1 and left the variable unused).
    loss_function = lambda x: loss_calculation(x, explanation_category, nb_classes)
    model.add(Lambda(loss_function,
                     output_shape=loss_calculation_shape))
    # Use the masked (pre-softmax) output as the scalar loss, per GradCAM practice.
    loss = K.sum(model.layers[-1].output)
    # Last fully convolutional layer whose activations GradCAM weights.
    conv_output = model.layers[-6].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()], [conv_output, grads])
    return gradient_function
def registerGradient():
    """Register the 'GuidedBackProp' gradient with TensorFlow (idempotent).

    Guided backprop only propagates gradient components that are positive
    AND whose corresponding forward-pass input was positive. Peeks at
    TensorFlow's private gradient registry to avoid the error raised on
    double registration.
    """
    if "GuidedBackProp" in ops._gradient_registry._registry:
        return

    @ops.RegisterGradient("GuidedBackProp")
    def _GuidedBackProp(op, grad):
        input_dtype = op.inputs[0].dtype
        positive_grad = tf.cast(grad > 0., input_dtype)
        positive_input = tf.cast(op.inputs[0] > 0., input_dtype)
        return grad * positive_grad * positive_input
def compileSaliencyFunction(model, ld_model_fn, model_no, channels, activation, voxelCount, nbClasses, activation_layer=-5):
    """Compile a guided-backprop saliency function.

    Rebuilds the model with guided backprop substituted for the ReLU
    gradient, then returns a K.function mapping [input, learning phase]
    to the gradient of the summed activations at `activation_layer`
    w.r.t. the input.
    """
    guided = modifyBackprop(model, 'GuidedBackProp', ld_model_fn, model_no, channels, activation, voxelCount, nbClasses)
    model_input = guided.input
    activation_output = guided.layers[activation_layer].output
    total_activation = K.sum(activation_output)
    saliency = K.gradients(total_activation, model_input)[0]
    return K.function([model_input, K.learning_phase()], [saliency])
def modifyBackprop(model, name, ld_model_fn, model_no, channels, activation, voxelCount, nbClasses):
    """Rebuild the model with the `name` gradient substituted for ReLU.

    Registers the custom gradient, then -- inside a gradient-override
    context mapping 'Relu' to `name` -- swaps every keras ReLU activation
    for tf.nn.relu (so the override applies), rebuilds the architecture
    via `ld_model_fn`, reloads its trained weights from disk, and pops
    the final layer.

    NOTE(review): `model` is rebound by `ld_model_fn`; the returned model
    is the freshly built one, while the model passed in only has its
    activations patched.
    """
    registerGradient()
    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': name}):
        # get layers that have an activation
        layer_dict = [layer for layer in model.layers[1:]
                      if hasattr(layer, 'activation')]
        # replace relu activation so the gradient override takes effect
        for layer in layer_dict:
            if layer.activation == keras.activations.relu:
                layer.activation = tf.nn.relu
        # rebuild the architecture inside the override and reload trained weights
        model = ld_model_fn(model_no, channels, activation, voxelCount, nbClasses)
        model.load_weights('log/weights/model%s_%schannel_%sactivation_%svoxel_count_%sclasses.h5' % (model_no, channels, activation, voxelCount, nbClasses))
        # Popping the softmax layer as it creates ambiguity in the explanation
        model.pop()
    return model
def GradCAM(gradient_function, input_file):
    """Compute a GradCAM attention map for one input volume.

    Parameters
    ----------
    gradient_function : the K.function built by `prepareGradCAM`, mapping
        [input, learning_phase] -> [conv activations, gradients].
    input_file : numpy array holding one input sample; the upsampled CAM
        is written into channel 0 of a same-shaped array.

    Returns
    -------
    numpy array, same shape as `input_file`: the GradCAM heat map blended
    with the input (equal weights) and rescaled so its max is 1.
    """
    # Conv-layer activations and their gradients, in inference mode (phase 0).
    output, grads_val = gradient_function([input_file, 0])
    grads_val = grads_val / (np.max(grads_val) + K.epsilon())
    print(grads_val.shape)
    # Channel weights: average each gradient channel over the spatial axes.
    weights = np.mean(grads_val, axis=(1, 2, 3))
    # np.mean leaves a leading batch axis; flatten to one scalar per channel.
    # (The original called weights.flatten() and discarded the result -- a no-op.)
    weights = weights.flatten()
    print('weights', weights)
    print('output', output.shape)
    # Query the data format once instead of on every loop iteration.
    channels_last = K.image_data_format() == "channels_last"
    if channels_last:
        grad_cam = np.ones(output.shape[1:-1], dtype=K.floatx())
    else:
        grad_cam = np.ones(output.shape[2:], dtype=K.floatx())
    # Weighted sum of the activation channels.
    for i, w in enumerate(weights):
        if channels_last:
            grad_cam += w * output[0, ..., i]
        else:
            grad_cam += w * output[0, i, ...]
    # ReLU the map, then rescale it to [0, 1].
    grad_cam = np.maximum(grad_cam, 0)
    print(weights)
    grad_cam = grad_cam / np.max(grad_cam)
    # Upsample the CAM to the input resolution and write it into channel 0.
    attMap = np.zeros_like(input_file)
    # NOTE(review): zip pairs input_file.shape with grad_cam.shape from the
    # left; if input_file carries a leading batch axis the zoom factors are
    # misaligned by one -- confirm the expected input rank.
    zoom_factor = [i / (j * 1.0) for i, j in zip(input_file.shape, grad_cam.shape)]
    attMap[..., 0] = zoom(grad_cam, zoom_factor)
    # Blend the heat map with the input and normalize the result.
    attMap = np.float32(attMap) + np.float32(input_file)
    attMap = attMap / np.max(attMap)
    return attMap
| [
"tensorflow.python.framework.ops.RegisterGradient",
"keras.backend.sum",
"keras.backend.learning_phase",
"keras.backend.gradients",
"keras.backend.floatx",
"scipy.ndimage.interpolation.zoom",
"tensorflow.cast",
"numpy.mean",
"keras.backend.image_data_format",
"keras.backend.square",
"numpy.max",... | [((947, 977), 'keras.backend.sum', 'K.sum', (['model.layers[-1].output'], {}), '(model.layers[-1].output)\n', (952, 977), True, 'from keras import backend as K\n'), ((2211, 2233), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2231, 2233), True, 'import tensorflow as tf\n'), ((3258, 3292), 'numpy.mean', 'np.mean', (['grads_val'], {'axis': '(1, 2, 3)'}), '(grads_val, axis=(1, 2, 3))\n', (3265, 3292), True, 'import numpy as np\n'), ((3792, 3815), 'numpy.maximum', 'np.maximum', (['grad_cam', '(0)'], {}), '(grad_cam, 0)\n', (3802, 3815), True, 'import numpy as np\n'), ((3891, 3916), 'numpy.zeros_like', 'np.zeros_like', (['input_file'], {}), '(input_file)\n', (3904, 3916), True, 'import numpy as np\n'), ((4029, 4056), 'scipy.ndimage.interpolation.zoom', 'zoom', (['grad_cam', 'zoom_factor'], {}), '(grad_cam, zoom_factor)\n', (4033, 4056), False, 'from scipy.ndimage.interpolation import zoom\n'), ((368, 405), 'keras.backend.one_hot', 'K.one_hot', (['category_index', 'nb_classes'], {}), '(category_index, nb_classes)\n', (377, 405), True, 'from keras import backend as K\n'), ((786, 844), 'keras.layers.Lambda', 'Lambda', (['loss_function'], {'output_shape': 'loss_calculation_shape'}), '(loss_function, output_shape=loss_calculation_shape)\n', (792, 844), False, 'from keras.layers import Lambda\n'), ((1375, 1413), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""GuidedBackProp"""'], {}), "('GuidedBackProp')\n", (1395, 1413), False, 'from tensorflow.python.framework import ops\n'), ((3386, 3407), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3405, 3407), True, 'from keras import backend as K\n'), ((3593, 3614), 'numpy.transpose', 'np.transpose', (['weights'], {}), '(weights)\n', (3605, 3614), True, 'import numpy as np\n'), ((3861, 3877), 'numpy.max', 'np.max', (['grad_cam'], {}), '(grad_cam)\n', (3867, 3877), True, 'import numpy as np\n'), ((4150, 4164), 'numpy.max', 
'np.max', (['attMap'], {}), '(attMap)\n', (4156, 4164), True, 'import numpy as np\n'), ((1108, 1138), 'keras.backend.gradients', 'K.gradients', (['loss', 'conv_output'], {}), '(loss, conv_output)\n', (1119, 1138), True, 'from keras import backend as K\n'), ((1202, 1220), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (1218, 1220), True, 'from keras import backend as K\n'), ((1975, 1994), 'keras.backend.sum', 'K.sum', (['layer_output'], {}), '(layer_output)\n', (1980, 1994), True, 'from keras import backend as K\n'), ((2044, 2062), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (2060, 2062), True, 'from keras import backend as K\n'), ((3184, 3201), 'numpy.max', 'np.max', (['grads_val'], {}), '(grads_val)\n', (3190, 3201), True, 'import numpy as np\n'), ((3204, 3215), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3213, 3215), True, 'from keras import backend as K\n'), ((3628, 3649), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3647, 3649), True, 'from keras import backend as K\n'), ((4076, 4094), 'numpy.float32', 'np.float32', (['attMap'], {}), '(attMap)\n', (4086, 4094), True, 'import numpy as np\n'), ((4103, 4125), 'numpy.float32', 'np.float32', (['input_file'], {}), '(input_file)\n', (4113, 4125), True, 'import numpy as np\n'), ((1564, 1598), 'tensorflow.cast', 'tf.cast', (['(op.inputs[0] > 0.0)', 'dtype'], {}), '(op.inputs[0] > 0.0, dtype)\n', (1571, 1598), True, 'import tensorflow as tf\n'), ((3481, 3491), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3489, 3491), True, 'from keras import backend as K\n'), ((3554, 3564), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3562, 3564), True, 'from keras import backend as K\n'), ((525, 536), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (533, 536), True, 'from keras import backend as K\n'), ((1518, 1544), 'tensorflow.cast', 'tf.cast', (['(grad > 0.0)', 'dtype'], {}), '(grad > 0.0, dtype)\n', (1525, 
1544), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import sys
sys.path.append('/Users/slade/Documents/Code/machine-learning/Python/ffm/tools.py')
from tools import transfer_data, get_batch
class Args(object):
    """Hyper-parameters and file-system paths for the FFM training run.

    `f`, `p` and `feature2field` are placeholders here; the __main__
    block overwrites them from the pickled field dictionaries.
    """
    # number of latent factors per (feature, field) pair
    k = 6
    # num of fields
    f = 24
    # num of features (one-hot input dimension)
    p = 100
    learning_rate = 0.1
    batch_size = 64
    l2_reg_rate = 0.001
    # feature index -> field index mapping; loaded from pickle in __main__
    feature2field = None
    # directory where checkpoints are saved / restored
    checkpoint_dir = '/Users/slade/Documents/Code/machine-learning/data/ffm/saver'
    is_training = True
    epoch = 1
class Model(object):
    """Field-aware Factorization Machine (FFM) built on the TF1 graph API.

    Prediction: bias + <w1, x> plus the pairwise interaction term
    sum_{i<j} (v[i, field(j)] . v[j, field(i)]) * x_i * x_j.
    Trained with logistic loss for -1/+1 labels, L2 regularization and
    gradient-clipped SGD.
    """

    def __init__(self, args):
        """Copy hyper-parameters and the feature->field map from `args`."""
        self.k = args.k
        self.f = args.f
        self.p = args.p
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.l2_reg_rate = args.l2_reg_rate
        self.feature2field = args.feature2field
        self.checkpoint_dir = args.checkpoint_dir

    def build_model(self):
        """Construct placeholders, the FFM forward pass, loss and train op."""
        # X: one batch of input features; y: -1/+1 labels.
        self.X = tf.placeholder('float32', [self.batch_size, self.p])
        self.y = tf.placeholder('float32', [None, 1])
        # linear part
        with tf.variable_scope('linear_layer'):
            b = tf.get_variable('bias', shape=[1],
                                initializer=tf.zeros_initializer())
            self.w1 = tf.get_variable('w1', shape=[self.p, 1],
                                      initializer=tf.truncated_normal_initializer(mean=0, stddev=0.01))
            # shape of [None, 1]
            self.linear_terms = tf.add(tf.matmul(self.X, self.w1), b)
            print('self.linear_terms:')
            print(self.linear_terms)

        with tf.variable_scope('nolinear_layer'):
            self.v = tf.get_variable('v', shape=[self.p, self.f, self.k], dtype='float32',
                                     initializer=tf.truncated_normal_initializer(mean=0, stddev=0.01))
            # v:pxfxk -- one k-dim latent vector per (feature, field) pair
            self.field_cross_interaction = tf.constant(0, dtype='float32')
            # for each feature i
            for i in range(self.p):
                # pair i only with features not yet matched, i.e. j starts at i+1 as in the paper
                for j in range(i + 1, self.p):
                    # NOTE: prints O(p^2) lines while building the graph
                    print('i:%s,j:%s' % (i, j))
                    # vifj
                    vifj = self.v[i, self.feature2field[j]]
                    # vjfi
                    vjfi = self.v[j, self.feature2field[i]]
                    # vi . vj
                    vivj = tf.reduce_sum(tf.multiply(vifj, vjfi))
                    # xi . xj
                    xixj = tf.multiply(self.X[:, i], self.X[:, j])
                    self.field_cross_interaction += tf.multiply(vivj, xixj)
            self.field_cross_interaction = tf.reshape(self.field_cross_interaction, (self.batch_size, 1))
            print('self.field_cross_interaction:')
            print(self.field_cross_interaction)
        self.y_out = tf.add(self.linear_terms, self.field_cross_interaction)
        print('y_out_prob:')
        print(self.y_out)
        # logistic loss for -1/+1 labels: log(1 + exp(-y * y_hat))
        self.loss = tf.reduce_mean(tf.log(1 + tf.exp(-self.y * self.y_out)))
        # regularization: sum(w^2)/2 * l2_reg_rate
        # only the weights are regularized here; add the bias term if needed
        self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg_rate)(self.w1)
        self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg_rate)(self.v)
        self.global_step = tf.Variable(0, trainable=False)
        opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        trainable_params = tf.trainable_variables()
        print(trainable_params)
        # clip the global gradient norm to 5 before applying updates
        gradients = tf.gradients(self.loss, trainable_params)
        clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
        self.train_op = opt.apply_gradients(
            zip(clip_gradients, trainable_params), global_step=self.global_step)

    def train(self, sess, x, label):
        """Run one optimization step; return (loss, global_step)."""
        loss, _, step = sess.run([self.loss, self.train_op, self.global_step], feed_dict={
            self.X: x,
            self.y: label
        })
        return loss, step

    def cal(self, sess, x, label):
        """Return (raw model outputs, labels) for a batch -- no training."""
        y_out_prob_ = sess.run([self.y_out], feed_dict={
            self.X: x,
            self.y: label
        })
        return y_out_prob_, label

    def predict(self, sess, x):
        """Return raw model outputs for a batch of features."""
        result = sess.run([self.y_out], feed_dict={
            self.X: x
        })
        return result

    def save(self, sess, path):
        """Checkpoint all graph variables to `path`."""
        saver = tf.train.Saver()
        saver.save(sess, save_path=path)

    def restore(self, sess, path):
        """Restore graph variables from the checkpoint at `path`."""
        saver = tf.train.Saver()
        saver.restore(sess, save_path=path)
if __name__ == '__main__':
    # loading base params
    args = Args()
    # loading base data; click labels are remapped 0 -> -1 because the
    # model's logistic loss expects -1/+1 labels
    train_data_path = '/Users/slade/Documents/Code/machine-learning/data/avazu_CTR/train_sample.csv'
    train_data = pd.read_csv(train_data_path)
    train_data['click'] = train_data['click'].map(lambda x: -1 if x == 0 else x)
    # loading feature2field dict (feature index -> field index)
    with open('/Users/slade/Documents/Code/machine-learning/data/avazu_CTR/sets/feature2field.pkl', 'rb') as f:
        args.feature2field = pickle.load(f)
    # per-field value -> one-hot index dictionaries
    fields = ['C1', 'C18', 'C16', 'click']
    fields_dict = {}
    for field in fields:
        with open('/Users/slade/Documents/Code/machine-learning/data/avazu_CTR/sets/' + field + '.pkl', 'rb') as f:
            fields_dict[field] = pickle.load(f)
    # 'click' is the label, so it does not count as a field
    args.f = len(fields) - 1
    print('f:%s' % (args.f))
    # p derived from the highest one-hot index in the 'click' dict;
    # presumably the -1 excludes a label slot -- TODO confirm
    args.p = max(fields_dict['click'].values()) - 1
    print('p:%s' % (max(fields_dict['click'].values()) - 1))
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    all_len = max(fields_dict['click'].values()) + 1
    # number of full batches per epoch (the remainder rows are dropped)
    cnt = train_data.shape[0] // args.batch_size
    with tf.Session(config=gpu_config) as sess:
        model = Model(args)
        model.build_model()
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if args.is_training:
            # batch_size data
            for i in range(args.epoch):
                for j in range(cnt):
                    data = get_batch(train_data, args.batch_size, j)
                    actual_batch_size = len(data)
                    batch_X = []
                    batch_y = []
                    for k in range(actual_batch_size):
                        sample = data.iloc[k, :]
                        array = transfer_data(sample, fields_dict, all_len)
                        # the last two entries encode the label: [0,-1] -> label 0, [0,1] -> label 1
                        batch_X.append(array[:-2])
                        # the final entry is the label itself
                        batch_y.append(array[-1])
                    batch_X = np.array(batch_X)
                    batch_y = np.array(batch_y)
                    batch_y = batch_y.reshape(args.batch_size, 1)
                    loss, step = model.train(sess, batch_X, batch_y)
                    if j % 100 == 0:
                        print('the times of training is %d, and the loss is %s' % (j, loss))
            model.save(sess, args.checkpoint_dir)
            # r1, r2 = model.cal(sess, batch_X, batch_y)
            # print(r1)
            # print(r2)
        else:
            # inference mode: restore the checkpoint and score every batch
            model.restore(sess, args.checkpoint_dir)
            for j in range(cnt):
                data = get_batch(train_data, args.batch_size, j)
                actual_batch_size = len(data)
                batch_X = []
                for k in range(actual_batch_size):
                    sample = data.iloc[k, :]
                    array = transfer_data(sample, fields_dict, all_len)
                    batch_X.append(array[:-2])
                batch_X = np.array(batch_X)
                result = model.predict(sess, batch_X)
                print(result)
| [
"tensorflow.local_variables_initializer",
"pandas.read_csv",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.multiply",
"tensorflow.truncated_normal_initializer",
"tensorflow.gradients",
"numpy.array",
"tensorflow.zeros_initializer",
"sys.path.append",
"tensorflow.clip_by_global_norm",
"... | [((89, 177), 'sys.path.append', 'sys.path.append', (['"""/Users/slade/Documents/Code/machine-learning/Python/ffm/tools.py"""'], {}), "(\n '/Users/slade/Documents/Code/machine-learning/Python/ffm/tools.py')\n", (104, 177), False, 'import sys\n'), ((4709, 4737), 'pandas.read_csv', 'pd.read_csv', (['train_data_path'], {}), '(train_data_path)\n', (4720, 4737), True, 'import pandas as pd\n'), ((5452, 5468), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5466, 5468), True, 'import tensorflow as tf\n'), ((959, 1011), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[self.batch_size, self.p]'], {}), "('float32', [self.batch_size, self.p])\n", (973, 1011), True, 'import tensorflow as tf\n'), ((1029, 1065), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, 1]'], {}), "('float32', [None, 1])\n", (1043, 1065), True, 'import tensorflow as tf\n'), ((2813, 2868), 'tensorflow.add', 'tf.add', (['self.linear_terms', 'self.field_cross_interaction'], {}), '(self.linear_terms, self.field_cross_interaction)\n', (2819, 2868), True, 'import tensorflow as tf\n'), ((3295, 3326), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (3306, 3326), True, 'import tensorflow as tf\n'), ((3341, 3394), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (3374, 3394), True, 'import tensorflow as tf\n'), ((3422, 3446), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3444, 3446), True, 'import tensorflow as tf\n'), ((3499, 3540), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'trainable_params'], {}), '(self.loss, trainable_params)\n', (3511, 3540), True, 'import tensorflow as tf\n'), ((3569, 3605), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', '(5)'], {}), '(gradients, 5)\n', (3591, 3605), True, 'import tensorflow as tf\n'), ((4323, 4339), 
'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4337, 4339), True, 'import tensorflow as tf\n'), ((4433, 4449), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4447, 4449), True, 'import tensorflow as tf\n'), ((4993, 5007), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5004, 5007), False, 'import pickle\n'), ((5628, 5657), 'tensorflow.Session', 'tf.Session', ([], {'config': 'gpu_config'}), '(config=gpu_config)\n', (5638, 5657), True, 'import tensorflow as tf\n'), ((1102, 1135), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""linear_layer"""'], {}), "('linear_layer')\n", (1119, 1135), True, 'import tensorflow as tf\n'), ((1617, 1652), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""nolinear_layer"""'], {}), "('nolinear_layer')\n", (1634, 1652), True, 'import tensorflow as tf\n'), ((1913, 1944), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': '"""float32"""'}), "(0, dtype='float32')\n", (1924, 1944), True, 'import tensorflow as tf\n'), ((2630, 2692), 'tensorflow.reshape', 'tf.reshape', (['self.field_cross_interaction', '(self.batch_size, 1)'], {}), '(self.field_cross_interaction, (self.batch_size, 1))\n', (2640, 2692), True, 'import tensorflow as tf\n'), ((3128, 3178), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.l2_reg_rate'], {}), '(self.l2_reg_rate)\n', (3160, 3178), True, 'import tensorflow as tf\n'), ((3209, 3259), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.l2_reg_rate'], {}), '(self.l2_reg_rate)\n', (3241, 3259), True, 'import tensorflow as tf\n'), ((5248, 5262), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5259, 5262), False, 'import pickle\n'), ((5740, 5773), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5771, 5773), True, 'import tensorflow as tf\n'), ((5792, 5824), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), 
'()\n', (5822, 5824), True, 'import tensorflow as tf\n'), ((1495, 1521), 'tensorflow.matmul', 'tf.matmul', (['self.X', 'self.w1'], {}), '(self.X, self.w1)\n', (1504, 1521), True, 'import tensorflow as tf\n'), ((7212, 7253), 'tools.get_batch', 'get_batch', (['train_data', 'args.batch_size', 'j'], {}), '(train_data, args.batch_size, j)\n', (7221, 7253), False, 'from tools import transfer_data, get_batch\n'), ((7570, 7587), 'numpy.array', 'np.array', (['batch_X'], {}), '(batch_X)\n', (7578, 7587), True, 'import numpy as np\n'), ((1232, 1254), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (1252, 1254), True, 'import tensorflow as tf\n'), ((1369, 1421), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0)', 'stddev': '(0.01)'}), '(mean=0, stddev=0.01)\n', (1400, 1421), True, 'import tensorflow as tf\n'), ((1794, 1846), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0)', 'stddev': '(0.01)'}), '(mean=0, stddev=0.01)\n', (1825, 1846), True, 'import tensorflow as tf\n'), ((2471, 2510), 'tensorflow.multiply', 'tf.multiply', (['self.X[:, i]', 'self.X[:, j]'], {}), '(self.X[:, i], self.X[:, j])\n', (2482, 2510), True, 'import tensorflow as tf\n'), ((2563, 2586), 'tensorflow.multiply', 'tf.multiply', (['vivj', 'xixj'], {}), '(vivj, xixj)\n', (2574, 2586), True, 'import tensorflow as tf\n'), ((3002, 3030), 'tensorflow.exp', 'tf.exp', (['(-self.y * self.y_out)'], {}), '(-self.y * self.y_out)\n', (3008, 3030), True, 'import tensorflow as tf\n'), ((5989, 6030), 'tools.get_batch', 'get_batch', (['train_data', 'args.batch_size', 'j'], {}), '(train_data, args.batch_size, j)\n', (5998, 6030), False, 'from tools import transfer_data, get_batch\n'), ((6555, 6572), 'numpy.array', 'np.array', (['batch_X'], {}), '(batch_X)\n', (6563, 6572), True, 'import numpy as np\n'), ((6603, 6620), 'numpy.array', 'np.array', (['batch_y'], {}), '(batch_y)\n', (6611, 6620), True, 'import 
numpy as np\n'), ((7453, 7496), 'tools.transfer_data', 'transfer_data', (['sample', 'fields_dict', 'all_len'], {}), '(sample, fields_dict, all_len)\n', (7466, 7496), False, 'from tools import transfer_data, get_batch\n'), ((2389, 2412), 'tensorflow.multiply', 'tf.multiply', (['vifj', 'vjfi'], {}), '(vifj, vjfi)\n', (2400, 2412), True, 'import tensorflow as tf\n'), ((6283, 6326), 'tools.transfer_data', 'transfer_data', (['sample', 'fields_dict', 'all_len'], {}), '(sample, fields_dict, all_len)\n', (6296, 6326), False, 'from tools import transfer_data, get_batch\n')] |
# -*- coding: utf-8 -*-
"""
根据手工标注的标签生成npy文件,每个npy文件保存了一个二维数组(width, height, 8+1),
前8个通道是图像数据,最后一个通道是标签
"""
import os
import sys
import glob
import json
import tqdm
import skimage.io
import numpy as np
import matplotlib.pyplot as plt
def find_bnd(img):
    """Locate the annotated rectangular region in a label image.

    The label image is an RGBA image the same size as the satellite image.
    Unlabeled areas are opaque black; a pixel belongs to the annotation if
    its red channel is positive (building change) or it is fully
    transparent (all four channels 0, i.e. alpha == 0).

    The image is probed on a coarse 256-pixel grid to find one marked
    seed pixel, then the rectangle is grown from that seed along both
    axes.  An annotated region that misses every grid point is not found.

    Args:
        img: RGBA image array of shape (rows, cols, 4).

    Returns:
        (r_s, r_e, c_s, c_e): half-open row/column bounds of the region,
        or (-1, -1, -1, -1) if no marked grid point was found.
    """
    def marked(r, c):
        # A pixel is part of the annotation if it is red or transparent.
        return img[r, c, 0] > 0 or img[r, c, 3] == 0

    rows = img.shape[0]
    cols = img.shape[1]
    # Probe a coarse grid for a seed pixel inside the annotation.
    r_m = -1
    c_m = -1
    for i in range(0, rows, 256):
        if r_m >= 0:
            break
        for j in range(0, cols, 256):
            if marked(i, j):
                r_m = i
                c_m = j
                break
    if r_m < 0 or c_m < 0:
        return -1, -1, -1, -1
    # Grow upward/leftward from the seed.  The bounds guards fix the
    # original code, which could wrap around via negative indices (or
    # raise IndexError on the far edges) when the region touches the
    # image border.
    r_s = r_m
    while r_s >= 0 and marked(r_s, c_m):
        r_s -= 1
    r_s += 1
    c_s = c_m
    while c_s >= 0 and marked(r_m, c_s):
        c_s -= 1
    c_s += 1
    # Grow downward/rightward; stop at the image boundary.
    r_e = r_m + 1
    while r_e < rows and marked(r_e, c_m):
        r_e += 1
    c_e = c_m + 1
    while c_e < cols and marked(r_m, c_e):
        c_e += 1
    return r_s, r_e, c_s, c_e
def prepare_end2end_data(path15, path17, input_dir, output_dir, base=0, suffix='p2'):
    """Convert hand-annotated label images into .npy training samples.

    For every mask image in ``input_dir`` the annotated rectangle is
    located with ``find_bnd``, the corresponding patches are cut out of
    the 2015 and 2017 satellite images, the binary change label is taken
    from the mask's red channel, and each (im15, im17, label) stack is
    split into four quadrants saved as
    ``<base>_<k>_<row>#<col>_<suffix>.npy``.

    Args:
        path15: path to the 2015 satellite image (multi-channel tif).
        path17: path to the 2017 satellite image (multi-channel tif).
        input_dir: directory containing the annotated mask tif files.
        output_dir: directory the .npy files are written to.
        base: starting index used in the output file names.
        suffix: suffix appended to the output file names.
    """
    # The original code checked/created the same output directory twice;
    # one exist_ok call is enough.
    os.makedirs(output_dir, exist_ok=True)
    im15 = skimage.io.imread(path15).astype(np.float32)
    im17 = skimage.io.imread(path17).astype(np.float32)
    masks = glob.glob(os.path.join(input_dir, '*.tif'))
    for mp in tqdm.tqdm(masks):
        msk = skimage.io.imread(mp)
        r_s, r_e, c_s, c_e = find_bnd(msk)
        if r_s < 0:
            print('%s无效!'%mp, file=sys.stderr)
            continue
        # Cut the annotated rectangle out of both years plus the mask.
        d15 = im15[r_s:r_e, c_s:c_e, :]
        d17 = im17[r_s:r_e, c_s:c_e, :]
        lab = (msk[r_s:r_e, c_s:c_e, 0] > 0).astype(d15.dtype)
        lab = np.expand_dims(lab, 2)
        d = np.concatenate([d15, d17, lab], 2)
        # Split into four quadrants to obtain smaller training samples;
        # each file name records the quadrant's absolute top-left corner.
        vp = d.shape[0]//2
        hp = d.shape[1]//2
        lst = [d[:vp, :hp, :], d[vp:, :hp, :], d[:vp, hp:, :], d[vp:, hp:, :]]
        cord = [(r_s, c_s), (r_s+vp, c_s), (r_s, c_s+hp), (r_s+vp, c_s+hp)]
        for k in range(len(lst)):
            r, c = cord[k]
            dp = os.path.join(output_dir, '%d_%d_%d#%d_%s.npy'%(base, k, r, c, suffix))
            np.save(dp, lst[k])
        base += 1
def end2end_split(data_dir, splits=None):
    """Partition the .npy samples in ``data_dir`` into train/validation/test.

    The file names are shuffled and the resulting lists are written to
    ``<dir_name>_train_val_test.json`` inside ``data_dir``.

    Args:
        data_dir: directory containing the .npy sample files.
        splits: two cumulative fractions [train_end, validation_end];
            defaults to [0.7, 0.9] (70% train / 20% validation / 10% test).
    """
    if splits is None:
        splits = [0.7, 0.9]
    assert splits[0] < splits[1]
    # Keep only the file names, not the full paths.
    dat_all = [os.path.basename(p)
               for p in glob.glob(os.path.join(data_dir, '*.npy'))]
    np.random.shuffle(dat_all)
    sp1 = int(len(dat_all)*splits[0])
    sp2 = int(len(dat_all)*splits[1])
    mp = {'train': dat_all[:sp1],
          'validation': dat_all[sp1:sp2],
          'test': dat_all[sp2:]}
    # Strip trailing separators so the directory name is never empty
    # (the original only handled a single trailing '/' or '\\').
    dir_name = os.path.basename(data_dir.rstrip('/\\'))
    with open(os.path.join(data_dir, '%s_train_val_test.json'%dir_name), 'w') as file:
        json.dump(mp, file)
def end2end_data_view(input_dir, part='validation'):
    """Interactively display the samples of one split.

    Loads the split json found in ``input_dir`` and shows, for every
    listed .npy sample, the 2015 and 2017 RGB patches side by side with
    the binary label rendered into the alpha channel.

    Args:
        input_dir: directory with the .npy samples and the split json.
        part: which split to show ('train', 'validation' or 'test').
    """
    split_path = glob.glob(os.path.join(input_dir, '*.json'))[0]
    split_map = json.load(open(split_path))
    for name in split_map[part]:
        sample = np.load(os.path.join(input_dir, name))
        rgb15 = skimage.img_as_ubyte(sample[:, :, :3].astype(np.uint16))
        rgb17 = skimage.img_as_ubyte(sample[:, :, 4:7].astype(np.uint16))
        # Encode the binary label as an alpha channel: changed pixels
        # become fully opaque (255), unchanged ones slightly transparent.
        alpha = sample[:, :, -1].astype(np.uint8)
        alpha *= 90
        alpha += 255 - 90
        alpha = np.expand_dims(alpha, 2)
        plt.subplot(1, 2, 1)
        plt.imshow(np.concatenate([rgb15, alpha], 2))
        plt.title('2015')
        plt.suptitle(name)
        plt.subplot(1, 2, 2)
        plt.imshow(np.concatenate([rgb17, alpha], 2))
        plt.title('2017')
        plt.show()
if __name__ == '__main__':
    # Generate .npy samples from the 2015/2017 P2 satellite images and
    # the hand-annotated masks, then split them into train/val/test.
    prepare_end2end_data(
        '../../input/origin/2015p2-denoise-rgbn.tif',
        '../../input/origin/2017p2-denoise-rgbn.tif',
        '../../input/mark/p2_end2end_1102/',
        '../../input/mark/p2_test/', base=0, suffix='p2')
    end2end_split('../../input/mark/p2_test/')
end2end_data_view('../../input/mark/p2_test/', part='validation') | [
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.makedirs",
"matplotlib.pyplot.show",
"tqdm.tqdm",
"os.path.join",
"os.path.splitext",
"json.dumps",
"os.path.split",
"numpy.expand_dims",
"numpy.concatenate",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotl... | [((1677, 1693), 'tqdm.tqdm', 'tqdm.tqdm', (['masks'], {}), '(masks)\n', (1686, 1693), False, 'import tqdm\n'), ((2822, 2848), 'numpy.random.shuffle', 'np.random.shuffle', (['dat_all'], {}), '(dat_all)\n', (2839, 2848), True, 'import numpy as np\n'), ((1342, 1368), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1356, 1368), False, 'import os\n'), ((1378, 1401), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1389, 1401), False, 'import os\n'), ((1536, 1568), 'os.path.join', 'os.path.join', (['input_dir', '"""*.tif"""'], {}), "(input_dir, '*.tif')\n", (1548, 1568), False, 'import os\n'), ((1607, 1631), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (1621, 1631), False, 'import os\n'), ((1641, 1662), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (1652, 1662), False, 'import os\n'), ((2049, 2071), 'numpy.expand_dims', 'np.expand_dims', (['lab', '(2)'], {}), '(lab, 2)\n', (2063, 2071), True, 'import numpy as np\n'), ((2084, 2118), 'numpy.concatenate', 'np.concatenate', (['[d15, d17, lab]', '(2)'], {}), '([d15, d17, lab], 2)\n', (2098, 2118), True, 'import numpy as np\n'), ((2343, 2360), 'os.path.split', 'os.path.split', (['mp'], {}), '(mp)\n', (2356, 2360), False, 'import os\n'), ((2376, 2395), 'os.path.splitext', 'os.path.splitext', (['n'], {}), '(n)\n', (2392, 2395), False, 'import os\n'), ((2785, 2816), 'os.path.join', 'os.path.join', (['data_dir', '"""*.npy"""'], {}), "(data_dir, '*.npy')\n", (2797, 2816), False, 'import os\n'), ((2907, 2932), 'os.path.split', 'os.path.split', (['dat_all[k]'], {}), '(dat_all[k])\n', (2920, 2932), False, 'import os\n'), ((3198, 3226), 'os.path.split', 'os.path.split', (['data_dir[:-1]'], {}), '(data_dir[:-1])\n', (3211, 3226), False, 'import os\n'), ((3259, 3282), 'os.path.split', 'os.path.split', (['data_dir'], {}), '(data_dir)\n', (3272, 3282), False, 'import os\n'), ((3636, 3662), 'os.path.join', 'os.path.join', 
(['input_dir', 'p'], {}), '(input_dir, p)\n', (3648, 3662), False, 'import os\n'), ((3675, 3685), 'numpy.load', 'np.load', (['t'], {}), '(t)\n', (3682, 3685), True, 'import numpy as np\n'), ((3912, 3934), 'numpy.expand_dims', 'np.expand_dims', (['msk', '(2)'], {}), '(msk, 2)\n', (3926, 3934), True, 'import numpy as np\n'), ((3950, 3980), 'numpy.concatenate', 'np.concatenate', (['[im15, msk]', '(2)'], {}), '([im15, msk], 2)\n', (3964, 3980), True, 'import numpy as np\n'), ((3995, 4025), 'numpy.concatenate', 'np.concatenate', (['[im17, msk]', '(2)'], {}), '([im17, msk], 2)\n', (4009, 4025), True, 'import numpy as np\n'), ((4033, 4053), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4044, 4053), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4076), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im15'], {}), '(im15)\n', (4070, 4076), True, 'import matplotlib.pyplot as plt\n'), ((4085, 4102), 'matplotlib.pyplot.title', 'plt.title', (['"""2015"""'], {}), "('2015')\n", (4094, 4102), True, 'import matplotlib.pyplot as plt\n'), ((4111, 4126), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['p'], {}), '(p)\n', (4123, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4155), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4146, 4155), True, 'import matplotlib.pyplot as plt\n'), ((4162, 4178), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im17'], {}), '(im17)\n', (4172, 4178), True, 'import matplotlib.pyplot as plt\n'), ((4187, 4204), 'matplotlib.pyplot.title', 'plt.title', (['"""2017"""'], {}), "('2017')\n", (4196, 4204), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4221, 4223), True, 'import matplotlib.pyplot as plt\n'), ((2474, 2544), 'os.path.join', 'os.path.join', (['data_dir', "('%d_%d_%d#%d_%s.npy' % (base, k, r, c, suffix))"], {}), "(data_dir, '%d_%d_%d#%d_%s.npy' % (base, k, r, c, suffix))\n", (2486, 2544), 
False, 'import os\n'), ((2555, 2574), 'numpy.save', 'np.save', (['dp', 'lst[k]'], {}), '(dp, lst[k])\n', (2562, 2574), True, 'import numpy as np\n'), ((3297, 3356), 'os.path.join', 'os.path.join', (['data_dir', "('%s_train_val_test.json' % dir_name)"], {}), "(data_dir, '%s_train_val_test.json' % dir_name)\n", (3309, 3356), False, 'import os\n'), ((3389, 3403), 'json.dumps', 'json.dumps', (['mp'], {}), '(mp)\n', (3399, 3403), False, 'import json\n'), ((3516, 3549), 'os.path.join', 'os.path.join', (['input_dir', '"""*.json"""'], {}), "(input_dir, '*.json')\n", (3528, 3549), False, 'import os\n')] |
# Author: <NAME>
# Date: 5 Feb 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess 3D tensors (gray-scale images, gray-scale
videos, speech/time series, text, etc).
If the example size is not fixed (e.g. images of different size), crop a region
then rescale to a fixed size with fixed height-width ratio.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import time
# tf.enable_eager_execution()
# Default spatial size (height, width) cropped patches are resized to.
_GLOBAL_CROP_SIZE = (224,224)
# Default number of frames kept when cropping the time axis.
_GLOBAL_NUM_FRAMES = 10
# Default number of random spatial crops taken per batch example.
_GLOBAL_NUM_REPEAT = 4
# Default fraction of height/width kept by a spatial crop.
_GLOBAL_CROP_RATIO = 0.5
# Default buffer size used when shuffling the dataset.
_SHUFFLE_BUFFER = 1000
def _crop_3d_tensor(tensor_3d,
                    crop_size=_GLOBAL_CROP_SIZE,
                    num_frames=_GLOBAL_NUM_FRAMES,
                    num_repeat=_GLOBAL_NUM_REPEAT,
                    crop_ratio=_GLOBAL_CROP_RATIO):
  """Crop a batch of 3-D tensors to `crop_size`.

  One window of `num_frames` consecutive frames is sliced from the
  (zero-padded) time axis — the same window for the whole batch — then
  `num_repeat` random spatial crops of relative size `crop_ratio` are
  taken per example and resized to `crop_size`.

  Args:
    tensor_3d: A 3-D tensor batch of shape
      (batch_size, sequence_size, row_count, col_count)
    crop_size: A Tensor of type `int32`. A 1-D tensor of 2 elements,
      size = [crop_height, crop_width]. All cropped image patches are
      resized to this size. The aspect ratio of the image content is not
      preserved. Both crop_height and crop_width need to be positive.
    num_frames: Number of frames to keep (crop).
    num_repeat: The number of repetition of cropping cycle for each batch.
    crop_ratio: The ratio when cropping height and width.
  Returns:
    A Tensor of shape
      [num_repeat * batch_size, num_frames, crop_height, crop_width]
    where crop_size is equal to (crop_height, crop_width).
  """
  if not tensor_3d.shape.ndims == 4:
    raise ValueError("The shape of the tensor to crop should be " +
                     "[batch_size, sequence_size, row_count, col_count]!")
  # NOTE: sequence_size/row_count/col_count are unpacked but unused below.
  batch_size, sequence_size, row_count, col_count = tensor_3d.shape
  # Crop time axis
  # pad sequence if not long enough
  pad_size = tf.maximum(num_frames - tf.shape(tensor_3d)[1], 0)
  padded_tensor = tf.pad(tensor_3d, ((0,0), (0, pad_size), (0, 0), (0, 0)))
  maxval = padded_tensor.shape[1] - num_frames + 1
  # Randomly choose the beginning index of frames.
  # NOTE(review): np.random.randint runs at graph-construction time, so
  # the temporal window is fixed once per built graph, not re-drawn per
  # session step — confirm this is intended.
  begin = np.random.randint(0, maxval)
  sliced_tensor = tf.slice(padded_tensor,
                          begin=[0, begin, 0, 0],
                          size=[-1, num_frames, -1, -1])
  # Crop spatial axes
  # First, transpose from [batch_size, sequence_size, row_count, col_count]
  # to [batch_size, row_count, col_count, sequence_size]
  sliced_tensor = tf.transpose(sliced_tensor, perm=[0, 2, 3, 1])
  # sliced_tensor = tf.transpose(padded_tensor, perm=[0, 2, 3, 1])
  # Then apply `tf.image.crop_and_resize` by precompute some size info.
  # NOTE(review): tuple-unpacking a [2, N] Tensor into (y1, x1) relies on
  # Tensor iteration, which graph mode may reject — verify under the TF
  # version in use.  Box corners are in normalized [0, 1] coordinates.
  y1, x1 = tf.random.uniform(shape=[2, num_repeat * batch_size],
                             minval=0,
                             maxval=1 - crop_ratio)
  y2 = y1 + crop_ratio
  # = tf.random.uniform(shape=[num_repeat * batch_size],
  #                          minval=0,
  #                          maxval=1 - crop_ratio)
  x2 = x1 + crop_ratio
  boxes = tf.transpose([y1, x1, y2, x2])
  # Each original example is referenced num_repeat times, once per crop.
  box_ind = list(range(batch_size)) * num_repeat
  # At last, crop and resize
  resized_tensor = tf.image.crop_and_resize(sliced_tensor,
                                           boxes,
                                           box_ind,
                                           crop_size)
  # Move the time axis back to position 1: [N, num_frames, H, W].
  return tf.transpose(resized_tensor, perm=[0, 3, 1, 2])
def crop_time_axis(tensor_3d, num_frames, begin_index=None):
  """Given a 3-D tensor, take a slice of length `num_frames` on its time axis.

  Args:
    tensor_3d: A Tensor of shape [sequence_size, row_count, col_count]
    num_frames: An integer representing the resulted chunk (sequence) length
    begin_index: The begin of the chunk.  If falsy (`None` or 0), chosen
      randomly.  A caller-supplied value is passed straight to `tf.slice`,
      so it must be a length-3 begin vector [frame, 0, 0] — TODO confirm
      against callers.
  Returns:
    A Tensor of sequence length `num_frames`, which is a chunk of `tensor_3d`.
  """
  # pad sequence if not long enough.
  # BUG FIX: the time axis of a [sequence, row, col] tensor is axis 0,
  # but the original code read tf.shape(...)[1] (the row axis) for both
  # the padding amount and the random-begin bound, which could produce a
  # slice start past the end of the sequence.
  pad_size = tf.maximum(num_frames - tf.shape(tensor_3d)[0], 0)
  padded_tensor = tf.pad(tensor_3d, ((0, pad_size), (0, 0), (0, 0)))
  # If not given, randomly choose the beginning index of frames.
  if not begin_index:
    maxval = tf.shape(padded_tensor)[0] - num_frames + 1
    begin_index = tf.random.uniform([1],
                                    minval=0,
                                    maxval=maxval,
                                    dtype=tf.int32)
    # Keep the op name 'begin_index': it is fetched by name elsewhere.
    begin_index = tf.stack([begin_index[0], 0, 0], name='begin_index')
  sliced_tensor = tf.slice(padded_tensor,
                           begin=begin_index,
                           size=[num_frames, -1, -1])
  return sliced_tensor
def resize_space_axes(tensor_3d, new_row_count, new_col_count):
  """Resize the two spatial axes of a 3-D tensor to a target size.

  Args:
    tensor_3d: A Tensor of shape [sequence_size, row_count, col_count].
    new_row_count: An integer indicating the target row count.
    new_col_count: An integer indicating the target column count.

  Returns:
    A Tensor of shape [sequence_size, new_row_count, new_col_count].
  """
  # tf.image.resize_images expects channels last, so move the time axis
  # to the channel position, resize, then move it back to the front.
  channels_last = tf.transpose(tensor_3d, perm=[1, 2, 0])
  resized = tf.image.resize_images(channels_last,
                                  (new_row_count, new_col_count))
  return tf.transpose(resized, perm=[2, 0, 1])
def preprocess_tensor_3d(tensor_3d,
                         input_shape=None,
                         output_shape=None):
  """Preprocess a 3-D tensor: crop the time axis, then resize space axes.

  Args:
    tensor_3d: A Tensor of shape [sequence_size, row_count, col_count].
    input_shape: The static shape [sequence_size, row_count, col_count] of
      the input examples.  Non-positive components mean the corresponding
      dimension is unknown.
    output_shape: The target shape [sequence_size, row_count, col_count]
      of the output examples.  Non-positive components fall back to the
      module-level defaults.

  Returns:
    A Tensor of shape [num_frames, new_row_count, new_col_count].
  """
  if input_shape:
    # Non-positive entries denote unknown dimensions and must become None
    # before calling set_shape.
    # BUG FIX: the original computed this sanitized list but then passed
    # the raw `input_shape` to set_shape and discarded it.
    shape = [x if x > 0 else None for x in input_shape]
    tensor_3d.set_shape(shape)
  else:
    tensor_3d.set_shape([None, None, None])
  # Resolve each output dimension, defaulting unset (non-positive) ones.
  if output_shape and output_shape[0] > 0:
    num_frames = output_shape[0]
  else:
    num_frames = _GLOBAL_NUM_FRAMES
  if output_shape and output_shape[1] > 0:
    new_row_count = output_shape[1]
  else:
    new_row_count = _GLOBAL_CROP_SIZE[0]
  if output_shape and output_shape[2] > 0:
    new_col_count = output_shape[2]
  else:
    new_col_count = _GLOBAL_CROP_SIZE[1]
  tensor_t = crop_time_axis(tensor_3d, num_frames=num_frames)
  tensor_ts = resize_space_axes(tensor_t,
                                new_row_count=new_row_count,
                                new_col_count=new_col_count)
  return tensor_ts
def parse_record_fn(value, is_training, dtype):
  """For a (features, labels) pair `value`, apply preprocessing.

  Args:
    value: A tuple (matrix_bundle_0, ..., matrix_bundle_(N-1), labels);
      only the first matrix bundle and the labels are used.
    is_training: A boolean denoting whether the input is for training
      (currently unused).
    dtype: Data type to use for images/features (currently unused).

  Returns:
    A (preprocessed_tensor_3d, labels) pair.
  """
  # Retrieve the first matrix bundle; the label is the last element.
  tensor_3d = value[0]
  labels = value[-1]
  # The leftover debug print of the preprocessed tensor was removed.
  return preprocess_tensor_3d(tensor_3d), labels
def input_function(dataset,
                   is_training,
                   batch_size,
                   shuffle_buffer=_SHUFFLE_BUFFER,
                   parse_record_fn=parse_record_fn,
                   num_epochs=1,
                   dtype=tf.float32,
                   datasets_num_private_threads=None,
                   num_parallel_batches=1):
  """Given a Dataset of 3-D tensors, return an iterator over the records.
  Inspired from:
  https://github.com/tensorflow/models/blob/master/official/resnet/resnet_run_loop.py#L49
  Args:
    dataset: A Dataset representing 3-D tensors. Each example in this dataset
      has shape [sequence_size, row_count, col_count].
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of examples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (features, labels) pair.
    num_epochs: The number of epochs to repeat the dataset.
      NOTE(review): currently unused — the `repeat` call below is
      commented out.
    dtype: Data type to use for images/features.
    datasets_num_private_threads: Number of threads for a private
      threadpool created for all datasets computation.
      NOTE(review): setting this raises NameError — `threadpool` is
      referenced below but never imported in this file (it presumably
      came from tf.contrib's dataset threadpool module; confirm).
    num_parallel_batches: Number of parallel batches for tf.data.
  Returns:
    Dataset of (features, labels) pairs ready for iteration, where `features` is
    a 4-D tensor with known shape:
      [batch_size, new_sequence_size, new_row_count, new_col_count]
  """
  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  # dataset = dataset.prefetch(buffer_size=batch_size)
  if is_training:
    # Shuffles records before repeating to respect epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)
    # Repeats the dataset for the number of epochs to train.
    # dataset = dataset.repeat(num_epochs)
  # Parses the raw records into images and labels, fusing map and batch
  # into one op for throughput.
  dataset = dataset.apply(
      tf.data.experimental.map_and_batch(
          lambda *value: parse_record_fn(value, is_training, dtype),
          batch_size=batch_size,
          num_parallel_batches=num_parallel_batches,
          drop_remainder=False))
  # Operations between the final prefetch and the get_next call to the iterator
  # will happen synchronously during run time. We prefetch here again to
  # background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based
  # on how many devices are present.
  # dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
  # Defines a specific size thread pool for tf.data operations.
  if datasets_num_private_threads:
    tf.compat.v1.logging.info('datasets_num_private_threads: %s',
                              datasets_num_private_threads)
    # NOTE(review): `threadpool` is undefined here (missing import); this
    # branch cannot currently run.
    dataset = threadpool.override_threadpool(
        dataset,
        threadpool.PrivateThreadPool(
            datasets_num_private_threads,
            display_name='input_pipeline_thread_pool'))
  return dataset
def print_first_element(dataset):
  """Build an iterator over `dataset` and print its first element.

  Also dumps the current default graph to a TensorBoard event file in
  the working directory so the input pipeline can be inspected.
  """
  iterator = dataset.make_initializable_iterator()
  next_element = iterator.get_next()
  writer = tf.summary.FileWriter('.')
  writer.add_graph(tf.get_default_graph())
  writer.flush()
  with tf.Session() as sess:
    sess.run(iterator.initializer)
    # The debug call to show_all_nodes() (marked "TODO: to delete") was
    # removed.
    first_element = sess.run(next_element)
    print(first_element)
def test_crop():
  """Smoke test: crop a random (3, 100, 4, 4) batch and print the result."""
  source = tf.random.uniform((3, 100, 4, 4))
  cropped = _crop_3d_tensor(source, (224, 224))
  print("Cropped tensor:", cropped, '\n', cropped.shape)
def test_resize_space_axes():
  """Smoke test: resize a fed-in random tensor and print the result."""
  placeholder = tf.placeholder(tf.float32, shape=[None, None, None])
  print("tensor_3d.shape.eval():", placeholder.shape)
  resized = resize_space_axes(placeholder,
                             new_row_count=_GLOBAL_CROP_SIZE[0],
                             new_col_count=_GLOBAL_CROP_SIZE[1])
  with tf.Session() as sess:
    feed = np.random.rand(100, 224, 224)
    print(sess.run(resized, feed_dict={placeholder: feed}))
  print(resized.shape)
def test_crop_time_axis():
  """Smoke test: crop the time axis of a fed-in random tensor and print
  the randomly chosen slice start."""
  tensor_3d = tf.placeholder(tf.float32, shape=[None, None, None])
  print("tensor_3d.shape.eval():", tensor_3d.shape)
  res = crop_time_axis(tensor_3d,
                       num_frames=_GLOBAL_NUM_FRAMES)
  with tf.Session() as sess:
    rand_array = np.random.rand(100, 224, 224)
    # BUG FIX: the stack op inside crop_time_axis is named
    # 'begin_index', so the tensor to fetch is 'begin_index:0'; the old
    # name "begin_stacked:0" does not exist in the graph.
    begin = tf.get_default_graph().get_tensor_by_name("begin_index:0")
    print(begin)
    print(sess.run(begin, feed_dict={tensor_3d: rand_array}))
  print(res.shape)
def test_tensorflow():
  """Smoke test: create a placeholder with fully unknown shape and print it."""
  tensor_unknown = tf.placeholder(tf.float32, shape=[None, None, None])
  print("tensor_unknown:", tensor_unknown)
def test_input_fn():
  """Test for the function `input_function` on a local AutoDL dataset."""
  # Other local datasets that can be swapped in:
  #   .../formatted_datasets/itwas/itwas.data/train
  #   .../formatted_datasets/katze/katze.data/train
  dataset_dir = '/Users/evariste/projects/autodl-contrib/formatted_datasets/chao/chao.data/train'
  autodl_dataset = AutoDLDataset(dataset_dir)
  dataset = autodl_dataset.get_dataset()
  print_first_element(dataset)
  # Collect the example geometry from the dataset metadata.
  metadata = autodl_dataset.get_metadata()
  row_count, col_count = metadata.get_matrix_size(0)
  sequence_size = metadata.get_sequence_size()
  output_dim = metadata.get_output_size()
  input_size = (sequence_size, row_count, col_count)
  begin_time = time.time()
  transformed_dataset = input_function(dataset,
                                       is_training=True,
                                       batch_size=30,
                                       shuffle_buffer=_SHUFFLE_BUFFER,
                                       parse_record_fn=parse_record_fn,
                                       num_epochs=42,
                                       dtype=tf.float32)
  print("Transformation time used:", time.time() - begin_time)
  print("transformed_dataset:", transformed_dataset)
  print_first_element(transformed_dataset)
def show_all_nodes():
  """Print the names of every node in the default TensorFlow graph."""
  graph_def = tf.get_default_graph().as_graph_def()
  node_names = [node.name for node in graph_def.node]
  print("Nodes names:", node_names)
if __name__ == '__main__':
  import sys
  # Make the AutoDL ingestion program (which provides the `dataset`
  # module and AutoDLDataset) importable before running the test.
  sys.path.append('/Users/evariste/projects/autodl/codalab_competition_bundle/AutoDL_starting_kit/AutoDL_ingestion_program')
  from dataset import AutoDLDataset
  test_input_fn()
  # test_tensorflow()
  # test_crop_time_axis()
| [
"tensorflow.random.uniform",
"tensorflow.slice",
"tensorflow.image.resize_images",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.transpose",
"numpy.random.rand",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.random.randint",
"tensorflow.image.crop_and_resize",
"tensorflow.get_defa... | [((2660, 2718), 'tensorflow.pad', 'tf.pad', (['tensor_3d', '((0, 0), (0, pad_size), (0, 0), (0, 0))'], {}), '(tensor_3d, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n', (2666, 2718), True, 'import tensorflow as tf\n'), ((2829, 2857), 'numpy.random.randint', 'np.random.randint', (['(0)', 'maxval'], {}), '(0, maxval)\n', (2846, 2857), True, 'import numpy as np\n'), ((2876, 2954), 'tensorflow.slice', 'tf.slice', (['padded_tensor'], {'begin': '[0, begin, 0, 0]', 'size': '[-1, num_frames, -1, -1]'}), '(padded_tensor, begin=[0, begin, 0, 0], size=[-1, num_frames, -1, -1])\n', (2884, 2954), True, 'import tensorflow as tf\n'), ((3182, 3228), 'tensorflow.transpose', 'tf.transpose', (['sliced_tensor'], {'perm': '[0, 2, 3, 1]'}), '(sliced_tensor, perm=[0, 2, 3, 1])\n', (3194, 3228), True, 'import tensorflow as tf\n'), ((3378, 3468), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[2, num_repeat * batch_size]', 'minval': '(0)', 'maxval': '(1 - crop_ratio)'}), '(shape=[2, num_repeat * batch_size], minval=0, maxval=1 -\n crop_ratio)\n', (3395, 3468), True, 'import tensorflow as tf\n'), ((3724, 3754), 'tensorflow.transpose', 'tf.transpose', (['[y1, x1, y2, x2]'], {}), '([y1, x1, y2, x2])\n', (3736, 3754), True, 'import tensorflow as tf\n'), ((3852, 3918), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['sliced_tensor', 'boxes', 'box_ind', 'crop_size'], {}), '(sliced_tensor, boxes, box_ind, crop_size)\n', (3876, 3918), True, 'import tensorflow as tf\n'), ((4060, 4107), 'tensorflow.transpose', 'tf.transpose', (['resized_tensor'], {'perm': '[0, 3, 1, 2]'}), '(resized_tensor, perm=[0, 3, 1, 2])\n', (4072, 4107), True, 'import tensorflow as tf\n'), ((4712, 4762), 'tensorflow.pad', 'tf.pad', (['tensor_3d', '((0, pad_size), (0, 0), (0, 0))'], {}), '(tensor_3d, ((0, pad_size), (0, 0), (0, 0)))\n', (4718, 4762), True, 'import tensorflow as tf\n'), ((5187, 5256), 'tensorflow.slice', 'tf.slice', (['padded_tensor'], {'begin': 
'begin_index', 'size': '[num_frames, -1, -1]'}), '(padded_tensor, begin=begin_index, size=[num_frames, -1, -1])\n', (5195, 5256), True, 'import tensorflow as tf\n'), ((5783, 5822), 'tensorflow.transpose', 'tf.transpose', (['tensor_3d'], {'perm': '[1, 2, 0]'}), '(tensor_3d, perm=[1, 2, 0])\n', (5795, 5822), True, 'import tensorflow as tf\n'), ((5835, 5901), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['transposed', '(new_row_count, new_col_count)'], {}), '(transposed, (new_row_count, new_col_count))\n', (5857, 5901), True, 'import tensorflow as tf\n'), ((5946, 5983), 'tensorflow.transpose', 'tf.transpose', (['resized'], {'perm': '[2, 0, 1]'}), '(resized, perm=[2, 0, 1])\n', (5958, 5983), True, 'import tensorflow as tf\n'), ((11147, 11173), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""."""'], {}), "('.')\n", (11168, 11173), True, 'import tensorflow as tf\n'), ((11446, 11472), 'tensorflow.random.uniform', 'tf.random.uniform', (['t_shape'], {}), '(t_shape)\n', (11463, 11472), True, 'import tensorflow as tf\n'), ((11767, 11808), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 't_shape'}), '(tf.float32, shape=t_shape)\n', (11781, 11808), True, 'import tensorflow as tf\n'), ((12250, 12291), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 't_shape'}), '(tf.float32, shape=t_shape)\n', (12264, 12291), True, 'import tensorflow as tf\n'), ((12812, 12853), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 't_shape'}), '(tf.float32, shape=t_shape)\n', (12826, 12853), True, 'import tensorflow as tf\n'), ((13346, 13372), 'dataset.AutoDLDataset', 'AutoDLDataset', (['dataset_dir'], {}), '(dataset_dir)\n', (13359, 13372), False, 'from dataset import AutoDLDataset\n'), ((13720, 13731), 'time.time', 'time.time', ([], {}), '()\n', (13729, 13731), False, 'import time\n'), ((14158, 14169), 'time.time', 'time.time', ([], {}), '()\n', (14167, 14169), False, 'import time\n'), ((14479, 
14611), 'sys.path.append', 'sys.path.append', (['"""/Users/evariste/projects/autodl/codalab_competition_bundle/AutoDL_starting_kit/AutoDL_ingestion_program"""'], {}), "(\n '/Users/evariste/projects/autodl/codalab_competition_bundle/AutoDL_starting_kit/AutoDL_ingestion_program'\n )\n", (14494, 14611), False, 'import sys\n'), ((4925, 4988), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]'], {'minval': '(0)', 'maxval': 'maxval', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=maxval, dtype=tf.int32)\n', (4942, 4988), True, 'import tensorflow as tf\n'), ((5115, 5167), 'tensorflow.stack', 'tf.stack', (['[begin_index[0], 0, 0]'], {'name': '"""begin_index"""'}), "([begin_index[0], 0, 0], name='begin_index')\n", (5123, 5167), True, 'import tensorflow as tf\n'), ((10674, 10769), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""datasets_num_private_threads: %s"""', 'datasets_num_private_threads'], {}), "('datasets_num_private_threads: %s',\n datasets_num_private_threads)\n", (10699, 10769), True, 'import tensorflow as tf\n'), ((11193, 11215), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (11213, 11215), True, 'import tensorflow as tf\n'), ((11241, 11253), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11251, 11253), True, 'import tensorflow as tf\n'), ((12029, 12041), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (12039, 12041), True, 'import tensorflow as tf\n'), ((12068, 12097), 'numpy.random.rand', 'np.random.rand', (['(100)', '(224)', '(224)'], {}), '(100, 224, 224)\n', (12082, 12097), True, 'import numpy as np\n'), ((12439, 12451), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (12449, 12451), True, 'import tensorflow as tf\n'), ((12478, 12507), 'numpy.random.rand', 'np.random.rand', (['(100)', '(224)', '(224)'], {}), '(100, 224, 224)\n', (12492, 12507), True, 'import numpy as np\n'), ((2615, 2634), 'tensorflow.shape', 'tf.shape', (['tensor_3d'], {}), '(tensor_3d)\n', (2623, 2634), True, 
'import tensorflow as tf\n'), ((4667, 4686), 'tensorflow.shape', 'tf.shape', (['tensor_3d'], {}), '(tensor_3d)\n', (4675, 4686), True, 'import tensorflow as tf\n'), ((12581, 12603), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12601, 12603), True, 'import tensorflow as tf\n'), ((4863, 4886), 'tensorflow.shape', 'tf.shape', (['padded_tensor'], {}), '(padded_tensor)\n', (4871, 4886), True, 'import tensorflow as tf\n'), ((14391, 14413), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (14411, 14413), True, 'import tensorflow as tf\n')] |
from pyunicorn.timeseries import RecurrencePlot
import numpy as np
from statistics import median
import time
# Measure the times (in ms) of evaluating an expression n times
def measuretime(f, n, *args):
t = [0]*n
res = f(*args)
for n in range(n):
t0 = time.time()
f(*args)
t[n] = time.time() - t0
return(1000*np.array(t), res)
# Function that will be measured
def fun_rqa(v,metric):
# Attempt sparse RQA if metric is euclidean
metric_sup = (metric is "supremum")
rp = RecurrencePlot(v, metric=metric, sparse_rqa=metric_sup,
threshold=1.2, dim=3, tau=6)
rqa = rp.rqa_summary()
rqa["Lmax"] = rp.max_diaglength()
rqa["ENT"] = rp.diag_entropy()
rqa["TT"] = rp.trapping_time()
return(rqa)
# Analyse 12 series from 250 to 3000 points
# (With variable metric)
def benchmark(metric):
m = np.loadtxt("rossler.txt")
for r in range(12):
x = m[:250*(r+1), 2*r]
(tt, res) = measuretime(fun_rqa, 5, x, metric)
t = median(tt)
with open("benchmark_rqa_python_%s.txt"%metric, "a") as f:
f.write("%d\t%f\t"%(r,t))
for k in ["RR","DET","L","Lmax","ENT","LAM","TT"]:
f.write("%s\t"%(res[k]))
f.write("\n")
# Do it with max and euclidean norms
benchmark("euclidean")
benchmark("supremum")
| [
"statistics.median",
"numpy.array",
"pyunicorn.timeseries.RecurrencePlot",
"numpy.loadtxt",
"time.time"
] | [((522, 611), 'pyunicorn.timeseries.RecurrencePlot', 'RecurrencePlot', (['v'], {'metric': 'metric', 'sparse_rqa': 'metric_sup', 'threshold': '(1.2)', 'dim': '(3)', 'tau': '(6)'}), '(v, metric=metric, sparse_rqa=metric_sup, threshold=1.2, dim=\n 3, tau=6)\n', (536, 611), False, 'from pyunicorn.timeseries import RecurrencePlot\n'), ((864, 889), 'numpy.loadtxt', 'np.loadtxt', (['"""rossler.txt"""'], {}), "('rossler.txt')\n", (874, 889), True, 'import numpy as np\n'), ((273, 284), 'time.time', 'time.time', ([], {}), '()\n', (282, 284), False, 'import time\n'), ((1012, 1022), 'statistics.median', 'median', (['tt'], {}), '(tt)\n', (1018, 1022), False, 'from statistics import median\n'), ((317, 328), 'time.time', 'time.time', ([], {}), '()\n', (326, 328), False, 'import time\n'), ((350, 361), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (358, 361), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 13:37:10 2016
@author: kroboth
"""
import collections
import numpy.linalg as linalg
# TODO:
# * Catch actual exceptions instead of all of them
class EventLibrary(object):
"""
EventLibrary Maintain a list of events.
The class is used by the Sequence class to store events of an MRI
sequence defined using the Pulseq file format.
See http://pulseq.github.io/
Sequence Properties:
keys - A list of event IDs
data - A struct array with field 'array' to store data of varying
lengths, remaining compatible with codegen.
lengths - Corresponding lengths of the data arrays
type - Type to distinguish events in the same class (e.g.
trapezoids and arbitrary gradients)
Sequence Methods:
find - Find an event in the library
insert - Add a new event to the library
See also mr.Sequence
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
def __init__(self):
self.keys = dict() # TODO: List may suffice
self.data = dict()
self.lengths = dict()
self.type = dict()
def find(self, data):
"""
Lookup a data structure in the given library. Returns the
index of the data in the library. If the data does not exist
in the library then the index for the next new entry is returned
See also insert mr.Sequence.addBlock
"""
# TODO: solve this better!
try:
data_length = len(data)
except:
try:
data_length = data.size
except:
data_length = 1
found = False
idx = None
for ind in self.keys:
if (self.lengths[ind] == data_length) and \
(type(self.data[ind]) == type(data)) and \
(linalg.norm(self.data[ind]-data) < 1e-6):
idx = self.keys[ind]
found = True
break
if not self.keys:
idx = 1
elif not found:
idx = max(self.keys.keys())+1
out = collections.namedtuple('find', ['id', 'found'])
return out(idx, found)
def insert(self, idx, data, ttype=None):
"""
Add event to library
See also find
"""
self.keys[idx] = idx
self.data[idx] = data
# TODO: solve this better!
try:
self.lengths[idx] = len(data)
except:
try:
self.lengths[idx] = data.size
except:
self.lengths[idx] = 1
if ttype is not None:
self.type[idx] = ttype
def get_ids_of_type(self, ttype):
"""
Return all IDs with a given type
"""
return [k for (k, v) in self.type.items() if v == ttype]
| [
"collections.namedtuple",
"numpy.linalg.norm"
] | [((2132, 2179), 'collections.namedtuple', 'collections.namedtuple', (['"""find"""', "['id', 'found']"], {}), "('find', ['id', 'found'])\n", (2154, 2179), False, 'import collections\n'), ((1874, 1908), 'numpy.linalg.norm', 'linalg.norm', (['(self.data[ind] - data)'], {}), '(self.data[ind] - data)\n', (1885, 1908), True, 'import numpy.linalg as linalg\n')] |
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.cluster import hierarchy
from scipy.spatial import distance_matrix
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_blobs
X1, y1 = make_blobs(n_samples=50, centers=[[4,4], [-2, -1], [1, 1], [10,4]], cluster_std=0.9)
plt.scatter(X1[:, 0], X1[:, 1], marker='o')
agglom = AgglomerativeClustering(n_clusters = 4, linkage = 'average')
agglom.fit(X1,y1)
# Create a figure of size 6 inches by 4 inches.
plt.figure(figsize=(6, 4))
# These two lines of code are used to scale the data points down,
# Or else the data points will be scattered very far apart.
# Create a minimum and maximum range of X1.
x_min, x_max = np.min(X1, axis=0), np.max(X1, axis=0)
# Get the average distance for X1.
X1 = (X1 - x_min) / (x_max - x_min)
# This loop displays all of the datapoints.
for i in range(X1.shape[0]):
# Replace the data points with their respective cluster value
# (ex. 0) and is color coded with a colormap (plt.cm.spectral)
plt.text(X1[i, 0], X1[i, 1], str(y1[i]),
color=plt.cm.nipy_spectral(agglom.labels_[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
# Remove the x ticks, y ticks, x and y axis
plt.xticks([])
plt.yticks([])
# plt.axis('off')
# Display the plot of the original data before clustering
plt.scatter(X1[:, 0], X1[:, 1], marker='.')
# Display the plot
plt.show()
dist_matrix = distance_matrix(X1,X1)
print(dist_matrix)
Z = hierarchy.linkage(dist_matrix, 'complete')
dendro = hierarchy.dendrogram(Z)
#wget -O cars_clus.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%204/data/cars_clus.csv
filename = 'cars_clus.csv'
#Read csv
pdf = pd.read_csv(filename)
print ("Shape of dataset: ", pdf.shape)
pdf.head(5)
print ("Shape of dataset before cleaning: ", pdf.size)
pdf[[ 'sales', 'resale', 'type', 'price', 'engine_s',
'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap',
'mpg', 'lnsales']] = pdf[['sales', 'resale', 'type', 'price', 'engine_s',
'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap',
'mpg', 'lnsales']].apply(pd.to_numeric, errors='coerce')
pdf = pdf.dropna()
pdf = pdf.reset_index(drop=True)
print ("Shape of dataset after cleaning: ", pdf.size)
pdf.head(5)
featureset = pdf[['engine_s', 'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap', 'mpg']]
from sklearn.preprocessing import MinMaxScaler
x = featureset.values #returns a numpy array
min_max_scaler = MinMaxScaler()
feature_mtx = min_max_scaler.fit_transform(x)
feature_mtx [0:5]
import scipy
leng = feature_mtx.shape[0]
D = scipy.zeros([leng,leng])
for i in range(leng):
for j in range(leng):
D[i,j] = scipy.spatial.distance.euclidean(feature_mtx[i], feature_mtx[j])
import pylab
import scipy.cluster.hierarchy
Z = hierarchy.linkage(D, 'complete')
from scipy.cluster.hierarchy import fcluster
max_d = 3
clusters = fcluster(Z, max_d, criterion='distance')
clusters
from scipy.cluster.hierarchy import fcluster
k = 5
clusters = fcluster(Z, k, criterion='maxclust')
clusters
fig = pylab.figure(figsize=(18, 50))
def llf(id):
return '[%s %s %s]' % (pdf['manufact'][id], pdf['model'][id], int(float(pdf['type'][id])))
dendro = hierarchy.dendrogram(Z, leaf_label_func=llf, leaf_rotation=0, leaf_font_size=12, orientation='right')
dist_matrix = distance_matrix(feature_mtx,feature_mtx)
print(dist_matrix)
agglom = AgglomerativeClustering(n_clusters = 6, linkage = 'complete')
agglom.fit(feature_mtx)
agglom.labels_
pdf['cluster_'] = agglom.labels_
pdf.head()
import matplotlib.cm as cm
n_clusters = max(agglom.labels_)+1
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
cluster_labels = list(range(0, n_clusters))
# Create a figure of size 6 inches by 4 inches.
plt.figure(figsize=(16,14))
for color, label in zip(colors, cluster_labels):
subset = pdf[pdf.cluster_ == label]
for i in subset.index:
plt.text(subset.horsepow[i], subset.mpg[i],str(subset['model'][i]), rotation=25)
plt.scatter(subset.horsepow, subset.mpg, s= subset.price*10, c=color, label='cluster'+str(label),alpha=0.5)
# plt.scatter(subset.horsepow, subset.mpg)
plt.legend()
plt.title('Clusters')
plt.xlabel('horsepow')
plt.ylabel('mpg')
pdf.groupby(['cluster_','type'])['cluster_'].count()
agg_cars = pdf.groupby(['cluster_','type'])['horsepow','engine_s','mpg','price'].mean()
agg_cars
plt.figure(figsize=(16,10))
for color, label in zip(colors, cluster_labels):
subset = agg_cars.loc[(label,),]
for i in subset.index:
plt.text(subset.loc[i][0]+5, subset.loc[i][2], 'type='+str(int(i)) + ', price='+str(int(subset.loc[i][3]))+'k')
plt.scatter(subset.horsepow, subset.mpg, s=subset.price*20, c=color, label='cluster'+str(label))
plt.legend()
plt.title('Clusters')
plt.xlabel('horsepow')
plt.ylabel('mpg')
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.cm.nipy_spectral",
"scipy.cluster.hierarchy.fcluster",
"sklearn.cluster.AgglomerativeClustering",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"scipy.cluster.hierarchy.linkage",
"matplotli... | [((339, 429), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(50)', 'centers': '[[4, 4], [-2, -1], [1, 1], [10, 4]]', 'cluster_std': '(0.9)'}), '(n_samples=50, centers=[[4, 4], [-2, -1], [1, 1], [10, 4]],\n cluster_std=0.9)\n', (349, 429), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((425, 468), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X1[:, 0]', 'X1[:, 1]'], {'marker': '"""o"""'}), "(X1[:, 0], X1[:, 1], marker='o')\n", (436, 468), True, 'from matplotlib import pyplot as plt\n'), ((479, 535), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(4)', 'linkage': '"""average"""'}), "(n_clusters=4, linkage='average')\n", (502, 535), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((608, 634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (618, 634), True, 'from matplotlib import pyplot as plt\n'), ((1349, 1363), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1359, 1363), True, 'from matplotlib import pyplot as plt\n'), ((1364, 1378), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1374, 1378), True, 'from matplotlib import pyplot as plt\n'), ((1457, 1500), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X1[:, 0]', 'X1[:, 1]'], {'marker': '"""."""'}), "(X1[:, 0], X1[:, 1], marker='.')\n", (1468, 1500), True, 'from matplotlib import pyplot as plt\n'), ((1520, 1530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1528, 1530), True, 'from matplotlib import pyplot as plt\n'), ((1546, 1569), 'scipy.spatial.distance_matrix', 'distance_matrix', (['X1', 'X1'], {}), '(X1, X1)\n', (1561, 1569), False, 'from scipy.spatial import distance_matrix\n'), ((1593, 1635), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['dist_matrix', '"""complete"""'], {}), "(dist_matrix, 'complete')\n", (1610, 1635), False, 'from scipy.cluster import 
hierarchy\n'), ((1646, 1669), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['Z'], {}), '(Z)\n', (1666, 1669), False, 'from scipy.cluster import hierarchy\n'), ((1890, 1911), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1901, 1911), True, 'import pandas as pd\n'), ((2703, 2717), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2715, 2717), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2828, 2853), 'scipy.zeros', 'scipy.zeros', (['[leng, leng]'], {}), '([leng, leng])\n', (2839, 2853), False, 'import scipy\n'), ((3032, 3064), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['D', '"""complete"""'], {}), "(D, 'complete')\n", (3049, 3064), False, 'from scipy.cluster import hierarchy\n'), ((3132, 3172), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['Z', 'max_d'], {'criterion': '"""distance"""'}), "(Z, max_d, criterion='distance')\n", (3140, 3172), False, 'from scipy.cluster.hierarchy import fcluster\n'), ((3245, 3281), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['Z', 'k'], {'criterion': '"""maxclust"""'}), "(Z, k, criterion='maxclust')\n", (3253, 3281), False, 'from scipy.cluster.hierarchy import fcluster\n'), ((3298, 3328), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(18, 50)'}), '(figsize=(18, 50))\n', (3310, 3328), False, 'import pylab\n'), ((3450, 3555), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['Z'], {'leaf_label_func': 'llf', 'leaf_rotation': '(0)', 'leaf_font_size': '(12)', 'orientation': '"""right"""'}), "(Z, leaf_label_func=llf, leaf_rotation=0,\n leaf_font_size=12, orientation='right')\n", (3470, 3555), False, 'from scipy.cluster import hierarchy\n'), ((3567, 3608), 'scipy.spatial.distance_matrix', 'distance_matrix', (['feature_mtx', 'feature_mtx'], {}), '(feature_mtx, feature_mtx)\n', (3582, 3608), False, 'from scipy.spatial import distance_matrix\n'), ((3637, 3694), 'sklearn.cluster.AgglomerativeClustering', 
'AgglomerativeClustering', ([], {'n_clusters': '(6)', 'linkage': '"""complete"""'}), "(n_clusters=6, linkage='complete')\n", (3660, 3694), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((3990, 4018), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 14)'}), '(figsize=(16, 14))\n', (4000, 4018), True, 'from matplotlib import pyplot as plt\n'), ((4386, 4398), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4396, 4398), True, 'from matplotlib import pyplot as plt\n'), ((4399, 4420), 'matplotlib.pyplot.title', 'plt.title', (['"""Clusters"""'], {}), "('Clusters')\n", (4408, 4420), True, 'from matplotlib import pyplot as plt\n'), ((4421, 4443), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""horsepow"""'], {}), "('horsepow')\n", (4431, 4443), True, 'from matplotlib import pyplot as plt\n'), ((4444, 4461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mpg"""'], {}), "('mpg')\n", (4454, 4461), True, 'from matplotlib import pyplot as plt\n'), ((4615, 4643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (4625, 4643), True, 'from matplotlib import pyplot as plt\n'), ((4977, 4989), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4987, 4989), True, 'from matplotlib import pyplot as plt\n'), ((4990, 5011), 'matplotlib.pyplot.title', 'plt.title', (['"""Clusters"""'], {}), "('Clusters')\n", (4999, 5011), True, 'from matplotlib import pyplot as plt\n'), ((5012, 5034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""horsepow"""'], {}), "('horsepow')\n", (5022, 5034), True, 'from matplotlib import pyplot as plt\n'), ((5035, 5052), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mpg"""'], {}), "('mpg')\n", (5045, 5052), True, 'from matplotlib import pyplot as plt\n'), ((822, 840), 'numpy.min', 'np.min', (['X1'], {'axis': '(0)'}), '(X1, axis=0)\n', (828, 840), True, 'import numpy as np\n'), ((842, 860), 'numpy.max', 'np.max', (['X1'], {'axis': '(0)'}), '(X1, axis=0)\n', 
(848, 860), True, 'import numpy as np\n'), ((3866, 3895), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_clusters'], {}), '(0, 1, n_clusters)\n', (3877, 3895), True, 'import numpy as np\n'), ((2918, 2982), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['feature_mtx[i]', 'feature_mtx[j]'], {}), '(feature_mtx[i], feature_mtx[j])\n', (2950, 2982), False, 'import scipy\n'), ((1204, 1250), 'matplotlib.pyplot.cm.nipy_spectral', 'plt.cm.nipy_spectral', (['(agglom.labels_[i] / 10.0)'], {}), '(agglom.labels_[i] / 10.0)\n', (1224, 1250), True, 'from matplotlib import pyplot as plt\n')] |
import cv2
import numpy as np
def main():
#window_name="Cam feed"
#cv2.namedWindow(window_name)
cap=cv2.VideoCapture(0)
#filename = 'F:\sample.avi'
#codec=cv2.VideoWriter_fourcc('X','V','I','D')
#framerate=30
#resolution = (500,500)
# VideoFileOutput = cv2.VideoWriter(filename,codec,framerate,resolution)
if cap.isOpened():
ret,frame = cap.read()
else:
ret =False
ret,frame1 = cap.read()
ret,frame2 = cap.read()
while ret:
ret,frame = cap.read()
#VideoFileOutput.write(frame)
d=cv2.absdiff(frame1,frame2)
grey=cv2.cvtColor(d,cv2.COLOR_BGR2GRAY)
blur =cv2.GaussianBlur(grey,(5,5),0)
ret,th=cv2.threshold(blur,20,255,cv2.THRESH_BINARY)
dilated=cv2.dilate(th,np.ones((3,3),np.uint8),iterations=3)
img,c,h=cv2.findContours(dilated,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(frame1,c,-1,(0,255,0),2)
#cv2.imshow("win1",frame2)
cv2.imshow("inter",frame1)
if cv2.waitKey(40) == 27:
break
frame1 = frame2
ret,frame2= cap.read()
cv2.destroyAllWindows()
#VideoFileOutput.release()
cap.release()
main()
| [
"cv2.drawContours",
"numpy.ones",
"cv2.threshold",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.findContours",
"cv2.GaussianBlur",
"cv2.waitKey",
"cv2.absdiff"
] | [((114, 133), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (130, 133), False, 'import cv2\n'), ((1162, 1185), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1183, 1185), False, 'import cv2\n'), ((594, 621), 'cv2.absdiff', 'cv2.absdiff', (['frame1', 'frame2'], {}), '(frame1, frame2)\n', (605, 621), False, 'import cv2\n'), ((635, 670), 'cv2.cvtColor', 'cv2.cvtColor', (['d', 'cv2.COLOR_BGR2GRAY'], {}), '(d, cv2.COLOR_BGR2GRAY)\n', (647, 670), False, 'import cv2\n'), ((685, 718), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['grey', '(5, 5)', '(0)'], {}), '(grey, (5, 5), 0)\n', (701, 718), False, 'import cv2\n'), ((730, 777), 'cv2.threshold', 'cv2.threshold', (['blur', '(20)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blur, 20, 255, cv2.THRESH_BINARY)\n', (743, 777), False, 'import cv2\n'), ((857, 922), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (873, 922), False, 'import cv2\n'), ((936, 983), 'cv2.drawContours', 'cv2.drawContours', (['frame1', 'c', '(-1)', '(0, 255, 0)', '(2)'], {}), '(frame1, c, -1, (0, 255, 0), 2)\n', (952, 983), False, 'import cv2\n'), ((1021, 1048), 'cv2.imshow', 'cv2.imshow', (['"""inter"""', 'frame1'], {}), "('inter', frame1)\n", (1031, 1048), False, 'import cv2\n'), ((804, 829), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (811, 829), True, 'import numpy as np\n'), ((1066, 1081), 'cv2.waitKey', 'cv2.waitKey', (['(40)'], {}), '(40)\n', (1077, 1081), False, 'import cv2\n')] |
""" Run script to parse a vietnamese text
- set variables DEBUG, PLOT, TEST based on your use case.
- set variable file to the filename you want to analyze
- Run:
- Will create a report of distribution of sounds
- Will plot if you set PLOT=True
"""
import numpy as np
import time
import os
import matplotlib.pyplot as plt
from datetime import date
today = date.today().strftime("%b-%d-%Y")
DEBUG = True # Print more information
PLOT = True # Plot data
TEST = True # Use test case instead of file
testcase = ['nguyễn, nhanh, giường',
'nghiêng, hoàng']
idt = ' ' * 2 # Set indent level
# Vowel list: Index = 6 * vowel + accent
vowels = ['e', 'ẹ', 'ẻ', 'ẽ', 'è', 'é',
'ê', 'ệ', 'ể', 'ễ', 'ề', 'ế',
'a', 'ạ', 'ả', 'ã', 'à', 'á',
'ă', 'ặ', 'ẳ', 'ẵ', 'ằ', 'ắ',
'â', 'ậ', 'ẩ', 'ẫ', 'ầ', 'ấ',
'i', 'ị', 'ỉ', 'ĩ', 'ì', 'í',
'o', 'ọ', 'ỏ', 'õ', 'ò', 'ó',
'ô', 'ộ', 'ổ', 'ỗ', 'ồ', 'ố',
'ơ', 'ợ', 'ở', 'ỡ', 'ờ', 'ớ',
'u', 'ụ', 'ủ', 'ũ', 'ù', 'ú',
'ư', 'ự', 'ử', 'ữ', 'ừ', 'ứ',
'y', 'ỵ', 'ỷ', 'ỹ', 'ỳ', 'ý']
# Digraph list.
digraph = ['ch', 'gh', 'gi', 'kh', 'nh',
'ng', 'ph', 'th', 'tr', 'qu']
# Everything else, log counts into dictionaries (key=character, value=count)
consonants = {}
Nchar = 0 # Keeps track of total character count
# initialize counts into numpy matrix for easy data processing later.
vowel_cnt = np.zeros((int(len(vowels) / 6), 6))
vowel_cnt_reduced = np.sum(vowel_cnt, axis=1)
digraph_cnt = np.zeros(len(digraph))
file = 'anh_hung_xa_dieu_chuong1.txt' # long text
#file = 'test1.txt' # Short text, uncomment this for small test
print('=' * 79)
print('\n Running program! Put program name here \n')
print('=' * 79)
if os.path.exists(file):
fsize = os.path.getsize(file)
print('Parse text:')
print(idt + 'Parsing text file: %s (%.3f MB)' % (file, fsize / 1e6))
with open(file, encoding="utf8") as f:
tic = time.time()
# Open text file as utf-8 (unicode) and break down into words
if TEST:
# Substitute test case if testing
f = testcase
for line in f:
# convert to lower case
line = line.strip()
line = line.lower()
# Split to words
words = line.split()
# Only keep alpha-numeric characters
words = [''.join(filter(str.isalnum, w)) for w in words]
# Parse words
for w in words:
if DEBUG:
print('-' * 79)
print('Parsing word "%s"' % w)
# Parse digraphs first
if len(w) > 2:
# Check if starts with digraph
if w[:2] in digraph:
digraph_cnt[digraph.index(w[:2])] += 1
if DEBUG:
print('Found beginning digraph %s' % w[:2])
Nchar += 1
# Chop off the beginning digraph
if w[:3] in ['ngh']:
# h is part of ng
w = w[3:]
else:
w = w[2:]
# Check if ends with digraph
if len(w) >= 2:
if w[-2:] in digraph:
digraph_cnt[digraph.index(w[-2:])] += 1
if DEBUG:
print('Found ending digraph %s' % w[-2:])
Nchar += 1
# Chop off ending digraph
w = w[:-2]
# Parse remaining letters
for char in w:
Nchar += 1
if char in vowels:
# Get index of vowel, row & column
idx_char = vowels.index(char)
idx, idy = int(idx_char / 6), idx_char % 6
vowel_cnt[idx, idy] += 1
if DEBUG:
print('vowel %s' % vowels[6 * idx])
else:
if DEBUG:
print('consonant %s' % char)
if char in consonants:
consonants[char] += 1
else:
consonants[char] = 1
toc = time.time()
print(idt + 'Finished parsing %d characters in %.4f s' %
(Nchar, toc - tic))
# Create reduced vowel report by flatting 1 dimension of the matrix
vowel_cnt_reduced = np.sum(vowel_cnt, axis=1)
# Print report
print('-' * 79)
print('Reporting vowels (% of total sounds)')
print('(digraphs like %s, etc. count as 1 sound)' % digraph[0])
for ii, cnt in enumerate(vowel_cnt_reduced):
print(idt + '%s : %.1f %% (count = %d)' % (vowels[ii * 6],
cnt / Nchar * 100,
cnt))
print('Reporting digraphs & consonants:')
for ii, cnt in enumerate(digraph_cnt):
print(idt + '%s : %.1f %% (count = %d)' % (digraph[ii],
cnt / Nchar * 100,
cnt))
for k, v in consonants.items():
print(idt + '%s : %.1f %% (count = %d)' % (k,
v / Nchar * 100,
v))
if PLOT:
# Compile labels (characters)
vowel_raw = [vowels[6 * ii] for ii in range(len(vowel_cnt_reduced))]
labels = []
labels += vowel_raw
labels += digraph
labels += list(consonants.keys())
# Compile counts
cnts = list(vowel_cnt_reduced) + list(digraph_cnt) +\
list(consonants.values())
percent = np.array(cnts) / Nchar * 100.0
# Sort data
percent, cnts, labels = (list(t) for t in zip(*sorted(zip(percent,
cnts,
labels),
reverse=True)))
# Plot bar graph, sort by frequency
bar_width = 0.5
ind = np.arange(len(cnts))
plt.figure(figsize=(13, 6))
plt.bar(ind, percent, bar_width, label='distribution')
plt.title('File analyzed: ' + file)
plt.xticks(ind, labels, fontsize=7)
plt.legend()
plt.grid(alpha=0.3)
plt.ylabel('% of sounds')
plt.xlabel('sound')
mdata = {'today': today,
'Num Sounds': Nchar}
mdata_str = ['%s: %s' % (k, v) for k, v in mdata.items()]
mdata_str = '\n'.join(mdata_str)
ax = plt.gca()
# Add metadatabox
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.7, 0.6, mdata_str, transform=ax.transAxes, fontsize=10,
verticalalignment='top', bbox=props)
# Plot 2-d distribution of vowels
# Create figure
fig = plt.figure(figsize=(9, 9))
ax = fig.add_subplot(111, projection='3d')
percent_vowels = vowel_cnt / np.sum(vowel_cnt_reduced) * 100.0
# Set up axis labels
accents = ['none', '.', '?', '~', '`', '´']
colors = ['y', 'r', 'b', 'g', 'c', 'k']
# Gather some data
dx, dy = percent_vowels.shape
yticks = np.arange(dy)
yticks = yticks[::-1]
# For each layer of bar graph, set a color
for idy, c, k in zip(np.arange(dy), colors, yticks):
xs = np.arange(dx) # x-axis length
ys = percent_vowels[:, idy] # vowels plotted against x axis
cs = [c] * len(xs) # Color of given layer
# Plot the bar graph given by xs and ys on the plane y=k
ax.bar(xs, ys, zs=k, zdir='y', color=cs, alpha=0.8)
# Label your axis
ax.set_xlabel('vowel')
ax.set_ylabel('accent')
ax.set_zlabel('percent of vowels')
# label accent/vowels on axis
ax.set_xticks(np.arange(dx))
ax.set_yticks(np.arange(dy))
ax.set_yticklabels(accents, fontsize=20)
ax.set_xticklabels(vowel_raw, fontsize=12)
ax.set_title('File analyzed: %s' % file)
ax.grid(alpha=0.3)
else:
print('!' * 79)
print('!' * 25 + ' ' * 10 + 'ERROR' + ' ' * 10 + '!' * 29)
print("file %s does not exist" % file)
print()
| [
"os.path.exists",
"os.path.getsize",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"numpy.array",
"matplotlib.pyplot.titl... | [((1564, 1589), 'numpy.sum', 'np.sum', (['vowel_cnt'], {'axis': '(1)'}), '(vowel_cnt, axis=1)\n', (1570, 1589), True, 'import numpy as np\n'), ((1843, 1863), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1857, 1863), False, 'import os\n'), ((1878, 1899), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (1893, 1899), False, 'import os\n'), ((381, 393), 'datetime.date.today', 'date.today', ([], {}), '()\n', (391, 393), False, 'from datetime import date\n'), ((2059, 2070), 'time.time', 'time.time', ([], {}), '()\n', (2068, 2070), False, 'import time\n'), ((4597, 4608), 'time.time', 'time.time', ([], {}), '()\n', (4606, 4608), False, 'import time\n'), ((4816, 4841), 'numpy.sum', 'np.sum', (['vowel_cnt'], {'axis': '(1)'}), '(vowel_cnt, axis=1)\n', (4822, 4841), True, 'import numpy as np\n'), ((6692, 6719), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 6)'}), '(figsize=(13, 6))\n', (6702, 6719), True, 'import matplotlib.pyplot as plt\n'), ((6729, 6783), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'percent', 'bar_width'], {'label': '"""distribution"""'}), "(ind, percent, bar_width, label='distribution')\n", (6736, 6783), True, 'import matplotlib.pyplot as plt\n'), ((6793, 6828), 'matplotlib.pyplot.title', 'plt.title', (["('File analyzed: ' + file)"], {}), "('File analyzed: ' + file)\n", (6802, 6828), True, 'import matplotlib.pyplot as plt\n'), ((6838, 6873), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ind', 'labels'], {'fontsize': '(7)'}), '(ind, labels, fontsize=7)\n', (6848, 6873), True, 'import matplotlib.pyplot as plt\n'), ((6883, 6895), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6893, 6895), True, 'import matplotlib.pyplot as plt\n'), ((6905, 6924), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (6913, 6924), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6959), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% of sounds"""'], 
{}), "('% of sounds')\n", (6944, 6959), True, 'import matplotlib.pyplot as plt\n'), ((6969, 6988), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sound"""'], {}), "('sound')\n", (6979, 6988), True, 'import matplotlib.pyplot as plt\n'), ((7185, 7194), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7192, 7194), True, 'import matplotlib.pyplot as plt\n'), ((7506, 7532), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (7516, 7532), True, 'import matplotlib.pyplot as plt\n'), ((7874, 7887), 'numpy.arange', 'np.arange', (['dy'], {}), '(dy)\n', (7883, 7887), True, 'import numpy as np\n'), ((8001, 8014), 'numpy.arange', 'np.arange', (['dy'], {}), '(dy)\n', (8010, 8014), True, 'import numpy as np\n'), ((8051, 8064), 'numpy.arange', 'np.arange', (['dx'], {}), '(dx)\n', (8060, 8064), True, 'import numpy as np\n'), ((8545, 8558), 'numpy.arange', 'np.arange', (['dx'], {}), '(dx)\n', (8554, 8558), True, 'import numpy as np\n'), ((8583, 8596), 'numpy.arange', 'np.arange', (['dy'], {}), '(dy)\n', (8592, 8596), True, 'import numpy as np\n'), ((6221, 6235), 'numpy.array', 'np.array', (['cnts'], {}), '(cnts)\n', (6229, 6235), True, 'import numpy as np\n'), ((7623, 7648), 'numpy.sum', 'np.sum', (['vowel_cnt_reduced'], {}), '(vowel_cnt_reduced)\n', (7629, 7648), True, 'import numpy as np\n')] |
import os
import numpy
import cv2
import random
import colorsys
from Controller.nuclick.nuclick import gen_mask
nuclei_annotation_data_root = "static/data/nuclei_annotation_data/"
color = [[0, 128, 0, 0], [255, 0, 209, 128], [0, 255, 255, 128], [0, 0, 255, 128], [0, 0, 255, 128],
[255, 191, 0, 128], [0, 0, 0, 128], [0, 0, 0, 0]]
def get_n_hls_colors(num):
hls_colors = []
i = 0
step = 360.0 / num
while i < 360:
h = i
s = 90 + random.random() * 10
l = 50 + random.random() * 10
_hlsc = [h / 360.0, l / 100.0, s / 100.0]
hls_colors.append(_hlsc)
i += step
return hls_colors
def ncolors(num):
rgb_colors = []
if num < 1:
return rgb_colors
hls_colors = get_n_hls_colors(num)
for hlsc in hls_colors:
_r, _g, _b = colorsys.hls_to_rgb(hlsc[0], hlsc[1], hlsc[2])
r, g, b = [int(x * 255.0) for x in (_r, _g, _b)]
rgb_colors.append([r, g, b, 255])
return rgb_colors
def point_2_boundary(region_inform):
annotator_id = region_inform["annotator_id"]
annotation_project = region_inform["annotation_project"]
slide_uuid = region_inform["slide_uuid"]
region_id = region_inform["region_id"]
annotation_root_folder = nuclei_annotation_data_root + annotation_project + '/' + slide_uuid + '/'
points_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
str(region_id) + '_points' + '.txt'
grades_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
str(region_id) + '_grades' + '.txt'
region_image_file_name = annotation_root_folder + 'r' + str(region_id) + '.png'
points_file = open(points_file_name).readlines()
grades_file = open(grades_file_name).readlines()
points = []
grades = []
for item in points_file:
points.append([int(item.split(' ')[0]), int(item.split(' ')[1])])
print(points)
for item in grades_file:
grades.append(int(item))
region_image_file = cv2.imread(region_image_file_name)
if len(grades) > 0:
dot = numpy.array(points)
result = gen_mask(dot, region_image_file)
ret, binary = cv2.threshold(result, 0, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(cv2.convertScaleAbs(binary), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
result = cv2.drawContours(result, contours, -1, 255, 1)
result = result.astype(numpy.int16)
result[result == 255] = -2
result += 1
else:
result = numpy.zeros(region_image_file.shape[:2])
result += 1
boundary_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
str(region_id) + '_boundary' + '.txt'
numpy.savetxt(boundary_file_name, result, fmt='%d', delimiter=",")
grades.insert(0, 0)
grades.insert(0, 0)
grades = numpy.array(grades, dtype=numpy.int16)
annotation_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
str(region_id) + '_annotation' + '.txt'
numpy.savetxt(annotation_file_name, grades, fmt='%d', delimiter=",")
def boundary_2_mask(region_inform):
    """Render the stored boundary/annotation files for a region as an RGBA
    overlay image and return the written PNG's path.

    Pixels of nucleus label ``i`` are filled with ``color[annotation[i]]``,
    contour pixels (label -1) are drawn green, and a 3-pixel red frame marks
    the region border.  If the boundary file is missing it is regenerated
    first via ``point_2_boundary``.

    :param region_inform: dict with ``annotator_id``, ``annotation_project``,
        ``slide_uuid`` and ``region_id`` keys.
    :return: path of the written ``..._mask.png`` file.
    """
    annotator_id = region_inform["annotator_id"]
    annotation_project = region_inform["annotation_project"]
    slide_uuid = region_inform["slide_uuid"]
    region_id = region_inform["region_id"]
    annotation_root_folder = nuclei_annotation_data_root + annotation_project + '/' + slide_uuid + '/'
    boundary_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                         str(region_id) + '_boundary' + '.txt'
    annotation_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                           str(region_id) + '_annotation' + '.txt'
    if not os.path.exists(boundary_file_name):
        point_2_boundary(region_inform)
    boundary_file = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
    annotation_file = numpy.loadtxt(annotation_file_name, dtype=numpy.int16, delimiter=',')
    # Size the overlay from the boundary map instead of hard-coding 512x512:
    # identical for the default region size, and avoids a shape-mismatch
    # crash when a region of another size is rendered.
    mask = numpy.zeros((boundary_file.shape[0], boundary_file.shape[1], 4))
    for i in range(len(annotation_file)):
        mask[boundary_file == i] = color[annotation_file[i]]
    # Contour pixels are marked green.
    mask[boundary_file == -1] = [0, 255, 0, 255]
    # 3-pixel red frame around the whole region.
    mask[:, -3:] = [255, 0, 0, 255]
    mask[-3:, :] = [255, 0, 0, 255]
    mask[:3, :] = [255, 0, 0, 255]
    mask[:, :3] = [255, 0, 0, 255]
    mask_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                     str(region_id) + '_mask' + '.png'
    cv2.imwrite(mask_file_name, mask)
    return mask_file_name
def update_grade(region_inform, data):
    """Apply a client-side edit of nucleus grades/points to the stored files.

    Rewrites ``..._points.txt`` and ``..._grades.txt`` from ``data`` and
    relabels the boundary map so kept nuclei get compacted consecutive ids;
    nuclei whose new grade is 0 are erased from the boundary map.

    :param region_inform: dict with ``annotator_id``, ``annotation_project``,
        ``slide_uuid`` and ``region_id`` keys.
    :param data: dict with parallel lists ``grade``, ``points_x``,
        ``points_y`` -- presumably one entry per annotated nucleus, with
        point coordinates in boundary-map pixel space (TODO confirm against
        the caller).
    """
    annotator_id = region_inform["annotator_id"]
    annotation_project = region_inform["annotation_project"]
    slide_uuid = region_inform["slide_uuid"]
    region_id = region_inform["region_id"]
    annotation_root_folder = nuclei_annotation_data_root + annotation_project + '/' + slide_uuid + '/'
    points_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                       str(region_id) + '_points' + '.txt'
    grades_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                       str(region_id) + '_grades' + '.txt'
    boundary_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                         str(region_id) + '_boundary' + '.txt'
    # Regenerate the boundary map from the point files if it is missing.
    if not os.path.exists(boundary_file_name):
        point_2_boundary(region_inform)
    boundary_file = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
    # Shift every label up by len(grade) so old ids cannot collide with the
    # new ids (i + 2) assigned below; then restore the two reserved values
    # (-1 = contour, 1 = background), which became len-1 and len+1.
    boundary_file += len(data['grade'])
    boundary_file[boundary_file == len(data['grade']) - 1] -= len(data['grade'])
    boundary_file[boundary_file == len(data['grade']) + 1] -= len(data['grade'])
    points_file = open(points_file_name, 'w')
    grades_file = open(grades_file_name, 'w')
    for i in range(len(data['grade'])):
        # Label under the i-th click point (in the shifted map).
        nuclei_id = int(boundary_file[int(data['points_y'][i]), int(data['points_x'][i])])
        # If the point landed on a contour (-1) or background (1) pixel,
        # probe one pixel left/right; out-of-range probes are best-effort.
        if nuclei_id == -1:
            try:
                nuclei_id = int(boundary_file[int(data['points_y'][i]), int(data['points_x'][i]) - 1])
            except:
                pass
        if nuclei_id == 1:
            try:
                nuclei_id = int(boundary_file[int(data['points_y'][i]), int(data['points_x'][i]) + 1])
            except:
                pass
        if nuclei_id != 1 and nuclei_id != -1:
            # Relabel this nucleus to its new id (i + 2; 0 and 1 reserved).
            boundary_file[boundary_file == nuclei_id] = i + 2
            if nuclei_id < len(data['grade']):
                data['grade'][nuclei_id - 2] = 0
        if data['grade'][i] == 0:
            # Grade 0 means "deleted": wipe the nucleus from the map.
            boundary_file[boundary_file == i + 2] = 0
        # if nuclei_id != i + 2 and nuclei_id != 1:
        #     if nuclei_id != -1:
        #         try:
        #             data['grade'][nuclei_id - 2] = data['grade'][i]
        #             if int(data['grade'][i]) == 0:
        #                 boundary_file[boundary_file == nuclei_id] = 0
        #         except:
        #             print("------------- error: " + nuclei_id + "++++++++++++")
        #             data['grade'][i] = 0
    # Second pass: write the surviving points/grades and compact the label
    # ids to consecutive values.
    # NOTE(review): the first kept nucleus receives id 1, which collides with
    # the background value 1 -- looks suspicious; confirm whether ids should
    # start at 2 as in point_2_boundary.
    current_nuclei_id = 0
    for i in range(len(data['grade'])):
        try:
            if int(data['grade'][i]) != 0:
                points_file.write(str(data['points_x'][i]) + ' ' + str(data['points_y'][i]) + '\n')
                grades_file.write(str(data['grade'][i]) + '\n')
                old_nuclei_id = boundary_file[int(data['points_y'][i]), int(data['points_x'][i])]
                current_nuclei_id += 1
                if old_nuclei_id > 0:
                    boundary_file[boundary_file == old_nuclei_id] = current_nuclei_id
        except:
            # Best-effort: malformed grades or out-of-range points are skipped.
            pass
    numpy.savetxt(boundary_file_name, boundary_file, fmt='%d', delimiter=",")
    grades_file.close()
    points_file.close()
def boundary_2_point(region_inform):
    """Inverse of ``point_2_boundary``: derive one representative point and
    grade per nucleus from the stored boundary/annotation files and rewrite
    ``..._points.txt`` / ``..._grades.txt``.

    For each nucleus label the centroid is used when it actually falls inside
    the nucleus; for concave shapes the median pixel of the region is taken
    instead so the point is guaranteed to lie on the nucleus.

    :param region_inform: dict with ``annotator_id``, ``annotation_project``,
        ``slide_uuid`` and ``region_id`` keys.
    """
    annotator_id = region_inform["annotator_id"]
    annotation_project = region_inform["annotation_project"]
    slide_uuid = region_inform["slide_uuid"]
    region_id = region_inform["region_id"]
    annotation_root_folder = nuclei_annotation_data_root + annotation_project + '/' + slide_uuid + '/'
    points_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                       str(region_id) + '_points' + '.txt'
    grades_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                       str(region_id) + '_grades' + '.txt'
    boundary_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                         str(region_id) + '_boundary' + '.txt'
    annotation_file_name = annotation_root_folder + 'a' + str(annotator_id) + '_r' + \
                           str(region_id) + '_annotation' + '.txt'
    boundary_file = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
    annotation_file = numpy.loadtxt(annotation_file_name, dtype=numpy.int16, delimiter=',')
    with open(points_file_name, 'w') as points_file, open(grades_file_name, 'w') as grades_file:
        # range() is exclusive, so iterate up to AND INCLUDING the highest
        # label id -- the original dropped the last nucleus on every round-trip.
        for i in range(int(numpy.max(boundary_file)) + 1):
            # Skip reserved labels (0, 1), labels without a stored grade,
            # and grades outside the valid 1..6 range.
            if i == 0 or i == 1 or i >= annotation_file.size or \
                    annotation_file[i] == 0 or annotation_file[i] > 6:
                continue
            temp = numpy.argwhere(boundary_file == i)
            if temp.size == 0:
                continue
            x = temp[:, 1]
            y = temp[:, 0]
            cx = int(numpy.mean(x))
            cy = int(numpy.mean(y))
            if boundary_file[cy, cx] != i:
                # Centroid fell outside the (concave) nucleus: fall back to a
                # pixel known to belong to the region.
                cy = y[len(y) // 2]
                cx = x[len(x) // 2]
            points_file.write(str(cx) + ' ' + str(cy) + '\n')
            grades_file.write(str(annotation_file[i]) + '\n')
| [
"cv2.imwrite",
"os.path.exists",
"numpy.mean",
"cv2.drawContours",
"cv2.convertScaleAbs",
"cv2.threshold",
"colorsys.hls_to_rgb",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"random.random",
"Controller.nuclick.nuclick.gen_mask",
"numpy.savetxt",
"numpy.loadtxt",
"cv2.... | [((2055, 2089), 'cv2.imread', 'cv2.imread', (['region_image_file_name'], {}), '(region_image_file_name)\n', (2065, 2089), False, 'import cv2\n'), ((2790, 2856), 'numpy.savetxt', 'numpy.savetxt', (['boundary_file_name', 'result'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(boundary_file_name, result, fmt='%d', delimiter=',')\n", (2803, 2856), False, 'import numpy\n'), ((2919, 2957), 'numpy.array', 'numpy.array', (['grades'], {'dtype': 'numpy.int16'}), '(grades, dtype=numpy.int16)\n', (2930, 2957), False, 'import numpy\n'), ((3116, 3184), 'numpy.savetxt', 'numpy.savetxt', (['annotation_file_name', 'grades'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(annotation_file_name, grades, fmt='%d', delimiter=',')\n", (3129, 3184), False, 'import numpy\n'), ((3936, 4003), 'numpy.loadtxt', 'numpy.loadtxt', (['boundary_file_name'], {'dtype': 'numpy.int16', 'delimiter': '""","""'}), "(boundary_file_name, dtype=numpy.int16, delimiter=',')\n", (3949, 4003), False, 'import numpy\n'), ((4026, 4095), 'numpy.loadtxt', 'numpy.loadtxt', (['annotation_file_name'], {'dtype': 'numpy.int16', 'delimiter': '""","""'}), "(annotation_file_name, dtype=numpy.int16, delimiter=',')\n", (4039, 4095), False, 'import numpy\n'), ((4108, 4134), 'numpy.zeros', 'numpy.zeros', (['[512, 512, 4]'], {}), '([512, 512, 4])\n', (4119, 4134), False, 'import numpy\n'), ((4571, 4604), 'cv2.imwrite', 'cv2.imwrite', (['mask_file_name', 'mask'], {}), '(mask_file_name, mask)\n', (4582, 4604), False, 'import cv2\n'), ((5516, 5583), 'numpy.loadtxt', 'numpy.loadtxt', (['boundary_file_name'], {'dtype': 'numpy.int16', 'delimiter': '""","""'}), "(boundary_file_name, dtype=numpy.int16, delimiter=',')\n", (5529, 5583), False, 'import numpy\n'), ((7736, 7809), 'numpy.savetxt', 'numpy.savetxt', (['boundary_file_name', 'boundary_file'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(boundary_file_name, boundary_file, fmt='%d', delimiter=',')\n", (7749, 7809), False, 'import numpy\n'), ((8806, 8873), 
'numpy.loadtxt', 'numpy.loadtxt', (['boundary_file_name'], {'dtype': 'numpy.int16', 'delimiter': '""","""'}), "(boundary_file_name, dtype=numpy.int16, delimiter=',')\n", (8819, 8873), False, 'import numpy\n'), ((8896, 8965), 'numpy.loadtxt', 'numpy.loadtxt', (['annotation_file_name'], {'dtype': 'numpy.int16', 'delimiter': '""","""'}), "(annotation_file_name, dtype=numpy.int16, delimiter=',')\n", (8909, 8965), False, 'import numpy\n'), ((827, 873), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['hlsc[0]', 'hlsc[1]', 'hlsc[2]'], {}), '(hlsc[0], hlsc[1], hlsc[2])\n', (846, 873), False, 'import colorsys\n'), ((2128, 2147), 'numpy.array', 'numpy.array', (['points'], {}), '(points)\n', (2139, 2147), False, 'import numpy\n'), ((2165, 2197), 'Controller.nuclick.nuclick.gen_mask', 'gen_mask', (['dot', 'region_image_file'], {}), '(dot, region_image_file)\n', (2173, 2197), False, 'from Controller.nuclick.nuclick import gen_mask\n'), ((2221, 2269), 'cv2.threshold', 'cv2.threshold', (['result', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(result, 0, 255, cv2.THRESH_BINARY)\n', (2234, 2269), False, 'import cv2\n'), ((2403, 2449), 'cv2.drawContours', 'cv2.drawContours', (['result', 'contours', '(-1)', '(255)', '(1)'], {}), '(result, contours, -1, 255, 1)\n', (2419, 2449), False, 'import cv2\n'), ((2576, 2616), 'numpy.zeros', 'numpy.zeros', (['region_image_file.shape[:2]'], {}), '(region_image_file.shape[:2])\n', (2587, 2616), False, 'import numpy\n'), ((3840, 3874), 'os.path.exists', 'os.path.exists', (['boundary_file_name'], {}), '(boundary_file_name)\n', (3854, 3874), False, 'import os\n'), ((5419, 5453), 'os.path.exists', 'os.path.exists', (['boundary_file_name'], {}), '(boundary_file_name)\n', (5433, 5453), False, 'import os\n'), ((9079, 9103), 'numpy.max', 'numpy.max', (['boundary_file'], {}), '(boundary_file)\n', (9088, 9103), False, 'import numpy\n'), ((9224, 9258), 'numpy.argwhere', 'numpy.argwhere', (['(boundary_file == i)'], {}), '(boundary_file == i)\n', (9238, 9258), 
False, 'import numpy\n'), ((2317, 2344), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['binary'], {}), '(binary)\n', (2336, 2344), False, 'import cv2\n'), ((9372, 9385), 'numpy.mean', 'numpy.mean', (['x'], {}), '(x)\n', (9382, 9385), False, 'import numpy\n'), ((9404, 9417), 'numpy.mean', 'numpy.mean', (['y'], {}), '(y)\n', (9414, 9417), False, 'import numpy\n'), ((474, 489), 'random.random', 'random.random', ([], {}), '()\n', (487, 489), False, 'import random\n'), ((512, 527), 'random.random', 'random.random', ([], {}), '()\n', (525, 527), False, 'import random\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.